Angel Pons | 0612b27 | 2020-04-05 15:46:56 +0200 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
Subrata Banik | 01ae11b | 2017-03-04 23:32:41 +0530 | [diff] [blame] | 2 | |
Subrata Banik | 7609c65 | 2017-05-19 14:50:09 +0530 | [diff] [blame] | 3 | #include <cbmem.h> |
Subrata Banik | b6df6b0 | 2020-01-03 15:29:02 +0530 | [diff] [blame] | 4 | #include <console/console.h> |
Subrata Banik | 7609c65 | 2017-05-19 14:50:09 +0530 | [diff] [blame] | 5 | #include <device/device.h> |
| 6 | #include <device/pci.h> |
| 7 | #include <device/pci_ids.h> |
Werner Zeh | d12530c | 2018-12-14 13:09:12 +0100 | [diff] [blame] | 8 | #include <intelblocks/acpi.h> |
Subrata Banik | b6df6b0 | 2020-01-03 15:29:02 +0530 | [diff] [blame] | 9 | #include <intelblocks/cfg.h> |
Subrata Banik | 01ae11b | 2017-03-04 23:32:41 +0530 | [diff] [blame] | 10 | #include <intelblocks/systemagent.h> |
Lijian Zhao | 357e552 | 2019-04-11 13:07:00 -0700 | [diff] [blame] | 11 | #include <smbios.h> |
Subrata Banik | 7609c65 | 2017-05-19 14:50:09 +0530 | [diff] [blame] | 12 | #include <soc/iomap.h> |
Subrata Banik | 01ae11b | 2017-03-04 23:32:41 +0530 | [diff] [blame] | 13 | #include <soc/pci_devs.h> |
Subrata Banik | 7609c65 | 2017-05-19 14:50:09 +0530 | [diff] [blame] | 14 | #include <soc/systemagent.h> |
| 15 | #include "systemagent_def.h" |
Subrata Banik | 01ae11b | 2017-03-04 23:32:41 +0530 | [diff] [blame] | 16 | |
Subrata Banik | 7609c65 | 2017-05-19 14:50:09 +0530 | [diff] [blame] | 17 | /* SoC override function */ |
Aaron Durbin | 6403167 | 2018-04-21 14:45:32 -0600 | [diff] [blame] | 18 | __weak void soc_systemagent_init(struct device *dev) |
Subrata Banik | 01ae11b | 2017-03-04 23:32:41 +0530 | [diff] [blame] | 19 | { |
Subrata Banik | 7609c65 | 2017-05-19 14:50:09 +0530 | [diff] [blame] | 20 | /* no-op */ |
Subrata Banik | 01ae11b | 2017-03-04 23:32:41 +0530 | [diff] [blame] | 21 | } |
| 22 | |
Aaron Durbin | 6403167 | 2018-04-21 14:45:32 -0600 | [diff] [blame] | 23 | __weak void soc_add_fixed_mmio_resources(struct device *dev, |
Subrata Banik | 7609c65 | 2017-05-19 14:50:09 +0530 | [diff] [blame] | 24 | int *resource_cnt) |
| 25 | { |
| 26 | /* no-op */ |
| 27 | } |
| 28 | |
Aaron Durbin | 6403167 | 2018-04-21 14:45:32 -0600 | [diff] [blame] | 29 | __weak int soc_get_uncore_prmmr_base_and_mask(uint64_t *base, |
Pratik Prajapati | 82cdfa7 | 2017-08-28 14:48:55 -0700 | [diff] [blame] | 30 | uint64_t *mask) |
| 31 | { |
| 32 | /* return failure for this dummy API */ |
| 33 | return -1; |
| 34 | } |
| 35 | |
Furquan Shaikh | 0f007d8 | 2020-04-24 06:41:18 -0700 | [diff] [blame] | 36 | __weak unsigned long sa_write_acpi_tables(const struct device *dev, |
Werner Zeh | d12530c | 2018-12-14 13:09:12 +0100 | [diff] [blame] | 37 | unsigned long current, |
| 38 | struct acpi_rsdp *rsdp) |
| 39 | { |
| 40 | return current; |
| 41 | } |
| 42 | |
Subrata Banik | 7609c65 | 2017-05-19 14:50:09 +0530 | [diff] [blame] | 43 | /* |
| 44 | * Add all known fixed MMIO ranges that hang off the host bridge/memory |
| 45 | * controller device. |
| 46 | */ |
| 47 | void sa_add_fixed_mmio_resources(struct device *dev, int *resource_cnt, |
| 48 | const struct sa_mmio_descriptor *sa_fixed_resources, size_t count) |
| 49 | { |
| 50 | int i; |
| 51 | int index = *resource_cnt; |
| 52 | |
| 53 | for (i = 0; i < count; i++) { |
| 54 | uintptr_t base; |
| 55 | size_t size; |
| 56 | |
| 57 | size = sa_fixed_resources[i].size; |
| 58 | base = sa_fixed_resources[i].base; |
| 59 | |
| 60 | mmio_resource(dev, index++, base / KiB, size / KiB); |
| 61 | } |
| 62 | |
| 63 | *resource_cnt = index; |
| 64 | } |
| 65 | |
| 66 | /* |
| 67 | * DRAM memory mapped register |
| 68 | * |
| 69 | * TOUUD: This 64 bit register defines the Top of Upper Usable DRAM |
| 70 | * TOLUD: This 32 bit register defines the Top of Low Usable DRAM |
| 71 | * BGSM: This register contains the base address of stolen DRAM memory for GTT |
| 72 | * TSEG: This register contains the base address of TSEG DRAM memory |
| 73 | */ |
| 74 | static const struct sa_mem_map_descriptor sa_memory_map[MAX_MAP_ENTRIES] = { |
| 75 | { TOUUD, true, "TOUUD" }, |
| 76 | { TOLUD, false, "TOLUD" }, |
| 77 | { BGSM, false, "BGSM" }, |
| 78 | { TSEG, false, "TSEG" }, |
| 79 | }; |
| 80 | |
| 81 | /* Read DRAM memory map register value through PCI configuration space */ |
Elyes HAOUAS | 4a13126 | 2018-09-16 17:35:48 +0200 | [diff] [blame] | 82 | static void sa_read_map_entry(struct device *dev, |
Subrata Banik | 7609c65 | 2017-05-19 14:50:09 +0530 | [diff] [blame] | 83 | const struct sa_mem_map_descriptor *entry, uint64_t *result) |
| 84 | { |
| 85 | uint64_t value = 0; |
| 86 | |
| 87 | if (entry->is_64_bit) { |
| 88 | value = pci_read_config32(dev, entry->reg + 4); |
| 89 | value <<= 32; |
| 90 | } |
| 91 | |
| 92 | value |= pci_read_config32(dev, entry->reg); |
| 93 | /* All registers are on a 1MiB granularity. */ |
| 94 | value = ALIGN_DOWN(value, 1 * MiB); |
| 95 | |
| 96 | *result = value; |
| 97 | } |
| 98 | |
Furquan Shaikh | 1085fee | 2020-05-07 16:04:16 -0700 | [diff] [blame^] | 99 | /* |
| 100 | * This function will get above 4GB mmio enable config specific to soc. |
| 101 | * |
| 102 | * Return values: |
| 103 | * 0 = Above 4GB memory is not enable |
| 104 | * 1 = Above 4GB memory is enable |
| 105 | */ |
| 106 | static int get_enable_above_4GB_mmio(void) |
| 107 | { |
| 108 | const struct soc_intel_common_config *common_config; |
| 109 | common_config = chip_get_common_soc_structure(); |
| 110 | |
| 111 | return common_config->enable_above_4GB_mmio; |
| 112 | } |
| 113 | |
| 114 | /* Fill MMIO resource above 4GB into GNVS */ |
| 115 | void sa_fill_gnvs(global_nvs_t *gnvs) |
| 116 | { |
| 117 | if (!get_enable_above_4GB_mmio()) |
| 118 | return; |
| 119 | |
| 120 | struct device *sa_dev = pcidev_path_on_root(SA_DEVFN_ROOT); |
| 121 | |
| 122 | gnvs->e4gm = 1; |
| 123 | sa_read_map_entry(sa_dev, &sa_memory_map[SA_TOUUD_REG], &gnvs->a4gb); |
| 124 | gnvs->a4gs = ABOVE_4GB_MEM_BASE_SIZE; |
| 125 | printk(BIOS_DEBUG, "PCI space above 4GB MMIO is from 0x%llx to len = 0x%llx\n", |
| 126 | gnvs->a4gb, gnvs->a4gs); |
| 127 | } |
| 128 | |
| 129 | |
Subrata Banik | 7609c65 | 2017-05-19 14:50:09 +0530 | [diff] [blame] | 130 | static void sa_get_mem_map(struct device *dev, uint64_t *values) |
| 131 | { |
| 132 | int i; |
| 133 | for (i = 0; i < MAX_MAP_ENTRIES; i++) |
| 134 | sa_read_map_entry(dev, &sa_memory_map[i], &values[i]); |
| 135 | } |
| 136 | |
| 137 | /* |
Subrata Banik | 7609c65 | 2017-05-19 14:50:09 +0530 | [diff] [blame] | 138 | * These are the host memory ranges that should be added: |
| 139 | * - 0 -> 0xa0000: cacheable |
| 140 | * - 0xc0000 -> top_of_ram : cacheable |
Michael Niewöhner | 40f893e | 2019-10-21 18:58:04 +0200 | [diff] [blame] | 141 | * - top_of_ram -> BGSM: cacheable with standard MTRRs and reserved |
Subrata Banik | 7609c65 | 2017-05-19 14:50:09 +0530 | [diff] [blame] | 142 | * - BGSM -> TOLUD: not cacheable with standard MTRRs and reserved |
| 143 | * - 4GiB -> TOUUD: cacheable |
| 144 | * |
| 145 | * The default SMRAM space is reserved so that the range doesn't |
| 146 | * have to be saved during S3 Resume. Once marked reserved the OS |
| 147 | * cannot use the memory. This is a bit of an odd place to reserve |
| 148 | * the region, but the CPU devices don't have dev_ops->read_resources() |
| 149 | * called on them. |
| 150 | * |
| 151 | * The range 0xa0000 -> 0xc0000 does not have any resources |
| 152 | * associated with it to handle legacy VGA memory. If this range |
| 153 | * is not omitted the mtrr code will setup the area as cacheable |
| 154 | * causing VGA access to not work. |
| 155 | * |
| 156 | * The TSEG region is mapped as cacheable so that one can perform |
| 157 | * SMRAM relocation faster. Once the SMRR is enabled the SMRR takes |
| 158 | * precedence over the existing MTRRs covering this region. |
| 159 | * |
| 160 | * It should be noted that cacheable entry types need to be added in |
| 161 | * order. The reason is that the current MTRR code assumes this and |
| 162 | * falls over itself if it isn't. |
| 163 | * |
| 164 | * The resource index starts low and should not meet or exceed |
| 165 | * PCI_BASE_ADDRESS_0. |
| 166 | */ |
| 167 | static void sa_add_dram_resources(struct device *dev, int *resource_count) |
| 168 | { |
| 169 | uintptr_t base_k, touud_k; |
Michael Niewöhner | 40f893e | 2019-10-21 18:58:04 +0200 | [diff] [blame] | 170 | size_t size_k; |
Subrata Banik | 7609c65 | 2017-05-19 14:50:09 +0530 | [diff] [blame] | 171 | uint64_t sa_map_values[MAX_MAP_ENTRIES]; |
| 172 | uintptr_t top_of_ram; |
| 173 | int index = *resource_count; |
| 174 | |
Subrata Banik | 7609c65 | 2017-05-19 14:50:09 +0530 | [diff] [blame] | 175 | top_of_ram = (uintptr_t)cbmem_top(); |
| 176 | |
| 177 | /* 0 - > 0xa0000 */ |
| 178 | base_k = 0; |
| 179 | size_k = (0xa0000 / KiB) - base_k; |
| 180 | ram_resource(dev, index++, base_k, size_k); |
| 181 | |
| 182 | /* 0xc0000 -> top_of_ram */ |
| 183 | base_k = 0xc0000 / KiB; |
| 184 | size_k = (top_of_ram / KiB) - base_k; |
| 185 | ram_resource(dev, index++, base_k, size_k); |
| 186 | |
| 187 | sa_get_mem_map(dev, &sa_map_values[0]); |
| 188 | |
Michael Niewöhner | 40f893e | 2019-10-21 18:58:04 +0200 | [diff] [blame] | 189 | /* top_of_ram -> BGSM */ |
Subrata Banik | 7609c65 | 2017-05-19 14:50:09 +0530 | [diff] [blame] | 190 | base_k = top_of_ram; |
Subrata Banik | 7609c65 | 2017-05-19 14:50:09 +0530 | [diff] [blame] | 191 | size_k = sa_map_values[SA_BGSM_REG] - base_k; |
| 192 | reserved_ram_resource(dev, index++, base_k / KiB, size_k / KiB); |
| 193 | |
| 194 | /* BGSM -> TOLUD */ |
| 195 | base_k = sa_map_values[SA_BGSM_REG]; |
| 196 | size_k = sa_map_values[SA_TOLUD_REG] - base_k; |
| 197 | mmio_resource(dev, index++, base_k / KiB, size_k / KiB); |
| 198 | |
| 199 | /* 4GiB -> TOUUD */ |
| 200 | base_k = 4 * (GiB / KiB); /* 4GiB */ |
| 201 | touud_k = sa_map_values[SA_TOUUD_REG] / KiB; |
| 202 | size_k = touud_k - base_k; |
| 203 | if (touud_k > base_k) |
| 204 | ram_resource(dev, index++, base_k, size_k); |
| 205 | |
| 206 | /* |
| 207 | * Reserve everything between A segment and 1MB: |
| 208 | * |
| 209 | * 0xa0000 - 0xbffff: legacy VGA |
| 210 | * 0xc0000 - 0xfffff: RAM |
| 211 | */ |
| 212 | mmio_resource(dev, index++, 0xa0000 / KiB, (0xc0000 - 0xa0000) / KiB); |
| 213 | reserved_ram_resource(dev, index++, 0xc0000 / KiB, |
| 214 | (1*MiB - 0xc0000) / KiB); |
| 215 | |
| 216 | *resource_count = index; |
| 217 | } |
| 218 | |
/*
 * An IMR is active when bit 31 (enable) of its base register is set.
 *
 * Fix: `1 << 31` left-shifts into the sign bit of a signed int, which
 * is undefined behavior (CERT INT34-C); use an unsigned constant.
 */
static bool is_imr_enabled(uint32_t imr_base_reg)
{
	return !!(imr_base_reg & (1U << 31));
}
| 223 | |
Elyes HAOUAS | 4a13126 | 2018-09-16 17:35:48 +0200 | [diff] [blame] | 224 | static void imr_resource(struct device *dev, int idx, uint32_t base, |
| 225 | uint32_t mask) |
Subrata Banik | 7609c65 | 2017-05-19 14:50:09 +0530 | [diff] [blame] | 226 | { |
| 227 | uint32_t base_k, size_k; |
| 228 | /* Bits 28:0 encode the base address bits 38:10, hence the KiB unit. */ |
| 229 | base_k = (base & 0x0fffffff); |
| 230 | /* Bits 28:0 encode the AND mask used for comparison, in KiB. */ |
| 231 | size_k = ((~mask & 0x0fffffff) + 1); |
| 232 | /* |
| 233 | * IMRs sit in lower DRAM. Mark them cacheable, otherwise we run |
| 234 | * out of MTRRs. Memory reserved by IMRs is not usable for host |
| 235 | * so mark it reserved. |
| 236 | */ |
| 237 | reserved_ram_resource(dev, idx, base_k, size_k); |
| 238 | } |
| 239 | |
| 240 | /* |
| 241 | * Add IMR ranges that hang off the host bridge/memory |
| 242 | * controller device in case CONFIG_SA_ENABLE_IMR is selected by SoC. |
| 243 | */ |
| 244 | static void sa_add_imr_resources(struct device *dev, int *resource_cnt) |
| 245 | { |
| 246 | size_t i, imr_offset; |
| 247 | uint32_t base, mask; |
| 248 | int index = *resource_cnt; |
| 249 | |
| 250 | for (i = 0; i < MCH_NUM_IMRS; i++) { |
| 251 | imr_offset = i * MCH_IMR_PITCH; |
| 252 | base = MCHBAR32(imr_offset + MCH_IMR0_BASE); |
| 253 | mask = MCHBAR32(imr_offset + MCH_IMR0_MASK); |
| 254 | |
| 255 | if (is_imr_enabled(base)) |
| 256 | imr_resource(dev, index++, base, mask); |
| 257 | } |
| 258 | |
| 259 | *resource_cnt = index; |
| 260 | } |
| 261 | |
| 262 | static void systemagent_read_resources(struct device *dev) |
| 263 | { |
| 264 | int index = 0; |
| 265 | |
| 266 | /* Read standard PCI resources. */ |
| 267 | pci_dev_read_resources(dev); |
| 268 | |
| 269 | /* Add all fixed MMIO resources. */ |
| 270 | soc_add_fixed_mmio_resources(dev, &index); |
| 271 | /* Calculate and add DRAM resources. */ |
| 272 | sa_add_dram_resources(dev, &index); |
Julius Werner | cd49cce | 2019-03-05 16:53:33 -0800 | [diff] [blame] | 273 | if (CONFIG(SA_ENABLE_IMR)) |
Subrata Banik | 7609c65 | 2017-05-19 14:50:09 +0530 | [diff] [blame] | 274 | /* Add the isolated memory ranges (IMRs). */ |
| 275 | sa_add_imr_resources(dev, &index); |
| 276 | } |
| 277 | |
Lijian Zhao | 357e552 | 2019-04-11 13:07:00 -0700 | [diff] [blame] | 278 | #if CONFIG(GENERATE_SMBIOS_TABLES) |
| 279 | static int sa_smbios_write_type_16(struct device *dev, int *handle, |
| 280 | unsigned long *current) |
| 281 | { |
| 282 | struct smbios_type16 *t = (struct smbios_type16 *)*current; |
| 283 | int len = sizeof(struct smbios_type16); |
| 284 | |
| 285 | struct memory_info *meminfo; |
| 286 | meminfo = cbmem_find(CBMEM_ID_MEMINFO); |
| 287 | if (meminfo == NULL) |
| 288 | return 0; /* can't find mem info in cbmem */ |
| 289 | |
| 290 | memset(t, 0, sizeof(struct smbios_type16)); |
| 291 | t->type = SMBIOS_PHYS_MEMORY_ARRAY; |
| 292 | t->handle = *handle; |
| 293 | t->length = len - 2; |
| 294 | t->location = MEMORY_ARRAY_LOCATION_SYSTEM_BOARD; |
| 295 | t->use = MEMORY_ARRAY_USE_SYSTEM; |
| 296 | /* TBD, meminfo hob have information about ECC */ |
| 297 | t->memory_error_correction = MEMORY_ARRAY_ECC_NONE; |
| 298 | /* no error information handle available */ |
| 299 | t->memory_error_information_handle = 0xFFFE; |
| 300 | t->maximum_capacity = 32 * (GiB / KiB); /* 32GB as default */ |
| 301 | t->number_of_memory_devices = meminfo->dimm_cnt; |
| 302 | |
| 303 | *current += len; |
| 304 | *handle += 1; |
| 305 | return len; |
| 306 | } |
| 307 | #endif |
| 308 | |
Subrata Banik | 7609c65 | 2017-05-19 14:50:09 +0530 | [diff] [blame] | 309 | void enable_power_aware_intr(void) |
| 310 | { |
| 311 | uint8_t pair; |
| 312 | |
| 313 | /* Enable Power Aware Interrupt Routing */ |
| 314 | pair = MCHBAR8(MCH_PAIR); |
| 315 | pair &= ~0x7; /* Clear 2:0 */ |
| 316 | pair |= 0x4; /* Fixed Priority */ |
| 317 | MCHBAR8(MCH_PAIR) = pair; |
| 318 | } |
| 319 | |
/* Device operations for the system agent (host bridge) PCI device. */
static struct device_operations systemagent_ops = {
	.read_resources = systemagent_read_resources,
	.set_resources = pci_dev_set_resources,
	.enable_resources = pci_dev_enable_resources,
	.init = soc_systemagent_init,	/* weak, SoC-overridable hook */
	.ops_pci = &pci_dev_ops_pci,
#if CONFIG(HAVE_ACPI_TABLES)
	.write_acpi_tables = sa_write_acpi_tables,	/* weak default above */
#endif
#if CONFIG(GENERATE_SMBIOS_TABLES)
	.get_smbios_data = sa_smbios_write_type_16,
#endif
};
| 333 | |
/*
 * PCI device IDs of all host bridges this driver binds to,
 * zero-terminated. Grouped by SoC family.
 */
static const unsigned short systemagent_ids[] = {
	/* Apollo Lake / Gemini Lake */
	PCI_DEVICE_ID_INTEL_GLK_NB,
	PCI_DEVICE_ID_INTEL_APL_NB,
	/* Cannon Lake */
	PCI_DEVICE_ID_INTEL_CNL_ID_U,
	PCI_DEVICE_ID_INTEL_CNL_ID_Y,
	/* Skylake */
	PCI_DEVICE_ID_INTEL_SKL_ID_U,
	PCI_DEVICE_ID_INTEL_SKL_ID_Y,
	PCI_DEVICE_ID_INTEL_SKL_ID_ULX,
	PCI_DEVICE_ID_INTEL_SKL_ID_H_4,
	PCI_DEVICE_ID_INTEL_SKL_ID_H_2,
	PCI_DEVICE_ID_INTEL_SKL_ID_S_2,
	PCI_DEVICE_ID_INTEL_SKL_ID_S_4,
	/* Whiskey Lake */
	PCI_DEVICE_ID_INTEL_WHL_ID_W_2,
	PCI_DEVICE_ID_INTEL_WHL_ID_W_4,
	/* Kaby Lake */
	PCI_DEVICE_ID_INTEL_KBL_ID_S,
	PCI_DEVICE_ID_INTEL_SKL_ID_H_EM,
	PCI_DEVICE_ID_INTEL_KBL_ID_U,
	PCI_DEVICE_ID_INTEL_KBL_ID_Y,
	PCI_DEVICE_ID_INTEL_KBL_ID_H,
	PCI_DEVICE_ID_INTEL_KBL_U_R,
	PCI_DEVICE_ID_INTEL_KBL_ID_DT,
	PCI_DEVICE_ID_INTEL_KBL_ID_DT_2,
	/* Coffee Lake */
	PCI_DEVICE_ID_INTEL_CFL_ID_U,
	PCI_DEVICE_ID_INTEL_CFL_ID_U_2,
	PCI_DEVICE_ID_INTEL_CFL_ID_H,
	PCI_DEVICE_ID_INTEL_CFL_ID_H_4,
	PCI_DEVICE_ID_INTEL_CFL_ID_H_8,
	PCI_DEVICE_ID_INTEL_CFL_ID_S,
	PCI_DEVICE_ID_INTEL_CFL_ID_S_DT_2,
	PCI_DEVICE_ID_INTEL_CFL_ID_S_DT_4,
	PCI_DEVICE_ID_INTEL_CFL_ID_S_DT_8,
	PCI_DEVICE_ID_INTEL_CFL_ID_S_WS_4,
	PCI_DEVICE_ID_INTEL_CFL_ID_S_WS_6,
	PCI_DEVICE_ID_INTEL_CFL_ID_S_WS_8,
	PCI_DEVICE_ID_INTEL_CFL_ID_S_S_4,
	PCI_DEVICE_ID_INTEL_CFL_ID_S_S_6,
	PCI_DEVICE_ID_INTEL_CFL_ID_S_S_8,
	/* Ice Lake */
	PCI_DEVICE_ID_INTEL_ICL_ID_U,
	PCI_DEVICE_ID_INTEL_ICL_ID_U_2_2,
	PCI_DEVICE_ID_INTEL_ICL_ID_Y,
	PCI_DEVICE_ID_INTEL_ICL_ID_Y_2,
	/* Comet Lake */
	PCI_DEVICE_ID_INTEL_CML_ULT,
	PCI_DEVICE_ID_INTEL_CML_ULT_2_2,
	PCI_DEVICE_ID_INTEL_CML_ULT_6_2,
	PCI_DEVICE_ID_INTEL_CML_ULX,
	PCI_DEVICE_ID_INTEL_CML_S,
	PCI_DEVICE_ID_INTEL_CML_S_G0G1_P0P1_6_2,
	PCI_DEVICE_ID_INTEL_CML_S_P0P1_8_2,
	PCI_DEVICE_ID_INTEL_CML_S_P0P1_10_2,
	PCI_DEVICE_ID_INTEL_CML_S_G0G1_4,
	PCI_DEVICE_ID_INTEL_CML_S_G0G1_2,
	PCI_DEVICE_ID_INTEL_CML_H,
	PCI_DEVICE_ID_INTEL_CML_H_4_2,
	PCI_DEVICE_ID_INTEL_CML_H_8_2,
	/* Tiger Lake */
	PCI_DEVICE_ID_INTEL_TGL_ID_U,
	PCI_DEVICE_ID_INTEL_TGL_ID_U_1,
	PCI_DEVICE_ID_INTEL_TGL_ID_U_2_2,
	PCI_DEVICE_ID_INTEL_TGL_ID_Y,
	/* Jasper Lake / Elkhart Lake */
	PCI_DEVICE_ID_INTEL_JSL_EHL,
	PCI_DEVICE_ID_INTEL_EHL_ID_1,
	PCI_DEVICE_ID_INTEL_JSL_ID_1,
	PCI_DEVICE_ID_INTEL_JSL_ID_2,
	PCI_DEVICE_ID_INTEL_JSL_ID_3,
	PCI_DEVICE_ID_INTEL_JSL_ID_4,
	0
};
| 400 | |
/* Bind systemagent_ops to every supported Intel host bridge ID. */
static const struct pci_driver systemagent_driver __pci_driver = {
	.ops = &systemagent_ops,
	.vendor = PCI_VENDOR_ID_INTEL,
	.devices = systemagent_ids
};