/* SPDX-License-Identifier: GPL-2.0-only */

#include <acpi/acpigen.h>
#include <cbmem.h>
#include <console/console.h>
#include <cpu/cpu.h>
#include <device/device.h>
#include <device/pci.h>
#include <device/pci_ids.h>
#include <intelblocks/acpi.h>
#include <intelblocks/cfg.h>
#include <intelblocks/systemagent.h>
#include <smbios.h>
#include <soc/iomap.h>
#include <soc/nvs.h>
#include <soc/pci_devs.h>
#include <soc/systemagent.h>
#include <types.h>
#include "systemagent_def.h"

/* SoC override function */
__weak void soc_systemagent_init(struct device *dev)
{
        /* no-op */
}

__weak void soc_add_fixed_mmio_resources(struct device *dev,
                int *resource_cnt)
{
        /* no-op */
}

__weak void soc_add_configurable_mmio_resources(struct device *dev,
                int *resource_cnt)
{
        /* no-op */
}

__weak int soc_get_uncore_prmmr_base_and_mask(uint64_t *base,
                uint64_t *mask)
{
        /* return failure for this dummy API */
        return -1;
}

__weak unsigned long sa_write_acpi_tables(const struct device *dev,
                unsigned long current,
                struct acpi_rsdp *rsdp)
{
        return current;
}

__weak uint32_t soc_systemagent_max_chan_capacity_mib(u8 capid0_a_ddrsz)
{
        return 32768; /* 32 GiB per channel */
}
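
/*
 * SoCs are expected to override the weak defaults above. As a purely
 * illustrative, hypothetical sketch (not taken from this file), an override
 * of soc_systemagent_max_chan_capacity_mib() might map the CAPID0_A DDRSZ
 * field to a per-channel capacity instead of returning the 32 GiB default:
 *
 *	uint32_t soc_systemagent_max_chan_capacity_mib(u8 capid0_a_ddrsz)
 *	{
 *		switch (capid0_a_ddrsz) {
 *		case 1: return 8192;
 *		case 2: return 4096;
 *		case 3: return 2048;
 *		default: return 65536;
 *		}
 *	}
 */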

static uint8_t sa_get_ecc_type(const uint32_t capid0_a)
{
        return capid0_a & CAPID_ECCDIS ? MEMORY_ARRAY_ECC_NONE : MEMORY_ARRAY_ECC_SINGLE_BIT;
}

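/*
 * The two helpers below rely on !(reg & BIT) evaluating to 0 when the
 * corresponding capability-disable bit is set and to 1 when it is clear,
 * so "!(reg & BIT) + 1" yields either 1 or 2.
 */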
static size_t sa_slots_per_channel(const uint32_t capid0_a)
{
        return !(capid0_a & CAPID_DDPCD) + 1;
}

static size_t sa_number_of_channels(const uint32_t capid0_a)
{
        return !(capid0_a & CAPID_PDCD) + 1;
}

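/*
 * Fill the cbmem memory_info record from CAPID0_A. The ecc_type,
 * max_capacity_mib and number_of_devices values set here are presumably
 * what later ends up in the SMBIOS "Physical Memory Array" (type 16)
 * entry, hence the MEMORY_ARRAY_ECC_* constants from <smbios.h>.
 */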
static void sa_soc_systemagent_init(struct device *dev)
{
        soc_systemagent_init(dev);

        struct memory_info *m = cbmem_find(CBMEM_ID_MEMINFO);
        if (m == NULL)
                return;

        const uint32_t capid0_a = pci_read_config32(dev, CAPID0_A);

        m->ecc_type = sa_get_ecc_type(capid0_a);
        m->max_capacity_mib = soc_systemagent_max_chan_capacity_mib(CAPID_DDRSZ(capid0_a)) *
                              sa_number_of_channels(capid0_a);
        m->number_of_devices = sa_slots_per_channel(capid0_a) *
                               sa_number_of_channels(capid0_a);
}

/*
 * Add all known fixed MMIO ranges that hang off the host bridge/memory
 * controller device.
 */
void sa_add_fixed_mmio_resources(struct device *dev, int *resource_cnt,
                const struct sa_mmio_descriptor *sa_fixed_resources, size_t count)
{
        int i;
        int index = *resource_cnt;

        for (i = 0; i < count; i++) {
                uintptr_t base;
                size_t size;

                size = sa_fixed_resources[i].size;
                base = sa_fixed_resources[i].base;

                printk(BIOS_DEBUG,
                       "SA MMIO resource: %-8s -> base = 0x%08llx, size = 0x%08llx\n",
                       sa_fixed_resources[i].description, sa_fixed_resources[i].base,
                       sa_fixed_resources[i].size);

                mmio_range(dev, index++, base, size);
        }

        *resource_cnt = index;
}
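
/*
 * A minimal usage sketch for the helper above, assuming a hypothetical SoC
 * that only declares the MCHBAR and DMIBAR windows (names and macros here
 * are illustrative, not taken from this file):
 *
 *	static const struct sa_mmio_descriptor soc_fixed_resources[] = {
 *		{ .base = MCH_BASE_ADDRESS, .size = MCH_BASE_SIZE, .description = "MCHBAR" },
 *		{ .base = DMI_BASE_ADDRESS, .size = DMI_BASE_SIZE, .description = "DMIBAR" },
 *	};
 *
 *	void soc_add_fixed_mmio_resources(struct device *dev, int *index)
 *	{
 *		sa_add_fixed_mmio_resources(dev, index, soc_fixed_resources,
 *				ARRAY_SIZE(soc_fixed_resources));
 *	}
 */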

/*
 * DRAM memory map registers
 *
 * TOUUD: This 64-bit register defines the Top of Upper Usable DRAM
 * TOLUD: This 32-bit register defines the Top of Low Usable DRAM
 * BGSM: This register contains the base address of stolen DRAM memory for GTT
 * TSEG: This register contains the base address of TSEG DRAM memory
 */
static const struct sa_mem_map_descriptor sa_memory_map[MAX_MAP_ENTRIES] = {
        { TOUUD, true, "TOUUD" },
        { TOLUD, false, "TOLUD" },
        { BGSM, false, "BGSM" },
        { TSEG, false, "TSEG" },
};

/* Read a DRAM memory map register value through PCI configuration space */
static void sa_read_map_entry(const struct device *dev,
                const struct sa_mem_map_descriptor *entry, uint64_t *result)
{
        uint64_t value = 0;

        if (entry->is_64_bit) {
                value = pci_read_config32(dev, entry->reg + 4);
                value <<= 32;
        }

        value |= pci_read_config32(dev, entry->reg);
        /* All registers are on a 1MiB granularity. */
        value = ALIGN_DOWN(value, 1 * MiB);

        *result = value;
}
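
/*
 * Worked example with hypothetical register values: for the 64-bit TOUUD
 * entry, a high dword of 0x00000008 and a low dword of 0x00100001 combine
 * to 0x800100001; aligning down to 1 MiB strips the low control/lock bits
 * and yields 0x800100000.
 */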

static void sa_get_mem_map(struct device *dev, uint64_t *values)
{
        int i;
        for (i = 0; i < MAX_MAP_ENTRIES; i++)
                sa_read_map_entry(dev, &sa_memory_map[i], &values[i]);
}

/*
 * These are the host memory ranges that should be added:
 * - 0 -> 0xa0000: cacheable
 * - 0xc0000 -> top_of_ram: cacheable
 * - top_of_ram -> TOLUD: not cacheable with standard MTRRs and reserved
 * - 4GiB -> TOUUD: cacheable
 *
 * The default SMRAM space is reserved so that the range doesn't
 * have to be saved during S3 resume. Once marked reserved the OS
 * cannot use the memory. This is a bit of an odd place to reserve
 * the region, but the CPU devices don't have dev_ops->read_resources()
 * called on them.
 *
 * The range 0xa0000 -> 0xc0000 does not have any resources
 * associated with it to handle legacy VGA memory. If this range
 * is not omitted, the MTRR code will set up the area as cacheable,
 * causing VGA access to not work.
 *
 * There is no need to mark the entire top_of_ram to TOLUD range (used
 * for stolen memory such as GFX, ME, PTT, DPR, PRMRR and TSEG) as
 * cacheable for OS usage, since coreboot has already finished MP init
 * with SMM relocation by this point.
 *
 * It should be noted that cacheable entry types need to be added in
 * order. The reason is that the current MTRR code assumes this and
 * falls over itself if it isn't.
 *
 * The resource index starts low and should not meet or exceed
 * PCI_BASE_ADDRESS_0.
 */
static void sa_add_dram_resources(struct device *dev, int *resource_count)
{
        uint64_t sa_map_values[MAX_MAP_ENTRIES];
        uintptr_t top_of_ram;
        int index = *resource_count;

        top_of_ram = (uintptr_t)cbmem_top();

        /* 0 -> 0xa0000 */
        ram_from_to(dev, index++, 0, 0xa0000);

        /* 0xc0000 -> top_of_ram */
        ram_from_to(dev, index++, 0xc0000, top_of_ram);

        sa_get_mem_map(dev, &sa_map_values[0]);

        /*
         * top_of_ram -> TOLUD: This contains TSEG, which needs to be
         * uncacheable for proper operation of the SMI handler.
         */
        mmio_from_to(dev, index++, top_of_ram, sa_map_values[SA_TOLUD_REG]);

        /* 4GiB -> TOUUD */
        upper_ram_end(dev, index++, sa_map_values[SA_TOUUD_REG]);

        /*
         * Reserve everything between A segment and 1MB:
         *
         * 0xa0000 - 0xbffff: legacy VGA
         * 0xc0000 - 0xfffff: RAM
         */
        mmio_from_to(dev, index++, 0xa0000, 0xc0000);
        reserved_ram_from_to(dev, index++, 0xc0000, 1 * MiB);

        *resource_count = index;
}
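
/*
 * For reference, the resource entries added above, in declaration order:
 *
 *	ram_from_to(0, 0xa0000)                RAM below 640 KiB
 *	ram_from_to(0xc0000, top_of_ram)       RAM from 0xc0000 up to cbmem_top()
 *	mmio_from_to(top_of_ram, TOLUD)        stolen memory incl. TSEG, uncacheable
 *	upper_ram_end(TOUUD)                   RAM from 4 GiB up to TOUUD
 *	mmio_from_to(0xa0000, 0xc0000)         legacy VGA hole
 *	reserved_ram_from_to(0xc0000, 1 MiB)   0xc0000 - 0xfffff, marked reserved
 */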

static bool is_imr_enabled(uint32_t imr_base_reg)
{
        return !!(imr_base_reg & (1 << 31));
}

static void imr_resource(struct device *dev, int idx, uint32_t base,
                         uint32_t mask)
{
        uint32_t base_k, size_k;
        /* Bits 28:0 encode the base address bits 38:10, hence the KiB unit. */
        base_k = (base & 0x0fffffff);
        /* Bits 28:0 encode the AND mask used for comparison, in KiB. */
        size_k = ((~mask & 0x0fffffff) + 1);
        /*
         * IMRs sit in lower DRAM. Mark them cacheable, otherwise we run
         * out of MTRRs. Memory reserved by IMRs is not usable by the host,
         * so mark it reserved.
         */
        reserved_ram_range(dev, idx, base_k * KiB, size_k * KiB);
}
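
/*
 * Worked example with hypothetical register values: base = 0x80000400 is
 * reported as enabled (bit 31 set) and decodes to base_k = 0x400, i.e. an
 * IMR starting at 1 MiB; mask = 0xfffffc00 gives size_k = 0x3ff + 1 = 0x400,
 * i.e. a 1 MiB window.
 */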

/*
 * Add IMR ranges that hang off the host bridge/memory controller
 * device in case CONFIG(SA_ENABLE_IMR) is selected by the SoC.
 */
static void sa_add_imr_resources(struct device *dev, int *resource_cnt)
{
        size_t i, imr_offset;
        uint32_t base, mask;
        int index = *resource_cnt;

        for (i = 0; i < MCH_NUM_IMRS; i++) {
                imr_offset = i * MCH_IMR_PITCH;
                base = MCHBAR32(imr_offset + MCH_IMR0_BASE);
                mask = MCHBAR32(imr_offset + MCH_IMR0_MASK);

                if (is_imr_enabled(base))
                        imr_resource(dev, index++, base, mask);
        }

        *resource_cnt = index;
}

static void systemagent_read_resources(struct device *dev)
{
        int index = 0;

        /* Read standard PCI resources. */
        pci_dev_read_resources(dev);

        /* Add all fixed MMIO resources. */
        soc_add_fixed_mmio_resources(dev, &index);

        /* Add all configurable MMIO resources. */
        soc_add_configurable_mmio_resources(dev, &index);

        /* Calculate and add DRAM resources. */
        sa_add_dram_resources(dev, &index);
        if (CONFIG(SA_ENABLE_IMR))
                /* Add the isolated memory ranges (IMRs). */
                sa_add_imr_resources(dev, &index);
}

void enable_power_aware_intr(void)
{
        uint8_t pair;

        /* Enable Power Aware Interrupt Routing */
        pair = MCHBAR8(MCH_PAIR);
        pair &= ~0x7;   /* Clear 2:0 */
        pair |= 0x4;    /* Fixed Priority */
        MCHBAR8(MCH_PAIR) = pair;
}

void sa_lock_pam(void)
{
        const struct device *dev = pcidev_path_on_root(SA_DEVFN_ROOT);
        if (!CONFIG(HAVE_PAM0_REGISTER) || !dev)
                return;

        pci_or_config8(dev, PAM0, PAM_LOCK);
}

void ssdt_set_above_4g_pci(const struct device *dev)
{
        if (dev->path.type != DEVICE_PATH_DOMAIN)
                return;

        uint64_t touud;
        sa_read_map_entry(pcidev_path_on_root(SA_DEVFN_ROOT), &sa_memory_map[SA_TOUUD_REG],
                          &touud);
        const uint64_t len = POWER_OF_2(soc_phys_address_size()) - touud;

        const char *scope = acpi_device_path(dev);
        acpigen_write_scope(scope);
        acpigen_write_name_qword("A4GB", touud);
        acpigen_write_name_qword("A4GS", len);
        acpigen_pop_len();

        printk(BIOS_DEBUG, "PCI space above 4GB MMIO is at 0x%llx, len = 0x%llx\n", touud, len);
}
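
/*
 * Example with hypothetical values: for a 39-bit physical address space and
 * TOUUD = 0x200000000 (8 GiB), the function above publishes A4GB = 0x200000000
 * and A4GS = (1ULL << 39) - 0x200000000 = 0x7e00000000. The A4GB/A4GS objects
 * are presumably consumed by the ACPI _CRS code for the PCI host bridge to
 * describe the 64-bit MMIO window.
 */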

struct device_operations systemagent_ops = {
        .read_resources   = systemagent_read_resources,
        .set_resources    = pci_dev_set_resources,
        .enable_resources = pci_dev_enable_resources,
        .init             = sa_soc_systemagent_init,
        .ops_pci          = &pci_dev_ops_pci,
#if CONFIG(HAVE_ACPI_TABLES)
        .write_acpi_tables = sa_write_acpi_tables,
#endif
};

static const unsigned short systemagent_ids[] = {
        PCI_DID_INTEL_MTL_M_ID,
        PCI_DID_INTEL_MTL_P_ID_1,
        PCI_DID_INTEL_MTL_P_ID_2,
        PCI_DID_INTEL_MTL_P_ID_3,
        PCI_DID_INTEL_MTL_P_ID_4,
        PCI_DID_INTEL_MTL_P_ID_5,
        PCI_DID_INTEL_GLK_NB,
        PCI_DID_INTEL_APL_NB,
        PCI_DID_INTEL_CNL_ID_U,
        PCI_DID_INTEL_CNL_ID_Y,
        PCI_DID_INTEL_WHL_ID_W_2,
        PCI_DID_INTEL_WHL_ID_W_4,
        PCI_DID_INTEL_CFL_ID_U,
        PCI_DID_INTEL_CFL_ID_U_2,
        PCI_DID_INTEL_CFL_ID_H,
        PCI_DID_INTEL_CFL_ID_H_4,
        PCI_DID_INTEL_CFL_ID_H_8,
        PCI_DID_INTEL_CFL_ID_S,
        PCI_DID_INTEL_CFL_ID_S_DT_2,
        PCI_DID_INTEL_CFL_ID_S_DT_4,
        PCI_DID_INTEL_CFL_ID_S_DT_8,
        PCI_DID_INTEL_CFL_ID_S_WS_4,
        PCI_DID_INTEL_CFL_ID_S_WS_6,
        PCI_DID_INTEL_CFL_ID_S_WS_8,
        PCI_DID_INTEL_CFL_ID_S_S_4,
        PCI_DID_INTEL_CFL_ID_S_S_6,
        PCI_DID_INTEL_CFL_ID_S_S_8,
        PCI_DID_INTEL_CML_ULT,
        PCI_DID_INTEL_CML_ULT_2_2,
        PCI_DID_INTEL_CML_ULT_6_2,
        PCI_DID_INTEL_CML_ULX,
        PCI_DID_INTEL_CML_S,
        PCI_DID_INTEL_CML_S_G0G1_P0P1_6_2,
        PCI_DID_INTEL_CML_S_P0P1_8_2,
        PCI_DID_INTEL_CML_S_P0P1_10_2,
        PCI_DID_INTEL_CML_S_G0G1_4,
        PCI_DID_INTEL_CML_S_G0G1_2,
        PCI_DID_INTEL_CML_H,
        PCI_DID_INTEL_CML_H_4_2,
        PCI_DID_INTEL_CML_H_8_2,
        PCI_DID_INTEL_TGL_ID_U_2_2,
        PCI_DID_INTEL_TGL_ID_U_4_2,
        PCI_DID_INTEL_TGL_ID_Y_2_2,
        PCI_DID_INTEL_TGL_ID_Y_4_2,
        PCI_DID_INTEL_TGL_ID_H_6_1,
        PCI_DID_INTEL_TGL_ID_H_8_1,
        PCI_DID_INTEL_EHL_ID_0,
        PCI_DID_INTEL_EHL_ID_1,
        PCI_DID_INTEL_EHL_ID_1A,
        PCI_DID_INTEL_EHL_ID_2,
        PCI_DID_INTEL_EHL_ID_2_1,
        PCI_DID_INTEL_EHL_ID_3,
        PCI_DID_INTEL_EHL_ID_3A,
        PCI_DID_INTEL_EHL_ID_4,
        PCI_DID_INTEL_EHL_ID_5,
        PCI_DID_INTEL_EHL_ID_6,
        PCI_DID_INTEL_EHL_ID_7,
        PCI_DID_INTEL_EHL_ID_8,
        PCI_DID_INTEL_EHL_ID_9,
        PCI_DID_INTEL_EHL_ID_10,
        PCI_DID_INTEL_EHL_ID_11,
        PCI_DID_INTEL_EHL_ID_12,
        PCI_DID_INTEL_EHL_ID_13,
        PCI_DID_INTEL_EHL_ID_14,
        PCI_DID_INTEL_EHL_ID_15,
        PCI_DID_INTEL_JSL_ID_1,
        PCI_DID_INTEL_JSL_ID_2,
        PCI_DID_INTEL_JSL_ID_3,
        PCI_DID_INTEL_JSL_ID_4,
        PCI_DID_INTEL_JSL_ID_5,
        PCI_DID_INTEL_ADL_S_ID_1,
        PCI_DID_INTEL_ADL_S_ID_2,
        PCI_DID_INTEL_ADL_S_ID_3,
        PCI_DID_INTEL_ADL_S_ID_4,
        PCI_DID_INTEL_ADL_S_ID_5,
        PCI_DID_INTEL_ADL_S_ID_6,
        PCI_DID_INTEL_ADL_S_ID_7,
        PCI_DID_INTEL_ADL_S_ID_8,
        PCI_DID_INTEL_ADL_S_ID_9,
        PCI_DID_INTEL_ADL_S_ID_10,
        PCI_DID_INTEL_ADL_S_ID_11,
        PCI_DID_INTEL_ADL_S_ID_12,
        PCI_DID_INTEL_ADL_S_ID_13,
        PCI_DID_INTEL_ADL_S_ID_14,
        PCI_DID_INTEL_ADL_S_ID_15,
        PCI_DID_INTEL_ADL_P_ID_1,
        PCI_DID_INTEL_ADL_P_ID_3,
        PCI_DID_INTEL_ADL_P_ID_4,
        PCI_DID_INTEL_ADL_P_ID_5,
        PCI_DID_INTEL_ADL_P_ID_6,
        PCI_DID_INTEL_ADL_P_ID_7,
        PCI_DID_INTEL_ADL_P_ID_8,
        PCI_DID_INTEL_ADL_P_ID_9,
        PCI_DID_INTEL_ADL_P_ID_10,
        PCI_DID_INTEL_ADL_M_ID_1,
        PCI_DID_INTEL_ADL_M_ID_2,
        PCI_DID_INTEL_ADL_N_ID_1,
        PCI_DID_INTEL_ADL_N_ID_2,
        PCI_DID_INTEL_ADL_N_ID_3,
        PCI_DID_INTEL_ADL_N_ID_4,
        PCI_DID_INTEL_RPL_HX_ID_1,
        PCI_DID_INTEL_RPL_HX_ID_2,
        PCI_DID_INTEL_RPL_HX_ID_3,
        PCI_DID_INTEL_RPL_HX_ID_4,
        PCI_DID_INTEL_RPL_HX_ID_5,
        PCI_DID_INTEL_RPL_HX_ID_6,
        PCI_DID_INTEL_RPL_HX_ID_7,
        PCI_DID_INTEL_RPL_HX_ID_8,
        PCI_DID_INTEL_RPL_S_ID_1,
        PCI_DID_INTEL_RPL_S_ID_2,
        PCI_DID_INTEL_RPL_S_ID_3,
        PCI_DID_INTEL_RPL_S_ID_4,
        PCI_DID_INTEL_RPL_S_ID_5,
        PCI_DID_INTEL_RPL_P_ID_1,
        PCI_DID_INTEL_RPL_P_ID_2,
        PCI_DID_INTEL_RPL_P_ID_3,
        PCI_DID_INTEL_RPL_P_ID_4,
        PCI_DID_INTEL_RPL_P_ID_5,
        0
};

static const struct pci_driver systemagent_driver __pci_driver = {
        .ops     = &systemagent_ops,
        .vendor  = PCI_VID_INTEL,
        .devices = systemagent_ids
};