blob: e6bbfc7d6394fda5f58ca1facef15394164d52e9 [file] [log] [blame]
Angel Pons0612b272020-04-05 15:46:56 +02001/* SPDX-License-Identifier: GPL-2.0-only */
Subrata Banik01ae11b2017-03-04 23:32:41 +05302
Subrata Banik7609c652017-05-19 14:50:09 +05303#include <cbmem.h>
Subrata Banikb6df6b02020-01-03 15:29:02 +05304#include <console/console.h>
Furquan Shaikhcc35f722020-05-12 16:25:31 -07005#include <cpu/cpu.h>
Subrata Banik7609c652017-05-19 14:50:09 +05306#include <device/device.h>
7#include <device/pci.h>
8#include <device/pci_ids.h>
Werner Zehd12530c2018-12-14 13:09:12 +01009#include <intelblocks/acpi.h>
Subrata Banikb6df6b02020-01-03 15:29:02 +053010#include <intelblocks/cfg.h>
Subrata Banik01ae11b2017-03-04 23:32:41 +053011#include <intelblocks/systemagent.h>
Lijian Zhao357e5522019-04-11 13:07:00 -070012#include <smbios.h>
Subrata Banik7609c652017-05-19 14:50:09 +053013#include <soc/iomap.h>
Subrata Banik01ae11b2017-03-04 23:32:41 +053014#include <soc/pci_devs.h>
Subrata Banik7609c652017-05-19 14:50:09 +053015#include <soc/systemagent.h>
16#include "systemagent_def.h"
Subrata Banik01ae11b2017-03-04 23:32:41 +053017
/*
 * SoC override function: weak default for SoC-specific system agent
 * initialization. An SoC may provide a strong definition to perform
 * extra setup during device init.
 */
__weak void soc_systemagent_init(struct device *dev)
{
	/* no-op */
}
23
/*
 * Weak default: an SoC may override this to add its own fixed MMIO
 * resources to dev, advancing *resource_cnt by the number of
 * resources it adds.
 */
__weak void soc_add_fixed_mmio_resources(struct device *dev,
		int *resource_cnt)
{
	/* no-op */
}
29
/*
 * Weak default for querying the uncore PRMRR base and mask.
 * Always fails (-1) here; SoCs that support the feature provide a
 * strong implementation filling *base and *mask.
 */
__weak int soc_get_uncore_prmmr_base_and_mask(uint64_t *base,
		uint64_t *mask)
{
	/* return failure for this dummy API */
	return -1;
}
36
/*
 * Weak default: no system-agent-specific ACPI tables are generated;
 * the current table pointer is returned unchanged.
 */
__weak unsigned long sa_write_acpi_tables(const struct device *dev,
		unsigned long current,
		struct acpi_rsdp *rsdp)
{
	return current;
}
43
/*
 * Weak default for the maximum capacity of one memory channel in MiB.
 * The capid0_a_ddrsz fuse value is ignored by this default; SoCs with
 * different limits override it.
 */
__weak uint32_t soc_systemagent_max_chan_capacity_mib(u8 capid0_a_ddrsz)
{
	return 32768; /* 32 GiB per channel */
}
48
Subrata Banik7609c652017-05-19 14:50:09 +053049/*
50 * Add all known fixed MMIO ranges that hang off the host bridge/memory
51 * controller device.
52 */
53void sa_add_fixed_mmio_resources(struct device *dev, int *resource_cnt,
54 const struct sa_mmio_descriptor *sa_fixed_resources, size_t count)
55{
56 int i;
57 int index = *resource_cnt;
58
59 for (i = 0; i < count; i++) {
60 uintptr_t base;
61 size_t size;
62
63 size = sa_fixed_resources[i].size;
64 base = sa_fixed_resources[i].base;
65
66 mmio_resource(dev, index++, base / KiB, size / KiB);
67 }
68
69 *resource_cnt = index;
70}
71
/*
 * DRAM memory mapped register
 *
 * TOUUD: This 64 bit register defines the Top of Upper Usable DRAM
 * TOLUD: This 32 bit register defines the Top of Low Usable DRAM
 * BGSM: This register contains the base address of stolen DRAM memory for GTT
 * TSEG: This register contains the base address of TSEG DRAM memory
 */
static const struct sa_mem_map_descriptor sa_memory_map[MAX_MAP_ENTRIES] = {
	/* Entry order must match the SA_*_REG index constants used below. */
	{ TOUUD, true, "TOUUD" },	/* only 64-bit entry in the table */
	{ TOLUD, false, "TOLUD" },
	{ BGSM, false, "BGSM" },
	{ TSEG, false, "TSEG" },
};
86
87/* Read DRAM memory map register value through PCI configuration space */
Elyes HAOUAS4a131262018-09-16 17:35:48 +020088static void sa_read_map_entry(struct device *dev,
Subrata Banik7609c652017-05-19 14:50:09 +053089 const struct sa_mem_map_descriptor *entry, uint64_t *result)
90{
91 uint64_t value = 0;
92
93 if (entry->is_64_bit) {
94 value = pci_read_config32(dev, entry->reg + 4);
95 value <<= 32;
96 }
97
98 value |= pci_read_config32(dev, entry->reg);
99 /* All registers are on a 1MiB granularity. */
100 value = ALIGN_DOWN(value, 1 * MiB);
101
102 *result = value;
103}
104
/* Fill MMIO resource above 4GB into GNVS */
void sa_fill_gnvs(struct global_nvs *gnvs)
{
	struct device *sa_dev = pcidev_path_on_root(SA_DEVFN_ROOT);

	/* Base of the >4GiB window: TOUUD (top of upper usable DRAM). */
	sa_read_map_entry(sa_dev, &sa_memory_map[SA_TOUUD_REG], &gnvs->a4gb);
	/* Length: all physical address space remaining above TOUUD. */
	gnvs->a4gs = POWER_OF_2(cpu_phys_address_size()) - gnvs->a4gb;
	printk(BIOS_DEBUG, "PCI space above 4GB MMIO is at 0x%llx, len = 0x%llx\n",
		gnvs->a4gb, gnvs->a4gs);
}
115
116
Subrata Banik7609c652017-05-19 14:50:09 +0530117static void sa_get_mem_map(struct device *dev, uint64_t *values)
118{
119 int i;
120 for (i = 0; i < MAX_MAP_ENTRIES; i++)
121 sa_read_map_entry(dev, &sa_memory_map[i], &values[i]);
122}
123
/*
 * These are the host memory ranges that should be added:
 * - 0 -> 0xa0000: cacheable
 * - 0xc0000 -> top_of_ram : cacheable
 * - top_of_ram -> BGSM: cacheable with standard MTRRs and reserved
 * - BGSM -> TOLUD: not cacheable with standard MTRRs and reserved
 * - 4GiB -> TOUUD: cacheable
 *
 * The default SMRAM space is reserved so that the range doesn't
 * have to be saved during S3 Resume. Once marked reserved the OS
 * cannot use the memory. This is a bit of an odd place to reserve
 * the region, but the CPU devices don't have dev_ops->read_resources()
 * called on them.
 *
 * The range 0xa0000 -> 0xc0000 does not have any resources
 * associated with it to handle legacy VGA memory. If this range
 * is not omitted the mtrr code will setup the area as cacheable
 * causing VGA access to not work.
 *
 * The TSEG region is mapped as cacheable so that one can perform
 * SMRAM relocation faster. Once the SMRR is enabled the SMRR takes
 * precedence over the existing MTRRs covering this region.
 *
 * It should be noted that cacheable entry types need to be added in
 * order. The reason is that the current MTRR code assumes this and
 * falls over itself if it isn't.
 *
 * The resource index starts low and should not meet or exceed
 * PCI_BASE_ADDRESS_0.
 */
static void sa_add_dram_resources(struct device *dev, int *resource_count)
{
	/*
	 * NOTE(review): base_k/size_k hold KiB for the ram_resource()
	 * calls but raw byte values in the BGSM/TOLUD sections (where
	 * they are divided by KiB at the call site). The math is
	 * consistent, only the _k suffix is misleading there.
	 */
	uintptr_t base_k, touud_k;
	size_t size_k;
	uint64_t sa_map_values[MAX_MAP_ENTRIES];
	uintptr_t top_of_ram;
	int index = *resource_count;

	top_of_ram = (uintptr_t)cbmem_top();

	/* 0 - > 0xa0000 */
	base_k = 0;
	size_k = (0xa0000 / KiB) - base_k;
	ram_resource(dev, index++, base_k, size_k);

	/* 0xc0000 -> top_of_ram */
	base_k = 0xc0000 / KiB;
	size_k = (top_of_ram / KiB) - base_k;
	ram_resource(dev, index++, base_k, size_k);

	sa_get_mem_map(dev, &sa_map_values[0]);

	/* top_of_ram -> BGSM (byte values, converted to KiB below) */
	base_k = top_of_ram;
	size_k = sa_map_values[SA_BGSM_REG] - base_k;
	reserved_ram_resource(dev, index++, base_k / KiB, size_k / KiB);

	/* BGSM -> TOLUD (byte values, converted to KiB below) */
	base_k = sa_map_values[SA_BGSM_REG];
	size_k = sa_map_values[SA_TOLUD_REG] - base_k;
	mmio_resource(dev, index++, base_k / KiB, size_k / KiB);

	/* 4GiB -> TOUUD; skipped when TOUUD does not exceed 4GiB */
	base_k = 4 * (GiB / KiB); /* 4GiB */
	touud_k = sa_map_values[SA_TOUUD_REG] / KiB;
	size_k = touud_k - base_k;
	if (touud_k > base_k)
		ram_resource(dev, index++, base_k, size_k);

	/*
	 * Reserve everything between A segment and 1MB:
	 *
	 * 0xa0000 - 0xbffff: legacy VGA
	 * 0xc0000 - 0xfffff: RAM
	 */
	mmio_resource(dev, index++, 0xa0000 / KiB, (0xc0000 - 0xa0000) / KiB);
	reserved_ram_resource(dev, index++, 0xc0000 / KiB,
			(1*MiB - 0xc0000) / KiB);

	*resource_count = index;
}
205
/*
 * An IMR is enabled when bit 31 of its BASE register is set.
 *
 * Fix: use an unsigned constant. "1 << 31" left-shifts into the sign
 * bit of a signed int, which is undefined behavior in C.
 */
static bool is_imr_enabled(uint32_t imr_base_reg)
{
	return !!(imr_base_reg & (1U << 31));
}
210
/* Convert one IMR base/mask register pair into a reserved resource. */
static void imr_resource(struct device *dev, int idx, uint32_t base,
		uint32_t mask)
{
	uint32_t base_k, size_k;
	/* Bits 28:0 encode the base address bits 38:10, hence the KiB unit. */
	/*
	 * NOTE(review): 0x0fffffff actually masks bits 27:0, while the
	 * comment above speaks of bits 28:0 (0x1fffffff) — confirm the
	 * field width against the MCH IMR register documentation.
	 */
	base_k = (base & 0x0fffffff);
	/* Bits 28:0 encode the AND mask used for comparison, in KiB. */
	size_k = ((~mask & 0x0fffffff) + 1);
	/*
	 * IMRs sit in lower DRAM. Mark them cacheable, otherwise we run
	 * out of MTRRs. Memory reserved by IMRs is not usable for host
	 * so mark it reserved.
	 */
	reserved_ram_resource(dev, idx, base_k, size_k);
}
226
227/*
228 * Add IMR ranges that hang off the host bridge/memory
229 * controller device in case CONFIG_SA_ENABLE_IMR is selected by SoC.
230 */
231static void sa_add_imr_resources(struct device *dev, int *resource_cnt)
232{
233 size_t i, imr_offset;
234 uint32_t base, mask;
235 int index = *resource_cnt;
236
237 for (i = 0; i < MCH_NUM_IMRS; i++) {
238 imr_offset = i * MCH_IMR_PITCH;
239 base = MCHBAR32(imr_offset + MCH_IMR0_BASE);
240 mask = MCHBAR32(imr_offset + MCH_IMR0_MASK);
241
242 if (is_imr_enabled(base))
243 imr_resource(dev, index++, base, mask);
244 }
245
246 *resource_cnt = index;
247}
248
/*
 * read_resources handler for the system agent: standard PCI BARs,
 * SoC fixed MMIO, DRAM map, and (optionally) IMR ranges, all sharing
 * one running resource index.
 */
static void systemagent_read_resources(struct device *dev)
{
	int index = 0;

	/* Read standard PCI resources. */
	pci_dev_read_resources(dev);

	/* Add all fixed MMIO resources. */
	soc_add_fixed_mmio_resources(dev, &index);
	/* Calculate and add DRAM resources. */
	sa_add_dram_resources(dev, &index);
	if (CONFIG(SA_ENABLE_IMR))
		/* Add the isolated memory ranges (IMRs). */
		sa_add_imr_resources(dev, &index);
}
264
Lijian Zhao357e5522019-04-11 13:07:00 -0700265#if CONFIG(GENERATE_SMBIOS_TABLES)
Patrick Rudolphbf72dcb2020-05-12 16:04:47 +0200266static bool sa_supports_ecc(const uint32_t capida)
267{
268 return !(capida & CAPID_ECCDIS);
269}
270
271static size_t sa_slots_per_channel(const uint32_t capida)
272{
273 return !(capida & CAPID_DDPCD) + 1;
274}
275
276static size_t sa_number_of_channels(const uint32_t capida)
277{
278 return !(capida & CAPID_PDCD) + 1;
279}
280
/*
 * Emit SMBIOS type 16 (Physical Memory Array) at *current, derived
 * from the CAPID0_A capability fuses. Returns the structure length,
 * or 0 when memory info is not available in cbmem.
 */
static int sa_smbios_write_type_16(struct device *dev, int *handle,
		unsigned long *current)
{
	struct smbios_type16 *t = (struct smbios_type16 *)*current;
	int len = sizeof(struct smbios_type16);
	const uint32_t capida = pci_read_config32(dev, CAPID0_A);

	struct memory_info *meminfo;
	meminfo = cbmem_find(CBMEM_ID_MEMINFO);
	if (meminfo == NULL)
		return 0; /* can't find mem info in cbmem */

	memset(t, 0, sizeof(struct smbios_type16));
	t->type = SMBIOS_PHYS_MEMORY_ARRAY;
	t->handle = *handle;
	/* NOTE(review): len - 2 presumably excludes the trailing string
	   terminator bytes — confirm against the SMBIOS spec. */
	t->length = len - 2;
	t->location = MEMORY_ARRAY_LOCATION_SYSTEM_BOARD;
	t->use = MEMORY_ARRAY_USE_SYSTEM;
	t->memory_error_correction = sa_supports_ecc(capida) ? MEMORY_ARRAY_ECC_SINGLE_BIT :
			MEMORY_ARRAY_ECC_NONE;
	/* no error information handle available */
	t->memory_error_information_handle = 0xFFFE;
	/* Per-channel limit (MiB) * channels, converted to KiB units. */
	t->maximum_capacity = soc_systemagent_max_chan_capacity_mib(CAPID_DDRSZ(capida)) *
			sa_number_of_channels(capida) * (MiB / KiB);
	t->number_of_memory_devices = sa_slots_per_channel(capida) *
			sa_number_of_channels(capida);

	*current += len;
	*handle += 1;
	return len;
}
312#endif
313
Subrata Banik7609c652017-05-19 14:50:09 +0530314void enable_power_aware_intr(void)
315{
316 uint8_t pair;
317
318 /* Enable Power Aware Interrupt Routing */
319 pair = MCHBAR8(MCH_PAIR);
320 pair &= ~0x7; /* Clear 2:0 */
321 pair |= 0x4; /* Fixed Priority */
322 MCHBAR8(MCH_PAIR) = pair;
323}
324
/* Device operations for the system agent / host bridge PCI device. */
static struct device_operations systemagent_ops = {
	.read_resources = systemagent_read_resources,
	.set_resources = pci_dev_set_resources,
	.enable_resources = pci_dev_enable_resources,
	.init = soc_systemagent_init,
	.ops_pci = &pci_dev_ops_pci,
#if CONFIG(HAVE_ACPI_TABLES)
	.write_acpi_tables = sa_write_acpi_tables,
#endif
#if CONFIG(GENERATE_SMBIOS_TABLES)
	.get_smbios_data = sa_smbios_write_type_16,
#endif
};
338
/*
 * PCI device IDs of all supported host bridges (SKL/KBL/APL/GLK/CNL/
 * WHL/CFL/ICL/CML/TGL/EHL/JSL variants); zero-terminated list for the
 * PCI driver match below.
 */
static const unsigned short systemagent_ids[] = {
	PCI_DEVICE_ID_INTEL_GLK_NB,
	PCI_DEVICE_ID_INTEL_APL_NB,
	PCI_DEVICE_ID_INTEL_CNL_ID_U,
	PCI_DEVICE_ID_INTEL_CNL_ID_Y,
	PCI_DEVICE_ID_INTEL_SKL_ID_U,
	PCI_DEVICE_ID_INTEL_SKL_ID_Y,
	PCI_DEVICE_ID_INTEL_SKL_ID_ULX,
	PCI_DEVICE_ID_INTEL_SKL_ID_H_4,
	PCI_DEVICE_ID_INTEL_SKL_ID_H_2,
	PCI_DEVICE_ID_INTEL_SKL_ID_S_2,
	PCI_DEVICE_ID_INTEL_SKL_ID_S_4,
	PCI_DEVICE_ID_INTEL_WHL_ID_W_2,
	PCI_DEVICE_ID_INTEL_WHL_ID_W_4,
	PCI_DEVICE_ID_INTEL_KBL_ID_S,
	PCI_DEVICE_ID_INTEL_SKL_ID_H_EM,
	PCI_DEVICE_ID_INTEL_KBL_ID_U,
	PCI_DEVICE_ID_INTEL_KBL_ID_Y,
	PCI_DEVICE_ID_INTEL_KBL_ID_H,
	PCI_DEVICE_ID_INTEL_KBL_U_R,
	PCI_DEVICE_ID_INTEL_KBL_ID_DT,
	PCI_DEVICE_ID_INTEL_KBL_ID_DT_2,
	PCI_DEVICE_ID_INTEL_CFL_ID_U,
	PCI_DEVICE_ID_INTEL_CFL_ID_U_2,
	PCI_DEVICE_ID_INTEL_CFL_ID_H,
	PCI_DEVICE_ID_INTEL_CFL_ID_H_4,
	PCI_DEVICE_ID_INTEL_CFL_ID_H_8,
	PCI_DEVICE_ID_INTEL_CFL_ID_S,
	PCI_DEVICE_ID_INTEL_CFL_ID_S_DT_2,
	PCI_DEVICE_ID_INTEL_CFL_ID_S_DT_4,
	PCI_DEVICE_ID_INTEL_CFL_ID_S_DT_8,
	PCI_DEVICE_ID_INTEL_CFL_ID_S_WS_4,
	PCI_DEVICE_ID_INTEL_CFL_ID_S_WS_6,
	PCI_DEVICE_ID_INTEL_CFL_ID_S_WS_8,
	PCI_DEVICE_ID_INTEL_CFL_ID_S_S_4,
	PCI_DEVICE_ID_INTEL_CFL_ID_S_S_6,
	PCI_DEVICE_ID_INTEL_CFL_ID_S_S_8,
	PCI_DEVICE_ID_INTEL_ICL_ID_U,
	PCI_DEVICE_ID_INTEL_ICL_ID_U_2_2,
	PCI_DEVICE_ID_INTEL_ICL_ID_Y,
	PCI_DEVICE_ID_INTEL_ICL_ID_Y_2,
	PCI_DEVICE_ID_INTEL_CML_ULT,
	PCI_DEVICE_ID_INTEL_CML_ULT_2_2,
	PCI_DEVICE_ID_INTEL_CML_ULT_6_2,
	PCI_DEVICE_ID_INTEL_CML_ULX,
	PCI_DEVICE_ID_INTEL_CML_S,
	PCI_DEVICE_ID_INTEL_CML_S_G0G1_P0P1_6_2,
	PCI_DEVICE_ID_INTEL_CML_S_P0P1_8_2,
	PCI_DEVICE_ID_INTEL_CML_S_P0P1_10_2,
	PCI_DEVICE_ID_INTEL_CML_S_G0G1_4,
	PCI_DEVICE_ID_INTEL_CML_S_G0G1_2,
	PCI_DEVICE_ID_INTEL_CML_H,
	PCI_DEVICE_ID_INTEL_CML_H_4_2,
	PCI_DEVICE_ID_INTEL_CML_H_8_2,
	PCI_DEVICE_ID_INTEL_TGL_ID_U,
	PCI_DEVICE_ID_INTEL_TGL_ID_U_1,
	PCI_DEVICE_ID_INTEL_TGL_ID_U_2_2,
	PCI_DEVICE_ID_INTEL_TGL_ID_Y,
	PCI_DEVICE_ID_INTEL_JSL_EHL,
	PCI_DEVICE_ID_INTEL_EHL_ID_1,
	PCI_DEVICE_ID_INTEL_JSL_ID_1,
	PCI_DEVICE_ID_INTEL_JSL_ID_2,
	PCI_DEVICE_ID_INTEL_JSL_ID_3,
	PCI_DEVICE_ID_INTEL_JSL_ID_4,
	0
};
405
/* Register the system agent driver for all IDs listed above. */
static const struct pci_driver systemagent_driver __pci_driver = {
	.ops = &systemagent_ops,
	.vendor = PCI_VENDOR_ID_INTEL,
	.devices = systemagent_ids
};