/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2011 The Chromium OS Authors. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; version 2 of
 * the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * This is a ramstage driver for the Intel Management Engine found in the
 * 6-series chipset. It handles the required boot-time messages over the
 * MMIO-based Management Engine Interface to tell the ME that the BIOS is
 * finished with POST. Additional messages are defined for debug but are
 * not used unless the console loglevel is high enough.
 */
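
/*
 * Message flow, as implemented below: each command is framed by an MEI
 * (transport) header followed by an MKHI (protocol) header and optional
 * payload, pushed dword by dword through the host circular buffer, and
 * the response is read back the same way. The only command issued by
 * this driver is END OF POST, sent from SMM during finalization.
 */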

#include <arch/acpi.h>
#include <arch/io.h>
#include <console/console.h>
#include <device/pci_ids.h>
#include <device/pci_def.h>
#include <string.h>
#include <delay.h>
#include <elog.h>
#include <southbridge/intel/common/rcba.h>

#ifndef __SMM__
#include <device/device.h>
#include <device/pci.h>
#endif

#include "me.h"
#include "pch.h"

#if IS_ENABLED(CONFIG_CHROMEOS)
#include <vendorcode/google/chromeos/gnvs.h>
#endif

#ifndef __SMM__
/* Path that the BIOS should take based on ME state */
static const char *me_bios_path_values[] = {
	[ME_NORMAL_BIOS_PATH]		= "Normal",
	[ME_S3WAKE_BIOS_PATH]		= "S3 Wake",
	[ME_ERROR_BIOS_PATH]		= "Error",
	[ME_RECOVERY_BIOS_PATH]		= "Recovery",
	[ME_DISABLE_BIOS_PATH]		= "Disable",
	[ME_FIRMWARE_UPDATE_BIOS_PATH]	= "Firmware Update",
};
#endif

/* MMIO base address for MEI interface */
static u32 *mei_base_address;

#if IS_ENABLED(CONFIG_DEBUG_INTEL_ME)
static void mei_dump(void *ptr, int dword, int offset, const char *type)
{
	struct mei_csr *csr;

	printk(BIOS_SPEW, "%-9s[%02x] : ", type, offset);

	switch (offset) {
	case MEI_H_CSR:
	case MEI_ME_CSR_HA:
		csr = ptr;
		if (!csr) {
			printk(BIOS_SPEW, "ERROR: 0x%08x\n", dword);
			break;
		}
		printk(BIOS_SPEW, "cbd=%u cbrp=%02u cbwp=%02u ready=%u "
		       "reset=%u ig=%u is=%u ie=%u\n", csr->buffer_depth,
		       csr->buffer_read_ptr, csr->buffer_write_ptr,
		       csr->ready, csr->reset, csr->interrupt_generate,
		       csr->interrupt_status, csr->interrupt_enable);
		break;
	case MEI_ME_CB_RW:
	case MEI_H_CB_WW:
		printk(BIOS_SPEW, "CB: 0x%08x\n", dword);
		break;
	default:
		printk(BIOS_SPEW, "0x%08x\n", dword);
		break;
	}
}
#else
#define mei_dump(ptr, dword, offset, type) do {} while (0)
#endif

/*
 * ME/MEI access helpers using memcpy to avoid aliasing: register values
 * are staged in a plain u32 and copied to or from the packed register
 * structs instead of casting between incompatible pointer types, which
 * would break C strict-aliasing rules.
 */

static inline void mei_read_dword_ptr(void *ptr, int offset)
{
	u32 dword = read32(mei_base_address + (offset / sizeof(u32)));
	memcpy(ptr, &dword, sizeof(dword));
	mei_dump(ptr, dword, offset, "READ");
}

static inline void mei_write_dword_ptr(void *ptr, int offset)
{
	u32 dword = 0;
	memcpy(&dword, ptr, sizeof(dword));
	write32(mei_base_address + (offset / sizeof(u32)), dword);
	mei_dump(ptr, dword, offset, "WRITE");
}

#ifndef __SMM__
static inline void pci_read_dword_ptr(device_t dev, void *ptr, int offset)
{
	u32 dword = pci_read_config32(dev, offset);
	memcpy(ptr, &dword, sizeof(dword));
	mei_dump(ptr, dword, offset, "PCI READ");
}
#endif

static inline void read_host_csr(struct mei_csr *csr)
{
	mei_read_dword_ptr(csr, MEI_H_CSR);
}

static inline void write_host_csr(struct mei_csr *csr)
{
	mei_write_dword_ptr(csr, MEI_H_CSR);
}

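/*
 * The MEI transaction helpers and the finalize handlers below are only
 * built for SMM, which is where the END OF POST message is sent.
 */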
#ifdef __SMM__
static inline void read_me_csr(struct mei_csr *csr)
{
	mei_read_dword_ptr(csr, MEI_ME_CSR_HA);
}

static inline void write_cb(u32 dword)
{
	write32(mei_base_address + (MEI_H_CB_WW / sizeof(u32)), dword);
	mei_dump(NULL, dword, MEI_H_CB_WW, "WRITE");
}

static inline u32 read_cb(void)
{
	u32 dword = read32(mei_base_address + (MEI_ME_CB_RW / sizeof(u32)));
	mei_dump(NULL, dword, MEI_ME_CB_RW, "READ");
	return dword;
}

/* Wait for ME ready bit to be asserted */
static int mei_wait_for_me_ready(void)
{
	struct mei_csr me;
	unsigned try = ME_RETRY;

	while (try--) {
		read_me_csr(&me);
		if (me.ready)
			return 0;
		udelay(ME_DELAY);
	}

	printk(BIOS_ERR, "ME: failed to become ready\n");
	return -1;
}

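/*
 * Reset the MEI circular buffers: wait for the ME to become ready,
 * request a reset through the host CSR, then mark the host side ready
 * again so a fresh message can be queued.
 */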
static void mei_reset(void)
{
	struct mei_csr host;

	if (mei_wait_for_me_ready() < 0)
		return;

	/* Reset host and ME circular buffers for next message */
	read_host_csr(&host);
	host.reset = 1;
	host.interrupt_generate = 1;
	write_host_csr(&host);

	if (mei_wait_for_me_ready() < 0)
		return;

	/* Re-init and indicate host is ready */
	read_host_csr(&host);
	host.interrupt_generate = 1;
	host.ready = 1;
	host.reset = 0;
	write_host_csr(&host);
}

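/*
 * Push one request into the circular buffer: the MEI header, then the
 * MKHI header, then any request payload, and finally ring the ME
 * doorbell by setting interrupt_generate in the host CSR.
 */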
static int mei_send_msg(struct mei_header *mei, struct mkhi_header *mkhi,
			void *req_data)
{
	struct mei_csr host;
	unsigned ndata, n;
	u32 *data;

	/* Number of dwords in the MEI payload (MKHI header plus any data) */
	ndata = mei->length >> 2;

	/* Pad non-dword aligned request message length */
	if (mei->length & 3)
		ndata++;
	if (!ndata) {
		printk(BIOS_DEBUG, "ME: request does not include MKHI\n");
		return -1;
	}
	ndata++; /* Add MEI header */

	/*
	 * Make sure there is still room left in the circular buffer.
	 * Reset the buffer pointers if the requested message will not fit.
	 */
	read_host_csr(&host);
	if ((host.buffer_depth - host.buffer_write_ptr) < ndata) {
		printk(BIOS_ERR, "ME: circular buffer full, resetting...\n");
		mei_reset();
		read_host_csr(&host);
	}

	/*
	 * This implementation does not handle splitting large messages
	 * across multiple transactions. Ensure the requested length
	 * will fit in the available circular buffer depth.
	 */
	if ((host.buffer_depth - host.buffer_write_ptr) < ndata) {
		printk(BIOS_ERR, "ME: message (%u) too large for buffer (%u)\n",
		       ndata + 2, host.buffer_depth);
		return -1;
	}

	/* Write MEI header */
	mei_write_dword_ptr(mei, MEI_H_CB_WW);
	ndata--;

	/* Write MKHI header */
	mei_write_dword_ptr(mkhi, MEI_H_CB_WW);
	ndata--;

	/* Write message data */
	data = req_data;
	for (n = 0; n < ndata; ++n)
		write_cb(*data++);

	/* Generate interrupt to the ME */
	read_host_csr(&host);
	host.interrupt_generate = 1;
	write_host_csr(&host);

	/* Make sure ME is ready after sending request data */
	return mei_wait_for_me_ready();
}

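/*
 * Pull the response out of the ME circular buffer: poll until the
 * expected number of dwords is available, validate the MEI and MKHI
 * response headers against the request, then copy the payload into the
 * caller's buffer and acknowledge the transfer through the host CSR.
 */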
static int mei_recv_msg(struct mei_header *mei, struct mkhi_header *mkhi,
			void *rsp_data, int rsp_bytes)
{
	struct mei_header mei_rsp;
	struct mkhi_header mkhi_rsp;
	struct mei_csr me, host;
	unsigned ndata, n;
	unsigned expected;
	u32 *data;

	/* Total number of dwords to read from circular buffer */
	expected = (rsp_bytes + sizeof(mei_rsp) + sizeof(mkhi_rsp)) >> 2;
	if (rsp_bytes & 3)
		expected++;

	/*
	 * The interrupt status bit does not appear to indicate that the
	 * message has actually been received. Instead we wait until the
	 * expected number of dwords are present in the circular buffer.
	 */
	for (n = ME_RETRY; n; --n) {
		read_me_csr(&me);
		if ((me.buffer_write_ptr - me.buffer_read_ptr) >= expected)
			break;
		udelay(ME_DELAY);
	}
	if (!n) {
		printk(BIOS_ERR, "ME: timeout waiting for data: expected "
		       "%u, available %u\n", expected,
		       me.buffer_write_ptr - me.buffer_read_ptr);
		return -1;
	}

	/* Read and verify MEI response header from the ME */
	mei_read_dword_ptr(&mei_rsp, MEI_ME_CB_RW);
	if (!mei_rsp.is_complete) {
		printk(BIOS_ERR, "ME: response is not complete\n");
		return -1;
	}

	/* Handle non-dword responses and expect at least MKHI header */
	ndata = mei_rsp.length >> 2;
	if (mei_rsp.length & 3)
		ndata++;
	if (ndata != (expected - 1)) {
		printk(BIOS_ERR, "ME: response is missing data\n");
		return -1;
	}

	/* Read and verify MKHI response header from the ME */
	mei_read_dword_ptr(&mkhi_rsp, MEI_ME_CB_RW);
	if (!mkhi_rsp.is_response ||
	    mkhi->group_id != mkhi_rsp.group_id ||
	    mkhi->command != mkhi_rsp.command) {
		printk(BIOS_ERR, "ME: invalid response, group %u ?= %u, "
		       "command %u ?= %u, is_response %u\n", mkhi->group_id,
		       mkhi_rsp.group_id, mkhi->command, mkhi_rsp.command,
		       mkhi_rsp.is_response);
		return -1;
	}
	ndata--; /* MKHI header has been read */

	/* Make sure caller passed a buffer with enough space */
	if (ndata != (rsp_bytes >> 2)) {
		printk(BIOS_ERR, "ME: not enough room in response buffer: "
		       "%u != %u\n", ndata, rsp_bytes >> 2);
		return -1;
	}

	/* Read response data from the circular buffer */
	data = rsp_data;
	for (n = 0; n < ndata; ++n)
		*data++ = read_cb();

	/* Tell the ME that we have consumed the response */
	read_host_csr(&host);
	host.interrupt_status = 1;
	host.interrupt_generate = 1;
	write_host_csr(&host);

	return mei_wait_for_me_ready();
}

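/* A complete MKHI transaction is a send followed by the matching response */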
static inline int mei_sendrecv(struct mei_header *mei, struct mkhi_header *mkhi,
			       void *req_data, void *rsp_data, int rsp_bytes)
{
	if (mei_send_msg(mei, mkhi, req_data) < 0)
		return -1;
	if (mei_recv_msg(mei, mkhi, rsp_data, rsp_bytes) < 0)
		return -1;
	return 0;
}

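/*
 * Note: after a successful END OF POST the ME is expected to stop
 * accepting further BIOS commands on this interface (see the finalize
 * handler below).
 */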
/* Send END OF POST message to the ME */
static int mkhi_end_of_post(void)
{
	struct mkhi_header mkhi = {
		.group_id	= MKHI_GROUP_ID_GEN,
		.command	= MKHI_END_OF_POST,
	};
	struct mei_header mei = {
		.is_complete	= 1,
		.host_address	= MEI_HOST_ADDRESS,
		.client_address	= MEI_ADDRESS_MKHI,
		.length		= sizeof(mkhi),
	};

	/* Send request and wait for response */
	if (mei_sendrecv(&mei, &mkhi, NULL, NULL, 0) < 0) {
		printk(BIOS_ERR, "ME: END OF POST message failed\n");
		return -1;
	}

	printk(BIOS_INFO, "ME: END OF POST message successful\n");
	return 0;
}

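/*
 * SMM finalize handler for the ME7 (6-series) MEI device: locate the
 * MEI BAR, check that the ME is in a normal state, send END OF POST,
 * then disable and hide the PCI device.
 */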
static void intel_me7_finalize_smm(void)
{
	struct me_hfs hfs;
	u32 reg32;

	mei_base_address = (u32 *)
		(pci_read_config32(PCH_ME_DEV, PCI_BASE_ADDRESS_0) & ~0xf);

	/* S3 path will have hidden this device already */
	if (!mei_base_address || mei_base_address == (u32 *)0xfffffff0)
		return;

	/* Make sure ME is in a mode that expects EOP */
	reg32 = pci_read_config32(PCH_ME_DEV, PCI_ME_HFS);
	memcpy(&hfs, &reg32, sizeof(u32));

	/* Abort and leave device alone if not normal mode */
	if (hfs.fpt_bad ||
	    hfs.working_state != ME_HFS_CWS_NORMAL ||
	    hfs.operation_mode != ME_HFS_MODE_NORMAL)
		return;

	/* Try to send EOP command so ME stops accepting other commands */
	mkhi_end_of_post();

	/* Make sure IO is disabled */
	reg32 = pci_read_config32(PCH_ME_DEV, PCI_COMMAND);
	reg32 &= ~(PCI_COMMAND_MASTER |
		   PCI_COMMAND_MEMORY | PCI_COMMAND_IO);
	pci_write_config32(PCH_ME_DEV, PCI_COMMAND, reg32);

	/* Hide the PCI device */
	RCBA32_OR(FD2, PCH_DISABLE_MEI1);
}

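/*
 * A 32-bit read of PCI_VENDOR_ID returns the vendor ID in the low word
 * and the device ID in the high word, so 0x1c3a8086 parts take the ME7
 * path above and 0x1e3a8086 parts are handed off to the ME8 handler.
 */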
void intel_me_finalize_smm(void)
{
	u32 did = pci_read_config32(PCH_ME_DEV, PCI_VENDOR_ID);
	switch (did) {
	case 0x1c3a8086:
		intel_me7_finalize_smm();
		break;
	case 0x1e3a8086:
		intel_me8_finalize_smm();
		break;
	default:
		printk(BIOS_ERR, "No finalize handler for ME %08x.\n", did);
	}
}
#else /* !__SMM__ */

/* Determine the path that we should take based on ME status */
static me_bios_path intel_me_path(device_t dev)
{
	me_bios_path path = ME_DISABLE_BIOS_PATH;
	struct me_hfs hfs;
	struct me_gmes gmes;

	/* S3 wake skips all MKHI messages */
	if (acpi_is_wakeup_s3())
		return ME_S3WAKE_BIOS_PATH;

	pci_read_dword_ptr(dev, &hfs, PCI_ME_HFS);
	pci_read_dword_ptr(dev, &gmes, PCI_ME_GMES);

	/* Check and dump status */
	intel_me_status(&hfs, &gmes);

	/* Check Current Working State */
	switch (hfs.working_state) {
	case ME_HFS_CWS_NORMAL:
		path = ME_NORMAL_BIOS_PATH;
		break;
	case ME_HFS_CWS_REC:
		path = ME_RECOVERY_BIOS_PATH;
		break;
	default:
		path = ME_DISABLE_BIOS_PATH;
		break;
	}

	/* Check Current Operation Mode */
	switch (hfs.operation_mode) {
	case ME_HFS_MODE_NORMAL:
		break;
	case ME_HFS_MODE_DEBUG:
	case ME_HFS_MODE_DIS:
	case ME_HFS_MODE_OVER_JMPR:
	case ME_HFS_MODE_OVER_MEI:
	default:
		path = ME_DISABLE_BIOS_PATH;
		break;
	}

	/* Check for any error code and valid firmware */
	if (hfs.error_code || hfs.fpt_bad)
		path = ME_ERROR_BIOS_PATH;

#if IS_ENABLED(CONFIG_ELOG)
	if (path != ME_NORMAL_BIOS_PATH) {
		struct elog_event_data_me_extended data = {
			.current_working_state	= hfs.working_state,
			.operation_state	= hfs.operation_state,
			.operation_mode		= hfs.operation_mode,
			.error_code		= hfs.error_code,
			.progress_code		= gmes.progress_code,
			.current_pmevent	= gmes.current_pmevent,
			.current_state		= gmes.current_state,
		};
		elog_add_event_byte(ELOG_TYPE_MANAGEMENT_ENGINE, path);
		elog_add_event_raw(ELOG_TYPE_MANAGEMENT_ENGINE_EXT,
				   &data, sizeof(data));
	}
#endif

	return path;
}

/* Prepare ME for MEI messages */
static int intel_mei_setup(device_t dev)
{
	struct resource *res;
	struct mei_csr host;
	u32 reg32;

	/* Find the MMIO base for the ME interface */
	res = find_resource(dev, PCI_BASE_ADDRESS_0);
	if (!res || res->base == 0 || res->size == 0) {
		printk(BIOS_DEBUG, "ME: MEI resource not present!\n");
		return -1;
	}
	mei_base_address = (u32 *)(uintptr_t)res->base;

	/* Ensure Memory and Bus Master bits are set */
	reg32 = pci_read_config32(dev, PCI_COMMAND);
	reg32 |= PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
	pci_write_config32(dev, PCI_COMMAND, reg32);

	/* Clean up status for next message */
	read_host_csr(&host);
	host.interrupt_generate = 1;
	host.ready = 1;
	host.reset = 0;
	write_host_csr(&host);

	return 0;
}

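/*
 * The Extend register exposes a SHA-1 or SHA-256 hash measurement of
 * the ME firmware (algorithm reported by HERES). It is dumped to the
 * console and, on ChromeOS builds, saved in NVS for the OS to verify.
 */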
/* Read the Extend register hash of ME firmware */
static int intel_me_extend_valid(device_t dev)
{
	struct me_heres status;
	u32 extend[8] = {0};
	int i, count = 0;

	pci_read_dword_ptr(dev, &status, PCI_ME_HERES);
	if (!status.extend_feature_present) {
		printk(BIOS_ERR, "ME: Extend Feature not present\n");
		return -1;
	}

	if (!status.extend_reg_valid) {
		printk(BIOS_ERR, "ME: Extend Register not valid\n");
		return -1;
	}

	switch (status.extend_reg_algorithm) {
	case PCI_ME_EXT_SHA1:
		count = 5;
		printk(BIOS_DEBUG, "ME: Extend SHA-1: ");
		break;
	case PCI_ME_EXT_SHA256:
		count = 8;
		printk(BIOS_DEBUG, "ME: Extend SHA-256: ");
		break;
	default:
		printk(BIOS_ERR, "ME: Extend Algorithm %d unknown\n",
		       status.extend_reg_algorithm);
		return -1;
	}

	for (i = 0; i < count; ++i) {
		extend[i] = pci_read_config32(dev, PCI_ME_HER(i));
		printk(BIOS_DEBUG, "%08x", extend[i]);
	}
	printk(BIOS_DEBUG, "\n");

#if IS_ENABLED(CONFIG_CHROMEOS)
	/* Save hash in NVS for the OS to verify */
	chromeos_set_me_hash(extend, count);
#endif

	return 0;
}

/* Hide the ME virtual PCI devices */
static void intel_me_hide(device_t dev)
{
	dev->enabled = 0;
	pch_enable(dev);
}

/* Check whether ME is present and do basic init */
static void intel_me_init(device_t dev)
{
	/* Do initial setup and determine the BIOS path */
	me_bios_path path = intel_me_path(dev);

	printk(BIOS_NOTICE, "ME: BIOS path: %s\n", me_bios_path_values[path]);

	switch (path) {
	case ME_S3WAKE_BIOS_PATH:
		intel_me_hide(dev);
		break;

	case ME_NORMAL_BIOS_PATH:
		/* Validate the extend register */
		if (intel_me_extend_valid(dev) < 0)
			break; /* TODO: force recovery mode */

		/* Prepare MEI MMIO interface */
		if (intel_mei_setup(dev) < 0)
			break;

		/*
		 * Leave the ME unlocked in this path.
		 * It will be locked via SMI command later.
		 */
		break;

	case ME_ERROR_BIOS_PATH:
	case ME_RECOVERY_BIOS_PATH:
	case ME_DISABLE_BIOS_PATH:
	case ME_FIRMWARE_UPDATE_BIOS_PATH:
		break;
	}
}

static void set_subsystem(device_t dev, unsigned vendor, unsigned device)
{
	if (!vendor || !device) {
		pci_write_config32(dev, PCI_SUBSYSTEM_VENDOR_ID,
				   pci_read_config32(dev, PCI_VENDOR_ID));
	} else {
		pci_write_config32(dev, PCI_SUBSYSTEM_VENDOR_ID,
				   ((device & 0xffff) << 16) | (vendor & 0xffff));
	}
}

static struct pci_operations pci_ops = {
	.set_subsystem = set_subsystem,
};

static struct device_operations device_ops = {
	.read_resources		= pci_dev_read_resources,
	.set_resources		= pci_dev_set_resources,
	.enable_resources	= pci_dev_enable_resources,
	.init			= intel_me_init,
	.ops_pci		= &pci_ops,
};

static const unsigned short pci_device_ids[] = { 0x1c3a, 0x3b64, 0 };

static const struct pci_driver intel_me __pci_driver = {
	.ops	 = &device_ops,
	.vendor	 = PCI_VENDOR_ID_INTEL,
	.devices = pci_device_ids
};

#endif /* !__SMM__ */