/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2011 The Chromium OS Authors. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; version 2 of
 * the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

/*
 * This is a ramstage driver for the Intel Management Engine found in the
 * 6-series chipset. It handles the required boot-time messages over the
 * MMIO-based Management Engine Interface to tell the ME that the BIOS is
 * finished with POST. Additional messages are defined for debug but are
 * not used unless the console loglevel is high enough.
 */

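/*
 * The file is built for both ramstage and SMM: the ramstage half below
 * (!__SMM__) checks the ME state and prepares the MEI interface, while the
 * SMM half sends the END OF POST message and hides the ME PCI device.
 */
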
#include <arch/acpi.h>
#include <arch/io.h>
#include <device/pci_ops.h>
#include <console/console.h>
#include <device/pci_ids.h>
#include <device/pci_def.h>
#include <string.h>
#include <delay.h>
#include <elog.h>

#ifndef __SMM__
#include <device/device.h>
#include <device/pci.h>
#endif

#include "me.h"
#include "pch.h"

#if IS_ENABLED(CONFIG_CHROMEOS)
#include <vendorcode/google/chromeos/gnvs.h>
#endif

#ifndef __SMM__
/* Path that the BIOS should take based on ME state */
static const char *me_bios_path_values[] = {
	[ME_NORMAL_BIOS_PATH]		= "Normal",
	[ME_S3WAKE_BIOS_PATH]		= "S3 Wake",
	[ME_ERROR_BIOS_PATH]		= "Error",
	[ME_RECOVERY_BIOS_PATH]		= "Recovery",
	[ME_DISABLE_BIOS_PATH]		= "Disable",
	[ME_FIRMWARE_UPDATE_BIOS_PATH]	= "Firmware Update",
};
#endif

/* MMIO base address for MEI interface */
static u32 *mei_base_address;

#if IS_ENABLED(CONFIG_DEBUG_INTEL_ME)
static void mei_dump(void *ptr, int dword, int offset, const char *type)
{
	struct mei_csr *csr;

	printk(BIOS_SPEW, "%-9s[%02x] : ", type, offset);

	switch (offset) {
	case MEI_H_CSR:
	case MEI_ME_CSR_HA:
		csr = ptr;
		if (!csr) {
			printk(BIOS_SPEW, "ERROR: 0x%08x\n", dword);
			break;
		}
		printk(BIOS_SPEW, "cbd=%u cbrp=%02u cbwp=%02u ready=%u "
		       "reset=%u ig=%u is=%u ie=%u\n", csr->buffer_depth,
		       csr->buffer_read_ptr, csr->buffer_write_ptr,
		       csr->ready, csr->reset, csr->interrupt_generate,
		       csr->interrupt_status, csr->interrupt_enable);
		break;
	case MEI_ME_CB_RW:
	case MEI_H_CB_WW:
		printk(BIOS_SPEW, "CB: 0x%08x\n", dword);
		break;
	default:
		printk(BIOS_SPEW, "0x%08x\n", dword);
		break;
	}
}
#else
# define mei_dump(ptr, dword, offset, type) do {} while (0)
#endif

/*
 * ME/MEI access helpers using memcpy to avoid aliasing.
 */

static inline void mei_read_dword_ptr(void *ptr, int offset)
{
	u32 dword = read32(mei_base_address + (offset/sizeof(u32)));
	memcpy(ptr, &dword, sizeof(dword));
	mei_dump(ptr, dword, offset, "READ");
}

static inline void mei_write_dword_ptr(void *ptr, int offset)
{
	u32 dword = 0;
	memcpy(&dword, ptr, sizeof(dword));
	write32(mei_base_address + (offset/sizeof(u32)), dword);
	mei_dump(ptr, dword, offset, "WRITE");
}

#ifndef __SMM__
static inline void pci_read_dword_ptr(struct device *dev, void *ptr,
				      int offset)
{
	u32 dword = pci_read_config32(dev, offset);
	memcpy(ptr, &dword, sizeof(dword));
	mei_dump(ptr, dword, offset, "PCI READ");
}
#endif

static inline void read_host_csr(struct mei_csr *csr)
{
	mei_read_dword_ptr(csr, MEI_H_CSR);
}

static inline void write_host_csr(struct mei_csr *csr)
{
	mei_write_dword_ptr(csr, MEI_H_CSR);
}

#ifdef __SMM__
static inline void read_me_csr(struct mei_csr *csr)
{
	mei_read_dword_ptr(csr, MEI_ME_CSR_HA);
}

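/*
 * write_cb()/read_cb() move one dword of message payload at a time through
 * the host write window (MEI_H_CB_WW) and the ME read window (MEI_ME_CB_RW).
 */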
static inline void write_cb(u32 dword)
{
	write32(mei_base_address + (MEI_H_CB_WW/sizeof(u32)), dword);
	mei_dump(NULL, dword, MEI_H_CB_WW, "WRITE");
}

static inline u32 read_cb(void)
{
	u32 dword = read32(mei_base_address + (MEI_ME_CB_RW/sizeof(u32)));
	mei_dump(NULL, dword, MEI_ME_CB_RW, "READ");
	return dword;
}

/* Wait for ME ready bit to be asserted */
static int mei_wait_for_me_ready(void)
{
	struct mei_csr me;
	unsigned try = ME_RETRY;

	while (try--) {
		read_me_csr(&me);
		if (me.ready)
			return 0;
		udelay(ME_DELAY);
	}

	printk(BIOS_ERR, "ME: failed to become ready\n");
	return -1;
}

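/*
 * Reset both circular buffers and re-signal host readiness. Called from
 * mei_send_msg() when a request does not fit in the remaining buffer space.
 */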
static void mei_reset(void)
{
	struct mei_csr host;

	if (mei_wait_for_me_ready() < 0)
		return;

	/* Reset host and ME circular buffers for next message */
	read_host_csr(&host);
	host.reset = 1;
	host.interrupt_generate = 1;
	write_host_csr(&host);

	if (mei_wait_for_me_ready() < 0)
		return;

	/* Re-init and indicate host is ready */
	read_host_csr(&host);
	host.interrupt_generate = 1;
	host.ready = 1;
	host.reset = 0;
	write_host_csr(&host);
}

static int mei_send_msg(struct mei_header *mei, struct mkhi_header *mkhi,
			void *req_data)
{
	struct mei_csr host;
	unsigned ndata, n;
	u32 *data;

	/* Number of dwords to write; length covers the MKHI header and data,
	   but not the MEI header added below */
	ndata = mei->length >> 2;

	/* Pad non-dword aligned request message length */
	if (mei->length & 3)
		ndata++;
	if (!ndata) {
		printk(BIOS_DEBUG, "ME: request does not include MKHI\n");
		return -1;
	}
	ndata++; /* Add MEI header */

	/*
	 * Make sure there is still room left in the circular buffer.
	 * Reset the buffer pointers if the requested message will not fit.
	 */
	read_host_csr(&host);
	if ((host.buffer_depth - host.buffer_write_ptr) < ndata) {
		printk(BIOS_ERR, "ME: circular buffer full, resetting...\n");
		mei_reset();
		read_host_csr(&host);
	}

	/*
	 * This implementation does not handle splitting large messages
	 * across multiple transactions. Ensure the requested length
	 * will fit in the available circular buffer depth.
	 */
	if ((host.buffer_depth - host.buffer_write_ptr) < ndata) {
		printk(BIOS_ERR, "ME: message (%u) too large for buffer (%u)\n",
		       ndata + 2, host.buffer_depth);
		return -1;
	}

	/* Write MEI header */
	mei_write_dword_ptr(mei, MEI_H_CB_WW);
	ndata--;

	/* Write MKHI header */
	mei_write_dword_ptr(mkhi, MEI_H_CB_WW);
	ndata--;

	/* Write message data */
	data = req_data;
	for (n = 0; n < ndata; ++n)
		write_cb(*data++);

	/* Generate interrupt to the ME */
	read_host_csr(&host);
	host.interrupt_generate = 1;
	write_host_csr(&host);

	/* Make sure ME is ready after sending request data */
	return mei_wait_for_me_ready();
}

static int mei_recv_msg(struct mei_header *mei, struct mkhi_header *mkhi,
			void *rsp_data, int rsp_bytes)
{
	struct mei_header mei_rsp;
	struct mkhi_header mkhi_rsp;
	struct mei_csr me, host;
	unsigned ndata, n;
	unsigned expected;
	u32 *data;

	/* Total number of dwords to read from circular buffer */
	expected = (rsp_bytes + sizeof(mei_rsp) + sizeof(mkhi_rsp)) >> 2;
	if (rsp_bytes & 3)
		expected++;

	/*
	 * The interrupt status bit does not appear to indicate that the
	 * message has actually been received. Instead we wait until the
	 * expected number of dwords are present in the circular buffer.
	 */
	for (n = ME_RETRY; n; --n) {
		read_me_csr(&me);
		if ((me.buffer_write_ptr - me.buffer_read_ptr) >= expected)
			break;
		udelay(ME_DELAY);
	}
	if (!n) {
		printk(BIOS_ERR, "ME: timeout waiting for data: expected "
		       "%u, available %u\n", expected,
		       me.buffer_write_ptr - me.buffer_read_ptr);
		return -1;
	}

	/* Read and verify MEI response header from the ME */
	mei_read_dword_ptr(&mei_rsp, MEI_ME_CB_RW);
	if (!mei_rsp.is_complete) {
		printk(BIOS_ERR, "ME: response is not complete\n");
		return -1;
	}

	/* Handle non-dword responses and expect at least MKHI header */
	ndata = mei_rsp.length >> 2;
	if (mei_rsp.length & 3)
		ndata++;
	if (ndata != (expected - 1)) {
		printk(BIOS_ERR, "ME: response is missing data\n");
		return -1;
	}

	/* Read and verify MKHI response header from the ME */
	mei_read_dword_ptr(&mkhi_rsp, MEI_ME_CB_RW);
	if (!mkhi_rsp.is_response ||
	    mkhi->group_id != mkhi_rsp.group_id ||
	    mkhi->command != mkhi_rsp.command) {
		printk(BIOS_ERR, "ME: invalid response, group %u ?= %u, "
		       "command %u ?= %u, is_response %u\n", mkhi->group_id,
		       mkhi_rsp.group_id, mkhi->command, mkhi_rsp.command,
		       mkhi_rsp.is_response);
		return -1;
	}
	ndata--; /* MKHI header has been read */

	/* Make sure caller passed a buffer with enough space */
	if (ndata != (rsp_bytes >> 2)) {
		printk(BIOS_ERR, "ME: not enough room in response buffer: "
		       "%u != %u\n", ndata, rsp_bytes >> 2);
		return -1;
	}

	/* Read response data from the circular buffer */
	data = rsp_data;
	for (n = 0; n < ndata; ++n)
		*data++ = read_cb();

	/* Tell the ME that we have consumed the response */
	read_host_csr(&host);
	host.interrupt_status = 1;
	host.interrupt_generate = 1;
	write_host_csr(&host);

	return mei_wait_for_me_ready();
}

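/*
 * Convenience wrapper: send one MKHI request and then read back the
 * matching response into the caller's buffer.
 */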
static inline int mei_sendrecv(struct mei_header *mei, struct mkhi_header *mkhi,
			       void *req_data, void *rsp_data, int rsp_bytes)
{
	if (mei_send_msg(mei, mkhi, req_data) < 0)
		return -1;
	if (mei_recv_msg(mei, mkhi, rsp_data, rsp_bytes) < 0)
		return -1;
	return 0;
}

/* Send END OF POST message to the ME */
static int mkhi_end_of_post(void)
{
	struct mkhi_header mkhi = {
		.group_id	= MKHI_GROUP_ID_GEN,
		.command	= MKHI_END_OF_POST,
	};
	struct mei_header mei = {
		.is_complete	= 1,
		.host_address	= MEI_HOST_ADDRESS,
		.client_address	= MEI_ADDRESS_MKHI,
		.length		= sizeof(mkhi),
	};

	/* Send request and wait for response */
	if (mei_sendrecv(&mei, &mkhi, NULL, NULL, 0) < 0) {
		printk(BIOS_ERR, "ME: END OF POST message failed\n");
		return -1;
	}

	printk(BIOS_INFO, "ME: END OF POST message successful\n");
	return 0;
}

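/*
 * ME 7.x finalization: send END OF POST, then clear the PCI command
 * register enables and hide the MEI PCI function from the OS.
 */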
static void intel_me7_finalize_smm(void)
{
	struct me_hfs hfs;
	u32 reg32;

	mei_base_address = (u32 *)
		(pci_read_config32(PCH_ME_DEV, PCI_BASE_ADDRESS_0) & ~0xf);

	/* S3 path will have hidden this device already */
	if (!mei_base_address || mei_base_address == (u32 *)0xfffffff0)
		return;

	/* Make sure ME is in a mode that expects EOP */
	reg32 = pci_read_config32(PCH_ME_DEV, PCI_ME_HFS);
	memcpy(&hfs, &reg32, sizeof(u32));

	/* Abort and leave device alone if not normal mode */
	if (hfs.fpt_bad ||
	    hfs.working_state != ME_HFS_CWS_NORMAL ||
	    hfs.operation_mode != ME_HFS_MODE_NORMAL)
		return;

	/* Try to send EOP command so ME stops accepting other commands */
	mkhi_end_of_post();

	/* Make sure IO is disabled */
	reg32 = pci_read_config32(PCH_ME_DEV, PCI_COMMAND);
	reg32 &= ~(PCI_COMMAND_MASTER |
		   PCI_COMMAND_MEMORY | PCI_COMMAND_IO);
	pci_write_config32(PCH_ME_DEV, PCI_COMMAND, reg32);

	/* Hide the PCI device */
	RCBA32_OR(FD2, PCH_DISABLE_MEI1);
}

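/*
 * The 32-bit read at PCI_VENDOR_ID returns the device ID in the upper half
 * and the Intel vendor ID (0x8086) in the lower half, so the switch below
 * dispatches on the combined value: 0x1c3a is the 6-series MEI device
 * handled here, 0x1e3a the 7-series device handled by intel_me8_finalize_smm().
 */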
void intel_me_finalize_smm(void)
{
	u32 did = pci_read_config32(PCH_ME_DEV, PCI_VENDOR_ID);
	switch (did) {
	case 0x1c3a8086:
		intel_me7_finalize_smm();
		break;
	case 0x1e3a8086:
		intel_me8_finalize_smm();
		break;
	default:
		printk(BIOS_ERR, "No finalize handler for ME %08x.\n", did);
	}
}
#else /* !__SMM__ */

/* Determine the path that we should take based on ME status */
static me_bios_path intel_me_path(struct device *dev)
{
	me_bios_path path = ME_DISABLE_BIOS_PATH;
	struct me_hfs hfs;
	struct me_gmes gmes;

	/* S3 wake skips all MKHI messages */
	if (acpi_is_wakeup_s3())
		return ME_S3WAKE_BIOS_PATH;

	pci_read_dword_ptr(dev, &hfs, PCI_ME_HFS);
	pci_read_dword_ptr(dev, &gmes, PCI_ME_GMES);

	/* Check and dump status */
	intel_me_status(&hfs, &gmes);

	/* Check Current Working State */
	switch (hfs.working_state) {
	case ME_HFS_CWS_NORMAL:
		path = ME_NORMAL_BIOS_PATH;
		break;
	case ME_HFS_CWS_REC:
		path = ME_RECOVERY_BIOS_PATH;
		break;
	default:
		path = ME_DISABLE_BIOS_PATH;
		break;
	}

	/* Check Current Operation Mode */
	switch (hfs.operation_mode) {
	case ME_HFS_MODE_NORMAL:
		break;
	case ME_HFS_MODE_DEBUG:
	case ME_HFS_MODE_DIS:
	case ME_HFS_MODE_OVER_JMPR:
	case ME_HFS_MODE_OVER_MEI:
	default:
		path = ME_DISABLE_BIOS_PATH;
		break;
	}

	/* Check for any error code and valid firmware */
	if (hfs.error_code || hfs.fpt_bad)
		path = ME_ERROR_BIOS_PATH;

#if IS_ENABLED(CONFIG_ELOG)
	if (path != ME_NORMAL_BIOS_PATH) {
		struct elog_event_data_me_extended data = {
			.current_working_state	= hfs.working_state,
			.operation_state	= hfs.operation_state,
			.operation_mode		= hfs.operation_mode,
			.error_code		= hfs.error_code,
			.progress_code		= gmes.progress_code,
			.current_pmevent	= gmes.current_pmevent,
			.current_state		= gmes.current_state,
		};
		elog_add_event_byte(ELOG_TYPE_MANAGEMENT_ENGINE, path);
		elog_add_event_raw(ELOG_TYPE_MANAGEMENT_ENGINE_EXT,
				   &data, sizeof(data));
	}
#endif

	return path;
}

/* Prepare ME for MEI messages */
static int intel_mei_setup(struct device *dev)
{
	struct resource *res;
	struct mei_csr host;
	u32 reg32;

	/* Find the MMIO base for the ME interface */
	res = find_resource(dev, PCI_BASE_ADDRESS_0);
	if (!res || res->base == 0 || res->size == 0) {
		printk(BIOS_DEBUG, "ME: MEI resource not present!\n");
		return -1;
	}
	mei_base_address = (u32 *)(uintptr_t)res->base;

	/* Ensure Memory and Bus Master bits are set */
	reg32 = pci_read_config32(dev, PCI_COMMAND);
	reg32 |= PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
	pci_write_config32(dev, PCI_COMMAND, reg32);

	/* Clean up status for next message */
	read_host_csr(&host);
	host.interrupt_generate = 1;
	host.ready = 1;
	host.reset = 0;
	write_host_csr(&host);

	return 0;
}

/* Read the Extend register hash of ME firmware */
static int intel_me_extend_valid(struct device *dev)
{
	struct me_heres status;
	u32 extend[8] = {0};
	int i, count = 0;

	pci_read_dword_ptr(dev, &status, PCI_ME_HERES);
	if (!status.extend_feature_present) {
		printk(BIOS_ERR, "ME: Extend Feature not present\n");
		return -1;
	}

	if (!status.extend_reg_valid) {
		printk(BIOS_ERR, "ME: Extend Register not valid\n");
		return -1;
	}

	switch (status.extend_reg_algorithm) {
	case PCI_ME_EXT_SHA1:
		count = 5;
		printk(BIOS_DEBUG, "ME: Extend SHA-1: ");
		break;
	case PCI_ME_EXT_SHA256:
		count = 8;
		printk(BIOS_DEBUG, "ME: Extend SHA-256: ");
		break;
	default:
		printk(BIOS_ERR, "ME: Extend Algorithm %d unknown\n",
		       status.extend_reg_algorithm);
		return -1;
	}

	for (i = 0; i < count; ++i) {
		extend[i] = pci_read_config32(dev, PCI_ME_HER(i));
		printk(BIOS_DEBUG, "%08x", extend[i]);
	}
	printk(BIOS_DEBUG, "\n");

#if IS_ENABLED(CONFIG_CHROMEOS)
	/* Save hash in NVS for the OS to verify */
	chromeos_set_me_hash(extend, count);
#endif

	return 0;
}

/* Hide the ME virtual PCI devices */
static void intel_me_hide(struct device *dev)
{
	dev->enabled = 0;
	pch_enable(dev);
}

/* Check whether ME is present and do basic init */
static void intel_me_init(struct device *dev)
{
	me_bios_path path = intel_me_path(dev);

	/* Do initial setup and determine the BIOS path */
	printk(BIOS_NOTICE, "ME: BIOS path: %s\n", me_bios_path_values[path]);

	switch (path) {
	case ME_S3WAKE_BIOS_PATH:
		intel_me_hide(dev);
		break;

	case ME_NORMAL_BIOS_PATH:
		/* Validate the extend register */
		if (intel_me_extend_valid(dev) < 0)
			break; /* TODO: force recovery mode */

		/* Prepare MEI MMIO interface */
		if (intel_mei_setup(dev) < 0)
			break;

		/*
		 * Leave the ME unlocked in this path.
		 * It will be locked via SMI command later.
		 */
		break;

	case ME_ERROR_BIOS_PATH:
	case ME_RECOVERY_BIOS_PATH:
	case ME_DISABLE_BIOS_PATH:
	case ME_FIRMWARE_UPDATE_BIOS_PATH:
		break;
	}
}

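/*
 * Program the subsystem IDs; when no vendor/device is given, mirror the
 * device's own vendor and device IDs into the subsystem register.
 */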
static void set_subsystem(struct device *dev, unsigned vendor,
			  unsigned device)
{
	if (!vendor || !device) {
		pci_write_config32(dev, PCI_SUBSYSTEM_VENDOR_ID,
				   pci_read_config32(dev, PCI_VENDOR_ID));
	} else {
		pci_write_config32(dev, PCI_SUBSYSTEM_VENDOR_ID,
				   ((device & 0xffff) << 16) | (vendor & 0xffff));
	}
}

static struct pci_operations pci_ops = {
	.set_subsystem = set_subsystem,
};

static struct device_operations device_ops = {
	.read_resources		= pci_dev_read_resources,
	.set_resources		= pci_dev_set_resources,
	.enable_resources	= pci_dev_enable_resources,
	.init			= intel_me_init,
	.ops_pci		= &pci_ops,
};

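/*
 * MEI PCI device IDs this driver binds to: 0x1c3a is the 6-series
 * (Cougar Point) controller; 0x3b64 is believed to be the corresponding
 * 5-series (Ibex Peak) device.
 */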
static const unsigned short pci_device_ids[] = { 0x1c3a, 0x3b64,
						 0 };

static const struct pci_driver intel_me __pci_driver = {
	.ops	 = &device_ops,
	.vendor	 = PCI_VENDOR_ID_INTEL,
	.devices = pci_device_ids
};

#endif /* !__SMM__ */