/*
 * This file is part of the coreboot project.
 *
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; version 2 of
 * the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * This is a ramstage driver for the Intel Management Engine found in the
 * 6-series chipset. It handles the required boot-time messages over the
 * MMIO-based Management Engine Interface to tell the ME that the BIOS is
 * finished with POST. Additional messages are defined for debug but are
 * not used unless the console loglevel is high enough.
 */

#include <arch/acpi.h>
#include <console/console.h>
#include <device/device.h>
#include <device/mmio.h>
#include <device/pci.h>
#include <device/pci_def.h>
#include <device/pci_ids.h>
#include <device/pci_ops.h>
#include <string.h>
#include <delay.h>
#include <elog.h>

#include "me.h"
#include "pch.h"

#if CONFIG(CHROMEOS)
#include <vendorcode/google/chromeos/gnvs.h>
#endif

/* Path that the BIOS should take based on ME state */
static const char *me_bios_path_values[] __unused = {
	[ME_NORMAL_BIOS_PATH] = "Normal",
	[ME_S3WAKE_BIOS_PATH] = "S3 Wake",
	[ME_ERROR_BIOS_PATH] = "Error",
	[ME_RECOVERY_BIOS_PATH] = "Recovery",
	[ME_DISABLE_BIOS_PATH] = "Disable",
	[ME_FIRMWARE_UPDATE_BIOS_PATH] = "Firmware Update",
};

/* MMIO base address for MEI interface */
static u32 *mei_base_address;

static void mei_dump(void *ptr, int dword, int offset, const char *type)
{
	struct mei_csr *csr;

	if (!CONFIG(DEBUG_INTEL_ME))
		return;

	printk(BIOS_SPEW, "%-9s[%02x] : ", type, offset);

	switch (offset) {
	case MEI_H_CSR:
	case MEI_ME_CSR_HA:
		csr = ptr;
		if (!csr) {
			printk(BIOS_SPEW, "ERROR: 0x%08x\n", dword);
			break;
		}
		printk(BIOS_SPEW, "cbd=%u cbrp=%02u cbwp=%02u ready=%u "
		       "reset=%u ig=%u is=%u ie=%u\n", csr->buffer_depth,
		       csr->buffer_read_ptr, csr->buffer_write_ptr,
		       csr->ready, csr->reset, csr->interrupt_generate,
		       csr->interrupt_status, csr->interrupt_enable);
		break;
	case MEI_ME_CB_RW:
	case MEI_H_CB_WW:
		printk(BIOS_SPEW, "CB: 0x%08x\n", dword);
		break;
	default:
		printk(BIOS_SPEW, "0x%08x\n", offset);
		break;
	}
}

/*
 * ME/MEI access helpers using memcpy to avoid aliasing.
 */

static inline void mei_read_dword_ptr(void *ptr, int offset)
{
	u32 dword = read32(mei_base_address + (offset/sizeof(u32)));
	memcpy(ptr, &dword, sizeof(dword));
	mei_dump(ptr, dword, offset, "READ");
}

static inline void mei_write_dword_ptr(void *ptr, int offset)
{
	u32 dword = 0;
	memcpy(&dword, ptr, sizeof(dword));
	write32(mei_base_address + (offset/sizeof(u32)), dword);
	mei_dump(ptr, dword, offset, "WRITE");
}

#ifndef __SIMPLE_DEVICE__
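/* Read a dword from ME PCI config space into a caller-provided struct */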
static inline void pci_read_dword_ptr(struct device *dev, void *ptr,
				      int offset)
{
	u32 dword = pci_read_config32(dev, offset);
	memcpy(ptr, &dword, sizeof(dword));
	mei_dump(ptr, dword, offset, "PCI READ");
}
#endif

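/* Accessors for the host and ME control/status registers (CSRs) */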
static inline void read_host_csr(struct mei_csr *csr)
{
	mei_read_dword_ptr(csr, MEI_H_CSR);
}

static inline void write_host_csr(struct mei_csr *csr)
{
	mei_write_dword_ptr(csr, MEI_H_CSR);
}

static inline void read_me_csr(struct mei_csr *csr)
{
	mei_read_dword_ptr(csr, MEI_ME_CSR_HA);
}

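/*
 * Raw circular-buffer access: requests are written dword by dword to the
 * host write window, responses are read from the ME read window.
 */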
static inline void write_cb(u32 dword)
{
	write32(mei_base_address + (MEI_H_CB_WW/sizeof(u32)), dword);
	mei_dump(NULL, dword, MEI_H_CB_WW, "WRITE");
}

static inline u32 read_cb(void)
{
	u32 dword = read32(mei_base_address + (MEI_ME_CB_RW/sizeof(u32)));
	mei_dump(NULL, dword, MEI_ME_CB_RW, "READ");
	return dword;
}

/* Wait for ME ready bit to be asserted */
static int mei_wait_for_me_ready(void)
{
	struct mei_csr me;
	unsigned int try = ME_RETRY;

	while (try--) {
		read_me_csr(&me);
		if (me.ready)
			return 0;
		udelay(ME_DELAY);
	}

	printk(BIOS_ERR, "ME: failed to become ready\n");
	return -1;
}

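/* Reset the host/ME circular buffers and bring the host side back to ready */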
static void mei_reset(void)
{
	struct mei_csr host;

	if (mei_wait_for_me_ready() < 0)
		return;

	/* Reset host and ME circular buffers for next message */
	read_host_csr(&host);
	host.reset = 1;
	host.interrupt_generate = 1;
	write_host_csr(&host);

	if (mei_wait_for_me_ready() < 0)
		return;

	/* Re-init and indicate host is ready */
	read_host_csr(&host);
	host.interrupt_generate = 1;
	host.ready = 1;
	host.reset = 0;
	write_host_csr(&host);
}

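/*
 * Send a request to the ME: the MEI header and MKHI header are written to
 * the circular buffer first, followed by any request payload padded to
 * dword granularity, then an interrupt is generated to the ME.
 */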
static int mei_send_msg(struct mei_header *mei, struct mkhi_header *mkhi,
			void *req_data)
{
	struct mei_csr host;
	unsigned int ndata, n;
	u32 *data;

	/* Number of dwords to write, ignoring MKHI */
	ndata = mei->length >> 2;

	/* Pad non-dword aligned request message length */
	if (mei->length & 3)
		ndata++;
	if (!ndata) {
		printk(BIOS_DEBUG, "ME: request does not include MKHI\n");
		return -1;
	}
	ndata++; /* Add MEI header */

	/*
	 * Make sure there is still room left in the circular buffer.
	 * Reset the buffer pointers if the requested message will not fit.
	 */
	read_host_csr(&host);
	if ((host.buffer_depth - host.buffer_write_ptr) < ndata) {
		printk(BIOS_ERR, "ME: circular buffer full, resetting...\n");
		mei_reset();
		read_host_csr(&host);
	}

	/*
	 * This implementation does not handle splitting large messages
	 * across multiple transactions. Ensure the requested length
	 * will fit in the available circular buffer depth.
	 */
	if ((host.buffer_depth - host.buffer_write_ptr) < ndata) {
		printk(BIOS_ERR, "ME: message (%u) too large for buffer (%u)\n",
		       ndata + 2, host.buffer_depth);
		return -1;
	}

	/* Write MEI header */
	mei_write_dword_ptr(mei, MEI_H_CB_WW);
	ndata--;

	/* Write MKHI header */
	mei_write_dword_ptr(mkhi, MEI_H_CB_WW);
	ndata--;

	/* Write message data */
	data = req_data;
	for (n = 0; n < ndata; ++n)
		write_cb(*data++);

	/* Generate interrupt to the ME */
	read_host_csr(&host);
	host.interrupt_generate = 1;
	write_host_csr(&host);

	/* Make sure ME is ready after sending request data */
	return mei_wait_for_me_ready();
}

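/*
 * Receive a response from the ME: wait until the expected number of dwords
 * is available in the circular buffer, then read and validate the MEI and
 * MKHI response headers before copying the payload into rsp_data.
 */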
static int mei_recv_msg(struct mei_header *mei, struct mkhi_header *mkhi,
			void *rsp_data, int rsp_bytes)
{
	struct mei_header mei_rsp;
	struct mkhi_header mkhi_rsp;
	struct mei_csr me, host;
	unsigned int ndata, n;
	unsigned int expected;
	u32 *data;

	/* Total number of dwords to read from circular buffer */
	expected = (rsp_bytes + sizeof(mei_rsp) + sizeof(mkhi_rsp)) >> 2;
	if (rsp_bytes & 3)
		expected++;

	/*
	 * The interrupt status bit does not appear to indicate that the
	 * message has actually been received. Instead we wait until the
	 * expected number of dwords are present in the circular buffer.
	 */
	for (n = ME_RETRY; n; --n) {
		read_me_csr(&me);
		if ((me.buffer_write_ptr - me.buffer_read_ptr) >= expected)
			break;
		udelay(ME_DELAY);
	}
	if (!n) {
		printk(BIOS_ERR, "ME: timeout waiting for data: expected "
		       "%u, available %u\n", expected,
		       me.buffer_write_ptr - me.buffer_read_ptr);
		return -1;
	}

	/* Read and verify MEI response header from the ME */
	mei_read_dword_ptr(&mei_rsp, MEI_ME_CB_RW);
	if (!mei_rsp.is_complete) {
		printk(BIOS_ERR, "ME: response is not complete\n");
		return -1;
	}

	/* Handle non-dword responses and expect at least MKHI header */
	ndata = mei_rsp.length >> 2;
	if (mei_rsp.length & 3)
		ndata++;
	if (ndata != (expected - 1)) {
		printk(BIOS_ERR, "ME: response is missing data\n");
		return -1;
	}

	/* Read and verify MKHI response header from the ME */
	mei_read_dword_ptr(&mkhi_rsp, MEI_ME_CB_RW);
	if (!mkhi_rsp.is_response ||
	    mkhi->group_id != mkhi_rsp.group_id ||
	    mkhi->command != mkhi_rsp.command) {
		printk(BIOS_ERR, "ME: invalid response, group %u ?= %u, "
		       "command %u ?= %u, is_response %u\n", mkhi->group_id,
		       mkhi_rsp.group_id, mkhi->command, mkhi_rsp.command,
		       mkhi_rsp.is_response);
		return -1;
	}
	ndata--; /* MKHI header has been read */

	/* Make sure caller passed a buffer with enough space */
	if (ndata != (rsp_bytes >> 2)) {
		printk(BIOS_ERR, "ME: not enough room in response buffer: "
		       "%u != %u\n", ndata, rsp_bytes >> 2);
		return -1;
	}

	/* Read response data from the circular buffer */
	data = rsp_data;
	for (n = 0; n < ndata; ++n)
		*data++ = read_cb();

	/* Tell the ME that we have consumed the response */
	read_host_csr(&host);
	host.interrupt_status = 1;
	host.interrupt_generate = 1;
	write_host_csr(&host);

	return mei_wait_for_me_ready();
}

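/*
 * Convenience wrapper: send a request and wait for the matching response.
 * Callers build an MEI header addressed to the MKHI client and an MKHI
 * header naming the group/command; see mkhi_end_of_post() below for an
 * example with no payload in either direction.
 */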
static inline int mei_sendrecv(struct mei_header *mei, struct mkhi_header *mkhi,
			       void *req_data, void *rsp_data, int rsp_bytes)
{
	if (mei_send_msg(mei, mkhi, req_data) < 0)
		return -1;
	if (mei_recv_msg(mei, mkhi, rsp_data, rsp_bytes) < 0)
		return -1;
	return 0;
}

/* Send END OF POST message to the ME */
static int __unused mkhi_end_of_post(void)
{
	struct mkhi_header mkhi = {
		.group_id = MKHI_GROUP_ID_GEN,
		.command = MKHI_END_OF_POST,
	};
	struct mei_header mei = {
		.is_complete = 1,
		.host_address = MEI_HOST_ADDRESS,
		.client_address = MEI_ADDRESS_MKHI,
		.length = sizeof(mkhi),
	};

	/* Send request and wait for response */
	if (mei_sendrecv(&mei, &mkhi, NULL, NULL, 0) < 0) {
		printk(BIOS_ERR, "ME: END OF POST message failed\n");
		return -1;
	}

	printk(BIOS_INFO, "ME: END OF POST message successful\n");
	return 0;
}

#ifdef __SIMPLE_DEVICE__

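/*
 * Finalize handler for ME 7.x: send END OF POST if the ME is in normal
 * mode, then disable and hide the MEI PCI device.
 */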
static void intel_me7_finalize_smm(void)
{
	struct me_hfs hfs;
	u32 reg32;

	mei_base_address = (u32 *)
		(pci_read_config32(PCH_ME_DEV, PCI_BASE_ADDRESS_0) & ~0xf);

	/* S3 path will have hidden this device already */
	if (!mei_base_address || mei_base_address == (u32 *)0xfffffff0)
		return;

	/* Make sure ME is in a mode that expects EOP */
	reg32 = pci_read_config32(PCH_ME_DEV, PCI_ME_HFS);
	memcpy(&hfs, &reg32, sizeof(u32));

	/* Abort and leave device alone if not normal mode */
	if (hfs.fpt_bad ||
	    hfs.working_state != ME_HFS_CWS_NORMAL ||
	    hfs.operation_mode != ME_HFS_MODE_NORMAL)
		return;

	/* Try to send EOP command so ME stops accepting other commands */
	mkhi_end_of_post();

	/* Make sure IO is disabled */
	reg32 = pci_read_config32(PCH_ME_DEV, PCI_COMMAND);
	reg32 &= ~(PCI_COMMAND_MASTER |
		   PCI_COMMAND_MEMORY | PCI_COMMAND_IO);
	pci_write_config32(PCH_ME_DEV, PCI_COMMAND, reg32);

	/* Hide the PCI device */
	RCBA32_OR(FD2, PCH_DISABLE_MEI1);
}

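/* Dispatch to the ME 7.x or ME 8.x finalize handler based on the PCI ID */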
void intel_me_finalize_smm(void)
{
	u32 did = pci_read_config32(PCH_ME_DEV, PCI_VENDOR_ID);
	switch (did) {
	case 0x1c3a8086:
		intel_me7_finalize_smm();
		break;
	case 0x1e3a8086:
		intel_me8_finalize_smm();
		break;
	default:
		printk(BIOS_ERR, "No finalize handler for ME %08x.\n", did);
	}
}

#else /* !__SIMPLE_DEVICE__ */

/* Determine the path that we should take based on ME status */
static me_bios_path intel_me_path(struct device *dev)
{
	me_bios_path path = ME_DISABLE_BIOS_PATH;
	struct me_hfs hfs;
	struct me_gmes gmes;

	/* S3 wake skips all MKHI messages */
	if (acpi_is_wakeup_s3())
		return ME_S3WAKE_BIOS_PATH;

	pci_read_dword_ptr(dev, &hfs, PCI_ME_HFS);
	pci_read_dword_ptr(dev, &gmes, PCI_ME_GMES);

	/* Check and dump status */
	intel_me_status(&hfs, &gmes);

	/* Check Current Working State */
	switch (hfs.working_state) {
	case ME_HFS_CWS_NORMAL:
		path = ME_NORMAL_BIOS_PATH;
		break;
	case ME_HFS_CWS_REC:
		path = ME_RECOVERY_BIOS_PATH;
		break;
	default:
		path = ME_DISABLE_BIOS_PATH;
		break;
	}

	/* Check Current Operation Mode */
	switch (hfs.operation_mode) {
	case ME_HFS_MODE_NORMAL:
		break;
	case ME_HFS_MODE_DEBUG:
	case ME_HFS_MODE_DIS:
	case ME_HFS_MODE_OVER_JMPR:
	case ME_HFS_MODE_OVER_MEI:
	default:
		path = ME_DISABLE_BIOS_PATH;
		break;
	}

	/* Check for any error code and valid firmware */
	if (hfs.error_code || hfs.fpt_bad)
		path = ME_ERROR_BIOS_PATH;

	if (CONFIG(ELOG) && path != ME_NORMAL_BIOS_PATH) {
		struct elog_event_data_me_extended data = {
			.current_working_state = hfs.working_state,
			.operation_state = hfs.operation_state,
			.operation_mode = hfs.operation_mode,
			.error_code = hfs.error_code,
			.progress_code = gmes.progress_code,
			.current_pmevent = gmes.current_pmevent,
			.current_state = gmes.current_state,
		};
		elog_add_event_byte(ELOG_TYPE_MANAGEMENT_ENGINE, path);
		elog_add_event_raw(ELOG_TYPE_MANAGEMENT_ENGINE_EXT,
				   &data, sizeof(data));
	}

	return path;
}

/* Prepare ME for MEI messages */
static int intel_mei_setup(struct device *dev)
{
	struct resource *res;
	struct mei_csr host;
	u32 reg32;

	/* Find the MMIO base for the ME interface */
	res = find_resource(dev, PCI_BASE_ADDRESS_0);
	if (!res || res->base == 0 || res->size == 0) {
		printk(BIOS_DEBUG, "ME: MEI resource not present!\n");
		return -1;
	}
	mei_base_address = (u32 *)(uintptr_t)res->base;

	/* Ensure Memory and Bus Master bits are set */
	reg32 = pci_read_config32(dev, PCI_COMMAND);
	reg32 |= PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
	pci_write_config32(dev, PCI_COMMAND, reg32);

	/* Clean up status for next message */
	read_host_csr(&host);
	host.interrupt_generate = 1;
	host.ready = 1;
	host.reset = 0;
	write_host_csr(&host);

	return 0;
}

/* Read the Extend register hash of ME firmware */
static int intel_me_extend_valid(struct device *dev)
{
	struct me_heres status;
	u32 extend[8] = {0};
	int i, count = 0;

	pci_read_dword_ptr(dev, &status, PCI_ME_HERES);
	if (!status.extend_feature_present) {
		printk(BIOS_ERR, "ME: Extend Feature not present\n");
		return -1;
	}

	if (!status.extend_reg_valid) {
		printk(BIOS_ERR, "ME: Extend Register not valid\n");
		return -1;
	}

	switch (status.extend_reg_algorithm) {
	case PCI_ME_EXT_SHA1:
		count = 5;
		printk(BIOS_DEBUG, "ME: Extend SHA-1: ");
		break;
	case PCI_ME_EXT_SHA256:
		count = 8;
		printk(BIOS_DEBUG, "ME: Extend SHA-256: ");
		break;
	default:
		printk(BIOS_ERR, "ME: Extend Algorithm %d unknown\n",
		       status.extend_reg_algorithm);
		return -1;
	}

	for (i = 0; i < count; ++i) {
		extend[i] = pci_read_config32(dev, PCI_ME_HER(i));
		printk(BIOS_DEBUG, "%08x", extend[i]);
	}
	printk(BIOS_DEBUG, "\n");

#if CONFIG(CHROMEOS)
	/* Save hash in NVS for the OS to verify */
	chromeos_set_me_hash(extend, count);
#endif

	return 0;
}

/* Hide the ME virtual PCI devices */
static void intel_me_hide(struct device *dev)
{
	dev->enabled = 0;
	pch_enable(dev);
}

/* Check whether ME is present and do basic init */
static void intel_me_init(struct device *dev)
{
	me_bios_path path = intel_me_path(dev);

	/* Do initial setup and determine the BIOS path */
	printk(BIOS_NOTICE, "ME: BIOS path: %s\n", me_bios_path_values[path]);

	switch (path) {
	case ME_S3WAKE_BIOS_PATH:
		intel_me_hide(dev);
		break;

	case ME_NORMAL_BIOS_PATH:
		/* Validate the extend register */
		if (intel_me_extend_valid(dev) < 0)
			break; /* TODO: force recovery mode */

		/* Prepare MEI MMIO interface */
		if (intel_mei_setup(dev) < 0)
			break;

		/*
		 * Leave the ME unlocked in this path.
		 * It will be locked via SMI command later.
		 */
		break;

	case ME_ERROR_BIOS_PATH:
	case ME_RECOVERY_BIOS_PATH:
	case ME_DISABLE_BIOS_PATH:
	case ME_FIRMWARE_UPDATE_BIOS_PATH:
		break;
	}
}

static struct pci_operations pci_ops = {
	.set_subsystem = pci_dev_set_subsystem,
};

static struct device_operations device_ops = {
	.read_resources = pci_dev_read_resources,
	.set_resources = pci_dev_set_resources,
	.enable_resources = pci_dev_enable_resources,
	.init = intel_me_init,
	.ops_pci = &pci_ops,
};

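/* PCI device IDs this driver binds to (zero-terminated) */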
static const unsigned short pci_device_ids[] = {
	0x1c3a,
	PCI_DID_INTEL_IBEXPEAK_HECI1,
	0
};

static const struct pci_driver intel_me __pci_driver = {
	.ops = &device_ops,
	.vendor = PCI_VENDOR_ID_INTEL,
	.devices = pci_device_ids
};

#endif /* !__SIMPLE_DEVICE__ */