/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2011 The Chromium OS Authors. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; version 2 of
 * the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
 * MA 02110-1301 USA
 */

/*
 * This is a ramstage driver for the Intel Management Engine found in the
 * 6-series chipset. It handles the required boot-time messages over the
 * MMIO-based Management Engine Interface to tell the ME that the BIOS is
 * finished with POST. Additional messages are defined for debug but are
 * not used unless the console loglevel is high enough.
 */

#include <arch/acpi.h>
#include <arch/hlt.h>
#include <arch/io.h>
#include <console/console.h>
#include <device/pci_ids.h>
#include <device/pci_def.h>
#include <string.h>
#include <delay.h>
#include <elog.h>

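/*
 * In SMM there is no PCI device tree, so config space is reached through
 * the MMIO config helpers directly; ramstage uses the regular device/PCI
 * infrastructure instead.
 */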
#ifdef __SMM__
#include <arch/pci_mmio_cfg.h>
#else
# include <device/device.h>
# include <device/pci.h>
#endif

#include "me.h"
#include "pch.h"

#if CONFIG_CHROMEOS
#include <vendorcode/google/chromeos/gnvs.h>
#endif

#ifndef __SMM__
/* Path that the BIOS should take based on ME state */
static const char *me_bios_path_values[] = {
	[ME_NORMAL_BIOS_PATH] = "Normal",
	[ME_S3WAKE_BIOS_PATH] = "S3 Wake",
	[ME_ERROR_BIOS_PATH] = "Error",
	[ME_RECOVERY_BIOS_PATH] = "Recovery",
	[ME_DISABLE_BIOS_PATH] = "Disable",
	[ME_FIRMWARE_UPDATE_BIOS_PATH] = "Firmware Update",
};
#endif

/* MMIO base address for MEI interface */
static u32 mei_base_address;

#if CONFIG_DEBUG_INTEL_ME
static void mei_dump(void *ptr, int dword, int offset, const char *type)
{
	struct mei_csr *csr;

	printk(BIOS_SPEW, "%-9s[%02x] : ", type, offset);

	switch (offset) {
	case MEI_H_CSR:
	case MEI_ME_CSR_HA:
		csr = ptr;
		if (!csr) {
			printk(BIOS_SPEW, "ERROR: 0x%08x\n", dword);
			break;
		}
		printk(BIOS_SPEW, "cbd=%u cbrp=%02u cbwp=%02u ready=%u "
		       "reset=%u ig=%u is=%u ie=%u\n", csr->buffer_depth,
		       csr->buffer_read_ptr, csr->buffer_write_ptr,
		       csr->ready, csr->reset, csr->interrupt_generate,
		       csr->interrupt_status, csr->interrupt_enable);
		break;
	case MEI_ME_CB_RW:
	case MEI_H_CB_WW:
		printk(BIOS_SPEW, "CB: 0x%08x\n", dword);
		break;
	default:
		/* Unknown offset: dump the raw data (offset is already in the prefix) */
		printk(BIOS_SPEW, "0x%08x\n", dword);
		break;
	}
}
#else
# define mei_dump(ptr, dword, offset, type) do {} while (0)
#endif

/*
 * ME/MEI access helpers using memcpy to avoid aliasing.
 */

static inline void mei_read_dword_ptr(void *ptr, int offset)
{
	u32 dword = read32(mei_base_address + offset);
	memcpy(ptr, &dword, sizeof(dword));
	mei_dump(ptr, dword, offset, "READ");
}

static inline void mei_write_dword_ptr(void *ptr, int offset)
{
	u32 dword = 0;
	memcpy(&dword, ptr, sizeof(dword));
	write32(mei_base_address + offset, dword);
	mei_dump(ptr, dword, offset, "WRITE");
}

#ifndef __SMM__
static inline void pci_read_dword_ptr(device_t dev, void *ptr, int offset)
{
	u32 dword = pci_read_config32(dev, offset);
	memcpy(ptr, &dword, sizeof(dword));
	mei_dump(ptr, dword, offset, "PCI READ");
}
#endif

static inline void read_host_csr(struct mei_csr *csr)
{
	mei_read_dword_ptr(csr, MEI_H_CSR);
}

static inline void write_host_csr(struct mei_csr *csr)
{
	mei_write_dword_ptr(csr, MEI_H_CSR);
}

static inline void read_me_csr(struct mei_csr *csr)
{
	mei_read_dword_ptr(csr, MEI_ME_CSR_HA);
}

static inline void write_cb(u32 dword)
{
	write32(mei_base_address + MEI_H_CB_WW, dword);
	mei_dump(NULL, dword, MEI_H_CB_WW, "WRITE");
}

static inline u32 read_cb(void)
{
	u32 dword = read32(mei_base_address + MEI_ME_CB_RW);
	mei_dump(NULL, dword, MEI_ME_CB_RW, "READ");
	return dword;
}

/* Wait for ME ready bit to be asserted */
static int mei_wait_for_me_ready(void)
{
	struct mei_csr me;
	unsigned try = ME_RETRY;

	while (try--) {
		read_me_csr(&me);
		if (me.ready)
			return 0;
		udelay(ME_DELAY);
	}

	printk(BIOS_ERR, "ME: failed to become ready\n");
	return -1;
}

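/*
 * Reset the host and ME circular buffers and bring the host side of the
 * interface back to the ready state. Used below when the write buffer no
 * longer has room for an outgoing message.
 */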
static void mei_reset(void)
{
	struct mei_csr host;

	if (mei_wait_for_me_ready() < 0)
		return;

	/* Reset host and ME circular buffers for next message */
	read_host_csr(&host);
	host.reset = 1;
	host.interrupt_generate = 1;
	write_host_csr(&host);

	if (mei_wait_for_me_ready() < 0)
		return;

	/* Re-init and indicate host is ready */
	read_host_csr(&host);
	host.interrupt_generate = 1;
	host.ready = 1;
	host.reset = 0;
	write_host_csr(&host);
}

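/*
 * A request occupies consecutive dwords in the write window: the MEI
 * header, the MKHI header, then the payload padded up to a dword
 * boundary. The ME is notified by setting interrupt_generate in the
 * host CSR once everything has been written.
 */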
static int mei_send_msg(struct mei_header *mei, struct mkhi_header *mkhi,
			void *req_data)
{
	struct mei_csr host;
	unsigned ndata, n;
	u32 *data;

	/* Number of dwords to write, ignoring MKHI */
	ndata = mei->length >> 2;

	/* Pad non-dword aligned request message length */
	if (mei->length & 3)
		ndata++;
	if (!ndata) {
		printk(BIOS_DEBUG, "ME: request does not include MKHI\n");
		return -1;
	}
	ndata++; /* Add MEI header */

	/*
	 * Make sure there is still room left in the circular buffer.
	 * Reset the buffer pointers if the requested message will not fit.
	 */
	read_host_csr(&host);
	if ((host.buffer_depth - host.buffer_write_ptr) < ndata) {
		printk(BIOS_ERR, "ME: circular buffer full, resetting...\n");
		mei_reset();
		read_host_csr(&host);
	}

	/*
	 * This implementation does not handle splitting large messages
	 * across multiple transactions. Ensure the requested length
	 * will fit in the available circular buffer depth.
	 */
	if ((host.buffer_depth - host.buffer_write_ptr) < ndata) {
		printk(BIOS_ERR, "ME: message (%u) too large for buffer (%u)\n",
		       ndata + 2, host.buffer_depth);
		return -1;
	}

	/* Write MEI header */
	mei_write_dword_ptr(mei, MEI_H_CB_WW);
	ndata--;

	/* Write MKHI header */
	mei_write_dword_ptr(mkhi, MEI_H_CB_WW);
	ndata--;

	/* Write message data */
	data = req_data;
	for (n = 0; n < ndata; ++n)
		write_cb(*data++);

	/* Generate interrupt to the ME */
	read_host_csr(&host);
	host.interrupt_generate = 1;
	write_host_csr(&host);

	/* Make sure ME is ready after sending request data */
	return mei_wait_for_me_ready();
}

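/*
 * A response is consumed in the same layout: poll until the expected
 * number of dwords is available in the read window, validate the MEI
 * and MKHI headers against the request, copy the payload out, then
 * acknowledge by writing the interrupt status back to the host CSR.
 */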
static int mei_recv_msg(struct mei_header *mei, struct mkhi_header *mkhi,
			void *rsp_data, int rsp_bytes)
{
	struct mei_header mei_rsp;
	struct mkhi_header mkhi_rsp;
	struct mei_csr me, host;
	unsigned ndata, n;
	unsigned expected;
	u32 *data;

	/* Total number of dwords to read from circular buffer */
	expected = (rsp_bytes + sizeof(mei_rsp) + sizeof(mkhi_rsp)) >> 2;
	if (rsp_bytes & 3)
		expected++;

	/*
	 * The interrupt status bit does not appear to indicate that the
	 * message has actually been received. Instead we wait until the
	 * expected number of dwords are present in the circular buffer.
	 */
	for (n = ME_RETRY; n; --n) {
		read_me_csr(&me);
		if ((me.buffer_write_ptr - me.buffer_read_ptr) >= expected)
			break;
		udelay(ME_DELAY);
	}
	if (!n) {
		printk(BIOS_ERR, "ME: timeout waiting for data: expected "
		       "%u, available %u\n", expected,
		       me.buffer_write_ptr - me.buffer_read_ptr);
		return -1;
	}

	/* Read and verify MEI response header from the ME */
	mei_read_dword_ptr(&mei_rsp, MEI_ME_CB_RW);
	if (!mei_rsp.is_complete) {
		printk(BIOS_ERR, "ME: response is not complete\n");
		return -1;
	}

	/* Handle non-dword responses and expect at least MKHI header */
	ndata = mei_rsp.length >> 2;
	if (mei_rsp.length & 3)
		ndata++;
	if (ndata != (expected - 1)) {
		printk(BIOS_ERR, "ME: response is missing data\n");
		return -1;
	}

	/* Read and verify MKHI response header from the ME */
	mei_read_dword_ptr(&mkhi_rsp, MEI_ME_CB_RW);
	if (!mkhi_rsp.is_response ||
	    mkhi->group_id != mkhi_rsp.group_id ||
	    mkhi->command != mkhi_rsp.command) {
		printk(BIOS_ERR, "ME: invalid response, group %u ?= %u, "
		       "command %u ?= %u, is_response %u\n", mkhi->group_id,
		       mkhi_rsp.group_id, mkhi->command, mkhi_rsp.command,
		       mkhi_rsp.is_response);
		return -1;
	}
	ndata--; /* MKHI header has been read */

	/* Make sure caller passed a buffer with enough space */
	if (ndata != (rsp_bytes >> 2)) {
		printk(BIOS_ERR, "ME: not enough room in response buffer: "
		       "%u != %u\n", ndata, rsp_bytes >> 2);
		return -1;
	}

	/* Read response data from the circular buffer */
	data = rsp_data;
	for (n = 0; n < ndata; ++n)
		*data++ = read_cb();

	/* Tell the ME that we have consumed the response */
	read_host_csr(&host);
	host.interrupt_status = 1;
	host.interrupt_generate = 1;
	write_host_csr(&host);

	return mei_wait_for_me_ready();
}

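/* Issue a request and block until the matching response has been handled. */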
static inline int mei_sendrecv(struct mei_header *mei, struct mkhi_header *mkhi,
			       void *req_data, void *rsp_data, int rsp_bytes)
{
	if (mei_send_msg(mei, mkhi, req_data) < 0)
		return -1;
	if (mei_recv_msg(mei, mkhi, rsp_data, rsp_bytes) < 0)
		return -1;
	return 0;
}

#ifdef __SMM__
/* Send END OF POST message to the ME */
static int mkhi_end_of_post(void)
{
	struct mkhi_header mkhi = {
		.group_id = MKHI_GROUP_ID_GEN,
		.command = MKHI_END_OF_POST,
	};
	struct mei_header mei = {
		.is_complete = 1,
		.host_address = MEI_HOST_ADDRESS,
		.client_address = MEI_ADDRESS_MKHI,
		.length = sizeof(mkhi),
	};

	/* Send request and wait for response */
	if (mei_sendrecv(&mei, &mkhi, NULL, NULL, 0) < 0) {
		printk(BIOS_ERR, "ME: END OF POST message failed\n");
		return -1;
	}

	printk(BIOS_INFO, "ME: END OF POST message successful\n");
	return 0;
}
#endif

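/*
 * The firmware version and capability queries below are informational
 * only; they are compiled in only on builds where the console loglevel
 * is high enough to show their output.
 */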
#if (CONFIG_DEFAULT_CONSOLE_LOGLEVEL >= BIOS_DEBUG) && !defined(__SMM__) && (CONFIG_NORTHBRIDGE_INTEL_SANDYBRIDGE || CONFIG_NORTHBRIDGE_INTEL_IVYBRIDGE)
/* Get ME firmware version */
static int mkhi_get_fw_version(void)
{
	struct me_fw_version version;
	struct mkhi_header mkhi = {
		.group_id = MKHI_GROUP_ID_GEN,
		.command = MKHI_GET_FW_VERSION,
	};
	struct mei_header mei = {
		.is_complete = 1,
		.host_address = MEI_HOST_ADDRESS,
		.client_address = MEI_ADDRESS_MKHI,
		.length = sizeof(mkhi),
	};

	/* Send request and wait for response */
	if (mei_sendrecv(&mei, &mkhi, NULL, &version, sizeof(version)) < 0) {
		printk(BIOS_ERR, "ME: GET FW VERSION message failed\n");
		return -1;
	}

	printk(BIOS_INFO, "ME: Firmware Version %u.%u.%u.%u (code) "
	       "%u.%u.%u.%u (recovery)\n",
	       version.code_major, version.code_minor,
	       version.code_build_number, version.code_hot_fix,
	       version.recovery_major, version.recovery_minor,
	       version.recovery_build_number, version.recovery_hot_fix);

	return 0;
}

static inline void print_cap(const char *name, int state)
{
	printk(BIOS_DEBUG, "ME Capability: %-30s : %sabled\n",
	       name, state ? "en" : "dis");
}

/* Get ME Firmware Capabilities */
static int mkhi_get_fwcaps(void)
{
	u32 rule_id = 0;
	struct me_fwcaps cap;
	struct mkhi_header mkhi = {
		.group_id = MKHI_GROUP_ID_FWCAPS,
		.command = MKHI_FWCAPS_GET_RULE,
	};
	struct mei_header mei = {
		.is_complete = 1,
		.host_address = MEI_HOST_ADDRESS,
		.client_address = MEI_ADDRESS_MKHI,
		.length = sizeof(mkhi) + sizeof(rule_id),
	};

	/* Send request and wait for response */
	if (mei_sendrecv(&mei, &mkhi, &rule_id, &cap, sizeof(cap)) < 0) {
		printk(BIOS_ERR, "ME: GET FWCAPS message failed\n");
		return -1;
	}

	print_cap("Full Network manageability", cap.caps_sku.full_net);
	print_cap("Regular Network manageability", cap.caps_sku.std_net);
	print_cap("Manageability", cap.caps_sku.manageability);
	print_cap("Small business technology", cap.caps_sku.small_business);
	print_cap("Level III manageability", cap.caps_sku.l3manageability);
	print_cap("IntelR Anti-Theft (AT)", cap.caps_sku.intel_at);
	print_cap("IntelR Capability Licensing Service (CLS)",
		  cap.caps_sku.intel_cls);
	print_cap("IntelR Power Sharing Technology (MPC)",
		  cap.caps_sku.intel_mpc);
	print_cap("ICC Over Clocking", cap.caps_sku.icc_over_clocking);
	print_cap("Protected Audio Video Path (PAVP)", cap.caps_sku.pavp);
	print_cap("IPV6", cap.caps_sku.ipv6);
	print_cap("KVM Remote Control (KVM)", cap.caps_sku.kvm);
	print_cap("Outbreak Containment Heuristic (OCH)", cap.caps_sku.och);
	print_cap("Virtual LAN (VLAN)", cap.caps_sku.vlan);
	print_cap("TLS", cap.caps_sku.tls);
	print_cap("Wireless LAN (WLAN)", cap.caps_sku.wlan);

	return 0;
}
#endif

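/*
 * ME-initiated global reset via the MKHI CBM group (resets host and ME
 * together); this block is currently compiled out.
 */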
#if CONFIG_CHROMEOS && 0 /* DISABLED */
/* Tell ME to issue a global reset */
int mkhi_global_reset(void)
{
	struct me_global_reset reset = {
		.request_origin = GLOBAL_RESET_BIOS_POST,
		.reset_type = CBM_RR_GLOBAL_RESET,
	};
	struct mkhi_header mkhi = {
		.group_id = MKHI_GROUP_ID_CBM,
		.command = MKHI_GLOBAL_RESET,
	};
	struct mei_header mei = {
		.is_complete = 1,
		.length = sizeof(mkhi) + sizeof(reset),
		.host_address = MEI_HOST_ADDRESS,
		.client_address = MEI_ADDRESS_MKHI,
	};

	printk(BIOS_NOTICE, "ME: Requesting global reset\n");

	/* Send request and wait for response */
	if (mei_sendrecv(&mei, &mkhi, &reset, NULL, 0) < 0) {
		/* No response means reset will happen shortly... */
		hlt();
	}

	/* If the ME responded it rejected the reset request */
	printk(BIOS_ERR, "ME: Global Reset failed\n");
	return -1;
}
#endif

#ifdef __SMM__
static void intel_me7_finalize_smm(void)
{
	struct me_hfs hfs;
	u32 reg32;

	mei_base_address =
		pci_read_config32(PCH_ME_DEV, PCI_BASE_ADDRESS_0) & ~0xf;

	/* S3 path will have hidden this device already */
	if (!mei_base_address || mei_base_address == 0xfffffff0)
		return;

	/* Make sure ME is in a mode that expects EOP */
	reg32 = pci_read_config32(PCH_ME_DEV, PCI_ME_HFS);
	memcpy(&hfs, &reg32, sizeof(u32));

	/* Abort and leave device alone if not normal mode */
	if (hfs.fpt_bad ||
	    hfs.working_state != ME_HFS_CWS_NORMAL ||
	    hfs.operation_mode != ME_HFS_MODE_NORMAL)
		return;

	/* Try to send EOP command so ME stops accepting other commands */
	mkhi_end_of_post();

	/* Make sure IO is disabled */
	reg32 = pci_read_config32(PCH_ME_DEV, PCI_COMMAND);
	reg32 &= ~(PCI_COMMAND_MASTER |
		   PCI_COMMAND_MEMORY | PCI_COMMAND_IO);
	pci_write_config32(PCH_ME_DEV, PCI_COMMAND, reg32);

	/* Hide the PCI device */
	RCBA32_OR(FD2, PCH_DISABLE_MEI1);
}

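/* Dispatch to the finalize handler that matches the detected ME generation. */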
void intel_me_finalize_smm(void)
{
	u32 did = pci_read_config32(PCH_ME_DEV, PCI_VENDOR_ID);
	switch (did) {
	case 0x1c3a8086:
		intel_me7_finalize_smm();
		break;
	case 0x1e3a8086:
		intel_me8_finalize_smm();
		break;
	default:
		printk(BIOS_ERR, "No finalize handler for ME %08x.\n", did);
	}
}
#else /* !__SMM__ */

/* Determine the path that we should take based on ME status */
static me_bios_path intel_me_path(device_t dev)
{
	me_bios_path path = ME_DISABLE_BIOS_PATH;
	struct me_hfs hfs;
	struct me_gmes gmes;

#if CONFIG_HAVE_ACPI_RESUME
	/* S3 wake skips all MKHI messages */
	if (acpi_slp_type == 3) {
		return ME_S3WAKE_BIOS_PATH;
	}
#endif

	pci_read_dword_ptr(dev, &hfs, PCI_ME_HFS);
	pci_read_dword_ptr(dev, &gmes, PCI_ME_GMES);

	/* Check and dump status */
	intel_me_status(&hfs, &gmes);

	/* Check Current Working State */
	switch (hfs.working_state) {
	case ME_HFS_CWS_NORMAL:
		path = ME_NORMAL_BIOS_PATH;
		break;
	case ME_HFS_CWS_REC:
		path = ME_RECOVERY_BIOS_PATH;
		break;
	default:
		path = ME_DISABLE_BIOS_PATH;
		break;
	}

	/* Check Current Operation Mode */
	switch (hfs.operation_mode) {
	case ME_HFS_MODE_NORMAL:
		break;
	case ME_HFS_MODE_DEBUG:
	case ME_HFS_MODE_DIS:
	case ME_HFS_MODE_OVER_JMPR:
	case ME_HFS_MODE_OVER_MEI:
	default:
		path = ME_DISABLE_BIOS_PATH;
		break;
	}

	/* Check for any error code and valid firmware */
	if (hfs.error_code || hfs.fpt_bad)
		path = ME_ERROR_BIOS_PATH;

#if CONFIG_ELOG
	if (path != ME_NORMAL_BIOS_PATH) {
		struct elog_event_data_me_extended data = {
			.current_working_state = hfs.working_state,
			.operation_state = hfs.operation_state,
			.operation_mode = hfs.operation_mode,
			.error_code = hfs.error_code,
			.progress_code = gmes.progress_code,
			.current_pmevent = gmes.current_pmevent,
			.current_state = gmes.current_state,
		};
		elog_add_event_byte(ELOG_TYPE_MANAGEMENT_ENGINE, path);
		elog_add_event_raw(ELOG_TYPE_MANAGEMENT_ENGINE_EXT,
				   &data, sizeof(data));
	}
#endif

	return path;
}

/* Prepare ME for MEI messages */
static int intel_mei_setup(device_t dev)
{
	struct resource *res;
	struct mei_csr host;
	u32 reg32;

	/* Find the MMIO base for the ME interface */
	res = find_resource(dev, PCI_BASE_ADDRESS_0);
	if (!res || res->base == 0 || res->size == 0) {
		printk(BIOS_DEBUG, "ME: MEI resource not present!\n");
		return -1;
	}
	mei_base_address = res->base;

	/* Ensure Memory and Bus Master bits are set */
	reg32 = pci_read_config32(dev, PCI_COMMAND);
	reg32 |= PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
	pci_write_config32(dev, PCI_COMMAND, reg32);

	/* Clean up status for next message */
	read_host_csr(&host);
	host.interrupt_generate = 1;
	host.ready = 1;
	host.reset = 0;
	write_host_csr(&host);

	return 0;
}

/* Read the Extend register hash of ME firmware */
static int intel_me_extend_valid(device_t dev)
{
	struct me_heres status;
	u32 extend[8] = {0};
	int i, count = 0;

	pci_read_dword_ptr(dev, &status, PCI_ME_HERES);
	if (!status.extend_feature_present) {
		printk(BIOS_ERR, "ME: Extend Feature not present\n");
		return -1;
	}

	if (!status.extend_reg_valid) {
		printk(BIOS_ERR, "ME: Extend Register not valid\n");
		return -1;
	}

	switch (status.extend_reg_algorithm) {
	case PCI_ME_EXT_SHA1:
		count = 5;
		printk(BIOS_DEBUG, "ME: Extend SHA-1: ");
		break;
	case PCI_ME_EXT_SHA256:
		count = 8;
		printk(BIOS_DEBUG, "ME: Extend SHA-256: ");
		break;
	default:
		printk(BIOS_ERR, "ME: Extend Algorithm %d unknown\n",
		       status.extend_reg_algorithm);
		return -1;
	}

	for (i = 0; i < count; ++i) {
		extend[i] = pci_read_config32(dev, PCI_ME_HER(i));
		printk(BIOS_DEBUG, "%08x", extend[i]);
	}
	printk(BIOS_DEBUG, "\n");

#if CONFIG_CHROMEOS
	/* Save hash in NVS for the OS to verify */
	chromeos_set_me_hash(extend, count);
#endif

	return 0;
}

/* Hide the ME virtual PCI devices */
static void intel_me_hide(device_t dev)
{
	dev->enabled = 0;
	pch_enable(dev);
}

/* Check whether ME is present and do basic init */
static void intel_me_init(device_t dev)
{
	me_bios_path path = intel_me_path(dev);

	/* Do initial setup and determine the BIOS path */
	printk(BIOS_NOTICE, "ME: BIOS path: %s\n", me_bios_path_values[path]);

	switch (path) {
	case ME_S3WAKE_BIOS_PATH:
		intel_me_hide(dev);
		break;

	case ME_NORMAL_BIOS_PATH:
		/* Validate the extend register */
		if (intel_me_extend_valid(dev) < 0)
			break; /* TODO: force recovery mode */

		/* Prepare MEI MMIO interface */
		if (intel_mei_setup(dev) < 0)
			break;

#if (CONFIG_DEFAULT_CONSOLE_LOGLEVEL >= BIOS_DEBUG) && (CONFIG_NORTHBRIDGE_INTEL_SANDYBRIDGE || CONFIG_NORTHBRIDGE_INTEL_IVYBRIDGE)
		/* Print ME firmware version */
		mkhi_get_fw_version();
		/* Print ME firmware capabilities */
		mkhi_get_fwcaps();
#endif

		/*
		 * Leave the ME unlocked in this path.
		 * It will be locked via SMI command later.
		 */
		break;

	case ME_ERROR_BIOS_PATH:
	case ME_RECOVERY_BIOS_PATH:
	case ME_DISABLE_BIOS_PATH:
	case ME_FIRMWARE_UPDATE_BIOS_PATH:
		break;
	}
}

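/*
 * Default the PCI subsystem IDs to the device's own vendor/device IDs
 * when the mainboard does not provide them.
 */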
static void set_subsystem(device_t dev, unsigned vendor, unsigned device)
{
	if (!vendor || !device) {
		pci_write_config32(dev, PCI_SUBSYSTEM_VENDOR_ID,
				   pci_read_config32(dev, PCI_VENDOR_ID));
	} else {
		pci_write_config32(dev, PCI_SUBSYSTEM_VENDOR_ID,
				   ((device & 0xffff) << 16) | (vendor & 0xffff));
	}
}

static struct pci_operations pci_ops = {
	.set_subsystem = set_subsystem,
};

static struct device_operations device_ops = {
	.read_resources = pci_dev_read_resources,
	.set_resources = pci_dev_set_resources,
	.enable_resources = pci_dev_enable_resources,
	.init = intel_me_init,
	.scan_bus = scan_static_bus,
	.ops_pci = &pci_ops,
};

static const unsigned short pci_device_ids[] = { 0x1c3a, 0x3b64,
						 0 };

static const struct pci_driver intel_me __pci_driver = {
	.ops = &device_ops,
	.vendor = PCI_VENDOR_ID_INTEL,
	.devices = pci_device_ids
};

#endif /* !__SMM__ */