/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2011 The Chromium OS Authors. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; version 2 of
 * the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * This is a ramstage driver for the Intel Management Engine found in the
 * 6-series chipset. It handles the required boot-time messages over the
 * MMIO-based Management Engine Interface to tell the ME that the BIOS is
 * finished with POST. Additional messages are defined for debug but are
 * not used unless the console loglevel is high enough.
 */

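/*
 * This file is built for both ramstage and SMM. The ramstage driver
 * (!__SMM__) determines the BIOS path from the ME state, prints status,
 * version and capability information, and prepares the MEI interface;
 * the SMM portion (__SMM__) sends the END OF POST message and hides the
 * MEI PCI device during finalization.
 */
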
#include <arch/acpi.h>
#include <arch/io.h>
#include <console/console.h>
#include <device/pci_ids.h>
#include <device/pci_def.h>
#include <string.h>
#include <delay.h>
#include <elog.h>
#include <halt.h>

#ifndef __SMM__
#include <device/device.h>
#include <device/pci.h>
#endif

#include "me.h"
#include "pch.h"

#if IS_ENABLED(CONFIG_CHROMEOS)
#include <vendorcode/google/chromeos/gnvs.h>
#endif

#ifndef __SMM__
/* Path that the BIOS should take based on ME state */
static const char *me_bios_path_values[] = {
	[ME_NORMAL_BIOS_PATH] = "Normal",
	[ME_S3WAKE_BIOS_PATH] = "S3 Wake",
	[ME_ERROR_BIOS_PATH] = "Error",
	[ME_RECOVERY_BIOS_PATH] = "Recovery",
	[ME_DISABLE_BIOS_PATH] = "Disable",
	[ME_FIRMWARE_UPDATE_BIOS_PATH] = "Firmware Update",
};
#endif

/* MMIO base address for MEI interface */
static u32 *mei_base_address;

#if IS_ENABLED(CONFIG_DEBUG_INTEL_ME)
static void mei_dump(void *ptr, int dword, int offset, const char *type)
{
	struct mei_csr *csr;

	printk(BIOS_SPEW, "%-9s[%02x] : ", type, offset);

	switch (offset) {
	case MEI_H_CSR:
	case MEI_ME_CSR_HA:
		csr = ptr;
		if (!csr) {
			printk(BIOS_SPEW, "ERROR: 0x%08x\n", dword);
			break;
		}
		printk(BIOS_SPEW, "cbd=%u cbrp=%02u cbwp=%02u ready=%u "
		       "reset=%u ig=%u is=%u ie=%u\n", csr->buffer_depth,
		       csr->buffer_read_ptr, csr->buffer_write_ptr,
		       csr->ready, csr->reset, csr->interrupt_generate,
		       csr->interrupt_status, csr->interrupt_enable);
		break;
	case MEI_ME_CB_RW:
	case MEI_H_CB_WW:
		printk(BIOS_SPEW, "CB: 0x%08x\n", dword);
		break;
	default:
		printk(BIOS_SPEW, "0x%08x\n", dword);
		break;
	}
}
#else
# define mei_dump(ptr,dword,offset,type) do {} while (0)
#endif

/*
 * ME/MEI access helpers using memcpy to avoid aliasing.
 */

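/*
 * The MEI registers are mirrored into structs that match the 32-bit
 * register layout (struct mei_csr, struct mei_header). All accesses below
 * go through a plain u32 plus memcpy() rather than pointer casts, so the
 * MMIO value is never type-punned through incompatible pointer types,
 * which would violate C strict-aliasing rules.
 */
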
static inline void mei_read_dword_ptr(void *ptr, int offset)
{
	u32 dword = read32(mei_base_address + (offset/sizeof(u32)));
	memcpy(ptr, &dword, sizeof(dword));
	mei_dump(ptr, dword, offset, "READ");
}

static inline void mei_write_dword_ptr(void *ptr, int offset)
{
	u32 dword = 0;
	memcpy(&dword, ptr, sizeof(dword));
	write32(mei_base_address + (offset/sizeof(u32)), dword);
	mei_dump(ptr, dword, offset, "WRITE");
}

#ifndef __SMM__
static inline void pci_read_dword_ptr(struct device *dev, void *ptr, int offset)
{
	u32 dword = pci_read_config32(dev, offset);
	memcpy(ptr, &dword, sizeof(dword));
	mei_dump(ptr, dword, offset, "PCI READ");
}
#endif

static inline void read_host_csr(struct mei_csr *csr)
{
	mei_read_dword_ptr(csr, MEI_H_CSR);
}

static inline void write_host_csr(struct mei_csr *csr)
{
	mei_write_dword_ptr(csr, MEI_H_CSR);
}

static inline void read_me_csr(struct mei_csr *csr)
{
	mei_read_dword_ptr(csr, MEI_ME_CSR_HA);
}

static inline void write_cb(u32 dword)
{
	write32(mei_base_address + (MEI_H_CB_WW/sizeof(u32)), dword);
	mei_dump(NULL, dword, MEI_H_CB_WW, "WRITE");
}

static inline u32 read_cb(void)
{
	u32 dword = read32(mei_base_address + (MEI_ME_CB_RW/sizeof(u32)));
	mei_dump(NULL, dword, MEI_ME_CB_RW, "READ");
	return dword;
}

/* Wait for ME ready bit to be asserted */
static int mei_wait_for_me_ready(void)
{
	struct mei_csr me;
	unsigned try = ME_RETRY;

	while (try--) {
		read_me_csr(&me);
		if (me.ready)
			return 0;
		udelay(ME_DELAY);
	}

	printk(BIOS_ERR, "ME: failed to become ready\n");
	return -1;
}

static void mei_reset(void)
{
	struct mei_csr host;

	if (mei_wait_for_me_ready() < 0)
		return;

	/* Reset host and ME circular buffers for next message */
	read_host_csr(&host);
	host.reset = 1;
	host.interrupt_generate = 1;
	write_host_csr(&host);

	if (mei_wait_for_me_ready() < 0)
		return;

	/* Re-init and indicate host is ready */
	read_host_csr(&host);
	host.interrupt_generate = 1;
	host.ready = 1;
	host.reset = 0;
	write_host_csr(&host);
}

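/*
 * A request occupies one dword per header plus the payload in the host
 * circular buffer:
 *
 *   dword 0     MEI header (host/client address, length, complete flag)
 *   dword 1     MKHI header (group ID, command)
 *   dword 2..n  request payload, if any
 *
 * After the last dword is written, interrupt_generate is set in the host
 * CSR to notify the ME that a message is pending.
 */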
static int mei_send_msg(struct mei_header *mei, struct mkhi_header *mkhi,
			void *req_data)
{
	struct mei_csr host;
	unsigned ndata, n;
	u32 *data;

	/* Number of dwords to write, excluding the MEI header */
	ndata = mei->length >> 2;

	/* Pad non-dword aligned request message length */
	if (mei->length & 3)
		ndata++;
	if (!ndata) {
		printk(BIOS_DEBUG, "ME: request does not include MKHI\n");
		return -1;
	}
	ndata++; /* Add MEI header */

	/*
	 * Make sure there is still room left in the circular buffer.
	 * Reset the buffer pointers if the requested message will not fit.
	 */
	read_host_csr(&host);
	if ((host.buffer_depth - host.buffer_write_ptr) < ndata) {
		printk(BIOS_ERR, "ME: circular buffer full, resetting...\n");
		mei_reset();
		read_host_csr(&host);
	}

	/*
	 * This implementation does not handle splitting large messages
	 * across multiple transactions. Ensure the requested length
	 * will fit in the available circular buffer depth.
	 */
	if ((host.buffer_depth - host.buffer_write_ptr) < ndata) {
		printk(BIOS_ERR, "ME: message (%u) too large for buffer (%u)\n",
		       ndata + 2, host.buffer_depth);
		return -1;
	}

	/* Write MEI header */
	mei_write_dword_ptr(mei, MEI_H_CB_WW);
	ndata--;

	/* Write MKHI header */
	mei_write_dword_ptr(mkhi, MEI_H_CB_WW);
	ndata--;

	/* Write message data */
	data = req_data;
	for (n = 0; n < ndata; ++n)
		write_cb(*data++);

	/* Generate interrupt to the ME */
	read_host_csr(&host);
	host.interrupt_generate = 1;
	write_host_csr(&host);

	/* Make sure ME is ready after sending request data */
	return mei_wait_for_me_ready();
}

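/*
 * A response is read back from the ME circular buffer in the same layout:
 * MEI header, MKHI header (verified against the request's group ID and
 * command), then rsp_bytes of payload copied into rsp_data.
 */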
static int mei_recv_msg(struct mei_header *mei, struct mkhi_header *mkhi,
			void *rsp_data, int rsp_bytes)
{
	struct mei_header mei_rsp;
	struct mkhi_header mkhi_rsp;
	struct mei_csr me, host;
	unsigned ndata, n;
	unsigned expected;
	u32 *data;

	/* Total number of dwords to read from circular buffer */
	expected = (rsp_bytes + sizeof(mei_rsp) + sizeof(mkhi_rsp)) >> 2;
	if (rsp_bytes & 3)
		expected++;

	/*
	 * The interrupt status bit does not appear to indicate that the
	 * message has actually been received. Instead we wait until the
	 * expected number of dwords are present in the circular buffer.
	 */
	for (n = ME_RETRY; n; --n) {
		read_me_csr(&me);
		if ((me.buffer_write_ptr - me.buffer_read_ptr) >= expected)
			break;
		udelay(ME_DELAY);
	}
	if (!n) {
		printk(BIOS_ERR, "ME: timeout waiting for data: expected "
		       "%u, available %u\n", expected,
		       me.buffer_write_ptr - me.buffer_read_ptr);
		return -1;
	}

	/* Read and verify MEI response header from the ME */
	mei_read_dword_ptr(&mei_rsp, MEI_ME_CB_RW);
	if (!mei_rsp.is_complete) {
		printk(BIOS_ERR, "ME: response is not complete\n");
		return -1;
	}

	/* Handle non-dword responses and expect at least MKHI header */
	ndata = mei_rsp.length >> 2;
	if (mei_rsp.length & 3)
		ndata++;
	if (ndata != (expected - 1)) {
		printk(BIOS_ERR, "ME: response is missing data\n");
		return -1;
	}

	/* Read and verify MKHI response header from the ME */
	mei_read_dword_ptr(&mkhi_rsp, MEI_ME_CB_RW);
	if (!mkhi_rsp.is_response ||
	    mkhi->group_id != mkhi_rsp.group_id ||
	    mkhi->command != mkhi_rsp.command) {
		printk(BIOS_ERR, "ME: invalid response, group %u ?= %u, "
		       "command %u ?= %u, is_response %u\n", mkhi->group_id,
		       mkhi_rsp.group_id, mkhi->command, mkhi_rsp.command,
		       mkhi_rsp.is_response);
		return -1;
	}
	ndata--; /* MKHI header has been read */

	/* Make sure caller passed a buffer with enough space */
	if (ndata != (rsp_bytes >> 2)) {
		printk(BIOS_ERR, "ME: not enough room in response buffer: "
		       "%u != %u\n", ndata, rsp_bytes >> 2);
		return -1;
	}

	/* Read response data from the circular buffer */
	data = rsp_data;
	for (n = 0; n < ndata; ++n)
		*data++ = read_cb();

	/* Tell the ME that we have consumed the response */
	read_host_csr(&host);
	host.interrupt_status = 1;
	host.interrupt_generate = 1;
	write_host_csr(&host);

	return mei_wait_for_me_ready();
}

static inline int mei_sendrecv(struct mei_header *mei, struct mkhi_header *mkhi,
			       void *req_data, void *rsp_data, int rsp_bytes)
{
	if (mei_send_msg(mei, mkhi, req_data) < 0)
		return -1;
	if (mei_recv_msg(mei, mkhi, rsp_data, rsp_bytes) < 0)
		return -1;
	return 0;
}

#ifdef __SMM__
/* Send END OF POST message to the ME */
static int mkhi_end_of_post(void)
{
	struct mkhi_header mkhi = {
		.group_id = MKHI_GROUP_ID_GEN,
		.command = MKHI_END_OF_POST,
	};
	struct mei_header mei = {
		.is_complete = 1,
		.host_address = MEI_HOST_ADDRESS,
		.client_address = MEI_ADDRESS_MKHI,
		.length = sizeof(mkhi),
	};

	/* Send request and wait for response */
	if (mei_sendrecv(&mei, &mkhi, NULL, NULL, 0) < 0) {
		printk(BIOS_ERR, "ME: END OF POST message failed\n");
		return -1;
	}

	printk(BIOS_INFO, "ME: END OF POST message successful\n");
	return 0;
}
#endif

#if (CONFIG_DEFAULT_CONSOLE_LOGLEVEL >= BIOS_DEBUG) && !defined(__SMM__)
/* Get ME firmware version */
static int mkhi_get_fw_version(void)
{
	struct me_fw_version version;
	struct mkhi_header mkhi = {
		.group_id = MKHI_GROUP_ID_GEN,
		.command = MKHI_GET_FW_VERSION,
	};
	struct mei_header mei = {
		.is_complete = 1,
		.host_address = MEI_HOST_ADDRESS,
		.client_address = MEI_ADDRESS_MKHI,
		.length = sizeof(mkhi),
	};

	/* Send request and wait for response */
	if (mei_sendrecv(&mei, &mkhi, NULL, &version, sizeof(version)) < 0) {
		printk(BIOS_ERR, "ME: GET FW VERSION message failed\n");
		return -1;
	}

	printk(BIOS_INFO, "ME: Firmware Version %u.%u.%u.%u (code) "
	       "%u.%u.%u.%u (recovery)\n",
	       version.code_major, version.code_minor,
	       version.code_build_number, version.code_hot_fix,
	       version.recovery_major, version.recovery_minor,
	       version.recovery_build_number, version.recovery_hot_fix);

	return 0;
}

static inline void print_cap(const char *name, int state)
{
	printk(BIOS_DEBUG, "ME Capability: %-30s : %sabled\n",
	       name, state ? "en" : "dis");
}

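/*
 * The FWCAPS GET_RULE command carries a 32-bit rule ID as its request
 * payload; this driver asks for rule 0 and interprets the reply as the
 * capability SKU bitmap (struct me_fwcaps). The MEI length therefore
 * covers both the MKHI header and the rule ID dword.
 */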
/* Get ME Firmware Capabilities */
static int mkhi_get_fwcaps(void)
{
	u32 rule_id = 0;
	struct me_fwcaps cap;
	struct mkhi_header mkhi = {
		.group_id = MKHI_GROUP_ID_FWCAPS,
		.command = MKHI_FWCAPS_GET_RULE,
	};
	struct mei_header mei = {
		.is_complete = 1,
		.host_address = MEI_HOST_ADDRESS,
		.client_address = MEI_ADDRESS_MKHI,
		.length = sizeof(mkhi) + sizeof(rule_id),
	};

	/* Send request and wait for response */
	if (mei_sendrecv(&mei, &mkhi, &rule_id, &cap, sizeof(cap)) < 0) {
		printk(BIOS_ERR, "ME: GET FWCAPS message failed\n");
		return -1;
	}

	print_cap("Full Network manageability", cap.caps_sku.full_net);
	print_cap("Regular Network manageability", cap.caps_sku.std_net);
	print_cap("Manageability", cap.caps_sku.manageability);
	print_cap("Small business technology", cap.caps_sku.small_business);
	print_cap("Level III manageability", cap.caps_sku.l3manageability);
	print_cap("Intel(R) Anti-Theft (AT)", cap.caps_sku.intel_at);
	print_cap("Intel(R) Capability Licensing Service (CLS)",
		  cap.caps_sku.intel_cls);
	print_cap("Intel(R) Power Sharing Technology (MPC)",
		  cap.caps_sku.intel_mpc);
	print_cap("ICC Over Clocking", cap.caps_sku.icc_over_clocking);
	print_cap("Protected Audio Video Path (PAVP)", cap.caps_sku.pavp);
	print_cap("IPV6", cap.caps_sku.ipv6);
	print_cap("KVM Remote Control (KVM)", cap.caps_sku.kvm);
	print_cap("Outbreak Containment Heuristic (OCH)", cap.caps_sku.och);
	print_cap("Virtual LAN (VLAN)", cap.caps_sku.vlan);
	print_cap("TLS", cap.caps_sku.tls);
	print_cap("Wireless LAN (WLAN)", cap.caps_sku.wlan);

	return 0;
}
#endif

#if IS_ENABLED(CONFIG_CHROMEOS) && 0 /* DISABLED */
/* Tell ME to issue a global reset */
int mkhi_global_reset(void)
{
	struct me_global_reset reset = {
		.request_origin = GLOBAL_RESET_BIOS_POST,
		.reset_type = CBM_RR_GLOBAL_RESET,
	};
	struct mkhi_header mkhi = {
		.group_id = MKHI_GROUP_ID_CBM,
		.command = MKHI_GLOBAL_RESET,
	};
	struct mei_header mei = {
		.is_complete = 1,
		.length = sizeof(mkhi) + sizeof(reset),
		.host_address = MEI_HOST_ADDRESS,
		.client_address = MEI_ADDRESS_MKHI,
	};

	printk(BIOS_NOTICE, "ME: Requesting global reset\n");

	/* Send request and wait for response */
	if (mei_sendrecv(&mei, &mkhi, &reset, NULL, 0) < 0) {
		/* No response means reset will happen shortly... */
		halt();
	}

	/* If the ME responded it rejected the reset request */
	printk(BIOS_ERR, "ME: Global Reset failed\n");
	return -1;
}
#endif

#ifdef __SMM__
static void intel_me7_finalize_smm(void)
{
	struct me_hfs hfs;
	u32 reg32;

	mei_base_address = (u32 *)
		(pci_read_config32(PCH_ME_DEV, PCI_BASE_ADDRESS_0) & ~0xf);

	/* S3 path will have hidden this device already */
	if (!mei_base_address || mei_base_address == (u32 *)0xfffffff0)
		return;

	/* Make sure ME is in a mode that expects EOP */
	reg32 = pci_read_config32(PCH_ME_DEV, PCI_ME_HFS);
	memcpy(&hfs, &reg32, sizeof(u32));

	/* Abort and leave device alone if not normal mode */
	if (hfs.fpt_bad ||
	    hfs.working_state != ME_HFS_CWS_NORMAL ||
	    hfs.operation_mode != ME_HFS_MODE_NORMAL)
		return;

	/* Try to send EOP command so ME stops accepting other commands */
	mkhi_end_of_post();

	/* Make sure IO is disabled */
	reg32 = pci_read_config32(PCH_ME_DEV, PCI_COMMAND);
	reg32 &= ~(PCI_COMMAND_MASTER |
		   PCI_COMMAND_MEMORY | PCI_COMMAND_IO);
	pci_write_config32(PCH_ME_DEV, PCI_COMMAND, reg32);

	/* Hide the PCI device */
	RCBA32_OR(FD2, PCH_DISABLE_MEI1);
}

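/*
 * SMM finalize entry point. The handler is picked by the MEI device ID:
 * 0x1c3a (6-series PCH, ME 7.x) is handled by intel_me7_finalize_smm()
 * above, while 0x1e3a (7-series PCH, ME 8.x) is dispatched to
 * intel_me8_finalize_smm().
 */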
void intel_me_finalize_smm(void)
{
	u32 did = pci_read_config32(PCH_ME_DEV, PCI_VENDOR_ID);
	switch (did) {
	case 0x1c3a8086:
		intel_me7_finalize_smm();
		break;
	case 0x1e3a8086:
		intel_me8_finalize_smm();
		break;
	default:
		printk(BIOS_ERR, "No finalize handler for ME %08x.\n", did);
	}
}
#else /* !__SMM__ */

/* Determine the path that we should take based on ME status */
static me_bios_path intel_me_path(struct device *dev)
{
	me_bios_path path = ME_DISABLE_BIOS_PATH;
	struct me_hfs hfs;
	struct me_gmes gmes;

	/* S3 wake skips all MKHI messages */
	if (acpi_is_wakeup_s3())
		return ME_S3WAKE_BIOS_PATH;

	pci_read_dword_ptr(dev, &hfs, PCI_ME_HFS);
	pci_read_dword_ptr(dev, &gmes, PCI_ME_GMES);

	/* Check and dump status */
	intel_me_status(&hfs, &gmes);

	/* Check Current Working State */
	switch (hfs.working_state) {
	case ME_HFS_CWS_NORMAL:
		path = ME_NORMAL_BIOS_PATH;
		break;
	case ME_HFS_CWS_REC:
		path = ME_RECOVERY_BIOS_PATH;
		break;
	default:
		path = ME_DISABLE_BIOS_PATH;
		break;
	}

	/* Check Current Operation Mode */
	switch (hfs.operation_mode) {
	case ME_HFS_MODE_NORMAL:
		break;
	case ME_HFS_MODE_DEBUG:
	case ME_HFS_MODE_DIS:
	case ME_HFS_MODE_OVER_JMPR:
	case ME_HFS_MODE_OVER_MEI:
	default:
		path = ME_DISABLE_BIOS_PATH;
		break;
	}

	/* Check for any error code and valid firmware */
	if (hfs.error_code || hfs.fpt_bad)
		path = ME_ERROR_BIOS_PATH;

#if IS_ENABLED(CONFIG_ELOG)
	if (path != ME_NORMAL_BIOS_PATH) {
		struct elog_event_data_me_extended data = {
			.current_working_state = hfs.working_state,
			.operation_state = hfs.operation_state,
			.operation_mode = hfs.operation_mode,
			.error_code = hfs.error_code,
			.progress_code = gmes.progress_code,
			.current_pmevent = gmes.current_pmevent,
			.current_state = gmes.current_state,
		};
		elog_add_event_byte(ELOG_TYPE_MANAGEMENT_ENGINE, path);
		elog_add_event_raw(ELOG_TYPE_MANAGEMENT_ENGINE_EXT,
				   &data, sizeof(data));
	}
#endif

	return path;
}

/* Prepare ME for MEI messages */
static int intel_mei_setup(struct device *dev)
{
	struct resource *res;
	struct mei_csr host;
	u32 reg32;

	/* Find the MMIO base for the ME interface */
	res = find_resource(dev, PCI_BASE_ADDRESS_0);
	if (!res || res->base == 0 || res->size == 0) {
		printk(BIOS_DEBUG, "ME: MEI resource not present!\n");
		return -1;
	}
	mei_base_address = (u32 *)(uintptr_t)res->base;

	/* Ensure Memory and Bus Master bits are set */
	reg32 = pci_read_config32(dev, PCI_COMMAND);
	reg32 |= PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
	pci_write_config32(dev, PCI_COMMAND, reg32);

	/* Clean up status for next message */
	read_host_csr(&host);
	host.interrupt_generate = 1;
	host.ready = 1;
	host.reset = 0;
	write_host_csr(&host);

	return 0;
}

/* Read the Extend register hash of ME firmware */
static int intel_me_extend_valid(struct device *dev)
{
	struct me_heres status;
	u32 extend[8] = {0};
	int i, count = 0;

	pci_read_dword_ptr(dev, &status, PCI_ME_HERES);
	if (!status.extend_feature_present) {
		printk(BIOS_ERR, "ME: Extend Feature not present\n");
		return -1;
	}

	if (!status.extend_reg_valid) {
		printk(BIOS_ERR, "ME: Extend Register not valid\n");
		return -1;
	}

	switch (status.extend_reg_algorithm) {
	case PCI_ME_EXT_SHA1:
		count = 5;
		printk(BIOS_DEBUG, "ME: Extend SHA-1: ");
		break;
	case PCI_ME_EXT_SHA256:
		count = 8;
		printk(BIOS_DEBUG, "ME: Extend SHA-256: ");
		break;
	default:
		printk(BIOS_ERR, "ME: Extend Algorithm %d unknown\n",
		       status.extend_reg_algorithm);
		return -1;
	}

	for (i = 0; i < count; ++i) {
		extend[i] = pci_read_config32(dev, PCI_ME_HER(i));
		printk(BIOS_DEBUG, "%08x", extend[i]);
	}
	printk(BIOS_DEBUG, "\n");

#if IS_ENABLED(CONFIG_CHROMEOS)
	/* Save hash in NVS for the OS to verify */
	chromeos_set_me_hash(extend, count);
#endif

	return 0;
}

/* Hide the ME virtual PCI devices */
static void intel_me_hide(struct device *dev)
{
	dev->enabled = 0;
	pch_enable(dev);
}

/* Check whether ME is present and do basic init */
static void intel_me_init(struct device *dev)
{
	me_bios_path path = intel_me_path(dev);

	/* Do initial setup and determine the BIOS path */
	printk(BIOS_NOTICE, "ME: BIOS path: %s\n", me_bios_path_values[path]);

	switch (path) {
	case ME_S3WAKE_BIOS_PATH:
		intel_me_hide(dev);
		break;

	case ME_NORMAL_BIOS_PATH:
		/* Validate the extend register */
		if (intel_me_extend_valid(dev) < 0)
			break; /* TODO: force recovery mode */

		/* Prepare MEI MMIO interface */
		if (intel_mei_setup(dev) < 0)
			break;

#if (CONFIG_DEFAULT_CONSOLE_LOGLEVEL >= BIOS_DEBUG)
		/* Print ME firmware version */
		mkhi_get_fw_version();
		/* Print ME firmware capabilities */
		mkhi_get_fwcaps();
#endif

		/*
		 * Leave the ME unlocked in this path.
		 * It will be locked via SMI command later.
		 */
		break;

	case ME_ERROR_BIOS_PATH:
	case ME_RECOVERY_BIOS_PATH:
	case ME_DISABLE_BIOS_PATH:
	case ME_FIRMWARE_UPDATE_BIOS_PATH:
		break;
	}
}

static void set_subsystem(struct device *dev, unsigned vendor, unsigned device)
{
	if (!vendor || !device) {
		pci_write_config32(dev, PCI_SUBSYSTEM_VENDOR_ID,
				   pci_read_config32(dev, PCI_VENDOR_ID));
	} else {
		pci_write_config32(dev, PCI_SUBSYSTEM_VENDOR_ID,
				   ((device & 0xffff) << 16) | (vendor & 0xffff));
	}
}

static struct pci_operations pci_ops = {
	.set_subsystem = set_subsystem,
};

static struct device_operations device_ops = {
	.read_resources = pci_dev_read_resources,
	.set_resources = pci_dev_set_resources,
	.enable_resources = pci_dev_enable_resources,
	.init = intel_me_init,
	.ops_pci = &pci_ops,
};

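/*
 * Bind this driver to the 6-series PCH Management Engine Interface
 * (MEI, also known as HECI) PCI function, device ID 8086:1c3a.
 */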
static const struct pci_driver intel_me __pci_driver = {
	.ops = &device_ops,
	.vendor = PCI_VENDOR_ID_INTEL,
	.device = 0x1c3a,
};

#endif /* !__SMM__ */