/* SPDX-License-Identifier: GPL-2.0-only */

/*
 * This is a ramstage driver for the Intel Management Engine found in the
 * 6-series chipset. It handles the required boot-time messages over the
 * MMIO-based Management Engine Interface (MEI) to tell the ME that the
 * BIOS is finished with POST. Additional messages are defined for debug
 * but are not used unless the console loglevel is high enough.
 */
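
/*
 * Rough sketch of an MKHI transaction as implemented by the helpers below;
 * mei_send_msg() and mei_recv_msg() are the authoritative reference:
 *
 *  1. Write the MEI header, the MKHI header and any payload dwords into the
 *     host circular buffer window (MEI_H_CB_WW).
 *  2. Set interrupt_generate in the host CSR (MEI_H_CSR) to notify the ME.
 *  3. Poll the ME CSR (MEI_ME_CSR_HA) until the expected number of dwords is
 *     queued, then read the response back through MEI_ME_CB_RW and validate
 *     the MEI and MKHI response headers.
 *
 * Register offsets and header layouts are defined in me.h.
 */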

#include <acpi/acpi.h>
#include <console/console.h>
#include <device/device.h>
#include <device/mmio.h>
#include <device/pci.h>
#include <device/pci_ids.h>
#include <device/pci_ops.h>
#include <string.h>
#include <delay.h>
#include <elog.h>

#include "me.h"
#include "pch.h"

/* Path that the BIOS should take based on ME state */
static const char *const me_bios_path_values[] = {
	[ME_NORMAL_BIOS_PATH] = "Normal",
	[ME_S3WAKE_BIOS_PATH] = "S3 Wake",
	[ME_ERROR_BIOS_PATH] = "Error",
	[ME_RECOVERY_BIOS_PATH] = "Recovery",
	[ME_DISABLE_BIOS_PATH] = "Disable",
	[ME_FIRMWARE_UPDATE_BIOS_PATH] = "Firmware Update",
};

/* MMIO base address for MEI interface */
static u32 *mei_base_address;

static void mei_dump(void *ptr, int dword, int offset, const char *type)
{
	struct mei_csr *csr;

	if (!CONFIG(DEBUG_INTEL_ME))
		return;

	printk(BIOS_SPEW, "%-9s[%02x] : ", type, offset);

	switch (offset) {
	case MEI_H_CSR:
	case MEI_ME_CSR_HA:
		csr = ptr;
		if (!csr) {
			printk(BIOS_SPEW, "ERROR: 0x%08x\n", dword);
			break;
		}
		printk(BIOS_SPEW, "cbd=%u cbrp=%02u cbwp=%02u ready=%u "
		       "reset=%u ig=%u is=%u ie=%u\n", csr->buffer_depth,
		       csr->buffer_read_ptr, csr->buffer_write_ptr,
		       csr->ready, csr->reset, csr->interrupt_generate,
		       csr->interrupt_status, csr->interrupt_enable);
		break;
	case MEI_ME_CB_RW:
	case MEI_H_CB_WW:
		printk(BIOS_SPEW, "CB: 0x%08x\n", dword);
		break;
	default:
		printk(BIOS_SPEW, "0x%08x\n", dword);
		break;
	}
}

/*
 * ME/MEI access helpers using memcpy to avoid aliasing.
 */
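/*
 * The hardware registers are moved through plain u32 values and then copied
 * into/out of the bitfield structs from me.h (struct mei_csr, struct
 * mei_header, ...) with memcpy(), so no pointer is dereferenced with a
 * conflicting type and the compiler's strict-aliasing assumptions hold.
 */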

static inline void mei_read_dword_ptr(void *ptr, int offset)
{
	u32 dword = read32(mei_base_address + (offset / sizeof(u32)));
	memcpy(ptr, &dword, sizeof(dword));
	mei_dump(ptr, dword, offset, "READ");
}

static inline void mei_write_dword_ptr(void *ptr, int offset)
{
	u32 dword = 0;
	memcpy(&dword, ptr, sizeof(dword));
	write32(mei_base_address + (offset / sizeof(u32)), dword);
	mei_dump(ptr, dword, offset, "WRITE");
}

#ifndef __SIMPLE_DEVICE__
static inline void pci_read_dword_ptr(struct device *dev, void *ptr,
				      int offset)
{
	u32 dword = pci_read_config32(dev, offset);
	memcpy(ptr, &dword, sizeof(dword));
	mei_dump(ptr, dword, offset, "PCI READ");
}
#endif

static inline void read_host_csr(struct mei_csr *csr)
{
	mei_read_dword_ptr(csr, MEI_H_CSR);
}

static inline void write_host_csr(struct mei_csr *csr)
{
	mei_write_dword_ptr(csr, MEI_H_CSR);
}

static inline void read_me_csr(struct mei_csr *csr)
{
	mei_read_dword_ptr(csr, MEI_ME_CSR_HA);
}

static inline void write_cb(u32 dword)
{
	write32(mei_base_address + (MEI_H_CB_WW / sizeof(u32)), dword);
	mei_dump(NULL, dword, MEI_H_CB_WW, "WRITE");
}

static inline u32 read_cb(void)
{
	u32 dword = read32(mei_base_address + (MEI_ME_CB_RW / sizeof(u32)));
	mei_dump(NULL, dword, MEI_ME_CB_RW, "READ");
	return dword;
}

/* Wait for ME ready bit to be asserted */
static int mei_wait_for_me_ready(void)
{
	struct mei_csr me;
	unsigned int try = ME_RETRY;

	while (try--) {
		read_me_csr(&me);
		if (me.ready)
			return 0;
		udelay(ME_DELAY);
	}

	printk(BIOS_ERR, "ME: failed to become ready\n");
	return -1;
}

static void mei_reset(void)
{
	struct mei_csr host;

	if (mei_wait_for_me_ready() < 0)
		return;

	/* Reset host and ME circular buffers for next message */
	read_host_csr(&host);
	host.reset = 1;
	host.interrupt_generate = 1;
	write_host_csr(&host);

	if (mei_wait_for_me_ready() < 0)
		return;

	/* Re-init and indicate host is ready */
	read_host_csr(&host);
	host.interrupt_generate = 1;
	host.ready = 1;
	host.reset = 0;
	write_host_csr(&host);
}

static int mei_send_msg(struct mei_header *mei, struct mkhi_header *mkhi,
			void *req_data)
{
	struct mei_csr host;
	unsigned int ndata, n;
	u32 *data;

	/* Number of dwords to write, ignoring MKHI */
	ndata = mei->length >> 2;

	/* Pad non-dword aligned request message length */
	if (mei->length & 3)
		ndata++;
	if (!ndata) {
		printk(BIOS_DEBUG, "ME: request does not include MKHI\n");
		return -1;
	}
	ndata++; /* Add MEI header */

	/*
	 * Make sure there is still room left in the circular buffer.
	 * Reset the buffer pointers if the requested message will not fit.
	 */
	read_host_csr(&host);
	if ((host.buffer_depth - host.buffer_write_ptr) < ndata) {
		printk(BIOS_ERR, "ME: circular buffer full, resetting...\n");
		mei_reset();
		read_host_csr(&host);
	}

	/*
	 * This implementation does not handle splitting large messages
	 * across multiple transactions. Ensure the requested length
	 * will fit in the available circular buffer depth.
	 */
	if ((host.buffer_depth - host.buffer_write_ptr) < ndata) {
		printk(BIOS_ERR, "ME: message (%u) too large for buffer (%u)\n",
		       ndata + 2, host.buffer_depth);
		return -1;
	}

	/* Write MEI header */
	mei_write_dword_ptr(mei, MEI_H_CB_WW);
	ndata--;

	/* Write MKHI header */
	mei_write_dword_ptr(mkhi, MEI_H_CB_WW);
	ndata--;

	/* Write message data */
	data = req_data;
	for (n = 0; n < ndata; ++n)
		write_cb(*data++);

	/* Generate interrupt to the ME */
	read_host_csr(&host);
	host.interrupt_generate = 1;
	write_host_csr(&host);

	/* Make sure ME is ready after sending request data */
	return mei_wait_for_me_ready();
}

static int mei_recv_msg(struct mei_header *mei, struct mkhi_header *mkhi,
			void *rsp_data, int rsp_bytes)
{
	struct mei_header mei_rsp;
	struct mkhi_header mkhi_rsp;
	struct mei_csr me, host;
	unsigned int ndata, n;
	unsigned int expected;
	u32 *data;

	/* Total number of dwords to read from circular buffer */
	expected = (rsp_bytes + sizeof(mei_rsp) + sizeof(mkhi_rsp)) >> 2;
	if (rsp_bytes & 3)
		expected++;

	/*
	 * The interrupt status bit does not appear to indicate that the
	 * message has actually been received. Instead we wait until the
	 * expected number of dwords are present in the circular buffer.
	 */
	for (n = ME_RETRY; n; --n) {
		read_me_csr(&me);
		if ((me.buffer_write_ptr - me.buffer_read_ptr) >= expected)
			break;
		udelay(ME_DELAY);
	}
	if (!n) {
		printk(BIOS_ERR, "ME: timeout waiting for data: expected %u, available %u\n",
		       expected, me.buffer_write_ptr - me.buffer_read_ptr);
		return -1;
	}

	/* Read and verify MEI response header from the ME */
	mei_read_dword_ptr(&mei_rsp, MEI_ME_CB_RW);
	if (!mei_rsp.is_complete) {
		printk(BIOS_ERR, "ME: response is not complete\n");
		return -1;
	}

	/* Handle non-dword responses and expect at least MKHI header */
	ndata = mei_rsp.length >> 2;
	if (mei_rsp.length & 3)
		ndata++;
	if (ndata != (expected - 1)) {
		printk(BIOS_ERR, "ME: response is missing data\n");
		return -1;
	}

	/* Read and verify MKHI response header from the ME */
	mei_read_dword_ptr(&mkhi_rsp, MEI_ME_CB_RW);
	if (!mkhi_rsp.is_response ||
	    mkhi->group_id != mkhi_rsp.group_id ||
	    mkhi->command != mkhi_rsp.command) {
		printk(BIOS_ERR, "ME: invalid response, group %u ?= %u, "
		       "command %u ?= %u, is_response %u\n", mkhi->group_id,
		       mkhi_rsp.group_id, mkhi->command, mkhi_rsp.command,
		       mkhi_rsp.is_response);
		return -1;
	}
	ndata--; /* MKHI header has been read */

	/* Make sure caller passed a buffer with enough space */
	if (ndata != (rsp_bytes >> 2)) {
		printk(BIOS_ERR, "ME: not enough room in response buffer: %u != %u\n",
		       ndata, rsp_bytes >> 2);
		return -1;
	}

	/* Read response data from the circular buffer */
	data = rsp_data;
	for (n = 0; n < ndata; ++n)
		*data++ = read_cb();

	/* Tell the ME that we have consumed the response */
	read_host_csr(&host);
	host.interrupt_status = 1;
	host.interrupt_generate = 1;
	write_host_csr(&host);

	return mei_wait_for_me_ready();
}

static inline int mei_sendrecv(struct mei_header *mei, struct mkhi_header *mkhi,
			       void *req_data, void *rsp_data, int rsp_bytes)
{
	if (mei_send_msg(mei, mkhi, req_data) < 0)
		return -1;
	if (mei_recv_msg(mei, mkhi, rsp_data, rsp_bytes) < 0)
		return -1;
	return 0;
}
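
/*
 * Illustrative use of mei_sendrecv() (not compiled; the real header layouts
 * and MKHI group/command constants live in me.h, and the names below are
 * placeholders only):
 *
 *	struct mkhi_header mkhi = {
 *		.group_id = MKHI_GROUP_ID_GEN,	// hypothetical constant
 *		.command  = GET_FW_VERSION,	// hypothetical constant
 *	};
 *	struct mei_header mei = {
 *		.is_complete = 1,
 *		.length      = sizeof(mkhi),	// MKHI header plus payload bytes
 *	};
 *	struct fw_version version;		// hypothetical response layout
 *
 *	if (mei_sendrecv(&mei, &mkhi, NULL, &version, sizeof(version)) < 0)
 *		printk(BIOS_ERR, "ME: get firmware version failed\n");
 */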

/* Determine the path that we should take based on ME status */
static me_bios_path intel_me_path(struct device *dev)
{
	me_bios_path path = ME_DISABLE_BIOS_PATH;
	struct me_hfs hfs;
	struct me_gmes gmes;

	/* S3 wake skips all MKHI messages */
	if (acpi_is_wakeup_s3())
		return ME_S3WAKE_BIOS_PATH;

	pci_read_dword_ptr(dev, &hfs, PCI_ME_HFS);
	pci_read_dword_ptr(dev, &gmes, PCI_ME_GMES);

	/* Check and dump status */
	intel_me_status(&hfs, &gmes);

	/* Check Current Working State */
	switch (hfs.working_state) {
	case ME_HFS_CWS_NORMAL:
		path = ME_NORMAL_BIOS_PATH;
		break;
	case ME_HFS_CWS_REC:
		path = ME_RECOVERY_BIOS_PATH;
		break;
	default:
		path = ME_DISABLE_BIOS_PATH;
		break;
	}

	/* Check Current Operation Mode */
	switch (hfs.operation_mode) {
	case ME_HFS_MODE_NORMAL:
		break;
	case ME_HFS_MODE_DEBUG:
	case ME_HFS_MODE_DIS:
	case ME_HFS_MODE_OVER_JMPR:
	case ME_HFS_MODE_OVER_MEI:
	default:
		path = ME_DISABLE_BIOS_PATH;
		break;
	}

	/* Check for any error code and valid firmware */
	if (hfs.error_code || hfs.fpt_bad)
		path = ME_ERROR_BIOS_PATH;

	if (CONFIG(ELOG) && path != ME_NORMAL_BIOS_PATH) {
		struct elog_event_data_me_extended data = {
			.current_working_state = hfs.working_state,
			.operation_state = hfs.operation_state,
			.operation_mode = hfs.operation_mode,
			.error_code = hfs.error_code,
			.progress_code = gmes.progress_code,
			.current_pmevent = gmes.current_pmevent,
			.current_state = gmes.current_state,
		};
		elog_add_event_byte(ELOG_TYPE_MANAGEMENT_ENGINE, path);
		elog_add_event_raw(ELOG_TYPE_MANAGEMENT_ENGINE_EXT,
				   &data, sizeof(data));
	}

	return path;
}

/* Prepare ME for MEI messages */
static int intel_mei_setup(struct device *dev)
{
	struct resource *res;
	struct mei_csr host;
	u16 reg16;

	/* Find the MMIO base for the ME interface */
	res = probe_resource(dev, PCI_BASE_ADDRESS_0);
	if (!res || res->base == 0 || res->size == 0) {
		printk(BIOS_DEBUG, "ME: MEI resource not present!\n");
		return -1;
	}
	mei_base_address = (u32 *)(uintptr_t)res->base;

	/* Ensure Memory and Bus Master bits are set */
	reg16 = pci_read_config16(dev, PCI_COMMAND);
	reg16 |= PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
	pci_write_config16(dev, PCI_COMMAND, reg16);

	/* Clean up status for next message */
	read_host_csr(&host);
	host.interrupt_generate = 1;
	host.ready = 1;
	host.reset = 0;
	write_host_csr(&host);

	return 0;
}

/* Read the Extend register hash of ME firmware */
static int intel_me_extend_valid(struct device *dev)
{
	struct me_heres status;
	u32 extend[8] = {0};
	int i, count = 0;

	pci_read_dword_ptr(dev, &status, PCI_ME_HERES);
	if (!status.extend_feature_present) {
		printk(BIOS_ERR, "ME: Extend Feature not present\n");
		return -1;
	}

	if (!status.extend_reg_valid) {
		printk(BIOS_ERR, "ME: Extend Register not valid\n");
		return -1;
	}

	switch (status.extend_reg_algorithm) {
	case PCI_ME_EXT_SHA1:
		count = 5;
		printk(BIOS_DEBUG, "ME: Extend SHA-1: ");
		break;
	case PCI_ME_EXT_SHA256:
		count = 8;
		printk(BIOS_DEBUG, "ME: Extend SHA-256: ");
		break;
	default:
		printk(BIOS_ERR, "ME: Extend Algorithm %d unknown\n",
		       status.extend_reg_algorithm);
		return -1;
	}

	for (i = 0; i < count; ++i) {
		extend[i] = pci_read_config32(dev, PCI_ME_HER(i));
		printk(BIOS_DEBUG, "%08x", extend[i]);
	}
	printk(BIOS_DEBUG, "\n");

	return 0;
}

/* Hide the ME virtual PCI devices */
static void intel_me_hide(struct device *dev)
{
	dev->enabled = 0;
	pch_enable(dev);
}

/* Check whether ME is present and do basic init */
static void intel_me_init(struct device *dev)
{
	me_bios_path path = intel_me_path(dev);

	/* Do initial setup and determine the BIOS path */
	printk(BIOS_NOTICE, "ME: BIOS path: %s\n", me_bios_path_values[path]);

	switch (path) {
	case ME_S3WAKE_BIOS_PATH:
	case ME_DISABLE_BIOS_PATH:
#if CONFIG(HIDE_MEI_ON_ERROR)
	case ME_ERROR_BIOS_PATH:
#endif
		intel_me_hide(dev);
		break;

	case ME_NORMAL_BIOS_PATH:
		/* Validate the extend register */
		if (intel_me_extend_valid(dev) < 0)
			break; /* TODO: force recovery mode */

		/* Prepare MEI MMIO interface */
		if (intel_mei_setup(dev) < 0)
			break;

		/*
		 * Leave the ME unlocked in this path.
		 * It will be locked via SMI command later.
		 */
		break;

#if !CONFIG(HIDE_MEI_ON_ERROR)
	case ME_ERROR_BIOS_PATH:
#endif
	case ME_RECOVERY_BIOS_PATH:
	case ME_FIRMWARE_UPDATE_BIOS_PATH:
		break;
	}
}

static struct device_operations device_ops = {
	.read_resources = pci_dev_read_resources,
	.set_resources = pci_dev_set_resources,
	.enable_resources = pci_dev_enable_resources,
	.init = intel_me_init,
	.ops_pci = &pci_dev_ops_pci,
};

static const unsigned short pci_device_ids[] = {
	0x1c3a,
	PCI_DID_INTEL_IBEXPEAK_HECI1,
	0
};

static const struct pci_driver intel_me __pci_driver = {
	.ops = &device_ops,
	.vendor = PCI_VID_INTEL,
	.devices = pci_device_ids
};