/* SPDX-License-Identifier: GPL-2.0-only */

/*
 * This is a ramstage driver for the Intel Management Engine found in the
 * 6-series chipset. It handles the required boot-time messages over the
 * MMIO-based Management Engine Interface to tell the ME that the BIOS is
 * finished with POST. Additional messages are defined for debug but are
 * not used unless the console loglevel is high enough.
 */

#include <acpi/acpi.h>
#include <console/console.h>
#include <device/device.h>
#include <device/mmio.h>
#include <device/pci.h>
#include <device/pci_def.h>
#include <device/pci_ids.h>
#include <device/pci_ops.h>
#include <string.h>
#include <delay.h>
#include <elog.h>

#include "me.h"
#include "pch.h"

/* Path that the BIOS should take based on ME state */
static const char *me_bios_path_values[] __unused = {
	[ME_NORMAL_BIOS_PATH] = "Normal",
	[ME_S3WAKE_BIOS_PATH] = "S3 Wake",
	[ME_ERROR_BIOS_PATH] = "Error",
	[ME_RECOVERY_BIOS_PATH] = "Recovery",
	[ME_DISABLE_BIOS_PATH] = "Disable",
	[ME_FIRMWARE_UPDATE_BIOS_PATH] = "Firmware Update",
};

/* MMIO base address for MEI interface */
static u32 *mei_base_address;

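/*
 * Log an MEI register access at BIOS_SPEW when CONFIG(DEBUG_INTEL_ME)
 * is enabled. CSR offsets are decoded into their bitfields; circular
 * buffer reads and writes are printed as raw dwords.
 */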
static void mei_dump(void *ptr, int dword, int offset, const char *type)
{
	struct mei_csr *csr;

	if (!CONFIG(DEBUG_INTEL_ME))
		return;

	printk(BIOS_SPEW, "%-9s[%02x] : ", type, offset);

	switch (offset) {
	case MEI_H_CSR:
	case MEI_ME_CSR_HA:
		csr = ptr;
		if (!csr) {
			printk(BIOS_SPEW, "ERROR: 0x%08x\n", dword);
			break;
		}
		printk(BIOS_SPEW, "cbd=%u cbrp=%02u cbwp=%02u ready=%u "
		       "reset=%u ig=%u is=%u ie=%u\n", csr->buffer_depth,
		       csr->buffer_read_ptr, csr->buffer_write_ptr,
		       csr->ready, csr->reset, csr->interrupt_generate,
		       csr->interrupt_status, csr->interrupt_enable);
		break;
	case MEI_ME_CB_RW:
	case MEI_H_CB_WW:
		printk(BIOS_SPEW, "CB: 0x%08x\n", dword);
		break;
	default:
		/* Unknown offset: print the raw dword value */
		printk(BIOS_SPEW, "0x%08x\n", dword);
		break;
	}
}

/*
 * ME/MEI access helpers using memcpy to avoid aliasing.
 */

static inline void mei_read_dword_ptr(void *ptr, int offset)
{
	u32 dword = read32(mei_base_address + (offset/sizeof(u32)));
	memcpy(ptr, &dword, sizeof(dword));
	mei_dump(ptr, dword, offset, "READ");
}

static inline void mei_write_dword_ptr(void *ptr, int offset)
{
	u32 dword = 0;
	memcpy(&dword, ptr, sizeof(dword));
	write32(mei_base_address + (offset/sizeof(u32)), dword);
	mei_dump(ptr, dword, offset, "WRITE");
}

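/*
 * PCI config space variant of the helpers above, used to read the
 * ME status registers (HFS, GMES, HERES) into packed structs.
 */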
#ifndef __SIMPLE_DEVICE__
static inline void pci_read_dword_ptr(struct device *dev, void *ptr,
				      int offset)
{
	u32 dword = pci_read_config32(dev, offset);
	memcpy(ptr, &dword, sizeof(dword));
	mei_dump(ptr, dword, offset, "PCI READ");
}
#endif

static inline void read_host_csr(struct mei_csr *csr)
{
	mei_read_dword_ptr(csr, MEI_H_CSR);
}

static inline void write_host_csr(struct mei_csr *csr)
{
	mei_write_dword_ptr(csr, MEI_H_CSR);
}

static inline void read_me_csr(struct mei_csr *csr)
{
	mei_read_dword_ptr(csr, MEI_ME_CSR_HA);
}

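/*
 * Circular buffer access: requests are written dword by dword through
 * the host window (MEI_H_CB_WW), responses are read back through the
 * ME window (MEI_ME_CB_RW).
 */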
static inline void write_cb(u32 dword)
{
	write32(mei_base_address + (MEI_H_CB_WW / sizeof(u32)), dword);
	mei_dump(NULL, dword, MEI_H_CB_WW, "WRITE");
}

static inline u32 read_cb(void)
{
	u32 dword = read32(mei_base_address + (MEI_ME_CB_RW / sizeof(u32)));
	mei_dump(NULL, dword, MEI_ME_CB_RW, "READ");
	return dword;
}

/* Wait for ME ready bit to be asserted */
static int mei_wait_for_me_ready(void)
{
	struct mei_csr me;
	unsigned int try = ME_RETRY;

	while (try--) {
		read_me_csr(&me);
		if (me.ready)
			return 0;
		udelay(ME_DELAY);
	}

	printk(BIOS_ERR, "ME: failed to become ready\n");
	return -1;
}

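/*
 * Reset the host and ME circular buffers and signal host ready again.
 * Used by mei_send_msg() when a request does not fit into the
 * remaining buffer space.
 */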
static void mei_reset(void)
{
	struct mei_csr host;

	if (mei_wait_for_me_ready() < 0)
		return;

	/* Reset host and ME circular buffers for next message */
	read_host_csr(&host);
	host.reset = 1;
	host.interrupt_generate = 1;
	write_host_csr(&host);

	if (mei_wait_for_me_ready() < 0)
		return;

	/* Re-init and indicate host is ready */
	read_host_csr(&host);
	host.interrupt_generate = 1;
	host.ready = 1;
	host.reset = 0;
	write_host_csr(&host);
}

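/*
 * Send one request to the ME: the MEI header, the MKHI header and the
 * payload are pushed into the circular buffer one dword at a time,
 * then interrupt_generate is set to tell the ME to process it.
 */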
static int mei_send_msg(struct mei_header *mei, struct mkhi_header *mkhi,
			void *req_data)
{
	struct mei_csr host;
	unsigned int ndata, n;
	u32 *data;

	/* Number of dwords to write, not counting the MEI header */
	ndata = mei->length >> 2;

	/* Pad non-dword aligned request message length */
	if (mei->length & 3)
		ndata++;
	if (!ndata) {
		printk(BIOS_DEBUG, "ME: request does not include MKHI\n");
		return -1;
	}
	ndata++; /* Add MEI header */

	/*
	 * Make sure there is still room left in the circular buffer.
	 * Reset the buffer pointers if the requested message will not fit.
	 */
	read_host_csr(&host);
	if ((host.buffer_depth - host.buffer_write_ptr) < ndata) {
		printk(BIOS_ERR, "ME: circular buffer full, resetting...\n");
		mei_reset();
		read_host_csr(&host);
	}

	/*
	 * This implementation does not handle splitting large messages
	 * across multiple transactions. Ensure the requested length
	 * will fit in the available circular buffer depth.
	 */
	if ((host.buffer_depth - host.buffer_write_ptr) < ndata) {
		printk(BIOS_ERR, "ME: message (%u) too large for buffer (%u)\n",
		       ndata + 2, host.buffer_depth);
		return -1;
	}

	/* Write MEI header */
	mei_write_dword_ptr(mei, MEI_H_CB_WW);
	ndata--;

	/* Write MKHI header */
	mei_write_dword_ptr(mkhi, MEI_H_CB_WW);
	ndata--;

	/* Write message data */
	data = req_data;
	for (n = 0; n < ndata; ++n)
		write_cb(*data++);

	/* Generate interrupt to the ME */
	read_host_csr(&host);
	host.interrupt_generate = 1;
	write_host_csr(&host);

	/* Make sure ME is ready after sending request data */
	return mei_wait_for_me_ready();
}

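/*
 * Receive the response to a request: poll until the expected number of
 * dwords is available in the circular buffer, then read and validate
 * the MEI and MKHI headers before copying the payload to the caller.
 */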
static int mei_recv_msg(struct mei_header *mei, struct mkhi_header *mkhi,
			void *rsp_data, int rsp_bytes)
{
	struct mei_header mei_rsp;
	struct mkhi_header mkhi_rsp;
	struct mei_csr me, host;
	unsigned int ndata, n;
	unsigned int expected;
	u32 *data;

	/* Total number of dwords to read from circular buffer */
	expected = (rsp_bytes + sizeof(mei_rsp) + sizeof(mkhi_rsp)) >> 2;
	if (rsp_bytes & 3)
		expected++;

	/*
	 * The interrupt status bit does not appear to indicate that the
	 * message has actually been received. Instead we wait until the
	 * expected number of dwords are present in the circular buffer.
	 */
	for (n = ME_RETRY; n; --n) {
		read_me_csr(&me);
		if ((me.buffer_write_ptr - me.buffer_read_ptr) >= expected)
			break;
		udelay(ME_DELAY);
	}
	if (!n) {
		printk(BIOS_ERR, "ME: timeout waiting for data: expected %u, available %u\n",
		       expected, me.buffer_write_ptr - me.buffer_read_ptr);
		return -1;
	}

	/* Read and verify MEI response header from the ME */
	mei_read_dword_ptr(&mei_rsp, MEI_ME_CB_RW);
	if (!mei_rsp.is_complete) {
		printk(BIOS_ERR, "ME: response is not complete\n");
		return -1;
	}

	/* Handle non-dword responses and expect at least MKHI header */
	ndata = mei_rsp.length >> 2;
	if (mei_rsp.length & 3)
		ndata++;
	if (ndata != (expected - 1)) {
		printk(BIOS_ERR, "ME: response is missing data\n");
		return -1;
	}

	/* Read and verify MKHI response header from the ME */
	mei_read_dword_ptr(&mkhi_rsp, MEI_ME_CB_RW);
	if (!mkhi_rsp.is_response ||
	    mkhi->group_id != mkhi_rsp.group_id ||
	    mkhi->command != mkhi_rsp.command) {
		printk(BIOS_ERR, "ME: invalid response, group %u ?= %u, "
		       "command %u ?= %u, is_response %u\n", mkhi->group_id,
		       mkhi_rsp.group_id, mkhi->command, mkhi_rsp.command,
		       mkhi_rsp.is_response);
		return -1;
	}
	ndata--; /* MKHI header has been read */

	/* Make sure caller passed a buffer with enough space */
	if (ndata != (rsp_bytes >> 2)) {
		printk(BIOS_ERR, "ME: not enough room in response buffer: %u != %u\n",
		       ndata, rsp_bytes >> 2);
		return -1;
	}

	/* Read response data from the circular buffer */
	data = rsp_data;
	for (n = 0; n < ndata; ++n)
		*data++ = read_cb();

	/* Tell the ME that we have consumed the response */
	read_host_csr(&host);
	host.interrupt_status = 1;
	host.interrupt_generate = 1;
	write_host_csr(&host);

	return mei_wait_for_me_ready();
}

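/* Send a request and read back the matching response; returns < 0 on failure */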
static inline int mei_sendrecv(struct mei_header *mei, struct mkhi_header *mkhi,
			       void *req_data, void *rsp_data, int rsp_bytes)
{
	if (mei_send_msg(mei, mkhi, req_data) < 0)
		return -1;
	if (mei_recv_msg(mei, mkhi, rsp_data, rsp_bytes) < 0)
		return -1;
	return 0;
}

/* Determine the path that we should take based on ME status */
static me_bios_path intel_me_path(struct device *dev)
{
	me_bios_path path = ME_DISABLE_BIOS_PATH;
	struct me_hfs hfs;
	struct me_gmes gmes;

	/* S3 wake skips all MKHI messages */
	if (acpi_is_wakeup_s3())
		return ME_S3WAKE_BIOS_PATH;

	pci_read_dword_ptr(dev, &hfs, PCI_ME_HFS);
	pci_read_dword_ptr(dev, &gmes, PCI_ME_GMES);

	/* Check and dump status */
	intel_me_status(&hfs, &gmes);

	/* Check Current Working State */
	switch (hfs.working_state) {
	case ME_HFS_CWS_NORMAL:
		path = ME_NORMAL_BIOS_PATH;
		break;
	case ME_HFS_CWS_REC:
		path = ME_RECOVERY_BIOS_PATH;
		break;
	default:
		path = ME_DISABLE_BIOS_PATH;
		break;
	}

	/* Check Current Operation Mode */
	switch (hfs.operation_mode) {
	case ME_HFS_MODE_NORMAL:
		break;
	case ME_HFS_MODE_DEBUG:
	case ME_HFS_MODE_DIS:
	case ME_HFS_MODE_OVER_JMPR:
	case ME_HFS_MODE_OVER_MEI:
	default:
		path = ME_DISABLE_BIOS_PATH;
		break;
	}

	/* Check for any error code and valid firmware */
	if (hfs.error_code || hfs.fpt_bad)
		path = ME_ERROR_BIOS_PATH;

	if (CONFIG(ELOG) && path != ME_NORMAL_BIOS_PATH) {
		struct elog_event_data_me_extended data = {
			.current_working_state = hfs.working_state,
			.operation_state = hfs.operation_state,
			.operation_mode = hfs.operation_mode,
			.error_code = hfs.error_code,
			.progress_code = gmes.progress_code,
			.current_pmevent = gmes.current_pmevent,
			.current_state = gmes.current_state,
		};
		elog_add_event_byte(ELOG_TYPE_MANAGEMENT_ENGINE, path);
		elog_add_event_raw(ELOG_TYPE_MANAGEMENT_ENGINE_EXT,
				   &data, sizeof(data));
	}

	return path;
}

/* Prepare ME for MEI messages */
static int intel_mei_setup(struct device *dev)
{
	struct resource *res;
	struct mei_csr host;
	u16 reg16;

	/* Find the MMIO base for the ME interface */
	res = probe_resource(dev, PCI_BASE_ADDRESS_0);
	if (!res || res->base == 0 || res->size == 0) {
		printk(BIOS_DEBUG, "ME: MEI resource not present!\n");
		return -1;
	}
	mei_base_address = (u32 *)(uintptr_t)res->base;

	/* Ensure Memory and Bus Master bits are set */
	reg16 = pci_read_config16(dev, PCI_COMMAND);
	reg16 |= PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
	pci_write_config16(dev, PCI_COMMAND, reg16);

	/* Clean up status for next message */
	read_host_csr(&host);
	host.interrupt_generate = 1;
	host.ready = 1;
	host.reset = 0;
	write_host_csr(&host);

	return 0;
}

/* Read the Extend register hash of ME firmware */
static int intel_me_extend_valid(struct device *dev)
{
	struct me_heres status;
	u32 extend[8] = {0};
	int i, count = 0;

	pci_read_dword_ptr(dev, &status, PCI_ME_HERES);
	if (!status.extend_feature_present) {
		printk(BIOS_ERR, "ME: Extend Feature not present\n");
		return -1;
	}

	if (!status.extend_reg_valid) {
		printk(BIOS_ERR, "ME: Extend Register not valid\n");
		return -1;
	}

	switch (status.extend_reg_algorithm) {
	case PCI_ME_EXT_SHA1:
		count = 5;
		printk(BIOS_DEBUG, "ME: Extend SHA-1: ");
		break;
	case PCI_ME_EXT_SHA256:
		count = 8;
		printk(BIOS_DEBUG, "ME: Extend SHA-256: ");
		break;
	default:
		printk(BIOS_ERR, "ME: Extend Algorithm %d unknown\n",
		       status.extend_reg_algorithm);
		return -1;
	}

	for (i = 0; i < count; ++i) {
		extend[i] = pci_read_config32(dev, PCI_ME_HER(i));
		printk(BIOS_DEBUG, "%08x", extend[i]);
	}
	printk(BIOS_DEBUG, "\n");

	return 0;
}

/* Hide the ME virtual PCI devices */
static void intel_me_hide(struct device *dev)
{
	dev->enabled = 0;
	pch_enable(dev);
}

/* Check whether ME is present and do basic init */
static void intel_me_init(struct device *dev)
{
	me_bios_path path = intel_me_path(dev);

	/* Do initial setup and determine the BIOS path */
	printk(BIOS_NOTICE, "ME: BIOS path: %s\n", me_bios_path_values[path]);

	switch (path) {
	case ME_S3WAKE_BIOS_PATH:
	case ME_DISABLE_BIOS_PATH:
#if CONFIG(HIDE_MEI_ON_ERROR)
	case ME_ERROR_BIOS_PATH:
#endif
		intel_me_hide(dev);
		break;

	case ME_NORMAL_BIOS_PATH:
		/* Validate the extend register */
		if (intel_me_extend_valid(dev) < 0)
			break; /* TODO: force recovery mode */

		/* Prepare MEI MMIO interface */
		if (intel_mei_setup(dev) < 0)
			break;

		/*
		 * Leave the ME unlocked in this path.
		 * It will be locked via SMI command later.
		 */
		break;

#if !CONFIG(HIDE_MEI_ON_ERROR)
	case ME_ERROR_BIOS_PATH:
#endif
	case ME_RECOVERY_BIOS_PATH:
	case ME_FIRMWARE_UPDATE_BIOS_PATH:
		break;
	}
}

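/*
 * Standard PCI resource handling is sufficient here; everything
 * ME-specific happens in intel_me_init().
 */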
static struct device_operations device_ops = {
	.read_resources		= pci_dev_read_resources,
	.set_resources		= pci_dev_set_resources,
	.enable_resources	= pci_dev_enable_resources,
	.init			= intel_me_init,
	.ops_pci		= &pci_dev_ops_pci,
};

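/* MEI/HECI device IDs bound by this driver (0x1c3a: 6-series MEI controller #1) */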
static const unsigned short pci_device_ids[] = {
	0x1c3a,
	PCI_DID_INTEL_IBEXPEAK_HECI1,
	0
};

static const struct pci_driver intel_me __pci_driver = {
	.ops		= &device_ops,
	.vendor		= PCI_VID_INTEL,
	.devices	= pci_device_ids
};