Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 1 | /* |
| 2 | * This file is part of the coreboot project. |
| 3 | * |
| 4 | * Copyright (C) 2011 The Chromium OS Authors. All rights reserved. |
| 5 | * |
| 6 | * This program is free software; you can redistribute it and/or |
| 7 | * modify it under the terms of the GNU General Public License as |
| 8 | * published by the Free Software Foundation; version 2 of |
| 9 | * the License. |
| 10 | * |
| 11 | * This program is distributed in the hope that it will be useful, |
| 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 14 | * GNU General Public License for more details. |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 15 | */ |
| 16 | |
| 17 | /* |
| 18 | * This is a ramstage driver for the Intel Management Engine found in the |
| 19 | * 6-series chipset. It handles the required boot-time messages over the |
| 20 | * MMIO-based Management Engine Interface to tell the ME that the BIOS is |
| 21 | * finished with POST. Additional messages are defined for debug but are |
| 22 | * not used unless the console loglevel is high enough. |
| 23 | */ |
| 24 | |
| 25 | #include <arch/acpi.h> |
Kyösti Mälkki | 13f6650 | 2019-03-03 08:01:05 +0200 | [diff] [blame] | 26 | #include <device/mmio.h> |
Kyösti Mälkki | f1b58b7 | 2019-03-01 13:43:02 +0200 | [diff] [blame] | 27 | #include <device/pci_ops.h> |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 28 | #include <console/console.h> |
| 29 | #include <device/pci_ids.h> |
| 30 | #include <device/pci_def.h> |
| 31 | #include <string.h> |
| 32 | #include <delay.h> |
| 33 | #include <elog.h> |
| 34 | |
Elyes HAOUAS | ead574e | 2018-11-11 20:52:30 +0100 | [diff] [blame] | 35 | #ifndef __SMM__ |
| 36 | #include <device/device.h> |
| 37 | #include <device/pci.h> |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 38 | #endif |
| 39 | |
| 40 | #include "me.h" |
| 41 | #include "pch.h" |
| 42 | |
Julius Werner | cd49cce | 2019-03-05 16:53:33 -0800 | [diff] [blame] | 43 | #if CONFIG(CHROMEOS) |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 44 | #include <vendorcode/google/chromeos/gnvs.h> |
| 45 | #endif |
| 46 | |
#ifndef __SMM__
/*
 * Human-readable names for each ME BIOS path, indexed by the
 * me_bios_path enum values (declared in me.h).  Used only for the
 * boot-time log message in intel_me_init().
 */
static const char *me_bios_path_values[] = {
	[ME_NORMAL_BIOS_PATH] = "Normal",
	[ME_S3WAKE_BIOS_PATH] = "S3 Wake",
	[ME_ERROR_BIOS_PATH] = "Error",
	[ME_RECOVERY_BIOS_PATH] = "Recovery",
	[ME_DISABLE_BIOS_PATH] = "Disable",
	[ME_FIRMWARE_UPDATE_BIOS_PATH] = "Firmware Update",
};
#endif
| 58 | |
/*
 * MMIO base address for MEI interface.  Set from BAR0 of the ME PCI
 * device before any register access: by intel_mei_setup() in ramstage
 * and by intel_me7_finalize_smm() in SMM.
 */
static u32 *mei_base_address;
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 61 | |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 62 | static void mei_dump(void *ptr, int dword, int offset, const char *type) |
| 63 | { |
| 64 | struct mei_csr *csr; |
| 65 | |
Kyösti Mälkki | c86fc8e | 2019-11-06 06:32:27 +0200 | [diff] [blame] | 66 | if (!CONFIG(DEBUG_INTEL_ME)) |
| 67 | return; |
| 68 | |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 69 | printk(BIOS_SPEW, "%-9s[%02x] : ", type, offset); |
| 70 | |
| 71 | switch (offset) { |
| 72 | case MEI_H_CSR: |
| 73 | case MEI_ME_CSR_HA: |
| 74 | csr = ptr; |
| 75 | if (!csr) { |
| 76 | printk(BIOS_SPEW, "ERROR: 0x%08x\n", dword); |
| 77 | break; |
| 78 | } |
| 79 | printk(BIOS_SPEW, "cbd=%u cbrp=%02u cbwp=%02u ready=%u " |
| 80 | "reset=%u ig=%u is=%u ie=%u\n", csr->buffer_depth, |
| 81 | csr->buffer_read_ptr, csr->buffer_write_ptr, |
| 82 | csr->ready, csr->reset, csr->interrupt_generate, |
| 83 | csr->interrupt_status, csr->interrupt_enable); |
| 84 | break; |
| 85 | case MEI_ME_CB_RW: |
| 86 | case MEI_H_CB_WW: |
| 87 | printk(BIOS_SPEW, "CB: 0x%08x\n", dword); |
| 88 | break; |
| 89 | default: |
| 90 | printk(BIOS_SPEW, "0x%08x\n", offset); |
| 91 | break; |
| 92 | } |
| 93 | } |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 94 | |
| 95 | /* |
| 96 | * ME/MEI access helpers using memcpy to avoid aliasing. |
| 97 | */ |
| 98 | |
| 99 | static inline void mei_read_dword_ptr(void *ptr, int offset) |
| 100 | { |
Kevin Paul Herbert | bde6d30 | 2014-12-24 18:43:20 -0800 | [diff] [blame] | 101 | u32 dword = read32(mei_base_address + (offset/sizeof(u32))); |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 102 | memcpy(ptr, &dword, sizeof(dword)); |
| 103 | mei_dump(ptr, dword, offset, "READ"); |
| 104 | } |
| 105 | |
| 106 | static inline void mei_write_dword_ptr(void *ptr, int offset) |
| 107 | { |
| 108 | u32 dword = 0; |
| 109 | memcpy(&dword, ptr, sizeof(dword)); |
Kevin Paul Herbert | bde6d30 | 2014-12-24 18:43:20 -0800 | [diff] [blame] | 110 | write32(mei_base_address + (offset/sizeof(u32)), dword); |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 111 | mei_dump(ptr, dword, offset, "WRITE"); |
| 112 | } |
| 113 | |
| 114 | #ifndef __SMM__ |
Elyes HAOUAS | be84140 | 2018-05-13 13:40:39 +0200 | [diff] [blame] | 115 | static inline void pci_read_dword_ptr(struct device *dev,void *ptr, |
| 116 | int offset) |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 117 | { |
| 118 | u32 dword = pci_read_config32(dev, offset); |
| 119 | memcpy(ptr, &dword, sizeof(dword)); |
| 120 | mei_dump(ptr, dword, offset, "PCI READ"); |
| 121 | } |
| 122 | #endif |
| 123 | |
/* Read the host-side control/status register (H_CSR) into *csr */
static inline void read_host_csr(struct mei_csr *csr)
{
	mei_read_dword_ptr(csr, MEI_H_CSR);
}
| 128 | |
/* Write *csr back to the host-side control/status register (H_CSR) */
static inline void write_host_csr(struct mei_csr *csr)
{
	mei_write_dword_ptr(csr, MEI_H_CSR);
}
| 133 | |
#ifdef __SMM__
/* Read the ME-side CSR host-access shadow (ME_CSR_HA) into *csr */
static inline void read_me_csr(struct mei_csr *csr)
{
	mei_read_dword_ptr(csr, MEI_ME_CSR_HA);
}
| 139 | |
/* Push one dword into the host circular buffer (H_CB_WW) */
static inline void write_cb(u32 dword)
{
	write32(mei_base_address + (MEI_H_CB_WW/sizeof(u32)), dword);
	mei_dump(NULL, dword, MEI_H_CB_WW, "WRITE");
}
| 145 | |
/* Pop one dword from the ME circular buffer (ME_CB_RW) */
static inline u32 read_cb(void)
{
	u32 dword = read32(mei_base_address + (MEI_ME_CB_RW/sizeof(u32)));
	mei_dump(NULL, dword, MEI_ME_CB_RW, "READ");
	return dword;
}
| 152 | |
| 153 | /* Wait for ME ready bit to be asserted */ |
| 154 | static int mei_wait_for_me_ready(void) |
| 155 | { |
| 156 | struct mei_csr me; |
Martin Roth | ff744bf | 2019-10-23 21:46:03 -0600 | [diff] [blame] | 157 | unsigned int try = ME_RETRY; |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 158 | |
| 159 | while (try--) { |
| 160 | read_me_csr(&me); |
| 161 | if (me.ready) |
| 162 | return 0; |
| 163 | udelay(ME_DELAY); |
| 164 | } |
| 165 | |
| 166 | printk(BIOS_ERR, "ME: failed to become ready\n"); |
| 167 | return -1; |
| 168 | } |
| 169 | |
/*
 * Reset the MEI circular buffers and re-assert host readiness.
 *
 * Waits for the ME to report ready, pulses the host reset bit (with an
 * interrupt to the ME), waits for the ME again, then clears reset and
 * sets host ready.  Silently gives up if the ME never becomes ready.
 */
static void mei_reset(void)
{
	struct mei_csr host;

	/* ME must be ready before the buffers can be reset */
	if (mei_wait_for_me_ready() < 0)
		return;

	/* Reset host and ME circular buffers for next message */
	read_host_csr(&host);
	host.reset = 1;
	host.interrupt_generate = 1;
	write_host_csr(&host);

	if (mei_wait_for_me_ready() < 0)
		return;

	/* Re-init and indicate host is ready */
	read_host_csr(&host);
	host.interrupt_generate = 1;
	host.ready = 1;
	host.reset = 0;
	write_host_csr(&host);
}
| 193 | |
/*
 * Send one MEI request (MEI header + MKHI header + payload) through the
 * host circular buffer and interrupt the ME.
 *
 * @mei:      MEI header; mei->length covers the MKHI header plus payload
 * @mkhi:     MKHI header, written as the first dword after the MEI header
 * @req_data: payload dwords following the MKHI header (unused when the
 *            request is header-only)
 *
 * Returns 0 on success, -1 on error.  Large messages are NOT split
 * across multiple transactions.
 */
static int mei_send_msg(struct mei_header *mei, struct mkhi_header *mkhi,
			void *req_data)
{
	struct mei_csr host;
	unsigned int ndata, n;
	u32 *data;

	/* Number of dwords to write, ignoring MKHI */
	ndata = mei->length >> 2;

	/* Pad non-dword aligned request message length */
	if (mei->length & 3)
		ndata++;
	if (!ndata) {
		printk(BIOS_DEBUG, "ME: request does not include MKHI\n");
		return -1;
	}
	ndata++; /* Add MEI header */

	/*
	 * Make sure there is still room left in the circular buffer.
	 * Reset the buffer pointers if the requested message will not fit.
	 */
	read_host_csr(&host);
	if ((host.buffer_depth - host.buffer_write_ptr) < ndata) {
		printk(BIOS_ERR, "ME: circular buffer full, resetting...\n");
		mei_reset();
		read_host_csr(&host);
	}

	/*
	 * This implementation does not handle splitting large messages
	 * across multiple transactions. Ensure the requested length
	 * will fit in the available circular buffer depth.
	 */
	if ((host.buffer_depth - host.buffer_write_ptr) < ndata) {
		/*
		 * NOTE(review): the logged size is ndata + 2 although ndata
		 * already counts the MEI header -- confirm the intended
		 * accounting of this diagnostic.
		 */
		printk(BIOS_ERR, "ME: message (%u) too large for buffer (%u)\n",
		       ndata + 2, host.buffer_depth);
		return -1;
	}

	/* Write MEI header */
	mei_write_dword_ptr(mei, MEI_H_CB_WW);
	ndata--;

	/* Write MKHI header */
	mei_write_dword_ptr(mkhi, MEI_H_CB_WW);
	ndata--;

	/* Write message data */
	data = req_data;
	for (n = 0; n < ndata; ++n)
		write_cb(*data++);

	/* Generate interrupt to the ME */
	read_host_csr(&host);
	host.interrupt_generate = 1;
	write_host_csr(&host);

	/* Make sure ME is ready after sending request data */
	return mei_wait_for_me_ready();
}
| 256 | |
/*
 * Receive one MEI response (MEI header + MKHI header + payload) from the
 * ME circular buffer and validate it against the request headers.
 *
 * @mei:       MEI header of the request (unused here beyond symmetry)
 * @mkhi:      MKHI header of the request; group/command are checked
 *             against the response
 * @rsp_data:  buffer for the payload dwords after the MKHI header
 * @rsp_bytes: expected payload size in bytes
 *
 * Returns 0 on success, -1 on timeout or any header mismatch.
 */
static int mei_recv_msg(struct mei_header *mei, struct mkhi_header *mkhi,
			void *rsp_data, int rsp_bytes)
{
	struct mei_header mei_rsp;
	struct mkhi_header mkhi_rsp;
	struct mei_csr me, host;
	unsigned int ndata, n;
	unsigned int expected;
	u32 *data;

	/* Total number of dwords to read from circular buffer */
	expected = (rsp_bytes + sizeof(mei_rsp) + sizeof(mkhi_rsp)) >> 2;
	if (rsp_bytes & 3)
		expected++;

	/*
	 * The interrupt status bit does not appear to indicate that the
	 * message has actually been received. Instead we wait until the
	 * expected number of dwords are present in the circular buffer.
	 */
	for (n = ME_RETRY; n; --n) {
		read_me_csr(&me);
		if ((me.buffer_write_ptr - me.buffer_read_ptr) >= expected)
			break;
		udelay(ME_DELAY);
	}
	if (!n) {
		printk(BIOS_ERR, "ME: timeout waiting for data: expected "
		       "%u, available %u\n", expected,
		       me.buffer_write_ptr - me.buffer_read_ptr);
		return -1;
	}

	/* Read and verify MEI response header from the ME */
	mei_read_dword_ptr(&mei_rsp, MEI_ME_CB_RW);
	if (!mei_rsp.is_complete) {
		printk(BIOS_ERR, "ME: response is not complete\n");
		return -1;
	}

	/* Handle non-dword responses and expect at least MKHI header */
	ndata = mei_rsp.length >> 2;
	if (mei_rsp.length & 3)
		ndata++;
	/* Response length must match request: expected minus the MEI header */
	if (ndata != (expected - 1)) {
		printk(BIOS_ERR, "ME: response is missing data\n");
		return -1;
	}

	/* Read and verify MKHI response header from the ME */
	mei_read_dword_ptr(&mkhi_rsp, MEI_ME_CB_RW);
	if (!mkhi_rsp.is_response ||
	    mkhi->group_id != mkhi_rsp.group_id ||
	    mkhi->command != mkhi_rsp.command) {
		printk(BIOS_ERR, "ME: invalid response, group %u ?= %u, "
		       "command %u ?= %u, is_response %u\n", mkhi->group_id,
		       mkhi_rsp.group_id, mkhi->command, mkhi_rsp.command,
		       mkhi_rsp.is_response);
		return -1;
	}
	ndata--; /* MKHI header has been read */

	/*
	 * Make sure caller passed a buffer with enough space.
	 * NOTE(review): this is an exact-size check, not a minimum-size
	 * check as the message suggests -- confirm intent.
	 */
	if (ndata != (rsp_bytes >> 2)) {
		printk(BIOS_ERR, "ME: not enough room in response buffer: "
		       "%u != %u\n", ndata, rsp_bytes >> 2);
		return -1;
	}

	/* Read response data from the circular buffer */
	data = rsp_data;
	for (n = 0; n < ndata; ++n)
		*data++ = read_cb();

	/* Tell the ME that we have consumed the response */
	read_host_csr(&host);
	host.interrupt_status = 1;
	host.interrupt_generate = 1;
	write_host_csr(&host);

	return mei_wait_for_me_ready();
}
| 339 | |
/*
 * Perform one complete MEI transaction: send the request, then receive
 * and validate the response.  Returns 0 on success, -1 on any failure.
 */
static inline int mei_sendrecv(struct mei_header *mei, struct mkhi_header *mkhi,
			       void *req_data, void *rsp_data, int rsp_bytes)
{
	/* Short-circuit keeps the original send-then-receive ordering */
	if (mei_send_msg(mei, mkhi, req_data) < 0 ||
	    mei_recv_msg(mei, mkhi, rsp_data, rsp_bytes) < 0)
		return -1;
	return 0;
}
| 349 | |
/*
 * Send the END OF POST (EOP) message to the ME, telling it that BIOS
 * POST is complete.  Returns 0 on success, -1 if the MEI transaction
 * failed.
 */
static int mkhi_end_of_post(void)
{
	struct mkhi_header mkhi = {
		.group_id	= MKHI_GROUP_ID_GEN,
		.command	= MKHI_END_OF_POST,
	};
	struct mei_header mei = {
		.is_complete	= 1,
		.host_address	= MEI_HOST_ADDRESS,
		.client_address	= MEI_ADDRESS_MKHI,
		.length		= sizeof(mkhi),
	};

	/* Send request and wait for response (EOP has no payload) */
	if (mei_sendrecv(&mei, &mkhi, NULL, NULL, 0) < 0) {
		printk(BIOS_ERR, "ME: END OF POST message failed\n");
		return -1;
	}

	printk(BIOS_INFO, "ME: END OF POST message successful\n");
	return 0;
}
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 373 | |
/*
 * SMM finalize sequence for ME generation 7: send EOP if the ME is in
 * normal mode, then disable and hide the MEI PCI device.
 */
static void intel_me7_finalize_smm(void)
{
	struct me_hfs hfs;
	u32 reg32;

	/* BAR0 low bits are flags; mask them off to get the MMIO base */
	mei_base_address = (u32 *)
		(pci_read_config32(PCH_ME_DEV, PCI_BASE_ADDRESS_0) & ~0xf);

	/* S3 path will have hidden this device already */
	if (!mei_base_address || mei_base_address == (u32 *)0xfffffff0)
		return;

	/* Make sure ME is in a mode that expects EOP */
	reg32 = pci_read_config32(PCH_ME_DEV, PCI_ME_HFS);
	memcpy(&hfs, &reg32, sizeof(u32));

	/* Abort and leave device alone if not normal mode */
	if (hfs.fpt_bad ||
	    hfs.working_state != ME_HFS_CWS_NORMAL ||
	    hfs.operation_mode != ME_HFS_MODE_NORMAL)
		return;

	/* Try to send EOP command so ME stops accepting other commands */
	mkhi_end_of_post();

	/* Make sure IO is disabled */
	reg32 = pci_read_config32(PCH_ME_DEV, PCI_COMMAND);
	reg32 &= ~(PCI_COMMAND_MASTER |
		   PCI_COMMAND_MEMORY | PCI_COMMAND_IO);
	pci_write_config32(PCH_ME_DEV, PCI_COMMAND, reg32);

	/* Hide the PCI device */
	RCBA32_OR(FD2, PCH_DISABLE_MEI1);
}
| 408 | |
| 409 | void intel_me_finalize_smm(void) |
| 410 | { |
| 411 | u32 did = pci_read_config32(PCH_ME_DEV, PCI_VENDOR_ID); |
| 412 | switch (did) { |
| 413 | case 0x1c3a8086: |
| 414 | intel_me7_finalize_smm(); |
| 415 | break; |
| 416 | case 0x1e3a8086: |
| 417 | intel_me8_finalize_smm(); |
| 418 | break; |
| 419 | default: |
| 420 | printk(BIOS_ERR, "No finalize handler for ME %08x.\n", did); |
| 421 | } |
| 422 | } |
| 423 | #else /* !__SMM__ */ |
| 424 | |
/*
 * Determine the path that we should take based on ME status.
 *
 * Reads the HFS and GMES status words from the ME PCI config space and
 * maps the current working state / operation mode onto a me_bios_path.
 * Any abnormal path is recorded in the event log when ELOG is enabled.
 *
 * @dev: the ME PCI device
 * Returns the chosen me_bios_path.
 */
static me_bios_path intel_me_path(struct device *dev)
{
	me_bios_path path = ME_DISABLE_BIOS_PATH;
	struct me_hfs hfs;
	struct me_gmes gmes;

	/* S3 wake skips all MKHI messages */
	if (acpi_is_wakeup_s3())
		return ME_S3WAKE_BIOS_PATH;

	pci_read_dword_ptr(dev, &hfs, PCI_ME_HFS);
	pci_read_dword_ptr(dev, &gmes, PCI_ME_GMES);

	/* Check and dump status */
	intel_me_status(&hfs, &gmes);

	/* Check Current Working State */
	switch (hfs.working_state) {
	case ME_HFS_CWS_NORMAL:
		path = ME_NORMAL_BIOS_PATH;
		break;
	case ME_HFS_CWS_REC:
		path = ME_RECOVERY_BIOS_PATH;
		break;
	default:
		path = ME_DISABLE_BIOS_PATH;
		break;
	}

	/* Check Current Operation Mode (anything but normal disables) */
	switch (hfs.operation_mode) {
	case ME_HFS_MODE_NORMAL:
		break;
	case ME_HFS_MODE_DEBUG:
	case ME_HFS_MODE_DIS:
	case ME_HFS_MODE_OVER_JMPR:
	case ME_HFS_MODE_OVER_MEI:
	default:
		path = ME_DISABLE_BIOS_PATH;
		break;
	}

	/* Check for any error code and valid firmware */
	if (hfs.error_code || hfs.fpt_bad)
		path = ME_ERROR_BIOS_PATH;

	/* Log any non-normal path with extended ME state for diagnosis */
	if (CONFIG(ELOG) && path != ME_NORMAL_BIOS_PATH) {
		struct elog_event_data_me_extended data = {
			.current_working_state = hfs.working_state,
			.operation_state       = hfs.operation_state,
			.operation_mode        = hfs.operation_mode,
			.error_code            = hfs.error_code,
			.progress_code         = gmes.progress_code,
			.current_pmevent       = gmes.current_pmevent,
			.current_state         = gmes.current_state,
		};
		elog_add_event_byte(ELOG_TYPE_MANAGEMENT_ENGINE, path);
		elog_add_event_raw(ELOG_TYPE_MANAGEMENT_ENGINE_EXT,
				   &data, sizeof(data));
	}

	return path;
}
| 489 | |
/*
 * Prepare ME for MEI messages: locate the MEI MMIO BAR, enable memory
 * and bus-master access, and mark the host interface ready.
 *
 * @dev: the ME PCI device
 * Returns 0 on success, -1 if the MMIO resource is missing.
 */
static int intel_mei_setup(struct device *dev)
{
	struct resource *res;
	struct mei_csr host;
	u32 reg32;

	/* Find the MMIO base for the ME interface */
	res = find_resource(dev, PCI_BASE_ADDRESS_0);
	if (!res || res->base == 0 || res->size == 0) {
		printk(BIOS_DEBUG, "ME: MEI resource not present!\n");
		return -1;
	}
	mei_base_address = (u32 *)(uintptr_t)res->base;

	/* Ensure Memory and Bus Master bits are set */
	reg32 = pci_read_config32(dev, PCI_COMMAND);
	reg32 |= PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
	pci_write_config32(dev, PCI_COMMAND, reg32);

	/* Clean up status for next message */
	read_host_csr(&host);
	host.interrupt_generate = 1;
	host.ready = 1;
	host.reset = 0;
	write_host_csr(&host);

	return 0;
}
| 519 | |
/*
 * Read and print the Extend register hash of the ME firmware.
 *
 * The Extend register holds a SHA-1 or SHA-256 measurement of the ME
 * firmware.  On CHROMEOS builds the hash is also stored in NVS so the
 * OS can verify it.
 *
 * @dev: the ME PCI device
 * Returns 0 on success, -1 if the feature is absent, the register is
 * invalid, or the hash algorithm is unknown.
 */
static int intel_me_extend_valid(struct device *dev)
{
	struct me_heres status;
	u32 extend[8] = {0};
	int i, count = 0;

	pci_read_dword_ptr(dev, &status, PCI_ME_HERES);
	if (!status.extend_feature_present) {
		printk(BIOS_ERR, "ME: Extend Feature not present\n");
		return -1;
	}

	if (!status.extend_reg_valid) {
		printk(BIOS_ERR, "ME: Extend Register not valid\n");
		return -1;
	}

	/* Hash width depends on the algorithm: 5 dwords SHA-1, 8 SHA-256 */
	switch (status.extend_reg_algorithm) {
	case PCI_ME_EXT_SHA1:
		count = 5;
		printk(BIOS_DEBUG, "ME: Extend SHA-1: ");
		break;
	case PCI_ME_EXT_SHA256:
		count = 8;
		printk(BIOS_DEBUG, "ME: Extend SHA-256: ");
		break;
	default:
		printk(BIOS_ERR, "ME: Extend Algorithm %d unknown\n",
		       status.extend_reg_algorithm);
		return -1;
	}

	for (i = 0; i < count; ++i) {
		extend[i] = pci_read_config32(dev, PCI_ME_HER(i));
		printk(BIOS_DEBUG, "%08x", extend[i]);
	}
	printk(BIOS_DEBUG, "\n");

#if CONFIG(CHROMEOS)
	/* Save hash in NVS for the OS to verify */
	chromeos_set_me_hash(extend, count);
#endif

	return 0;
}
| 566 | |
/*
 * Hide the ME virtual PCI devices: mark the device disabled and let the
 * PCH enable hook apply the function-disable state.
 */
static void intel_me_hide(struct device *dev)
{
	dev->enabled = 0;
	pch_enable(dev);
}
| 573 | |
| 574 | /* Check whether ME is present and do basic init */ |
Elyes HAOUAS | be84140 | 2018-05-13 13:40:39 +0200 | [diff] [blame] | 575 | static void intel_me_init(struct device *dev) |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 576 | { |
| 577 | me_bios_path path = intel_me_path(dev); |
| 578 | |
| 579 | /* Do initial setup and determine the BIOS path */ |
| 580 | printk(BIOS_NOTICE, "ME: BIOS path: %s\n", me_bios_path_values[path]); |
| 581 | |
| 582 | switch (path) { |
| 583 | case ME_S3WAKE_BIOS_PATH: |
| 584 | intel_me_hide(dev); |
| 585 | break; |
| 586 | |
| 587 | case ME_NORMAL_BIOS_PATH: |
| 588 | /* Validate the extend register */ |
| 589 | if (intel_me_extend_valid(dev) < 0) |
| 590 | break; /* TODO: force recovery mode */ |
| 591 | |
| 592 | /* Prepare MEI MMIO interface */ |
| 593 | if (intel_mei_setup(dev) < 0) |
| 594 | break; |
| 595 | |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 596 | /* |
| 597 | * Leave the ME unlocked in this path. |
| 598 | * It will be locked via SMI command later. |
| 599 | */ |
| 600 | break; |
| 601 | |
| 602 | case ME_ERROR_BIOS_PATH: |
| 603 | case ME_RECOVERY_BIOS_PATH: |
| 604 | case ME_DISABLE_BIOS_PATH: |
| 605 | case ME_FIRMWARE_UPDATE_BIOS_PATH: |
| 606 | break; |
| 607 | } |
| 608 | } |
| 609 | |
/* PCI-specific operations: use the generic subsystem-ID setter */
static struct pci_operations pci_ops = {
	.set_subsystem = pci_dev_set_subsystem,
};
| 613 | |
/* Device operations for the ME PCI device: generic PCI resource
   handling plus the ME-specific init step. */
static struct device_operations device_ops = {
	.read_resources		= pci_dev_read_resources,
	.set_resources		= pci_dev_set_resources,
	.enable_resources	= pci_dev_enable_resources,
	.init			= intel_me_init,
	.ops_pci		= &pci_ops,
};
| 621 | |
/* Supported MEI PCI device IDs, zero-terminated.
   0x1c3a matches the finalize dispatch above; 0x3b64 is presumably an
   earlier-generation MEI device -- verify against the PCH datasheet. */
static const unsigned short pci_device_ids[] = { 0x1c3a, 0x3b64,
						 0 };
| 624 | |
| 625 | |
/* Register this driver for the Intel MEI devices listed above */
static const struct pci_driver intel_me __pci_driver = {
	.ops	= &device_ops,
	.vendor	= PCI_VENDOR_ID_INTEL,
	.devices = pci_device_ids
};
| 631 | |
| 632 | #endif /* !__SMM__ */ |