Angel Pons | 182dbde | 2020-04-02 23:49:05 +0200 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
| 2 | /* This file is part of the coreboot project. */ |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 3 | |
| 4 | /* |
| 5 | * This is a ramstage driver for the Intel Management Engine found in the |
| 6 | * 6-series chipset. It handles the required boot-time messages over the |
| 7 | * MMIO-based Management Engine Interface to tell the ME that the BIOS is |
| 8 | * finished with POST. Additional messages are defined for debug but are |
| 9 | * not used unless the console loglevel is high enough. |
| 10 | */ |
| 11 | |
Furquan Shaikh | 76cedd2 | 2020-05-02 10:24:23 -0700 | [diff] [blame^] | 12 | #include <acpi/acpi.h> |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 13 | #include <console/console.h> |
Kyösti Mälkki | 21d6a27 | 2019-11-05 18:50:38 +0200 | [diff] [blame] | 14 | #include <device/device.h> |
| 15 | #include <device/mmio.h> |
| 16 | #include <device/pci.h> |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 17 | #include <device/pci_def.h> |
Kyösti Mälkki | 21d6a27 | 2019-11-05 18:50:38 +0200 | [diff] [blame] | 18 | #include <device/pci_ids.h> |
| 19 | #include <device/pci_ops.h> |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 20 | #include <string.h> |
| 21 | #include <delay.h> |
| 22 | #include <elog.h> |
| 23 | |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 24 | #include "me.h" |
| 25 | #include "pch.h" |
| 26 | |
Julius Werner | cd49cce | 2019-03-05 16:53:33 -0800 | [diff] [blame] | 27 | #if CONFIG(CHROMEOS) |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 28 | #include <vendorcode/google/chromeos/gnvs.h> |
| 29 | #endif |
| 30 | |
/*
 * Path that the BIOS should take based on ME state.  Indexed by the
 * me_bios_path enum values; used only for the boot-path log message in
 * intel_me_init().  Marked __unused because that consumer is compiled
 * out in __SIMPLE_DEVICE__ (SMM) builds.
 */
static const char *me_bios_path_values[] __unused = {
	[ME_NORMAL_BIOS_PATH] = "Normal",
	[ME_S3WAKE_BIOS_PATH] = "S3 Wake",
	[ME_ERROR_BIOS_PATH] = "Error",
	[ME_RECOVERY_BIOS_PATH] = "Recovery",
	[ME_DISABLE_BIOS_PATH] = "Disable",
	[ME_FIRMWARE_UPDATE_BIOS_PATH] = "Firmware Update",
};
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 40 | |
/*
 * MMIO base address for the MEI interface.  Set from BAR0 either in
 * intel_mei_setup() (ramstage) or intel_me7_finalize_smm() (SMM) before
 * any of the mei_* accessors below are used.
 */
static u32 *mei_base_address;
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 43 | |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 44 | static void mei_dump(void *ptr, int dword, int offset, const char *type) |
| 45 | { |
| 46 | struct mei_csr *csr; |
| 47 | |
Kyösti Mälkki | c86fc8e | 2019-11-06 06:32:27 +0200 | [diff] [blame] | 48 | if (!CONFIG(DEBUG_INTEL_ME)) |
| 49 | return; |
| 50 | |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 51 | printk(BIOS_SPEW, "%-9s[%02x] : ", type, offset); |
| 52 | |
| 53 | switch (offset) { |
| 54 | case MEI_H_CSR: |
| 55 | case MEI_ME_CSR_HA: |
| 56 | csr = ptr; |
| 57 | if (!csr) { |
| 58 | printk(BIOS_SPEW, "ERROR: 0x%08x\n", dword); |
| 59 | break; |
| 60 | } |
| 61 | printk(BIOS_SPEW, "cbd=%u cbrp=%02u cbwp=%02u ready=%u " |
| 62 | "reset=%u ig=%u is=%u ie=%u\n", csr->buffer_depth, |
| 63 | csr->buffer_read_ptr, csr->buffer_write_ptr, |
| 64 | csr->ready, csr->reset, csr->interrupt_generate, |
| 65 | csr->interrupt_status, csr->interrupt_enable); |
| 66 | break; |
| 67 | case MEI_ME_CB_RW: |
| 68 | case MEI_H_CB_WW: |
| 69 | printk(BIOS_SPEW, "CB: 0x%08x\n", dword); |
| 70 | break; |
| 71 | default: |
| 72 | printk(BIOS_SPEW, "0x%08x\n", offset); |
| 73 | break; |
| 74 | } |
| 75 | } |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 76 | |
| 77 | /* |
| 78 | * ME/MEI access helpers using memcpy to avoid aliasing. |
| 79 | */ |
| 80 | |
| 81 | static inline void mei_read_dword_ptr(void *ptr, int offset) |
| 82 | { |
Kevin Paul Herbert | bde6d30 | 2014-12-24 18:43:20 -0800 | [diff] [blame] | 83 | u32 dword = read32(mei_base_address + (offset/sizeof(u32))); |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 84 | memcpy(ptr, &dword, sizeof(dword)); |
| 85 | mei_dump(ptr, dword, offset, "READ"); |
| 86 | } |
| 87 | |
| 88 | static inline void mei_write_dword_ptr(void *ptr, int offset) |
| 89 | { |
| 90 | u32 dword = 0; |
| 91 | memcpy(&dword, ptr, sizeof(dword)); |
Kevin Paul Herbert | bde6d30 | 2014-12-24 18:43:20 -0800 | [diff] [blame] | 92 | write32(mei_base_address + (offset/sizeof(u32)), dword); |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 93 | mei_dump(ptr, dword, offset, "WRITE"); |
| 94 | } |
| 95 | |
Kyösti Mälkki | 21d6a27 | 2019-11-05 18:50:38 +0200 | [diff] [blame] | 96 | #ifndef __SIMPLE_DEVICE__ |
Elyes HAOUAS | be84140 | 2018-05-13 13:40:39 +0200 | [diff] [blame] | 97 | static inline void pci_read_dword_ptr(struct device *dev,void *ptr, |
| 98 | int offset) |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 99 | { |
| 100 | u32 dword = pci_read_config32(dev, offset); |
| 101 | memcpy(ptr, &dword, sizeof(dword)); |
| 102 | mei_dump(ptr, dword, offset, "PCI READ"); |
| 103 | } |
| 104 | #endif |
| 105 | |
/* Read the host-side control/status register (H_CSR) into *csr. */
static inline void read_host_csr(struct mei_csr *csr)
{
	mei_read_dword_ptr(csr, MEI_H_CSR);
}
| 110 | |
/* Write *csr back to the host-side control/status register (H_CSR). */
static inline void write_host_csr(struct mei_csr *csr)
{
	mei_write_dword_ptr(csr, MEI_H_CSR);
}
| 115 | |
/* Read the ME-side control/status register (host access copy) into *csr. */
static inline void read_me_csr(struct mei_csr *csr)
{
	mei_read_dword_ptr(csr, MEI_ME_CSR_HA);
}
| 120 | |
| 121 | static inline void write_cb(u32 dword) |
| 122 | { |
Kevin Paul Herbert | bde6d30 | 2014-12-24 18:43:20 -0800 | [diff] [blame] | 123 | write32(mei_base_address + (MEI_H_CB_WW/sizeof(u32)), dword); |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 124 | mei_dump(NULL, dword, MEI_H_CB_WW, "WRITE"); |
| 125 | } |
| 126 | |
| 127 | static inline u32 read_cb(void) |
| 128 | { |
Kevin Paul Herbert | bde6d30 | 2014-12-24 18:43:20 -0800 | [diff] [blame] | 129 | u32 dword = read32(mei_base_address + (MEI_ME_CB_RW/sizeof(u32))); |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 130 | mei_dump(NULL, dword, MEI_ME_CB_RW, "READ"); |
| 131 | return dword; |
| 132 | } |
| 133 | |
| 134 | /* Wait for ME ready bit to be asserted */ |
| 135 | static int mei_wait_for_me_ready(void) |
| 136 | { |
| 137 | struct mei_csr me; |
Martin Roth | ff744bf | 2019-10-23 21:46:03 -0600 | [diff] [blame] | 138 | unsigned int try = ME_RETRY; |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 139 | |
| 140 | while (try--) { |
| 141 | read_me_csr(&me); |
| 142 | if (me.ready) |
| 143 | return 0; |
| 144 | udelay(ME_DELAY); |
| 145 | } |
| 146 | |
| 147 | printk(BIOS_ERR, "ME: failed to become ready\n"); |
| 148 | return -1; |
| 149 | } |
| 150 | |
| 151 | static void mei_reset(void) |
| 152 | { |
| 153 | struct mei_csr host; |
| 154 | |
| 155 | if (mei_wait_for_me_ready() < 0) |
| 156 | return; |
| 157 | |
| 158 | /* Reset host and ME circular buffers for next message */ |
| 159 | read_host_csr(&host); |
| 160 | host.reset = 1; |
| 161 | host.interrupt_generate = 1; |
| 162 | write_host_csr(&host); |
| 163 | |
| 164 | if (mei_wait_for_me_ready() < 0) |
| 165 | return; |
| 166 | |
| 167 | /* Re-init and indicate host is ready */ |
| 168 | read_host_csr(&host); |
| 169 | host.interrupt_generate = 1; |
| 170 | host.ready = 1; |
| 171 | host.reset = 0; |
| 172 | write_host_csr(&host); |
| 173 | } |
| 174 | |
| 175 | static int mei_send_msg(struct mei_header *mei, struct mkhi_header *mkhi, |
| 176 | void *req_data) |
| 177 | { |
| 178 | struct mei_csr host; |
Martin Roth | ff744bf | 2019-10-23 21:46:03 -0600 | [diff] [blame] | 179 | unsigned int ndata, n; |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 180 | u32 *data; |
| 181 | |
| 182 | /* Number of dwords to write, ignoring MKHI */ |
| 183 | ndata = mei->length >> 2; |
| 184 | |
| 185 | /* Pad non-dword aligned request message length */ |
| 186 | if (mei->length & 3) |
| 187 | ndata++; |
| 188 | if (!ndata) { |
| 189 | printk(BIOS_DEBUG, "ME: request does not include MKHI\n"); |
| 190 | return -1; |
| 191 | } |
| 192 | ndata++; /* Add MEI header */ |
| 193 | |
| 194 | /* |
| 195 | * Make sure there is still room left in the circular buffer. |
| 196 | * Reset the buffer pointers if the requested message will not fit. |
| 197 | */ |
| 198 | read_host_csr(&host); |
| 199 | if ((host.buffer_depth - host.buffer_write_ptr) < ndata) { |
| 200 | printk(BIOS_ERR, "ME: circular buffer full, resetting...\n"); |
| 201 | mei_reset(); |
| 202 | read_host_csr(&host); |
| 203 | } |
| 204 | |
| 205 | /* |
| 206 | * This implementation does not handle splitting large messages |
| 207 | * across multiple transactions. Ensure the requested length |
| 208 | * will fit in the available circular buffer depth. |
| 209 | */ |
| 210 | if ((host.buffer_depth - host.buffer_write_ptr) < ndata) { |
| 211 | printk(BIOS_ERR, "ME: message (%u) too large for buffer (%u)\n", |
| 212 | ndata + 2, host.buffer_depth); |
| 213 | return -1; |
| 214 | } |
| 215 | |
| 216 | /* Write MEI header */ |
| 217 | mei_write_dword_ptr(mei, MEI_H_CB_WW); |
| 218 | ndata--; |
| 219 | |
| 220 | /* Write MKHI header */ |
| 221 | mei_write_dword_ptr(mkhi, MEI_H_CB_WW); |
| 222 | ndata--; |
| 223 | |
| 224 | /* Write message data */ |
| 225 | data = req_data; |
| 226 | for (n = 0; n < ndata; ++n) |
| 227 | write_cb(*data++); |
| 228 | |
| 229 | /* Generate interrupt to the ME */ |
| 230 | read_host_csr(&host); |
| 231 | host.interrupt_generate = 1; |
| 232 | write_host_csr(&host); |
| 233 | |
| 234 | /* Make sure ME is ready after sending request data */ |
| 235 | return mei_wait_for_me_ready(); |
| 236 | } |
| 237 | |
/*
 * Receive and validate a MKHI response from the ME circular buffer.
 *
 * @mei/@mkhi: headers of the request that was just sent; used to verify
 *             the response matches (group id and command).
 * @rsp_data:  buffer receiving the response payload, read in dwords.
 * @rsp_bytes: expected payload size in bytes (0 for header-only replies).
 *
 * Returns 0 on success, -1 on timeout or a malformed/mismatched response.
 */
static int mei_recv_msg(struct mei_header *mei, struct mkhi_header *mkhi,
			void *rsp_data, int rsp_bytes)
{
	struct mei_header mei_rsp;
	struct mkhi_header mkhi_rsp;
	struct mei_csr me, host;
	unsigned int ndata, n;
	unsigned int expected;
	u32 *data;

	/* Total number of dwords to read from circular buffer */
	expected = (rsp_bytes + sizeof(mei_rsp) + sizeof(mkhi_rsp)) >> 2;
	if (rsp_bytes & 3)
		expected++;

	/*
	 * The interrupt status bit does not appear to indicate that the
	 * message has actually been received. Instead we wait until the
	 * expected number of dwords are present in the circular buffer.
	 */
	for (n = ME_RETRY; n; --n) {
		read_me_csr(&me);
		if ((me.buffer_write_ptr - me.buffer_read_ptr) >= expected)
			break;
		udelay(ME_DELAY);
	}
	if (!n) {
		printk(BIOS_ERR, "ME: timeout waiting for data: expected "
		       "%u, available %u\n", expected,
		       me.buffer_write_ptr - me.buffer_read_ptr);
		return -1;
	}

	/* Read and verify MEI response header from the ME */
	mei_read_dword_ptr(&mei_rsp, MEI_ME_CB_RW);
	if (!mei_rsp.is_complete) {
		printk(BIOS_ERR, "ME: response is not complete\n");
		return -1;
	}

	/* Handle non-dword responses and expect at least MKHI header */
	ndata = mei_rsp.length >> 2;
	if (mei_rsp.length & 3)
		ndata++;
	/* expected - 1 because "expected" also counted the MEI header
	   that was just consumed above */
	if (ndata != (expected - 1)) {
		printk(BIOS_ERR, "ME: response is missing data\n");
		return -1;
	}

	/* Read and verify MKHI response header from the ME */
	mei_read_dword_ptr(&mkhi_rsp, MEI_ME_CB_RW);
	if (!mkhi_rsp.is_response ||
	    mkhi->group_id != mkhi_rsp.group_id ||
	    mkhi->command != mkhi_rsp.command) {
		printk(BIOS_ERR, "ME: invalid response, group %u ?= %u, "
		       "command %u ?= %u, is_response %u\n", mkhi->group_id,
		       mkhi_rsp.group_id, mkhi->command, mkhi_rsp.command,
		       mkhi_rsp.is_response);
		return -1;
	}
	ndata--; /* MKHI header has been read */

	/* Make sure caller passed a buffer with enough space */
	if (ndata != (rsp_bytes >> 2)) {
		printk(BIOS_ERR, "ME: not enough room in response buffer: "
		       "%u != %u\n", ndata, rsp_bytes >> 2);
		return -1;
	}

	/* Read response data from the circular buffer */
	data = rsp_data;
	for (n = 0; n < ndata; ++n)
		*data++ = read_cb();

	/* Tell the ME that we have consumed the response */
	read_host_csr(&host);
	host.interrupt_status = 1;
	host.interrupt_generate = 1;
	write_host_csr(&host);

	return mei_wait_for_me_ready();
}
| 320 | |
/*
 * Perform a full MKHI transaction: send the request, then read back and
 * validate the response.  Returns 0 on success, -1 on any failure (the
 * receive step is skipped when the send already failed).
 */
static inline int mei_sendrecv(struct mei_header *mei, struct mkhi_header *mkhi,
			       void *req_data, void *rsp_data, int rsp_bytes)
{
	if (mei_send_msg(mei, mkhi, req_data) < 0 ||
	    mei_recv_msg(mei, mkhi, rsp_data, rsp_bytes) < 0)
		return -1;

	return 0;
}
| 330 | |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 331 | /* Send END OF POST message to the ME */ |
Kyösti Mälkki | 21d6a27 | 2019-11-05 18:50:38 +0200 | [diff] [blame] | 332 | static int __unused mkhi_end_of_post(void) |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 333 | { |
| 334 | struct mkhi_header mkhi = { |
| 335 | .group_id = MKHI_GROUP_ID_GEN, |
| 336 | .command = MKHI_END_OF_POST, |
| 337 | }; |
| 338 | struct mei_header mei = { |
| 339 | .is_complete = 1, |
| 340 | .host_address = MEI_HOST_ADDRESS, |
| 341 | .client_address = MEI_ADDRESS_MKHI, |
| 342 | .length = sizeof(mkhi), |
| 343 | }; |
| 344 | |
| 345 | /* Send request and wait for response */ |
| 346 | if (mei_sendrecv(&mei, &mkhi, NULL, NULL, 0) < 0) { |
| 347 | printk(BIOS_ERR, "ME: END OF POST message failed\n"); |
| 348 | return -1; |
| 349 | } |
| 350 | |
| 351 | printk(BIOS_INFO, "ME: END OF POST message successful\n"); |
| 352 | return 0; |
| 353 | } |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 354 | |
Kyösti Mälkki | 21d6a27 | 2019-11-05 18:50:38 +0200 | [diff] [blame] | 355 | #ifdef __SIMPLE_DEVICE__ |
| 356 | |
/*
 * SMM finalize handler for ME 7.x: send END OF POST so the ME stops
 * accepting commands, then disable and hide the MEI PCI function.
 */
static void intel_me7_finalize_smm(void)
{
	struct me_hfs hfs;
	u32 reg32;
	u16 reg16;

	/* BAR0 with the low control bits masked off gives the MMIO base */
	mei_base_address = (u32 *)
		(pci_read_config32(PCH_ME_DEV, PCI_BASE_ADDRESS_0) & ~0xf);

	/* S3 path will have hidden this device already; a hidden device
	   presumably reads BAR0 as all-ones, i.e. 0xfffffff0 after the
	   mask above — TODO confirm against the S3 hide path */
	if (!mei_base_address || mei_base_address == (u32 *)0xfffffff0)
		return;

	/* Make sure ME is in a mode that expects EOP */
	reg32 = pci_read_config32(PCH_ME_DEV, PCI_ME_HFS);
	memcpy(&hfs, &reg32, sizeof(u32));

	/* Abort and leave device alone if not normal mode */
	if (hfs.fpt_bad ||
	    hfs.working_state != ME_HFS_CWS_NORMAL ||
	    hfs.operation_mode != ME_HFS_MODE_NORMAL)
		return;

	/* Try to send EOP command so ME stops accepting other commands */
	mkhi_end_of_post();

	/* Make sure IO is disabled */
	reg16 = pci_read_config16(PCH_ME_DEV, PCI_COMMAND);
	reg16 &= ~(PCI_COMMAND_MASTER |
		   PCI_COMMAND_MEMORY | PCI_COMMAND_IO);
	pci_write_config16(PCH_ME_DEV, PCI_COMMAND, reg16);

	/* Hide the PCI device */
	RCBA32_OR(FD2, PCH_DISABLE_MEI1);
}
| 392 | |
| 393 | void intel_me_finalize_smm(void) |
| 394 | { |
| 395 | u32 did = pci_read_config32(PCH_ME_DEV, PCI_VENDOR_ID); |
| 396 | switch (did) { |
| 397 | case 0x1c3a8086: |
| 398 | intel_me7_finalize_smm(); |
| 399 | break; |
| 400 | case 0x1e3a8086: |
| 401 | intel_me8_finalize_smm(); |
| 402 | break; |
| 403 | default: |
| 404 | printk(BIOS_ERR, "No finalize handler for ME %08x.\n", did); |
| 405 | } |
| 406 | } |
Kyösti Mälkki | 21d6a27 | 2019-11-05 18:50:38 +0200 | [diff] [blame] | 407 | #else /* !__SIMPLE_DEVICE__ */ |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 408 | |
/*
 * Determine the path that we should take based on ME status.
 *
 * Reads the Host Firmware Status (HFS) and General ME Status (GMES)
 * registers, logs them, and maps working state / operation mode /
 * error bits onto a me_bios_path.  Any non-normal path is recorded in
 * the event log when ELOG is enabled.
 */
static me_bios_path intel_me_path(struct device *dev)
{
	me_bios_path path = ME_DISABLE_BIOS_PATH;
	struct me_hfs hfs;
	struct me_gmes gmes;

	/* S3 wake skips all MKHI messages */
	if (acpi_is_wakeup_s3())
		return ME_S3WAKE_BIOS_PATH;

	pci_read_dword_ptr(dev, &hfs, PCI_ME_HFS);
	pci_read_dword_ptr(dev, &gmes, PCI_ME_GMES);

	/* Check and dump status */
	intel_me_status(&hfs, &gmes);

	/* Check Current Working State */
	switch (hfs.working_state) {
	case ME_HFS_CWS_NORMAL:
		path = ME_NORMAL_BIOS_PATH;
		break;
	case ME_HFS_CWS_REC:
		path = ME_RECOVERY_BIOS_PATH;
		break;
	default:
		path = ME_DISABLE_BIOS_PATH;
		break;
	}

	/* Check Current Operation Mode (overrides working state result
	   for any mode other than normal) */
	switch (hfs.operation_mode) {
	case ME_HFS_MODE_NORMAL:
		break;
	case ME_HFS_MODE_DEBUG:
	case ME_HFS_MODE_DIS:
	case ME_HFS_MODE_OVER_JMPR:
	case ME_HFS_MODE_OVER_MEI:
	default:
		path = ME_DISABLE_BIOS_PATH;
		break;
	}

	/* Check for any error code and valid firmware */
	if (hfs.error_code || hfs.fpt_bad)
		path = ME_ERROR_BIOS_PATH;

	if (CONFIG(ELOG) && path != ME_NORMAL_BIOS_PATH) {
		struct elog_event_data_me_extended data = {
			.current_working_state = hfs.working_state,
			.operation_state = hfs.operation_state,
			.operation_mode = hfs.operation_mode,
			.error_code = hfs.error_code,
			.progress_code = gmes.progress_code,
			.current_pmevent = gmes.current_pmevent,
			.current_state = gmes.current_state,
		};
		elog_add_event_byte(ELOG_TYPE_MANAGEMENT_ENGINE, path);
		elog_add_event_raw(ELOG_TYPE_MANAGEMENT_ENGINE_EXT,
				   &data, sizeof(data));
	}

	return path;
}
| 473 | |
| 474 | /* Prepare ME for MEI messages */ |
Elyes HAOUAS | be84140 | 2018-05-13 13:40:39 +0200 | [diff] [blame] | 475 | static int intel_mei_setup(struct device *dev) |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 476 | { |
| 477 | struct resource *res; |
| 478 | struct mei_csr host; |
Elyes HAOUAS | 8b6dfde | 2020-04-28 09:58:21 +0200 | [diff] [blame] | 479 | u16 reg16; |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 480 | |
| 481 | /* Find the MMIO base for the ME interface */ |
| 482 | res = find_resource(dev, PCI_BASE_ADDRESS_0); |
| 483 | if (!res || res->base == 0 || res->size == 0) { |
| 484 | printk(BIOS_DEBUG, "ME: MEI resource not present!\n"); |
| 485 | return -1; |
| 486 | } |
Kevin Paul Herbert | bde6d30 | 2014-12-24 18:43:20 -0800 | [diff] [blame] | 487 | mei_base_address = (u32 *)(uintptr_t)res->base; |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 488 | |
| 489 | /* Ensure Memory and Bus Master bits are set */ |
Elyes HAOUAS | 8b6dfde | 2020-04-28 09:58:21 +0200 | [diff] [blame] | 490 | reg16 = pci_read_config16(dev, PCI_COMMAND); |
| 491 | reg16 |= PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY; |
| 492 | pci_write_config16(dev, PCI_COMMAND, reg16); |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 493 | |
| 494 | /* Clean up status for next message */ |
| 495 | read_host_csr(&host); |
| 496 | host.interrupt_generate = 1; |
| 497 | host.ready = 1; |
| 498 | host.reset = 0; |
| 499 | write_host_csr(&host); |
| 500 | |
| 501 | return 0; |
| 502 | } |
| 503 | |
/*
 * Read the Extend register hash of ME firmware.
 *
 * Verifies the Extend feature is present and the register valid, then
 * reads the hash dwords (5 for SHA-1, 8 for SHA-256) and logs them.
 * On ChromeOS builds the hash is also saved to NVS for the OS.
 * Returns 0 on success, -1 on any check failure.
 */
static int intel_me_extend_valid(struct device *dev)
{
	struct me_heres status;
	u32 extend[8] = {0};
	int i, count = 0;

	pci_read_dword_ptr(dev, &status, PCI_ME_HERES);
	if (!status.extend_feature_present) {
		printk(BIOS_ERR, "ME: Extend Feature not present\n");
		return -1;
	}

	if (!status.extend_reg_valid) {
		printk(BIOS_ERR, "ME: Extend Register not valid\n");
		return -1;
	}

	switch (status.extend_reg_algorithm) {
	case PCI_ME_EXT_SHA1:
		count = 5;	/* 5 dwords = 160-bit SHA-1 digest */
		printk(BIOS_DEBUG, "ME: Extend SHA-1: ");
		break;
	case PCI_ME_EXT_SHA256:
		count = 8;	/* 8 dwords = 256-bit SHA-256 digest */
		printk(BIOS_DEBUG, "ME: Extend SHA-256: ");
		break;
	default:
		printk(BIOS_ERR, "ME: Extend Algorithm %d unknown\n",
		       status.extend_reg_algorithm);
		return -1;
	}

	for (i = 0; i < count; ++i) {
		extend[i] = pci_read_config32(dev, PCI_ME_HER(i));
		printk(BIOS_DEBUG, "%08x", extend[i]);
	}
	printk(BIOS_DEBUG, "\n");

#if CONFIG(CHROMEOS)
	/* Save hash in NVS for the OS to verify */
	chromeos_set_me_hash(extend, count);
#endif

	return 0;
}
| 550 | |
/* Hide the ME virtual PCI devices: mark the device disabled and re-run
   pch_enable(), which presumably applies the disable to the PCH
   function-disable registers — confirm against pch.c */
static void intel_me_hide(struct device *dev)
{
	dev->enabled = 0;
	pch_enable(dev);
}
| 557 | |
/*
 * Check whether ME is present and do basic init.
 *
 * Ramstage .init hook: determines the BIOS path from the ME state and
 * either hides the device (S3 wake), validates the Extend register and
 * sets up the MEI interface (normal), or does nothing (error/recovery/
 * disable/firmware-update paths).
 */
static void intel_me_init(struct device *dev)
{
	me_bios_path path = intel_me_path(dev);

	/* Do initial setup and determine the BIOS path */
	printk(BIOS_NOTICE, "ME: BIOS path: %s\n", me_bios_path_values[path]);

	switch (path) {
	case ME_S3WAKE_BIOS_PATH:
		intel_me_hide(dev);
		break;

	case ME_NORMAL_BIOS_PATH:
		/* Validate the extend register */
		if (intel_me_extend_valid(dev) < 0)
			break; /* TODO: force recovery mode */

		/* Prepare MEI MMIO interface */
		if (intel_mei_setup(dev) < 0)
			break;

		/*
		 * Leave the ME unlocked in this path.
		 * It will be locked via SMI command later.
		 */
		break;

	case ME_ERROR_BIOS_PATH:
	case ME_RECOVERY_BIOS_PATH:
	case ME_DISABLE_BIOS_PATH:
	case ME_FIRMWARE_UPDATE_BIOS_PATH:
		/* Nothing to do on these paths. */
		break;
	}
}
| 593 | |
/* Standard PCI subsystem-ID programming hook. */
static struct pci_operations pci_ops = {
	.set_subsystem = pci_dev_set_subsystem,
};
| 597 | |
/* Ramstage device operations for the ME PCI function: generic PCI
   resource handling plus the ME-specific init above. */
static struct device_operations device_ops = {
	.read_resources = pci_dev_read_resources,
	.set_resources = pci_dev_set_resources,
	.enable_resources = pci_dev_enable_resources,
	.init = intel_me_init,
	.ops_pci = &pci_ops,
};
| 605 | |
/* Supported ME interface device IDs (vendor is Intel); zero-terminated. */
static const unsigned short pci_device_ids[] = {
	0x1c3a,		/* 6-series chipset MEI (see file header) */
	PCI_DID_INTEL_IBEXPEAK_HECI1,
	0
};
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 611 | |
| 612 | |
/* Bind device_ops to the Intel ME devices listed in pci_device_ids. */
static const struct pci_driver intel_me __pci_driver = {
	.ops = &device_ops,
	.vendor = PCI_VENDOR_ID_INTEL,
	.devices = pci_device_ids
};
| 618 | |
Kyösti Mälkki | 21d6a27 | 2019-11-05 18:50:38 +0200 | [diff] [blame] | 619 | #endif /* !__SIMPLE_DEVICE__ */ |