Angel Pons | 182dbde | 2020-04-02 23:49:05 +0200 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 2 | |
| 3 | /* |
| 4 | * This is a ramstage driver for the Intel Management Engine found in the |
| 5 | * 6-series chipset. It handles the required boot-time messages over the |
| 6 | * MMIO-based Management Engine Interface to tell the ME that the BIOS is |
| 7 | * finished with POST. Additional messages are defined for debug but are |
| 8 | * not used unless the console loglevel is high enough. |
| 9 | */ |
| 10 | |
Furquan Shaikh | 76cedd2 | 2020-05-02 10:24:23 -0700 | [diff] [blame] | 11 | #include <acpi/acpi.h> |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 12 | #include <console/console.h> |
Kyösti Mälkki | 21d6a27 | 2019-11-05 18:50:38 +0200 | [diff] [blame] | 13 | #include <device/device.h> |
| 14 | #include <device/mmio.h> |
| 15 | #include <device/pci.h> |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 16 | #include <device/pci_def.h> |
Kyösti Mälkki | 21d6a27 | 2019-11-05 18:50:38 +0200 | [diff] [blame] | 17 | #include <device/pci_ids.h> |
| 18 | #include <device/pci_ops.h> |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 19 | #include <string.h> |
| 20 | #include <delay.h> |
| 21 | #include <elog.h> |
| 22 | |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 23 | #include "me.h" |
| 24 | #include "pch.h" |
| 25 | |
/*
 * Path that the BIOS should take based on ME state, indexed by the
 * me_bios_path enum. Printed by intel_me_init() to report which path
 * was selected.
 * NOTE(review): marked __unused presumably so builds where the printk
 * is compiled out do not warn — TODO confirm against Kconfig.
 */
static const char *me_bios_path_values[] __unused = {
	[ME_NORMAL_BIOS_PATH]		= "Normal",
	[ME_S3WAKE_BIOS_PATH]		= "S3 Wake",
	[ME_ERROR_BIOS_PATH]		= "Error",
	[ME_RECOVERY_BIOS_PATH]		= "Recovery",
	[ME_DISABLE_BIOS_PATH]		= "Disable",
	[ME_FIRMWARE_UPDATE_BIOS_PATH]	= "Firmware Update",
};
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 35 | |
/*
 * MMIO base address for the MEI interface (BAR0 of the HECI PCI device).
 * Set by intel_mei_setup(); all register accessors below index from it
 * in u32 units.
 */
static u32 *mei_base_address;
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 38 | |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 39 | static void mei_dump(void *ptr, int dword, int offset, const char *type) |
| 40 | { |
| 41 | struct mei_csr *csr; |
| 42 | |
Kyösti Mälkki | c86fc8e | 2019-11-06 06:32:27 +0200 | [diff] [blame] | 43 | if (!CONFIG(DEBUG_INTEL_ME)) |
| 44 | return; |
| 45 | |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 46 | printk(BIOS_SPEW, "%-9s[%02x] : ", type, offset); |
| 47 | |
| 48 | switch (offset) { |
| 49 | case MEI_H_CSR: |
| 50 | case MEI_ME_CSR_HA: |
| 51 | csr = ptr; |
| 52 | if (!csr) { |
| 53 | printk(BIOS_SPEW, "ERROR: 0x%08x\n", dword); |
| 54 | break; |
| 55 | } |
| 56 | printk(BIOS_SPEW, "cbd=%u cbrp=%02u cbwp=%02u ready=%u " |
| 57 | "reset=%u ig=%u is=%u ie=%u\n", csr->buffer_depth, |
| 58 | csr->buffer_read_ptr, csr->buffer_write_ptr, |
| 59 | csr->ready, csr->reset, csr->interrupt_generate, |
| 60 | csr->interrupt_status, csr->interrupt_enable); |
| 61 | break; |
| 62 | case MEI_ME_CB_RW: |
| 63 | case MEI_H_CB_WW: |
| 64 | printk(BIOS_SPEW, "CB: 0x%08x\n", dword); |
| 65 | break; |
| 66 | default: |
| 67 | printk(BIOS_SPEW, "0x%08x\n", offset); |
| 68 | break; |
| 69 | } |
| 70 | } |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 71 | |
| 72 | /* |
| 73 | * ME/MEI access helpers using memcpy to avoid aliasing. |
| 74 | */ |
| 75 | |
| 76 | static inline void mei_read_dword_ptr(void *ptr, int offset) |
| 77 | { |
Kevin Paul Herbert | bde6d30 | 2014-12-24 18:43:20 -0800 | [diff] [blame] | 78 | u32 dword = read32(mei_base_address + (offset/sizeof(u32))); |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 79 | memcpy(ptr, &dword, sizeof(dword)); |
| 80 | mei_dump(ptr, dword, offset, "READ"); |
| 81 | } |
| 82 | |
| 83 | static inline void mei_write_dword_ptr(void *ptr, int offset) |
| 84 | { |
| 85 | u32 dword = 0; |
| 86 | memcpy(&dword, ptr, sizeof(dword)); |
Kevin Paul Herbert | bde6d30 | 2014-12-24 18:43:20 -0800 | [diff] [blame] | 87 | write32(mei_base_address + (offset/sizeof(u32)), dword); |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 88 | mei_dump(ptr, dword, offset, "WRITE"); |
| 89 | } |
| 90 | |
Kyösti Mälkki | 21d6a27 | 2019-11-05 18:50:38 +0200 | [diff] [blame] | 91 | #ifndef __SIMPLE_DEVICE__ |
Elyes HAOUAS | be84140 | 2018-05-13 13:40:39 +0200 | [diff] [blame] | 92 | static inline void pci_read_dword_ptr(struct device *dev,void *ptr, |
| 93 | int offset) |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 94 | { |
| 95 | u32 dword = pci_read_config32(dev, offset); |
| 96 | memcpy(ptr, &dword, sizeof(dword)); |
| 97 | mei_dump(ptr, dword, offset, "PCI READ"); |
| 98 | } |
| 99 | #endif |
| 100 | |
/* Read the host-side Control/Status Register into *csr. */
static inline void read_host_csr(struct mei_csr *csr)
{
	mei_read_dword_ptr(csr, MEI_H_CSR);
}
| 105 | |
/* Write *csr back to the host-side Control/Status Register. */
static inline void write_host_csr(struct mei_csr *csr)
{
	mei_write_dword_ptr(csr, MEI_H_CSR);
}
| 110 | |
/* Read the ME-side Control/Status Register (host alias) into *csr. */
static inline void read_me_csr(struct mei_csr *csr)
{
	mei_read_dword_ptr(csr, MEI_ME_CSR_HA);
}
| 115 | |
/* Push one dword into the host circular buffer (host -> ME). */
static inline void write_cb(u32 dword)
{
	write32(mei_base_address + (MEI_H_CB_WW / sizeof(u32)), dword);
	mei_dump(NULL, dword, MEI_H_CB_WW, "WRITE");
}
| 121 | |
/* Pop one dword from the ME circular buffer (ME -> host). */
static inline u32 read_cb(void)
{
	u32 dword = read32(mei_base_address + (MEI_ME_CB_RW / sizeof(u32)));
	mei_dump(NULL, dword, MEI_ME_CB_RW, "READ");
	return dword;
}
| 128 | |
| 129 | /* Wait for ME ready bit to be asserted */ |
| 130 | static int mei_wait_for_me_ready(void) |
| 131 | { |
| 132 | struct mei_csr me; |
Martin Roth | ff744bf | 2019-10-23 21:46:03 -0600 | [diff] [blame] | 133 | unsigned int try = ME_RETRY; |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 134 | |
| 135 | while (try--) { |
| 136 | read_me_csr(&me); |
| 137 | if (me.ready) |
| 138 | return 0; |
| 139 | udelay(ME_DELAY); |
| 140 | } |
| 141 | |
| 142 | printk(BIOS_ERR, "ME: failed to become ready\n"); |
| 143 | return -1; |
| 144 | } |
| 145 | |
| 146 | static void mei_reset(void) |
| 147 | { |
| 148 | struct mei_csr host; |
| 149 | |
| 150 | if (mei_wait_for_me_ready() < 0) |
| 151 | return; |
| 152 | |
| 153 | /* Reset host and ME circular buffers for next message */ |
| 154 | read_host_csr(&host); |
| 155 | host.reset = 1; |
| 156 | host.interrupt_generate = 1; |
| 157 | write_host_csr(&host); |
| 158 | |
| 159 | if (mei_wait_for_me_ready() < 0) |
| 160 | return; |
| 161 | |
| 162 | /* Re-init and indicate host is ready */ |
| 163 | read_host_csr(&host); |
| 164 | host.interrupt_generate = 1; |
| 165 | host.ready = 1; |
| 166 | host.reset = 0; |
| 167 | write_host_csr(&host); |
| 168 | } |
| 169 | |
/*
 * Send one MEI request to the ME: MEI header, then MKHI header, then
 * the remaining payload dwords from 'req_data'.
 *
 * @mei:      MEI header; mei->length covers MKHI header + payload bytes
 * @mkhi:     MKHI header (first dword of the message body)
 * @req_data: payload following the MKHI header, read as dwords
 *
 * Returns 0 on success, -1 on error (empty message, message too large
 * for the circular buffer, or ME not ready afterwards). Large messages
 * are NOT split across multiple transactions.
 */
static int mei_send_msg(struct mei_header *mei, struct mkhi_header *mkhi,
			void *req_data)
{
	struct mei_csr host;
	unsigned int ndata, n;
	u32 *data;

	/* Number of dwords to write, ignoring MKHI */
	ndata = mei->length >> 2;

	/* Pad non-dword aligned request message length */
	if (mei->length & 3)
		ndata++;
	if (!ndata) {
		printk(BIOS_DEBUG, "ME: request does not include MKHI\n");
		return -1;
	}
	ndata++; /* Add MEI header */

	/*
	 * Make sure there is still room left in the circular buffer.
	 * Reset the buffer pointers if the requested message will not fit.
	 */
	read_host_csr(&host);
	if ((host.buffer_depth - host.buffer_write_ptr) < ndata) {
		printk(BIOS_ERR, "ME: circular buffer full, resetting...\n");
		mei_reset();
		read_host_csr(&host);
	}

	/*
	 * This implementation does not handle splitting large messages
	 * across multiple transactions. Ensure the requested length
	 * will fit in the available circular buffer depth.
	 */
	if ((host.buffer_depth - host.buffer_write_ptr) < ndata) {
		printk(BIOS_ERR, "ME: message (%u) too large for buffer (%u)\n",
		       ndata + 2, host.buffer_depth);
		return -1;
	}

	/* Write MEI header */
	mei_write_dword_ptr(mei, MEI_H_CB_WW);
	ndata--;

	/* Write MKHI header */
	mei_write_dword_ptr(mkhi, MEI_H_CB_WW);
	ndata--;

	/* Write message data */
	data = req_data;
	for (n = 0; n < ndata; ++n)
		write_cb(*data++);

	/* Generate interrupt to the ME */
	read_host_csr(&host);
	host.interrupt_generate = 1;
	write_host_csr(&host);

	/* Make sure ME is ready after sending request data */
	return mei_wait_for_me_ready();
}
| 232 | |
/*
 * Receive and validate one MEI response from the ME.
 *
 * @mei:       request MEI header (unused here beyond symmetry with send)
 * @mkhi:      request MKHI header; the response group_id/command must match
 * @rsp_data:  buffer for the payload following the MKHI response header
 * @rsp_bytes: expected payload size in bytes (must be dword-sized)
 *
 * Waits for the full response to accumulate in the circular buffer,
 * verifies the MEI and MKHI headers, copies the payload out, then acks
 * the ME. Returns 0 on success, -1 on timeout or any validation failure.
 */
static int mei_recv_msg(struct mei_header *mei, struct mkhi_header *mkhi,
			void *rsp_data, int rsp_bytes)
{
	struct mei_header mei_rsp;
	struct mkhi_header mkhi_rsp;
	struct mei_csr me, host;
	unsigned int ndata, n;
	unsigned int expected;
	u32 *data;

	/* Total number of dwords to read from circular buffer */
	expected = (rsp_bytes + sizeof(mei_rsp) + sizeof(mkhi_rsp)) >> 2;
	if (rsp_bytes & 3)
		expected++;

	/*
	 * The interrupt status bit does not appear to indicate that the
	 * message has actually been received. Instead we wait until the
	 * expected number of dwords are present in the circular buffer.
	 */
	for (n = ME_RETRY; n; --n) {
		read_me_csr(&me);
		if ((me.buffer_write_ptr - me.buffer_read_ptr) >= expected)
			break;
		udelay(ME_DELAY);
	}
	if (!n) {
		printk(BIOS_ERR, "ME: timeout waiting for data: expected %u, available %u\n",
		       expected, me.buffer_write_ptr - me.buffer_read_ptr);
		return -1;
	}

	/* Read and verify MEI response header from the ME */
	mei_read_dword_ptr(&mei_rsp, MEI_ME_CB_RW);
	if (!mei_rsp.is_complete) {
		printk(BIOS_ERR, "ME: response is not complete\n");
		return -1;
	}

	/* Handle non-dword responses and expect at least MKHI header */
	ndata = mei_rsp.length >> 2;
	if (mei_rsp.length & 3)
		ndata++;
	if (ndata != (expected - 1)) {
		printk(BIOS_ERR, "ME: response is missing data\n");
		return -1;
	}

	/* Read and verify MKHI response header from the ME */
	mei_read_dword_ptr(&mkhi_rsp, MEI_ME_CB_RW);
	if (!mkhi_rsp.is_response ||
	    mkhi->group_id != mkhi_rsp.group_id ||
	    mkhi->command != mkhi_rsp.command) {
		printk(BIOS_ERR, "ME: invalid response, group %u ?= %u, "
		       "command %u ?= %u, is_response %u\n", mkhi->group_id,
		       mkhi_rsp.group_id, mkhi->command, mkhi_rsp.command,
		       mkhi_rsp.is_response);
		return -1;
	}
	ndata--; /* MKHI header has been read */

	/* Make sure caller passed a buffer with enough space */
	if (ndata != (rsp_bytes >> 2)) {
		printk(BIOS_ERR, "ME: not enough room in response buffer: %u != %u\n",
		       ndata, rsp_bytes >> 2);
		return -1;
	}

	/* Read response data from the circular buffer */
	data = rsp_data;
	for (n = 0; n < ndata; ++n)
		*data++ = read_cb();

	/* Tell the ME that we have consumed the response */
	read_host_csr(&host);
	host.interrupt_status = 1;
	host.interrupt_generate = 1;
	write_host_csr(&host);

	return mei_wait_for_me_ready();
}
| 314 | |
/*
 * Perform one full MEI transaction: send the request, then receive and
 * validate the response. Returns 0 on success, -1 if either step fails.
 */
static inline int mei_sendrecv(struct mei_header *mei, struct mkhi_header *mkhi,
			       void *req_data, void *rsp_data, int rsp_bytes)
{
	if (mei_send_msg(mei, mkhi, req_data) < 0 ||
	    mei_recv_msg(mei, mkhi, rsp_data, rsp_bytes) < 0)
		return -1;

	return 0;
}
| 324 | |
/*
 * Determine the path that we should take based on ME status.
 *
 * Reads the Host Firmware Status (HFS) and GMES registers from the ME
 * PCI config space, logs them, and maps working state / operation mode /
 * error bits onto a me_bios_path. On S3 resume no MKHI messages are
 * exchanged at all. Non-normal paths are recorded in the event log when
 * ELOG is enabled.
 */
static me_bios_path intel_me_path(struct device *dev)
{
	me_bios_path path = ME_DISABLE_BIOS_PATH;
	struct me_hfs hfs;
	struct me_gmes gmes;

	/* S3 wake skips all MKHI messages */
	if (acpi_is_wakeup_s3())
		return ME_S3WAKE_BIOS_PATH;

	pci_read_dword_ptr(dev, &hfs, PCI_ME_HFS);
	pci_read_dword_ptr(dev, &gmes, PCI_ME_GMES);

	/* Check and dump status */
	intel_me_status(&hfs, &gmes);

	/* Check Current Working State */
	switch (hfs.working_state) {
	case ME_HFS_CWS_NORMAL:
		path = ME_NORMAL_BIOS_PATH;
		break;
	case ME_HFS_CWS_REC:
		path = ME_RECOVERY_BIOS_PATH;
		break;
	default:
		path = ME_DISABLE_BIOS_PATH;
		break;
	}

	/* Check Current Operation Mode */
	switch (hfs.operation_mode) {
	case ME_HFS_MODE_NORMAL:
		break;
	case ME_HFS_MODE_DEBUG:
	case ME_HFS_MODE_DIS:
	case ME_HFS_MODE_OVER_JMPR:
	case ME_HFS_MODE_OVER_MEI:
	default:
		/* Any non-normal mode overrides the working-state result */
		path = ME_DISABLE_BIOS_PATH;
		break;
	}

	/* Check for any error code and valid firmware */
	if (hfs.error_code || hfs.fpt_bad)
		path = ME_ERROR_BIOS_PATH;

	/* Record anything other than the normal path in the event log */
	if (CONFIG(ELOG) && path != ME_NORMAL_BIOS_PATH) {
		struct elog_event_data_me_extended data = {
			.current_working_state = hfs.working_state,
			.operation_state       = hfs.operation_state,
			.operation_mode        = hfs.operation_mode,
			.error_code            = hfs.error_code,
			.progress_code         = gmes.progress_code,
			.current_pmevent       = gmes.current_pmevent,
			.current_state         = gmes.current_state,
		};
		elog_add_event_byte(ELOG_TYPE_MANAGEMENT_ENGINE, path);
		elog_add_event_raw(ELOG_TYPE_MANAGEMENT_ENGINE_EXT,
				   &data, sizeof(data));
	}

	return path;
}
| 389 | |
| 390 | /* Prepare ME for MEI messages */ |
Elyes HAOUAS | be84140 | 2018-05-13 13:40:39 +0200 | [diff] [blame] | 391 | static int intel_mei_setup(struct device *dev) |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 392 | { |
| 393 | struct resource *res; |
| 394 | struct mei_csr host; |
Elyes HAOUAS | 8b6dfde | 2020-04-28 09:58:21 +0200 | [diff] [blame] | 395 | u16 reg16; |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 396 | |
| 397 | /* Find the MMIO base for the ME interface */ |
Angel Pons | f32ae10 | 2021-11-03 13:07:14 +0100 | [diff] [blame] | 398 | res = probe_resource(dev, PCI_BASE_ADDRESS_0); |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 399 | if (!res || res->base == 0 || res->size == 0) { |
| 400 | printk(BIOS_DEBUG, "ME: MEI resource not present!\n"); |
| 401 | return -1; |
| 402 | } |
Kevin Paul Herbert | bde6d30 | 2014-12-24 18:43:20 -0800 | [diff] [blame] | 403 | mei_base_address = (u32 *)(uintptr_t)res->base; |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 404 | |
| 405 | /* Ensure Memory and Bus Master bits are set */ |
Elyes HAOUAS | 8b6dfde | 2020-04-28 09:58:21 +0200 | [diff] [blame] | 406 | reg16 = pci_read_config16(dev, PCI_COMMAND); |
| 407 | reg16 |= PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY; |
| 408 | pci_write_config16(dev, PCI_COMMAND, reg16); |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 409 | |
| 410 | /* Clean up status for next message */ |
| 411 | read_host_csr(&host); |
| 412 | host.interrupt_generate = 1; |
| 413 | host.ready = 1; |
| 414 | host.reset = 0; |
| 415 | write_host_csr(&host); |
| 416 | |
| 417 | return 0; |
| 418 | } |
| 419 | |
| 420 | /* Read the Extend register hash of ME firmware */ |
Elyes HAOUAS | be84140 | 2018-05-13 13:40:39 +0200 | [diff] [blame] | 421 | static int intel_me_extend_valid(struct device *dev) |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 422 | { |
| 423 | struct me_heres status; |
| 424 | u32 extend[8] = {0}; |
| 425 | int i, count = 0; |
| 426 | |
| 427 | pci_read_dword_ptr(dev, &status, PCI_ME_HERES); |
| 428 | if (!status.extend_feature_present) { |
| 429 | printk(BIOS_ERR, "ME: Extend Feature not present\n"); |
| 430 | return -1; |
| 431 | } |
| 432 | |
| 433 | if (!status.extend_reg_valid) { |
| 434 | printk(BIOS_ERR, "ME: Extend Register not valid\n"); |
| 435 | return -1; |
| 436 | } |
| 437 | |
| 438 | switch (status.extend_reg_algorithm) { |
| 439 | case PCI_ME_EXT_SHA1: |
| 440 | count = 5; |
| 441 | printk(BIOS_DEBUG, "ME: Extend SHA-1: "); |
| 442 | break; |
| 443 | case PCI_ME_EXT_SHA256: |
| 444 | count = 8; |
| 445 | printk(BIOS_DEBUG, "ME: Extend SHA-256: "); |
| 446 | break; |
| 447 | default: |
| 448 | printk(BIOS_ERR, "ME: Extend Algorithm %d unknown\n", |
| 449 | status.extend_reg_algorithm); |
| 450 | return -1; |
| 451 | } |
| 452 | |
| 453 | for (i = 0; i < count; ++i) { |
| 454 | extend[i] = pci_read_config32(dev, PCI_ME_HER(i)); |
| 455 | printk(BIOS_DEBUG, "%08x", extend[i]); |
| 456 | } |
| 457 | printk(BIOS_DEBUG, "\n"); |
| 458 | |
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 459 | return 0; |
| 460 | } |
| 461 | |
/*
 * Hide the ME virtual PCI devices: mark the device disabled and let the
 * PCH function-disable logic (pch_enable) hide it from the OS.
 */
static void intel_me_hide(struct device *dev)
{
	dev->enabled = 0;
	pch_enable(dev);
}
| 468 | |
/*
 * Check whether ME is present and do basic init.
 *
 * Selects the BIOS path from the ME status registers and acts on it:
 * hide the device on S3 wake / disable (and on error when
 * HIDE_MEI_ON_ERROR is set), or bring up the MEI interface on the
 * normal path. Recovery/firmware-update paths are left untouched.
 */
static void intel_me_init(struct device *dev)
{
	me_bios_path path = intel_me_path(dev);

	/* Do initial setup and determine the BIOS path */
	printk(BIOS_NOTICE, "ME: BIOS path: %s\n", me_bios_path_values[path]);

	switch (path) {
	case ME_S3WAKE_BIOS_PATH:
	case ME_DISABLE_BIOS_PATH:
#if CONFIG(HIDE_MEI_ON_ERROR)
	case ME_ERROR_BIOS_PATH:
#endif
		intel_me_hide(dev);
		break;

	case ME_NORMAL_BIOS_PATH:
		/* Validate the extend register */
		if (intel_me_extend_valid(dev) < 0)
			break; /* TODO: force recovery mode */

		/* Prepare MEI MMIO interface */
		if (intel_mei_setup(dev) < 0)
			break;

		/*
		 * Leave the ME unlocked in this path.
		 * It will be locked via SMI command later.
		 */
		break;

	/* When not hidden above, the error path falls through untouched */
#if !CONFIG(HIDE_MEI_ON_ERROR)
	case ME_ERROR_BIOS_PATH:
#endif
	case ME_RECOVERY_BIOS_PATH:
	case ME_FIRMWARE_UPDATE_BIOS_PATH:
		break;
	}
}
| 509 | |
/* Standard PCI device operations; only init is driver-specific. */
static struct device_operations device_ops = {
	.read_resources		= pci_dev_read_resources,
	.set_resources		= pci_dev_set_resources,
	.enable_resources	= pci_dev_enable_resources,
	.init			= intel_me_init,
	.ops_pci		= &pci_dev_ops_pci,
};
| 517 | |
/* Supported HECI device IDs; 0x1c3a is the 6-series (Cougar Point) HECI1. */
static const unsigned short pci_device_ids[] = {
	0x1c3a,
	PCI_DID_INTEL_IBEXPEAK_HECI1,
	0	/* terminator */
};
Vladimir Serbinenko | 888d559 | 2013-11-13 17:53:38 +0100 | [diff] [blame] | 523 | |
/* Bind this driver to the Intel ME HECI PCI devices listed above. */
static const struct pci_driver intel_me __pci_driver = {
	.ops	 = &device_ops,
	.vendor	 = PCI_VENDOR_ID_INTEL,
	.devices = pci_device_ids
};