/* SPDX-License-Identifier: GPL-2.0-only */

#include <assert.h>
#include <commonlib/helpers.h>
#include <console/console.h>
#include <device/mmio.h>
#include <delay.h>
#include <device/pci.h>
#include <device/pci_ids.h>
#include <device/pci_ops.h>
#include <intelblocks/cse.h>
#include <security/vboot/misc.h>
#include <security/vboot/vboot_common.h>
#include <soc/iomap.h>
#include <soc/pci_devs.h>
#include <soc/me.h>
#include <string.h>
#include <timer.h>

#define MAX_HECI_MESSAGE_RETRY_COUNT	5

/* Wait up to 15 sec for HECI to get ready */
#define HECI_DELAY_READY	(15 * 1000)
/* Wait up to 100 usec between circular buffer polls */
#define HECI_DELAY		100
/* Wait up to 5 sec for CSE to consume a message we sent */
#define HECI_SEND_TIMEOUT	(5 * 1000)
/* Wait up to 5 sec for CSE to return a reply */
#define HECI_READ_TIMEOUT	(5 * 1000)

#define SLOT_SIZE		sizeof(uint32_t)

#define MMIO_CSE_CB_WW		0x00
#define MMIO_HOST_CSR		0x04
#define MMIO_CSE_CB_RW		0x08
#define MMIO_CSE_CSR		0x0c

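/*
 * Host and CSE control/status register (CSR) layout, as encoded by the
 * definitions below: bits 0-4 carry the interrupt enable/status/generate,
 * ready and reset flags, bits 8-15 the circular buffer read pointer,
 * bits 16-23 the write pointer, and bits 24-31 the buffer depth in slots.
 */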
#define CSR_IE			(1 << 0)
#define CSR_IS			(1 << 1)
#define CSR_IG			(1 << 2)
#define CSR_READY		(1 << 3)
#define CSR_RESET		(1 << 4)
#define CSR_RP_START		8
#define CSR_RP			(((1 << 8) - 1) << CSR_RP_START)
#define CSR_WP_START		16
#define CSR_WP			(((1 << 8) - 1) << CSR_WP_START)
#define CSR_CBD_START		24
#define CSR_CBD			(((1 << 8) - 1) << CSR_CBD_START)

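/*
 * MEI message header layout, as encoded by the definitions below: bits 0-7
 * carry the CSE (client) address, bits 8-15 the host address, bits 16-24
 * the payload length in bytes (9 bits), and bit 31 marks the final fragment
 * of a message as complete.
 */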
#define MEI_HDR_IS_COMPLETE	(1 << 31)
#define MEI_HDR_LENGTH_START	16
#define MEI_HDR_LENGTH_SIZE	9
#define MEI_HDR_LENGTH		(((1 << MEI_HDR_LENGTH_SIZE) - 1) \
					<< MEI_HDR_LENGTH_START)
#define MEI_HDR_HOST_ADDR_START	8
#define MEI_HDR_HOST_ADDR	(((1 << 8) - 1) << MEI_HDR_HOST_ADDR_START)
#define MEI_HDR_CSE_ADDR_START	0
#define MEI_HDR_CSE_ADDR	(((1 << 8) - 1) << MEI_HDR_CSE_ADDR_START)

/* Wait up to 5 seconds for CSE to boot from RO(BP1) */
#define CSE_DELAY_BOOT_TO_RO	(5 * 1000)

static struct cse_device {
	uintptr_t sec_bar;
} cse;

/*
 * Initialize the device with provided temporary BAR. If BAR is 0 use a
 * default. This is intended for pre-mem usage only where BARs haven't been
 * assigned yet and devices are not enabled.
 */
void heci_init(uintptr_t tempbar)
{
#if defined(__SIMPLE_DEVICE__)
	pci_devfn_t dev = PCH_DEV_CSE;
#else
	struct device *dev = PCH_DEV_CSE;
#endif
	u16 pcireg;

	/* Assume it is already initialized, nothing else to do */
	if (cse.sec_bar)
		return;

	/* Use default pre-ram bar */
	if (!tempbar)
		tempbar = HECI1_BASE_ADDRESS;

	/* Assign Resources to HECI1 */
	/* Clear BIT 1-2 of Command Register */
	pcireg = pci_read_config16(dev, PCI_COMMAND);
	pcireg &= ~(PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY);
	pci_write_config16(dev, PCI_COMMAND, pcireg);

	/* Program Temporary BAR for HECI1 */
	pci_write_config32(dev, PCI_BASE_ADDRESS_0, tempbar);
	pci_write_config32(dev, PCI_BASE_ADDRESS_1, 0x0);

	/* Enable Bus Master and MMIO Space */
	pci_or_config16(dev, PCI_COMMAND, PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY);

	cse.sec_bar = tempbar;
}

/* Get HECI BAR 0 from PCI configuration space */
static uint32_t get_cse_bar(void)
{
	uintptr_t bar;

	bar = pci_read_config32(PCH_DEV_CSE, PCI_BASE_ADDRESS_0);
	assert(bar != 0);
	/*
	 * Bits 31-12 are the base address as per EDS;
	 * bits 11-0 are not part of the address.
	 */
	return bar & ~PCI_BASE_ADDRESS_MEM_ATTR_MASK;
}

static uint32_t read_bar(uint32_t offset)
{
	/* Load and cache BAR */
	if (!cse.sec_bar)
		cse.sec_bar = get_cse_bar();
	return read32((void *)(cse.sec_bar + offset));
}

static void write_bar(uint32_t offset, uint32_t val)
{
	/* Load and cache BAR */
	if (!cse.sec_bar)
		cse.sec_bar = get_cse_bar();
	return write32((void *)(cse.sec_bar + offset), val);
}

static uint32_t read_cse_csr(void)
{
	return read_bar(MMIO_CSE_CSR);
}

static uint32_t read_host_csr(void)
{
	return read_bar(MMIO_HOST_CSR);
}

static void write_host_csr(uint32_t data)
{
	write_bar(MMIO_HOST_CSR, data);
}

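/*
 * Number of occupied slots in a circular buffer, derived from the 8-bit
 * read and write pointers of a CSR. The uint8_t arithmetic handles pointer
 * wrap-around naturally, e.g. wp = 2, rp = 250 yields 8 occupied slots.
 */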
static size_t filled_slots(uint32_t data)
{
	uint8_t wp, rp;
	rp = data >> CSR_RP_START;
	wp = data >> CSR_WP_START;
	return (uint8_t) (wp - rp);
}

static size_t cse_filled_slots(void)
{
	return filled_slots(read_cse_csr());
}

static size_t host_empty_slots(void)
{
	uint32_t csr;
	csr = read_host_csr();

	return ((csr & CSR_CBD) >> CSR_CBD_START) - filled_slots(csr);
}

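/* Clear the host interrupt status bit (assumed write-1-to-clear) by setting it */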
static void clear_int(void)
{
	uint32_t csr;
	csr = read_host_csr();
	csr |= CSR_IS;
	write_host_csr(csr);
}

static uint32_t read_slot(void)
{
	return read_bar(MMIO_CSE_CB_RW);
}

static void write_slot(uint32_t val)
{
	write_bar(MMIO_CSE_CB_WW, val);
}

static int wait_write_slots(size_t cnt)
{
	struct stopwatch sw;

	stopwatch_init_msecs_expire(&sw, HECI_SEND_TIMEOUT);
	while (host_empty_slots() < cnt) {
		udelay(HECI_DELAY);
		if (stopwatch_expired(&sw)) {
			printk(BIOS_ERR, "HECI: timeout, buffer not drained\n");
			return 0;
		}
	}
	return 1;
}

static int wait_read_slots(size_t cnt)
{
	struct stopwatch sw;

	stopwatch_init_msecs_expire(&sw, HECI_READ_TIMEOUT);
	while (cse_filled_slots() < cnt) {
		udelay(HECI_DELAY);
		if (stopwatch_expired(&sw)) {
			printk(BIOS_ERR, "HECI: timed out reading answer!\n");
			return 0;
		}
	}
	return 1;
}

/* Get the number of 4-byte slots needed to hold 'bytes', rounded up */
static size_t bytes_to_slots(size_t bytes)
{
	return ALIGN_UP(bytes, SLOT_SIZE) / SLOT_SIZE;
}

static int cse_ready(void)
{
	uint32_t csr;
	csr = read_cse_csr();
	return csr & CSR_READY;
}

static bool cse_check_hfs1_com(int mode)
{
	union me_hfsts1 hfs1;
	hfs1.data = me_read_config32(PCI_ME_HFSTS1);
	return hfs1.fields.operation_mode == mode;
}

bool cse_is_hfs1_cws_normal(void)
{
	union me_hfsts1 hfs1;
	hfs1.data = me_read_config32(PCI_ME_HFSTS1);
	if (hfs1.fields.working_state == ME_HFS1_CWS_NORMAL)
		return true;
	return false;
}

bool cse_is_hfs1_com_normal(void)
{
	return cse_check_hfs1_com(ME_HFS1_COM_NORMAL);
}

bool cse_is_hfs1_com_secover_mei_msg(void)
{
	return cse_check_hfs1_com(ME_HFS1_COM_SECOVER_MEI_MSG);
}

bool cse_is_hfs1_com_soft_temp_disable(void)
{
	return cse_check_hfs1_com(ME_HFS1_COM_SOFT_TEMP_DISABLE);
}

bool cse_is_hfs3_fw_sku_lite(void)
{
	union me_hfsts3 hfs3;
	hfs3.data = me_read_config32(PCI_ME_HFSTS3);
	return hfs3.fields.fw_sku == ME_HFS3_FW_SKU_LITE;
}

/* Makes the host ready to communicate with CSE */
void cse_set_host_ready(void)
{
	uint32_t csr;
	csr = read_host_csr();
	csr &= ~CSR_RESET;
	csr |= (CSR_IG | CSR_READY);
	write_host_csr(csr);
}

/* Polls for ME mode ME_HFS1_COM_SECOVER_MEI_MSG for 15 seconds */
uint8_t cse_wait_sec_override_mode(void)
{
	struct stopwatch sw;
	stopwatch_init_msecs_expire(&sw, HECI_DELAY_READY);
	while (!cse_is_hfs1_com_secover_mei_msg()) {
		udelay(HECI_DELAY);
		if (stopwatch_expired(&sw)) {
			printk(BIOS_ERR, "HECI: Timed out waiting for SEC_OVERRIDE mode!\n");
			return 0;
		}
	}
	printk(BIOS_DEBUG, "HECI: CSE took %lu ms to enter security override mode\n",
			stopwatch_duration_msecs(&sw));
	return 1;
}

/*
 * Polls for CSE's current operation mode 'Soft Temporary Disable'.
 * The CSE enters this operation mode when it boots from RO(BP1).
 */
uint8_t cse_wait_com_soft_temp_disable(void)
{
	struct stopwatch sw;
	stopwatch_init_msecs_expire(&sw, CSE_DELAY_BOOT_TO_RO);
	while (!cse_is_hfs1_com_soft_temp_disable()) {
		udelay(HECI_DELAY);
		if (stopwatch_expired(&sw)) {
			printk(BIOS_ERR, "HECI: Timed out waiting for CSE to boot from RO!\n");
			return 0;
		}
	}
	printk(BIOS_SPEW, "HECI: CSE took %lu ms to boot from RO\n",
			stopwatch_duration_msecs(&sw));
	return 1;
}

static int wait_heci_ready(void)
{
	struct stopwatch sw;

	stopwatch_init_msecs_expire(&sw, HECI_DELAY_READY);
	while (!cse_ready()) {
		udelay(HECI_DELAY);
		if (stopwatch_expired(&sw))
			return 0;
	}

	return 1;
}

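/* Set the 'interrupt generate' bit to notify the CSE of new data in the buffer */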
static void host_gen_interrupt(void)
{
	uint32_t csr;
	csr = read_host_csr();
	csr |= CSR_IG;
	write_host_csr(csr);
}

static size_t hdr_get_length(uint32_t hdr)
{
	return (hdr & MEI_HDR_LENGTH) >> MEI_HDR_LENGTH_START;
}

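/*
 * Write one MEI fragment: the header slot first, then the payload in whole
 * 4-byte slots; a trailing partial slot is padded from a local copy so the
 * caller's buffer is never read past its end. Returns the number of payload
 * bytes written, or 0 on timeout/error.
 */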
static int
send_one_message(uint32_t hdr, const void *buff)
{
	size_t pend_len, pend_slots, remainder, i;
	uint32_t tmp;
	const uint32_t *p = buff;

	/* Get space for the header */
	if (!wait_write_slots(1))
		return 0;

	/* First, write header */
	write_slot(hdr);

	pend_len = hdr_get_length(hdr);
	pend_slots = bytes_to_slots(pend_len);

	if (!wait_write_slots(pend_slots))
		return 0;

	/* Write the body in whole slots */
	i = 0;
	while (i < ALIGN_DOWN(pend_len, SLOT_SIZE)) {
		write_slot(*p++);
		i += SLOT_SIZE;
	}

	remainder = pend_len % SLOT_SIZE;
	/* Pad to 4 bytes not touching caller's buffer */
	if (remainder) {
		memcpy(&tmp, p, remainder);
		write_slot(tmp);
	}

	host_gen_interrupt();

	/* Make sure nothing bad happened during transmission */
	if (!cse_ready())
		return 0;

	return pend_len;
}

/*
 * Send message msg of size len from the host (host_addr) to the CSE client
 * at client_addr. Returns 1 on success and 0 otherwise.
 * In case of error heci_reset() may be required.
 */
static int
heci_send(const void *msg, size_t len, uint8_t host_addr, uint8_t client_addr)
{
	uint8_t retry;
	uint32_t csr, hdr;
	size_t sent, remaining, cb_size, max_length;
	const uint8_t *p;

	if (!msg || !len)
		return 0;

	clear_int();

	for (retry = 0; retry < MAX_HECI_MESSAGE_RETRY_COUNT; retry++) {
		p = msg;

		if (!wait_heci_ready()) {
			printk(BIOS_ERR, "HECI: not ready\n");
			continue;
		}

		csr = read_host_csr();
		cb_size = ((csr & CSR_CBD) >> CSR_CBD_START) * SLOT_SIZE;
		/*
		 * Reserve one slot for the header. The maximum message
		 * length is limited by the 9-bit length field in the header.
		 */
		max_length = MIN(cb_size, (1 << MEI_HDR_LENGTH_SIZE) - 1)
				- SLOT_SIZE;
		remaining = len;

		/*
		 * Fragment the message into smaller messages not exceeding
		 * useful circular buffer length. Mark last message complete.
		 */
		do {
			hdr = MIN(max_length, remaining)
				<< MEI_HDR_LENGTH_START;
			hdr |= client_addr << MEI_HDR_CSE_ADDR_START;
			hdr |= host_addr << MEI_HDR_HOST_ADDR_START;
			hdr |= (MIN(max_length, remaining) == remaining) ?
						MEI_HDR_IS_COMPLETE : 0;
			sent = send_one_message(hdr, p);
			p += sent;
			remaining -= sent;
		} while (remaining > 0 && sent != 0);

		if (!remaining)
			return 1;
	}
	return 0;
}

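/*
 * Read one MEI fragment: the header slot first, then as many whole 4-byte
 * slots as the header's length field requires, with a trailing partial slot
 * copied out byte-wise. Returns the fragment's payload length, or 0 if it
 * does not fit in maxlen or the CSE reported an error.
 */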
static size_t
recv_one_message(uint32_t *hdr, void *buff, size_t maxlen)
{
	uint32_t reg, *p = buff;
	size_t recv_slots, recv_len, remainder, i;

	/* first get the header */
	if (!wait_read_slots(1))
		return 0;

	*hdr = read_slot();
	recv_len = hdr_get_length(*hdr);

	if (!recv_len)
		printk(BIOS_WARNING, "HECI: message is zero-sized\n");

	recv_slots = bytes_to_slots(recv_len);

	i = 0;
	if (recv_len > maxlen) {
		printk(BIOS_ERR, "HECI: response is too big\n");
		return 0;
	}

	/* wait for the rest of the message to arrive */
	wait_read_slots(recv_slots);

	/* fetch whole slots first */
	while (i < ALIGN_DOWN(recv_len, SLOT_SIZE)) {
		*p++ = read_slot();
		i += SLOT_SIZE;
	}

	/*
	 * If ME is not ready, something went wrong and
	 * we received junk
	 */
	if (!cse_ready())
		return 0;

	remainder = recv_len % SLOT_SIZE;

	if (remainder) {
		reg = read_slot();
		memcpy(p, &reg, remainder);
	}

	return recv_len;
}

/*
 * Receive message into buff not exceeding maxlen. Message is considered
 * successfully received if a 'complete' indication is read from ME side
 * and there was enough space in the buffer to fit that message. maxlen
 * is updated with size of message that was received. Returns 0 on failure
 * and 1 on success.
 * In case of error heci_reset() may be required.
 */
static int heci_receive(void *buff, size_t *maxlen)
{
	uint8_t retry;
	size_t left, received;
	uint32_t hdr = 0;
	uint8_t *p;

	if (!buff || !maxlen || !*maxlen)
		return 0;

	clear_int();

	for (retry = 0; retry < MAX_HECI_MESSAGE_RETRY_COUNT; retry++) {
		p = buff;
		left = *maxlen;

		if (!wait_heci_ready()) {
			printk(BIOS_ERR, "HECI: not ready\n");
			continue;
		}

		/*
		 * Receive multiple packets until we meet one marked
		 * complete or we run out of space in caller-provided buffer.
		 */
		do {
			received = recv_one_message(&hdr, p, left);
			if (!received) {
				printk(BIOS_ERR, "HECI: Failed to receive!\n");
				return 0;
			}
			left -= received;
			p += received;
			/* If we have read out everything, ping the CSE to send more */
			if (!(hdr & MEI_HDR_IS_COMPLETE) && !cse_filled_slots())
				host_gen_interrupt();
		} while (received && !(hdr & MEI_HDR_IS_COMPLETE) && left > 0);

		if ((hdr & MEI_HDR_IS_COMPLETE) && received) {
			*maxlen = p - (uint8_t *) buff;
			return 1;
		}
	}
	return 0;
}

int heci_send_receive(const void *snd_msg, size_t snd_sz, void *rcv_msg, size_t *rcv_sz,
		      uint8_t cse_addr)
{
	if (!heci_send(snd_msg, snd_sz, BIOS_HOST_ADDR, cse_addr)) {
		printk(BIOS_ERR, "HECI: send Failed\n");
		return 0;
	}

	if (rcv_msg != NULL) {
		if (!heci_receive(rcv_msg, rcv_sz)) {
			printk(BIOS_ERR, "HECI: receive Failed\n");
			return 0;
		}
	}
	return 1;
}
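
/*
 * Sketch of a typical MKHI transaction, mirroring the helpers further below
 * ('msg' stands for any packed request structure; illustrative only):
 *
 *	struct mkhi_hdr reply;
 *	size_t reply_size = sizeof(reply);
 *	if (!heci_send_receive(&msg, sizeof(msg), &reply, &reply_size, HECI_MKHI_ADDR))
 *		... treat as failure, possibly heci_reset() ...
 */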

/*
 * Attempt to reset the device. This is useful when host and ME are out
 * of sync during transmission or ME didn't understand the message.
 */
int heci_reset(void)
{
	uint32_t csr;

	/* Clear post code to prevent eventlog entry from unknown code. */
	post_code(0);

	/* Send reset request */
	csr = read_host_csr();
	csr |= (CSR_RESET | CSR_IG);
	write_host_csr(csr);

	if (wait_heci_ready()) {
		/* Device is back on its imaginary feet, clear reset */
		cse_set_host_ready();
		return 1;
	}

	printk(BIOS_CRIT, "HECI: reset failed\n");

	return 0;
}

bool is_cse_enabled(void)
{
	const struct device *cse_dev = pcidev_path_on_root(PCH_DEVFN_CSE);

	if (!cse_dev || !cse_dev->enabled) {
		printk(BIOS_WARNING, "HECI: No CSE device\n");
		return false;
	}

	if (pci_read_config16(PCH_DEV_CSE, PCI_VENDOR_ID) == 0xFFFF) {
		printk(BIOS_WARNING, "HECI: CSE device is hidden\n");
		return false;
	}

	return true;
}

uint32_t me_read_config32(int offset)
{
	return pci_read_config32(PCH_DEV_CSE, offset);
}

static bool cse_is_global_reset_allowed(void)
{
	/*
	 * Allow sending GLOBAL_RESET command only if:
	 * - CSE's current working state is Normal and current operation mode is Normal.
	 * - (or) CSE's current working state is Normal and current operation mode is
	 *   Soft Temp Disable or Security Override Mode, provided CSE's Firmware SKU
	 *   is Lite.
	 */
	if (!cse_is_hfs1_cws_normal())
		return false;

	if (cse_is_hfs1_com_normal())
		return true;

	if (cse_is_hfs3_fw_sku_lite()) {
		if (cse_is_hfs1_com_soft_temp_disable() || cse_is_hfs1_com_secover_mei_msg())
			return true;
	}
	return false;
}

Sridhar Siricillad415c202019-08-31 14:54:57 +0530637/*
Subrata Banikf463dc02020-09-14 19:04:03 +0530638 * Sends GLOBAL_RESET_REQ cmd to CSE with reset type GLOBAL_RESET.
639 * Returns 0 on failure and 1 on success.
Sridhar Siricillad415c202019-08-31 14:54:57 +0530640 */
Subrata Banikf463dc02020-09-14 19:04:03 +0530641static int cse_request_reset(enum rst_req_type rst_type)
Sridhar Siricillad415c202019-08-31 14:54:57 +0530642{
643 int status;
644 struct mkhi_hdr reply;
645 struct reset_message {
646 struct mkhi_hdr hdr;
647 uint8_t req_origin;
648 uint8_t reset_type;
649 } __packed;
650 struct reset_message msg = {
651 .hdr = {
652 .group_id = MKHI_GROUP_ID_CBM,
Sridhar Siricillae202e672020-01-07 23:36:40 +0530653 .command = MKHI_CBM_GLOBAL_RESET_REQ,
Sridhar Siricillad415c202019-08-31 14:54:57 +0530654 },
655 .req_origin = GR_ORIGIN_BIOS_POST,
656 .reset_type = rst_type
657 };
658 size_t reply_size;
659
Sridhar Siricillaf2eb6872019-12-05 19:54:16 +0530660 printk(BIOS_DEBUG, "HECI: Global Reset(Type:%d) Command\n", rst_type);
Sridhar Siricilla59c7cb7d2020-02-07 11:59:30 +0530661
Sridhar Siricillac2a2d2b2020-02-27 17:16:13 +0530662 if (!(rst_type == GLOBAL_RESET || rst_type == CSE_RESET_ONLY)) {
Sridhar Siricillaf2eb6872019-12-05 19:54:16 +0530663 printk(BIOS_ERR, "HECI: Unsupported reset type is requested\n");
664 return 0;
665 }
Sridhar Siricillad415c202019-08-31 14:54:57 +0530666
Subrata Banikf463dc02020-09-14 19:04:03 +0530667 if (!cse_is_global_reset_allowed() || !is_cse_enabled()) {
Sridhar Siricilla59c7cb7d2020-02-07 11:59:30 +0530668 printk(BIOS_ERR, "HECI: CSE does not meet required prerequisites\n");
669 return 0;
670 }
671
Sridhar Siricillad415c202019-08-31 14:54:57 +0530672 heci_reset();
673
674 reply_size = sizeof(reply);
675 memset(&reply, 0, reply_size);
676
Sridhar Siricillad415c202019-08-31 14:54:57 +0530677 if (rst_type == CSE_RESET_ONLY)
Sridhar Siricillaf2eb6872019-12-05 19:54:16 +0530678 status = heci_send(&msg, sizeof(msg), BIOS_HOST_ADDR, HECI_MKHI_ADDR);
Sridhar Siricillad415c202019-08-31 14:54:57 +0530679 else
Rizwan Qureshi957857d2021-08-30 16:43:57 +0530680 status = heci_send_receive(&msg, sizeof(msg), &reply, &reply_size,
681 HECI_MKHI_ADDR);
Sridhar Siricillad415c202019-08-31 14:54:57 +0530682
Sridhar Siricillaf2eb6872019-12-05 19:54:16 +0530683 printk(BIOS_DEBUG, "HECI: Global Reset %s!\n", status ? "success" : "failure");
684 return status;
Sridhar Siricillad415c202019-08-31 14:54:57 +0530685}
686
Subrata Banikf463dc02020-09-14 19:04:03 +0530687int cse_request_global_reset(void)
688{
689 return cse_request_reset(GLOBAL_RESET);
690}
691
Sridhar Siricillad16187e2019-11-27 16:02:47 +0530692static bool cse_is_hmrfpo_enable_allowed(void)
693{
	/*
	 * Allow sending HMRFPO ENABLE command only if:
	 * - CSE's current working state is Normal and current operation mode is Normal
	 * - (or) CSE's current working state is Normal and current operation mode is
	 *   Soft Temp Disable if CSE's Firmware SKU is Lite
	 */
	if (!cse_is_hfs1_cws_normal())
		return false;

	if (cse_is_hfs1_com_normal())
		return true;

	if (cse_is_hfs3_fw_sku_lite() && cse_is_hfs1_com_soft_temp_disable())
		return true;

	return false;
}

/* Sends HMRFPO Enable command to CSE */
int cse_hmrfpo_enable(void)
{
	struct hmrfpo_enable_msg {
		struct mkhi_hdr hdr;
		uint32_t nonce[2];
	} __packed;

	/* HMRFPO Enable message */
	struct hmrfpo_enable_msg msg = {
		.hdr = {
			.group_id = MKHI_GROUP_ID_HMRFPO,
			.command = MKHI_HMRFPO_ENABLE,
		},
		.nonce = {0},
	};

	/* HMRFPO Enable response */
	struct hmrfpo_enable_resp {
		struct mkhi_hdr hdr;
		/* Base addr for factory data area, not relevant for client SKUs */
		uint32_t fct_base;
		/* Length of factory data area, not relevant for client SKUs */
		uint32_t fct_limit;
		uint8_t status;
		uint8_t reserved[3];
	} __packed;

	struct hmrfpo_enable_resp resp;
	size_t resp_size = sizeof(struct hmrfpo_enable_resp);

	printk(BIOS_DEBUG, "HECI: Send HMRFPO Enable Command\n");

	if (!cse_is_hmrfpo_enable_allowed()) {
		printk(BIOS_ERR, "HECI: CSE does not meet required prerequisites\n");
		return 0;
	}

	if (!heci_send_receive(&msg, sizeof(struct hmrfpo_enable_msg),
			       &resp, &resp_size, HECI_MKHI_ADDR))
		return 0;

	if (resp.hdr.result) {
		printk(BIOS_ERR, "HECI: Resp Failed:%d\n", resp.hdr.result);
		return 0;
	}

	if (resp.status) {
		printk(BIOS_ERR, "HECI: HMRFPO_Enable Failed (resp status: %d)\n", resp.status);
		return 0;
	}

	return 1;
}

/*
 * Sends HMRFPO Get Status command to CSE to get the HMRFPO status.
 * The status can be DISABLED/LOCKED/ENABLED.
 */
Sridhar Siricillaff072e62019-11-27 14:55:16 +0530771int cse_hmrfpo_get_status(void)
Sridhar Siricillae30a0e62019-08-31 16:12:21 +0530772{
773 struct hmrfpo_get_status_msg {
774 struct mkhi_hdr hdr;
775 } __packed;
776
777 struct hmrfpo_get_status_resp {
778 struct mkhi_hdr hdr;
779 uint8_t status;
Sridhar Siricilla63be9182020-01-19 12:38:56 +0530780 uint8_t reserved[3];
Sridhar Siricillae30a0e62019-08-31 16:12:21 +0530781 } __packed;
782
783 struct hmrfpo_get_status_msg msg = {
784 .hdr = {
Sridhar Siricillae202e672020-01-07 23:36:40 +0530785 .group_id = MKHI_GROUP_ID_HMRFPO,
Sridhar Siricillae30a0e62019-08-31 16:12:21 +0530786 .command = MKHI_HMRFPO_GET_STATUS,
787 },
788 };
789 struct hmrfpo_get_status_resp resp;
790 size_t resp_size = sizeof(struct hmrfpo_get_status_resp);
791
792 printk(BIOS_INFO, "HECI: Sending Get HMRFPO Status Command\n");
793
Sridhar Siricilla206905c2020-02-06 18:48:22 +0530794 if (!cse_is_hfs1_cws_normal()) {
795 printk(BIOS_ERR, "HECI: CSE's current working state is not Normal\n");
796 return -1;
797 }
798
Sridhar Siricillae30a0e62019-08-31 16:12:21 +0530799 if (!heci_send_receive(&msg, sizeof(struct hmrfpo_get_status_msg),
Rizwan Qureshi957857d2021-08-30 16:43:57 +0530800 &resp, &resp_size, HECI_MKHI_ADDR)) {
Sridhar Siricillae30a0e62019-08-31 16:12:21 +0530801 printk(BIOS_ERR, "HECI: HMRFPO send/receive fail\n");
802 return -1;
803 }
804
805 if (resp.hdr.result) {
806 printk(BIOS_ERR, "HECI: HMRFPO Resp Failed:%d\n",
807 resp.hdr.result);
808 return -1;
809 }
810
811 return resp.status;
812}
813
Sridhar Siricilla24a974a2020-02-19 14:41:36 +0530814void print_me_fw_version(void *unused)
815{
816 struct version {
817 uint16_t minor;
818 uint16_t major;
819 uint16_t build;
820 uint16_t hotfix;
821 } __packed;
822
823 struct fw_ver_resp {
824 struct mkhi_hdr hdr;
825 struct version code;
826 struct version rec;
827 struct version fitc;
828 } __packed;
829
830 const struct mkhi_hdr fw_ver_msg = {
831 .group_id = MKHI_GROUP_ID_GEN,
832 .command = MKHI_GEN_GET_FW_VERSION,
833 };
834
835 struct fw_ver_resp resp;
836 size_t resp_size = sizeof(resp);
837
838 /* Ignore if UART debugging is disabled */
839 if (!CONFIG(CONSOLE_SERIAL))
840 return;
841
Wim Vervoorn8602fb72020-03-30 12:17:54 +0200842 /* Ignore if CSE is disabled */
843 if (!is_cse_enabled())
844 return;
845
Sridhar Siricilla24a974a2020-02-19 14:41:36 +0530846 /*
Sridhar Siricilla99dbca32020-05-12 21:05:04 +0530847 * Ignore if ME Firmware SKU type is Lite since
Sridhar Siricilla24a974a2020-02-19 14:41:36 +0530848 * print_boot_partition_info() logs RO(BP1) and RW(BP2) versions.
849 */
Sridhar Siricilla99dbca32020-05-12 21:05:04 +0530850 if (cse_is_hfs3_fw_sku_lite())
Sridhar Siricilla24a974a2020-02-19 14:41:36 +0530851 return;
852
	/*
	 * Prerequisites:
	 * 1) HFSTS1 Current Working State is Normal
	 * 2) HFSTS1 Current Operation Mode is Normal
	 * 3) It's after the DRAM INIT DONE message (taken care of by calling it
	 *    during ramstage)
	 */
	if (!cse_is_hfs1_cws_normal() || !cse_is_hfs1_com_normal())
		goto fail;

	heci_reset();

	if (!heci_send_receive(&fw_ver_msg, sizeof(fw_ver_msg), &resp, &resp_size,
			       HECI_MKHI_ADDR))
		goto fail;

	if (resp.hdr.result)
		goto fail;

	printk(BIOS_DEBUG, "ME: Version: %d.%d.%d.%d\n", resp.code.major,
	       resp.code.minor, resp.code.hotfix, resp.code.build);
	return;

fail:
	printk(BIOS_DEBUG, "ME: Version: Unavailable\n");
}

void cse_trigger_vboot_recovery(enum csme_failure_reason reason)
{
	printk(BIOS_DEBUG, "cse: CSE status registers: HFSTS1: 0x%x, HFSTS2: 0x%x "
			"HFSTS3: 0x%x\n", me_read_config32(PCI_ME_HFSTS1),
			me_read_config32(PCI_ME_HFSTS2), me_read_config32(PCI_ME_HFSTS3));

	if (CONFIG(VBOOT)) {
		struct vb2_context *ctx = vboot_get_context();
		if (ctx == NULL)
			goto failure;
		vb2api_fail(ctx, VB2_RECOVERY_INTEL_CSE_LITE_SKU, reason);
		vboot_save_data(ctx);
		vboot_reboot();
	}
failure:
	die("cse: Failed to trigger recovery mode(recovery subcode:%d)\n", reason);
}

#if ENV_RAMSTAGE

static void update_sec_bar(struct device *dev)
{
	cse.sec_bar = find_resource(dev, PCI_BASE_ADDRESS_0)->base;
}

static void cse_set_resources(struct device *dev)
{
	if (dev->path.pci.devfn == PCH_DEVFN_CSE)
		update_sec_bar(dev);

	pci_dev_set_resources(dev);
}

static struct device_operations cse_ops = {
	.set_resources		= cse_set_resources,
	.read_resources		= pci_dev_read_resources,
	.enable_resources	= pci_dev_enable_resources,
	.init			= pci_dev_init,
	.ops_pci		= &pci_dev_ops_pci,
};

static const unsigned short pci_device_ids[] = {
	PCI_DEVICE_ID_INTEL_APL_CSE0,
	PCI_DEVICE_ID_INTEL_GLK_CSE0,
	PCI_DEVICE_ID_INTEL_CNL_CSE0,
	PCI_DEVICE_ID_INTEL_SKL_CSE0,
	PCI_DEVICE_ID_INTEL_LWB_CSE0,
	PCI_DEVICE_ID_INTEL_LWB_CSE0_SUPER,
	PCI_DEVICE_ID_INTEL_CNP_H_CSE0,
	PCI_DEVICE_ID_INTEL_ICL_CSE0,
	PCI_DEVICE_ID_INTEL_CMP_CSE0,
	PCI_DEVICE_ID_INTEL_CMP_H_CSE0,
	PCI_DEVICE_ID_INTEL_TGL_CSE0,
	PCI_DEVICE_ID_INTEL_TGL_H_CSE0,
	PCI_DEVICE_ID_INTEL_MCC_CSE0,
	PCI_DEVICE_ID_INTEL_MCC_CSE1,
	PCI_DEVICE_ID_INTEL_MCC_CSE2,
	PCI_DEVICE_ID_INTEL_MCC_CSE3,
	PCI_DEVICE_ID_INTEL_JSP_CSE0,
	PCI_DEVICE_ID_INTEL_JSP_CSE1,
	PCI_DEVICE_ID_INTEL_JSP_CSE2,
	PCI_DEVICE_ID_INTEL_JSP_CSE3,
	PCI_DEVICE_ID_INTEL_ADP_P_CSE0,
	PCI_DEVICE_ID_INTEL_ADP_P_CSE1,
	PCI_DEVICE_ID_INTEL_ADP_P_CSE2,
	PCI_DEVICE_ID_INTEL_ADP_P_CSE3,
	PCI_DEVICE_ID_INTEL_ADP_S_CSE0,
	PCI_DEVICE_ID_INTEL_ADP_S_CSE1,
	PCI_DEVICE_ID_INTEL_ADP_S_CSE2,
	PCI_DEVICE_ID_INTEL_ADP_S_CSE3,
	PCI_DEVICE_ID_INTEL_ADP_M_CSE0,
	PCI_DEVICE_ID_INTEL_ADP_M_CSE1,
	PCI_DEVICE_ID_INTEL_ADP_M_CSE2,
	PCI_DEVICE_ID_INTEL_ADP_M_CSE3,
	0,
};

static const struct pci_driver cse_driver __pci_driver = {
	.ops		= &cse_ops,
	.vendor		= PCI_VENDOR_ID_INTEL,
	/* SoC/chipset needs to provide PCI device ID */
	.devices	= pci_device_ids
};

#endif