blob: d10492bbe00cdc8071c6ce3d2f7b185ef442b1d8 [file] [log] [blame]
Angel Pons0612b272020-04-05 15:46:56 +02001/* SPDX-License-Identifier: GPL-2.0-only */
Andrey Petrov04a72c42017-03-01 15:51:57 -08002
Subrata Banik05e06cd2017-11-09 15:04:09 +05303#include <assert.h>
Andrey Petrov04a72c42017-03-01 15:51:57 -08004#include <commonlib/helpers.h>
5#include <console/console.h>
Kyösti Mälkki13f66502019-03-03 08:01:05 +02006#include <device/mmio.h>
Andrey Petrov04a72c42017-03-01 15:51:57 -08007#include <delay.h>
8#include <device/pci.h>
9#include <device/pci_ids.h>
10#include <device/pci_ops.h>
11#include <intelblocks/cse.h>
Subrata Banik05e06cd2017-11-09 15:04:09 +053012#include <soc/iomap.h>
Andrey Petrov04a72c42017-03-01 15:51:57 -080013#include <soc/pci_devs.h>
Sridhar Siricilla8e465452019-09-23 20:59:38 +053014#include <soc/me.h>
Andrey Petrov04a72c42017-03-01 15:51:57 -080015#include <string.h>
16#include <timer.h>
17
#define MAX_HECI_MESSAGE_RETRY_COUNT 5

/* Wait up to 15 sec for HECI to get ready */
#define HECI_DELAY_READY	(15 * 1000)
/* Wait up to 100 usec between circular buffer polls */
#define HECI_DELAY		100
/* Wait up to 5 sec for CSE to chew something we sent */
#define HECI_SEND_TIMEOUT	(5 * 1000)
/* Wait up to 5 sec for CSE to blurp a reply */
#define HECI_READ_TIMEOUT	(5 * 1000)

/* Each circular buffer slot carries one 32-bit word */
#define SLOT_SIZE		sizeof(uint32_t)

/* HECI MMIO register offsets relative to the CSE BAR */
#define MMIO_CSE_CB_WW		0x00	/* host -> CSE circular buffer write window */
#define MMIO_HOST_CSR		0x04	/* host control/status register */
#define MMIO_CSE_CB_RW		0x08	/* CSE -> host circular buffer read window */
#define MMIO_CSE_CSR		0x0c	/* CSE control/status register */

/* Control/status register bits and bit-fields */
#define CSR_IE			(1 << 0)	/* interrupt enable */
#define CSR_IS			(1 << 1)	/* interrupt status; set in clear_int() to ack */
#define CSR_IG			(1 << 2)	/* interrupt generate ("doorbell", see host_gen_interrupt()) */
#define CSR_READY		(1 << 3)	/* side is ready to communicate */
#define CSR_RESET		(1 << 4)	/* request interface reset */
#define CSR_RP_START		8		/* circular buffer read pointer, 8 bits */
#define CSR_RP			(((1 << 8) - 1) << CSR_RP_START)
#define CSR_WP_START		16		/* circular buffer write pointer, 8 bits */
#define CSR_WP			(((1 << 8) - 1) << CSR_WP_START)
#define CSR_CBD_START		24		/* circular buffer depth in slots, 8 bits */
#define CSR_CBD			(((1 << 8) - 1) << CSR_CBD_START)

/* MEI message header fields */
#define MEI_HDR_IS_COMPLETE	(1 << 31)	/* marks the last fragment of a message */
#define MEI_HDR_LENGTH_START	16
#define MEI_HDR_LENGTH_SIZE	9
#define MEI_HDR_LENGTH		(((1 << MEI_HDR_LENGTH_SIZE) - 1) \
					<< MEI_HDR_LENGTH_START)
#define MEI_HDR_HOST_ADDR_START	8
#define MEI_HDR_HOST_ADDR	(((1 << 8) - 1) << MEI_HDR_HOST_ADDR_START)
#define MEI_HDR_CSE_ADDR_START	0
#define MEI_HDR_CSE_ADDR	(((1 << 8) - 1) << MEI_HDR_CSE_ADDR_START)

/* Wait up to 5 seconds for CSE to boot from RO(BP1) */
#define CSE_DELAY_BOOT_TO_RO	(5 * 1000)
60
/* Cached driver state: sec_bar is the CSE MMIO base used by read_bar()/write_bar() */
static struct cse_device {
	uintptr_t sec_bar;
} cse;
Andrey Petrov04a72c42017-03-01 15:51:57 -080064
65/*
66 * Initialize the device with provided temporary BAR. If BAR is 0 use a
67 * default. This is intended for pre-mem usage only where BARs haven't been
68 * assigned yet and devices are not enabled.
69 */
void heci_init(uintptr_t tempbar)
{
#if defined(__SIMPLE_DEVICE__)
	/* Pre-ram stages address the device as a raw bus/dev/fn number */
	pci_devfn_t dev = PCH_DEV_CSE;
#else
	/* Ramstage addresses the device through the devicetree node */
	struct device *dev = PCH_DEV_CSE;
#endif
	u16 pcireg;

	/* Assume it is already initialized, nothing else to do */
	if (cse.sec_bar)
		return;

	/* Use default pre-ram bar */
	if (!tempbar)
		tempbar = HECI1_BASE_ADDRESS;

	/* Assign Resources to HECI1 */
	/* Clear BIT 1-2 of Command Register */
	pcireg = pci_read_config16(dev, PCI_COMMAND);
	pcireg &= ~(PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY);
	pci_write_config16(dev, PCI_COMMAND, pcireg);

	/* Program Temporary BAR for HECI1 */
	pci_write_config32(dev, PCI_BASE_ADDRESS_0, tempbar);
	pci_write_config32(dev, PCI_BASE_ADDRESS_1, 0x0);

	/* Enable Bus Master and MMIO Space */
	pci_or_config16(dev, PCI_COMMAND, PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY);

	/* Cache the BAR so later register accesses skip config-space reads */
	cse.sec_bar = tempbar;
}
102
Subrata Banik05e06cd2017-11-09 15:04:09 +0530103/* Get HECI BAR 0 from PCI configuration space */
104static uint32_t get_cse_bar(void)
105{
106 uintptr_t bar;
107
108 bar = pci_read_config32(PCH_DEV_CSE, PCI_BASE_ADDRESS_0);
109 assert(bar != 0);
110 /*
111 * Bits 31-12 are the base address as per EDS for SPI,
112 * Don't care about 0-11 bit
113 */
114 return bar & ~PCI_BASE_ADDRESS_MEM_ATTR_MASK;
115}
116
Andrey Petrov04a72c42017-03-01 15:51:57 -0800117static uint32_t read_bar(uint32_t offset)
118{
Patrick Georgi08c8cf92019-12-02 11:43:20 +0100119 /* Load and cache BAR */
Patrick Georgic9b13592019-11-29 11:47:47 +0100120 if (!cse.sec_bar)
121 cse.sec_bar = get_cse_bar();
122 return read32((void *)(cse.sec_bar + offset));
Andrey Petrov04a72c42017-03-01 15:51:57 -0800123}
124
125static void write_bar(uint32_t offset, uint32_t val)
126{
Patrick Georgi08c8cf92019-12-02 11:43:20 +0100127 /* Load and cache BAR */
Patrick Georgic9b13592019-11-29 11:47:47 +0100128 if (!cse.sec_bar)
129 cse.sec_bar = get_cse_bar();
130 return write32((void *)(cse.sec_bar + offset), val);
Andrey Petrov04a72c42017-03-01 15:51:57 -0800131}
132
133static uint32_t read_cse_csr(void)
134{
135 return read_bar(MMIO_CSE_CSR);
136}
137
138static uint32_t read_host_csr(void)
139{
140 return read_bar(MMIO_HOST_CSR);
141}
142
143static void write_host_csr(uint32_t data)
144{
145 write_bar(MMIO_HOST_CSR, data);
146}
147
148static size_t filled_slots(uint32_t data)
149{
150 uint8_t wp, rp;
151 rp = data >> CSR_RP_START;
152 wp = data >> CSR_WP_START;
153 return (uint8_t) (wp - rp);
154}
155
/* Number of slots CSE has queued for the host to consume */
static size_t cse_filled_slots(void)
{
	return filled_slots(read_cse_csr());
}
160
161static size_t host_empty_slots(void)
162{
163 uint32_t csr;
164 csr = read_host_csr();
165
166 return ((csr & CSR_CBD) >> CSR_CBD_START) - filled_slots(csr);
167}
168
169static void clear_int(void)
170{
171 uint32_t csr;
172 csr = read_host_csr();
173 csr |= CSR_IS;
174 write_host_csr(csr);
175}
176
177static uint32_t read_slot(void)
178{
179 return read_bar(MMIO_CSE_CB_RW);
180}
181
182static void write_slot(uint32_t val)
183{
184 write_bar(MMIO_CSE_CB_WW, val);
185}
186
/*
 * Poll until the host write buffer has room for cnt slots.
 * Returns 1 on success, 0 after HECI_SEND_TIMEOUT expires.
 */
static int wait_write_slots(size_t cnt)
{
	struct stopwatch sw;

	stopwatch_init_msecs_expire(&sw, HECI_SEND_TIMEOUT);
	while (host_empty_slots() < cnt) {
		udelay(HECI_DELAY);
		if (stopwatch_expired(&sw)) {
			printk(BIOS_ERR, "HECI: timeout, buffer not drained\n");
			return 0;
		}
	}
	return 1;
}
201
/*
 * Poll until CSE has queued at least cnt slots for the host to read.
 * Returns 1 on success, 0 after HECI_READ_TIMEOUT expires.
 */
static int wait_read_slots(size_t cnt)
{
	struct stopwatch sw;

	stopwatch_init_msecs_expire(&sw, HECI_READ_TIMEOUT);
	while (cse_filled_slots() < cnt) {
		udelay(HECI_DELAY);
		if (stopwatch_expired(&sw)) {
			printk(BIOS_ERR, "HECI: timed out reading answer!\n");
			return 0;
		}
	}
	return 1;
}
216
217/* get number of full 4-byte slots */
218static size_t bytes_to_slots(size_t bytes)
219{
220 return ALIGN_UP(bytes, SLOT_SIZE) / SLOT_SIZE;
221}
222
223static int cse_ready(void)
224{
225 uint32_t csr;
226 csr = read_cse_csr();
227 return csr & CSR_READY;
228}
229
Sridhar Siricilla8e465452019-09-23 20:59:38 +0530230static bool cse_check_hfs1_com(int mode)
Sridhar Siricillab9d075b2019-08-31 11:38:33 +0530231{
232 union me_hfsts1 hfs1;
233 hfs1.data = me_read_config32(PCI_ME_HFSTS1);
Sridhar Siricilla8e465452019-09-23 20:59:38 +0530234 return hfs1.fields.operation_mode == mode;
235}
236
237bool cse_is_hfs1_cws_normal(void)
238{
239 union me_hfsts1 hfs1;
240 hfs1.data = me_read_config32(PCI_ME_HFSTS1);
241 if (hfs1.fields.working_state == ME_HFS1_CWS_NORMAL)
242 return true;
243 return false;
244}
245
246bool cse_is_hfs1_com_normal(void)
247{
248 return cse_check_hfs1_com(ME_HFS1_COM_NORMAL);
249}
250
251bool cse_is_hfs1_com_secover_mei_msg(void)
252{
253 return cse_check_hfs1_com(ME_HFS1_COM_SECOVER_MEI_MSG);
254}
255
256bool cse_is_hfs1_com_soft_temp_disable(void)
257{
258 return cse_check_hfs1_com(ME_HFS1_COM_SOFT_TEMP_DISABLE);
Sridhar Siricillab9d075b2019-08-31 11:38:33 +0530259}
260
Sridhar Siricilla99dbca32020-05-12 21:05:04 +0530261bool cse_is_hfs3_fw_sku_lite(void)
Sridhar Siricilla3465d272020-02-06 15:31:04 +0530262{
263 union me_hfsts3 hfs3;
264 hfs3.data = me_read_config32(PCI_ME_HFSTS3);
Sridhar Siricilla99dbca32020-05-12 21:05:04 +0530265 return hfs3.fields.fw_sku == ME_HFS3_FW_SKU_LITE;
Sridhar Siricilla3465d272020-02-06 15:31:04 +0530266}
267
Sridhar Siricillab9d075b2019-08-31 11:38:33 +0530268/* Makes the host ready to communicate with CSE */
Sridhar Siricillaff072e62019-11-27 14:55:16 +0530269void cse_set_host_ready(void)
Sridhar Siricillab9d075b2019-08-31 11:38:33 +0530270{
271 uint32_t csr;
272 csr = read_host_csr();
273 csr &= ~CSR_RESET;
274 csr |= (CSR_IG | CSR_READY);
275 write_host_csr(csr);
276}
277
/*
 * Polls for ME mode ME_HFS1_COM_SECOVER_MEI_MSG for 15 seconds.
 * Returns 1 when the mode is reached, 0 on timeout.
 */
uint8_t cse_wait_sec_override_mode(void)
{
	struct stopwatch sw;
	stopwatch_init_msecs_expire(&sw, HECI_DELAY_READY);
	while (!cse_is_hfs1_com_secover_mei_msg()) {
		udelay(HECI_DELAY);
		if (stopwatch_expired(&sw)) {
			printk(BIOS_ERR, "HECI: Timed out waiting for SEC_OVERRIDE mode!\n");
			return 0;
		}
	}
	/* Log how long the transition took, useful for boot-time analysis */
	printk(BIOS_DEBUG, "HECI: CSE took %lu ms to enter security override mode\n",
		stopwatch_duration_msecs(&sw));
	return 1;
}
294
/*
 * Polls for CSE's current operation mode 'Soft Temporary Disable'.
 * The CSE enters the current operation mode when it boots from RO(BP1).
 * Returns 1 when the mode is reached, 0 after CSE_DELAY_BOOT_TO_RO expires.
 */
uint8_t cse_wait_com_soft_temp_disable(void)
{
	struct stopwatch sw;
	stopwatch_init_msecs_expire(&sw, CSE_DELAY_BOOT_TO_RO);
	while (!cse_is_hfs1_com_soft_temp_disable()) {
		udelay(HECI_DELAY);
		if (stopwatch_expired(&sw)) {
			printk(BIOS_ERR, "HECI: Timed out waiting for CSE to boot from RO!\n");
			return 0;
		}
	}
	printk(BIOS_SPEW, "HECI: CSE took %lu ms to boot from RO\n",
		stopwatch_duration_msecs(&sw));
	return 1;
}
314
/*
 * Poll until CSE sets its ready bit. Returns 1 on success,
 * 0 after HECI_DELAY_READY (15 s) expires.
 */
static int wait_heci_ready(void)
{
	struct stopwatch sw;

	stopwatch_init_msecs_expire(&sw, HECI_DELAY_READY);
	while (!cse_ready()) {
		udelay(HECI_DELAY);
		if (stopwatch_expired(&sw))
			return 0;
	}

	return 1;
}
328
329static void host_gen_interrupt(void)
330{
331 uint32_t csr;
332 csr = read_host_csr();
333 csr |= CSR_IG;
334 write_host_csr(csr);
335}
336
337static size_t hdr_get_length(uint32_t hdr)
338{
339 return (hdr & MEI_HDR_LENGTH) >> MEI_HDR_LENGTH_START;
340}
341
/*
 * Transmit one MEI fragment: the header slot followed by the payload
 * rounded up to whole 4-byte slots. Returns the number of payload bytes
 * written, or 0 on timeout or CSE failure.
 */
static int
send_one_message(uint32_t hdr, const void *buff)
{
	size_t pend_len, pend_slots, remainder, i;
	uint32_t tmp;
	const uint32_t *p = buff;

	/* Get space for the header */
	if (!wait_write_slots(1))
		return 0;

	/* First, write header */
	write_slot(hdr);

	/* Payload length is encoded in the header that was just sent */
	pend_len = hdr_get_length(hdr);
	pend_slots = bytes_to_slots(pend_len);

	if (!wait_write_slots(pend_slots))
		return 0;

	/* Write the body in whole slots */
	i = 0;
	while (i < ALIGN_DOWN(pend_len, SLOT_SIZE)) {
		write_slot(*p++);
		i += SLOT_SIZE;
	}

	remainder = pend_len % SLOT_SIZE;
	/* Pad to 4 bytes not touching caller's buffer */
	if (remainder) {
		memcpy(&tmp, p, remainder);
		write_slot(tmp);
	}

	/* Notify CSE that data is waiting in the buffer */
	host_gen_interrupt();

	/* Make sure nothing bad happened during transmission */
	if (!cse_ready())
		return 0;

	return pend_len;
}
384
/*
 * Send a message to the CSE client at client_addr, fragmenting as needed
 * to fit both the circular buffer and the 9-bit header length field.
 * The whole transfer is retried up to MAX_HECI_MESSAGE_RETRY_COUNT times.
 * Returns 1 on success, 0 otherwise.
 */
int
heci_send(const void *msg, size_t len, uint8_t host_addr, uint8_t client_addr)
{
	uint8_t retry;
	uint32_t csr, hdr;
	size_t sent, remaining, cb_size, max_length;
	const uint8_t *p;

	if (!msg || !len)
		return 0;

	clear_int();

	for (retry = 0; retry < MAX_HECI_MESSAGE_RETRY_COUNT; retry++) {
		/* Restart from the beginning of the caller's buffer */
		p = msg;

		if (!wait_heci_ready()) {
			printk(BIOS_ERR, "HECI: not ready\n");
			continue;
		}

		/* Buffer depth (in slots) comes from the host CSR */
		csr = read_host_csr();
		cb_size = ((csr & CSR_CBD) >> CSR_CBD_START) * SLOT_SIZE;
		/*
		 * Reserve one slot for the header. Limit max message
		 * length by 9 bits that are available in the header.
		 */
		max_length = MIN(cb_size, (1 << MEI_HDR_LENGTH_SIZE) - 1)
				- SLOT_SIZE;
		remaining = len;

		/*
		 * Fragment the message into smaller messages not exceeding
		 * useful circular buffer length. Mark last message complete.
		 */
		do {
			hdr = MIN(max_length, remaining)
				<< MEI_HDR_LENGTH_START;
			hdr |= client_addr << MEI_HDR_CSE_ADDR_START;
			hdr |= host_addr << MEI_HDR_HOST_ADDR_START;
			hdr |= (MIN(max_length, remaining) == remaining) ?
					MEI_HDR_IS_COMPLETE : 0;
			sent = send_one_message(hdr, p);
			p += sent;
			remaining -= sent;
		} while (remaining > 0 && sent != 0);

		/* Every byte made it out: success */
		if (!remaining)
			return 1;
	}
	return 0;
}
437
/*
 * Receive one MEI fragment into buff (at most maxlen bytes); the fragment
 * header is stored through *hdr. Returns the payload length in bytes, or
 * 0 on timeout, oversized response, or CSE failure.
 */
static size_t
recv_one_message(uint32_t *hdr, void *buff, size_t maxlen)
{
	uint32_t reg, *p = buff;
	size_t recv_slots, recv_len, remainder, i;

	/* first get the header */
	if (!wait_read_slots(1))
		return 0;

	*hdr = read_slot();
	recv_len = hdr_get_length(*hdr);

	if (!recv_len)
		printk(BIOS_WARNING, "HECI: message is zero-sized\n");

	recv_slots = bytes_to_slots(recv_len);

	i = 0;
	if (recv_len > maxlen) {
		printk(BIOS_ERR, "HECI: response is too big\n");
		return 0;
	}

	/* wait for the rest of messages to arrive */
	wait_read_slots(recv_slots);

	/* fetch whole slots first */
	while (i < ALIGN_DOWN(recv_len, SLOT_SIZE)) {
		*p++ = read_slot();
		i += SLOT_SIZE;
	}

	/*
	 * If ME is not ready, something went wrong and
	 * we received junk
	 */
	if (!cse_ready())
		return 0;

	/* Trailing partial slot: copy only the valid bytes to the caller */
	remainder = recv_len % SLOT_SIZE;

	if (remainder) {
		reg = read_slot();
		memcpy(p, &reg, remainder);
	}

	return recv_len;
}
487
/*
 * Receive a complete message into buff. On entry *maxlen holds the buffer
 * size; on success it is updated to the number of bytes received. Retries
 * up to MAX_HECI_MESSAGE_RETRY_COUNT times. Returns 1 on success, 0 otherwise.
 */
int heci_receive(void *buff, size_t *maxlen)
{
	uint8_t retry;
	size_t left, received;
	uint32_t hdr = 0;
	uint8_t *p;

	if (!buff || !maxlen || !*maxlen)
		return 0;

	clear_int();

	for (retry = 0; retry < MAX_HECI_MESSAGE_RETRY_COUNT; retry++) {
		/* Each attempt starts over at the head of the buffer */
		p = buff;
		left = *maxlen;

		if (!wait_heci_ready()) {
			printk(BIOS_ERR, "HECI: not ready\n");
			continue;
		}

		/*
		 * Receive multiple packets until we meet one marked
		 * complete or we run out of space in caller-provided buffer.
		 */
		do {
			received = recv_one_message(&hdr, p, left);
			if (!received) {
				printk(BIOS_ERR, "HECI: Failed to receive!\n");
				return 0;
			}
			left -= received;
			p += received;
			/* If we read out everything ping to send more */
			if (!(hdr & MEI_HDR_IS_COMPLETE) && !cse_filled_slots())
				host_gen_interrupt();
		} while (received && !(hdr & MEI_HDR_IS_COMPLETE) && left > 0);

		if ((hdr & MEI_HDR_IS_COMPLETE) && received) {
			/* Report the total number of bytes consumed */
			*maxlen = p - (uint8_t *) buff;
			return 1;
		}
	}
	return 0;
}
533
Sridhar Siricillaa5208f52019-08-30 17:10:24 +0530534int heci_send_receive(const void *snd_msg, size_t snd_sz, void *rcv_msg, size_t *rcv_sz)
535{
536 if (!heci_send(snd_msg, snd_sz, BIOS_HOST_ADDR, HECI_MKHI_ADDR)) {
537 printk(BIOS_ERR, "HECI: send Failed\n");
538 return 0;
539 }
540
541 if (rcv_msg != NULL) {
542 if (!heci_receive(rcv_msg, rcv_sz)) {
543 printk(BIOS_ERR, "HECI: receive Failed\n");
544 return 0;
545 }
546 }
547 return 1;
548}
549
Andrey Petrov04a72c42017-03-01 15:51:57 -0800550/*
551 * Attempt to reset the device. This is useful when host and ME are out
552 * of sync during transmission or ME didn't understand the message.
553 */
554int heci_reset(void)
555{
556 uint32_t csr;
557
Duncan Laurie15ca9032020-11-05 10:09:07 -0800558 /* Clear post code to prevent eventlog entry from unknown code. */
559 post_code(0);
560
Andrey Petrov04a72c42017-03-01 15:51:57 -0800561 /* Send reset request */
562 csr = read_host_csr();
Sridhar Siricillab9d075b2019-08-31 11:38:33 +0530563 csr |= (CSR_RESET | CSR_IG);
Andrey Petrov04a72c42017-03-01 15:51:57 -0800564 write_host_csr(csr);
565
566 if (wait_heci_ready()) {
567 /* Device is back on its imaginary feet, clear reset */
Sridhar Siricillaff072e62019-11-27 14:55:16 +0530568 cse_set_host_ready();
Andrey Petrov04a72c42017-03-01 15:51:57 -0800569 return 1;
570 }
571
572 printk(BIOS_CRIT, "HECI: reset failed\n");
573
574 return 0;
575}
576
/*
 * True when the CSE device exists, is enabled in the devicetree, and is
 * visible on the bus (not hidden by firmware).
 */
bool is_cse_enabled(void)
{
	const struct device *cse_dev = pcidev_path_on_root(PCH_DEVFN_CSE);

	if (!cse_dev || !cse_dev->enabled) {
		printk(BIOS_WARNING, "HECI: No CSE device\n");
		return false;
	}

	/* An all-ones vendor ID means config reads are not decoded */
	if (pci_read_config16(PCH_DEV_CSE, PCI_VENDOR_ID) == 0xFFFF) {
		printk(BIOS_WARNING, "HECI: CSE device is hidden\n");
		return false;
	}

	return true;
}
593
/* Read a 32-bit register from the CSE device's PCI configuration space */
uint32_t me_read_config32(int offset)
{
	return pci_read_config32(PCH_DEV_CSE, offset);
}
598
/*
 * Allow sending GLOBAL_RESET command only if:
 * - CSE's current working state is Normal and current operation mode is Normal.
 * - (or) CSE's current working state is normal and current operation mode can
 *   be Soft Temp Disable or Security Override Mode if CSE's Firmware SKU is
 *   Lite.
 */
static bool cse_is_global_reset_allowed(void)
{
	if (!cse_is_hfs1_cws_normal())
		return false;

	if (cse_is_hfs1_com_normal())
		return true;

	return cse_is_hfs3_fw_sku_lite() &&
		(cse_is_hfs1_com_soft_temp_disable() ||
		 cse_is_hfs1_com_secover_mei_msg());
}
620
/*
 * Sends GLOBAL_RESET_REQ cmd to CSE with reset type GLOBAL_RESET.
 * Returns 0 on failure and 1 on success.
 */
static int cse_request_reset(enum rst_req_type rst_type)
{
	int status;
	struct mkhi_hdr reply;
	struct reset_message {
		struct mkhi_hdr hdr;
		uint8_t req_origin;
		uint8_t reset_type;
	} __packed;
	struct reset_message msg = {
		.hdr = {
			.group_id = MKHI_GROUP_ID_CBM,
			.command = MKHI_CBM_GLOBAL_RESET_REQ,
		},
		.req_origin = GR_ORIGIN_BIOS_POST,
		.reset_type = rst_type
	};
	size_t reply_size;

	printk(BIOS_DEBUG, "HECI: Global Reset(Type:%d) Command\n", rst_type);

	/* Only GLOBAL_RESET and CSE_RESET_ONLY are valid request types here */
	if (!(rst_type == GLOBAL_RESET || rst_type == CSE_RESET_ONLY)) {
		printk(BIOS_ERR, "HECI: Unsupported reset type is requested\n");
		return 0;
	}

	if (!cse_is_global_reset_allowed() || !is_cse_enabled()) {
		printk(BIOS_ERR, "HECI: CSE does not meet required prerequisites\n");
		return 0;
	}

	/* Resynchronize the HECI link before issuing the request */
	heci_reset();

	reply_size = sizeof(reply);
	memset(&reply, 0, reply_size);

	/* No reply is read for CSE_RESET_ONLY; other types wait for one */
	if (rst_type == CSE_RESET_ONLY)
		status = heci_send(&msg, sizeof(msg), BIOS_HOST_ADDR, HECI_MKHI_ADDR);
	else
		status = heci_send_receive(&msg, sizeof(msg), &reply, &reply_size);

	printk(BIOS_DEBUG, "HECI: Global Reset %s!\n", status ? "success" : "failure");
	return status;
}
669
/* Request a global reset (host + CSE) via the GLOBAL_RESET_REQ command */
int cse_request_global_reset(void)
{
	return cse_request_reset(GLOBAL_RESET);
}
674
/*
 * Allow sending HMRFPO ENABLE command only if:
 * - CSE's current working state is Normal and current operation mode is Normal
 * - (or) cse's current working state is normal and current operation mode is
 *   Soft Temp Disable if CSE's Firmware SKU is Lite
 */
static bool cse_is_hmrfpo_enable_allowed(void)
{
	if (!cse_is_hfs1_cws_normal())
		return false;

	if (cse_is_hfs1_com_normal())
		return true;

	return cse_is_hfs3_fw_sku_lite() && cse_is_hfs1_com_soft_temp_disable();
}
694
/* Sends HMRFPO Enable command to CSE. Returns 1 on success, 0 otherwise. */
int cse_hmrfpo_enable(void)
{
	struct hmrfpo_enable_msg {
		struct mkhi_hdr hdr;
		uint32_t nonce[2];
	} __packed;

	/* HMRFPO Enable message */
	struct hmrfpo_enable_msg msg = {
		.hdr = {
			.group_id = MKHI_GROUP_ID_HMRFPO,
			.command = MKHI_HMRFPO_ENABLE,
		},
		.nonce = {0},
	};

	/* HMRFPO Enable response */
	struct hmrfpo_enable_resp {
		struct mkhi_hdr hdr;
		/* Base addr for factory data area, not relevant for client SKUs */
		uint32_t fct_base;
		/* Length of factory data area, not relevant for client SKUs */
		uint32_t fct_limit;
		uint8_t status;
		uint8_t reserved[3];
	} __packed;

	struct hmrfpo_enable_resp resp;
	size_t resp_size = sizeof(struct hmrfpo_enable_resp);

	printk(BIOS_DEBUG, "HECI: Send HMRFPO Enable Command\n");

	if (!cse_is_hmrfpo_enable_allowed()) {
		printk(BIOS_ERR, "HECI: CSE does not meet required prerequisites\n");
		return 0;
	}

	if (!heci_send_receive(&msg, sizeof(struct hmrfpo_enable_msg),
			&resp, &resp_size))
		return 0;

	/* Non-zero MKHI result indicates the command itself failed */
	if (resp.hdr.result) {
		printk(BIOS_ERR, "HECI: Resp Failed:%d\n", resp.hdr.result);
		return 0;
	}

	/* Command accepted: also check the HMRFPO-specific status byte */
	if (resp.status) {
		printk(BIOS_ERR, "HECI: HMRFPO_Enable Failed (resp status: %d)\n", resp.status);
		return 0;
	}

	return 1;
}
749
/*
 * Sends HMRFPO Get Status command to CSE to get the HMRFPO status.
 * The status can be DISABLED/LOCKED/ENABLED.
 * Returns the status byte on success, -1 on failure.
 */
int cse_hmrfpo_get_status(void)
{
	struct hmrfpo_get_status_msg {
		struct mkhi_hdr hdr;
	} __packed;

	struct hmrfpo_get_status_resp {
		struct mkhi_hdr hdr;
		uint8_t status;
		uint8_t reserved[3];
	} __packed;

	struct hmrfpo_get_status_msg msg = {
		.hdr = {
			.group_id = MKHI_GROUP_ID_HMRFPO,
			.command = MKHI_HMRFPO_GET_STATUS,
		},
	};
	struct hmrfpo_get_status_resp resp;
	size_t resp_size = sizeof(struct hmrfpo_get_status_resp);

	printk(BIOS_INFO, "HECI: Sending Get HMRFPO Status Command\n");

	/* The command requires the working state to be Normal */
	if (!cse_is_hfs1_cws_normal()) {
		printk(BIOS_ERR, "HECI: CSE's current working state is not Normal\n");
		return -1;
	}

	if (!heci_send_receive(&msg, sizeof(struct hmrfpo_get_status_msg),
			&resp, &resp_size)) {
		printk(BIOS_ERR, "HECI: HMRFPO send/receive fail\n");
		return -1;
	}

	if (resp.hdr.result) {
		printk(BIOS_ERR, "HECI: HMRFPO Resp Failed:%d\n",
			resp.hdr.result);
		return -1;
	}

	return resp.status;
}
796
/*
 * Query the ME firmware version over MKHI and log it to the console.
 * The unused argument allows registration as a boot-state callback.
 */
void print_me_fw_version(void *unused)
{
	struct version {
		uint16_t minor;
		uint16_t major;
		uint16_t build;
		uint16_t hotfix;
	} __packed;

	struct fw_ver_resp {
		struct mkhi_hdr hdr;
		struct version code;
		struct version rec;
		struct version fitc;
	} __packed;

	const struct mkhi_hdr fw_ver_msg = {
		.group_id = MKHI_GROUP_ID_GEN,
		.command = MKHI_GEN_GET_FW_VERSION,
	};

	struct fw_ver_resp resp;
	size_t resp_size = sizeof(resp);

	/* Ignore if UART debugging is disabled */
	if (!CONFIG(CONSOLE_SERIAL))
		return;

	/* Ignore if CSE is disabled */
	if (!is_cse_enabled())
		return;

	/*
	 * Ignore if ME Firmware SKU type is Lite since
	 * print_boot_partition_info() logs RO(BP1) and RW(BP2) versions.
	 */
	if (cse_is_hfs3_fw_sku_lite())
		return;

	/*
	 * Prerequisites:
	 * 1) HFSTS1 Current Working State is Normal
	 * 2) HFSTS1 Current Operation Mode is Normal
	 * 3) It's after DRAM INIT DONE message (taken care of by calling it
	 *    during ramstage
	 */
	if (!cse_is_hfs1_cws_normal() || !cse_is_hfs1_com_normal())
		goto fail;

	/* Resynchronize the link in case an earlier exchange left it odd */
	heci_reset();

	if (!heci_send_receive(&fw_ver_msg, sizeof(fw_ver_msg), &resp, &resp_size))
		goto fail;

	if (resp.hdr.result)
		goto fail;

	printk(BIOS_DEBUG, "ME: Version: %d.%d.%d.%d\n", resp.code.major,
		resp.code.minor, resp.code.hotfix, resp.code.build);
	return;

fail:
	printk(BIOS_DEBUG, "ME: Version: Unavailable\n");
}
861
Andrey Petrov04a72c42017-03-01 15:51:57 -0800862#if ENV_RAMSTAGE
863
/* Cache the allocated CSE BAR so HECI register access keeps working */
static void update_sec_bar(struct device *dev)
{
	cse.sec_bar = find_resource(dev, PCI_BASE_ADDRESS_0)->base;
}
868
/* Standard PCI resource assignment, plus capturing the CSE BAR */
static void cse_set_resources(struct device *dev)
{
	if (dev->path.pci.devfn == PCH_DEVFN_CSE)
		update_sec_bar(dev);

	pci_dev_set_resources(dev);
}
876
/* Device operations for the CSE PCI function */
static struct device_operations cse_ops = {
	.set_resources		= cse_set_resources,
	.read_resources		= pci_dev_read_resources,
	.enable_resources	= pci_dev_enable_resources,
	.init			= pci_dev_init,
	.ops_pci		= &pci_dev_ops_pci,
};
884
/* PCI device IDs of supported CSE controllers; the list is zero-terminated */
static const unsigned short pci_device_ids[] = {
	PCI_DEVICE_ID_INTEL_APL_CSE0,
	PCI_DEVICE_ID_INTEL_GLK_CSE0,
	PCI_DEVICE_ID_INTEL_CNL_CSE0,
	PCI_DEVICE_ID_INTEL_SKL_CSE0,
	PCI_DEVICE_ID_INTEL_LWB_CSE0,
	PCI_DEVICE_ID_INTEL_LWB_CSE0_SUPER,
	PCI_DEVICE_ID_INTEL_CNP_H_CSE0,
	PCI_DEVICE_ID_INTEL_ICL_CSE0,
	PCI_DEVICE_ID_INTEL_CMP_CSE0,
	PCI_DEVICE_ID_INTEL_CMP_H_CSE0,
	PCI_DEVICE_ID_INTEL_TGL_CSE0,
	PCI_DEVICE_ID_INTEL_MCC_CSE0,
	PCI_DEVICE_ID_INTEL_MCC_CSE1,
	PCI_DEVICE_ID_INTEL_MCC_CSE2,
	PCI_DEVICE_ID_INTEL_MCC_CSE3,
	PCI_DEVICE_ID_INTEL_JSP_CSE0,
	PCI_DEVICE_ID_INTEL_JSP_CSE1,
	PCI_DEVICE_ID_INTEL_JSP_CSE2,
	PCI_DEVICE_ID_INTEL_JSP_CSE3,
	PCI_DEVICE_ID_INTEL_ADP_P_CSE0,
	PCI_DEVICE_ID_INTEL_ADP_P_CSE1,
	PCI_DEVICE_ID_INTEL_ADP_P_CSE2,
	PCI_DEVICE_ID_INTEL_ADP_P_CSE3,
	PCI_DEVICE_ID_INTEL_ADP_S_CSE0,
	PCI_DEVICE_ID_INTEL_ADP_S_CSE1,
	PCI_DEVICE_ID_INTEL_ADP_S_CSE2,
	PCI_DEVICE_ID_INTEL_ADP_S_CSE3,
	0,
};
915
/* Register cse_ops for all supported Intel CSE PCI device IDs */
static const struct pci_driver cse_driver __pci_driver = {
	.ops = &cse_ops,
	.vendor = PCI_VENDOR_ID_INTEL,
	/* SoC/chipset needs to provide PCI device ID */
	.devices = pci_device_ids
};
922
923#endif