/*
 * This file is part of the coreboot project.
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <assert.h>
#include <commonlib/helpers.h>
#include <console/console.h>
#include <device/mmio.h>
#include <delay.h>
#include <device/pci.h>
#include <device/pci_ids.h>
#include <device/pci_ops.h>
#include <intelblocks/cse.h>
#include <soc/iomap.h>
#include <soc/pci_devs.h>
#include <soc/me.h>
#include <string.h>
#include <timer.h>

#define MAX_HECI_MESSAGE_RETRY_COUNT	5

/* Wait up to 15 sec for HECI to get ready */
#define HECI_DELAY_READY	(15 * 1000)
/* Wait up to 100 usec between circular buffer polls */
#define HECI_DELAY		100
/* Wait up to 5 sec for CSE to chew something we sent */
#define HECI_SEND_TIMEOUT	(5 * 1000)
/* Wait up to 5 sec for CSE to blurp a reply */
#define HECI_READ_TIMEOUT	(5 * 1000)

#define SLOT_SIZE		sizeof(uint32_t)

#define MMIO_CSE_CB_WW		0x00
#define MMIO_HOST_CSR		0x04
#define MMIO_CSE_CB_RW		0x08
#define MMIO_CSE_CSR		0x0c

#define CSR_IE			(1 << 0)
#define CSR_IS			(1 << 1)
#define CSR_IG			(1 << 2)
#define CSR_READY		(1 << 3)
#define CSR_RESET		(1 << 4)
#define CSR_RP_START		8
#define CSR_RP			(((1 << 8) - 1) << CSR_RP_START)
#define CSR_WP_START		16
#define CSR_WP			(((1 << 8) - 1) << CSR_WP_START)
#define CSR_CBD_START		24
#define CSR_CBD			(((1 << 8) - 1) << CSR_CBD_START)
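
/*
 * Control/status register layout as implied by the bit-field macros above
 * (the host-side and CSE-side CSRs share the same format):
 *
 *  31       24 23       16 15        8 7         0
 * +-----------+-----------+-----------+-----------+
 * |    CBD    |    WP     |    RP     |   flags   |
 * +-----------+-----------+-----------+-----------+
 *
 * CBD is the circular buffer depth in slots, WP/RP are the write/read
 * pointers, and the flag bits 4..0 are RESET/READY/IG/IS/IE.
 */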

#define MEI_HDR_IS_COMPLETE	(1 << 31)
#define MEI_HDR_LENGTH_START	16
#define MEI_HDR_LENGTH_SIZE	9
#define MEI_HDR_LENGTH		(((1 << MEI_HDR_LENGTH_SIZE) - 1) \
					<< MEI_HDR_LENGTH_START)
#define MEI_HDR_HOST_ADDR_START	8
#define MEI_HDR_HOST_ADDR	(((1 << 8) - 1) << MEI_HDR_HOST_ADDR_START)
#define MEI_HDR_CSE_ADDR_START	0
#define MEI_HDR_CSE_ADDR	(((1 << 8) - 1) << MEI_HDR_CSE_ADDR_START)
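
/*
 * MEI message header layout, as implied by the macros above:
 *
 *  bit  31     : message complete flag
 *  bits 30..25 : not used by this driver
 *  bits 24..16 : payload length in bytes (9 bits)
 *  bits 15..8  : host address
 *  bits  7..0  : CSE (client) address
 */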

/* Wait up to 5 seconds for CSE to boot from RO(BP1) */
#define CSE_DELAY_BOOT_TO_RO	(5 * 1000)

static struct cse_device {
	uintptr_t sec_bar;
} cse;

/*
 * Initialize the device with provided temporary BAR. If BAR is 0 use a
 * default. This is intended for pre-mem usage only where BARs haven't been
 * assigned yet and devices are not enabled.
 */
void heci_init(uintptr_t tempbar)
{
#if defined(__SIMPLE_DEVICE__)
	pci_devfn_t dev = PCH_DEV_CSE;
#else
	struct device *dev = PCH_DEV_CSE;
#endif
	u8 pcireg;

	/* Assume it is already initialized, nothing else to do */
	if (cse.sec_bar)
		return;

	/* Use default pre-ram bar */
	if (!tempbar)
		tempbar = HECI1_BASE_ADDRESS;

	/* Assign Resources to HECI1 */
	/* Clear BIT 1-2 of Command Register */
	pcireg = pci_read_config8(dev, PCI_COMMAND);
	pcireg &= ~(PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY);
	pci_write_config8(dev, PCI_COMMAND, pcireg);

	/* Program Temporary BAR for HECI1 */
	pci_write_config32(dev, PCI_BASE_ADDRESS_0, tempbar);
	pci_write_config32(dev, PCI_BASE_ADDRESS_1, 0x0);

	/* Enable Bus Master and MMIO Space */
	pcireg = pci_read_config8(dev, PCI_COMMAND);
	pcireg |= PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
	pci_write_config8(dev, PCI_COMMAND, pcireg);

	cse.sec_bar = tempbar;
}
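
/*
 * Illustrative early-boot usage (a sketch, not taken from a particular
 * platform): SoC code may pass 0 to fall back to HECI1_BASE_ADDRESS, or a
 * platform-chosen temporary address; the value below is only an example.
 *
 *	heci_init(0);			// use the default pre-RAM BAR
 *	heci_init(0xfeda2000);		// or a SoC-specific temporary BAR
 */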

/* Get HECI BAR 0 from PCI configuration space */
static uint32_t get_cse_bar(void)
{
	uintptr_t bar;

	bar = pci_read_config32(PCH_DEV_CSE, PCI_BASE_ADDRESS_0);
	assert(bar != 0);
	/*
	 * Bits 31-12 hold the base address as per the EDS; bits 11-0 are
	 * memory attribute bits and are masked off.
	 */
	return bar & ~PCI_BASE_ADDRESS_MEM_ATTR_MASK;
}

static uint32_t read_bar(uint32_t offset)
{
	/* Load and cache BAR */
	if (!cse.sec_bar)
		cse.sec_bar = get_cse_bar();
	return read32((void *)(cse.sec_bar + offset));
}

static void write_bar(uint32_t offset, uint32_t val)
{
	/* Load and cache BAR */
	if (!cse.sec_bar)
		cse.sec_bar = get_cse_bar();
	return write32((void *)(cse.sec_bar + offset), val);
}

static uint32_t read_cse_csr(void)
{
	return read_bar(MMIO_CSE_CSR);
}

static uint32_t read_host_csr(void)
{
	return read_bar(MMIO_HOST_CSR);
}

static void write_host_csr(uint32_t data)
{
	write_bar(MMIO_HOST_CSR, data);
}

static size_t filled_slots(uint32_t data)
{
	uint8_t wp, rp;
	rp = data >> CSR_RP_START;
	wp = data >> CSR_WP_START;
	/* The 8-bit pointers wrap naturally, so the difference stays valid */
	return (uint8_t) (wp - rp);
}

static size_t cse_filled_slots(void)
{
	return filled_slots(read_cse_csr());
}

static size_t host_empty_slots(void)
{
	uint32_t csr;
	csr = read_host_csr();

	return ((csr & CSR_CBD) >> CSR_CBD_START) - filled_slots(csr);
}

static void clear_int(void)
{
	uint32_t csr;
	csr = read_host_csr();
	csr |= CSR_IS;
	write_host_csr(csr);
}

static uint32_t read_slot(void)
{
	return read_bar(MMIO_CSE_CB_RW);
}

static void write_slot(uint32_t val)
{
	write_bar(MMIO_CSE_CB_WW, val);
}

static int wait_write_slots(size_t cnt)
{
	struct stopwatch sw;

	stopwatch_init_msecs_expire(&sw, HECI_SEND_TIMEOUT);
	while (host_empty_slots() < cnt) {
		udelay(HECI_DELAY);
		if (stopwatch_expired(&sw)) {
			printk(BIOS_ERR, "HECI: timeout, buffer not drained\n");
			return 0;
		}
	}
	return 1;
}

static int wait_read_slots(size_t cnt)
{
	struct stopwatch sw;

	stopwatch_init_msecs_expire(&sw, HECI_READ_TIMEOUT);
	while (cse_filled_slots() < cnt) {
		udelay(HECI_DELAY);
		if (stopwatch_expired(&sw)) {
			printk(BIOS_ERR, "HECI: timed out reading answer!\n");
			return 0;
		}
	}
	return 1;
}

/* get number of full 4-byte slots */
static size_t bytes_to_slots(size_t bytes)
{
	return ALIGN_UP(bytes, SLOT_SIZE) / SLOT_SIZE;
}

static int cse_ready(void)
{
	uint32_t csr;
	csr = read_cse_csr();
	return csr & CSR_READY;
}

static bool cse_check_hfs1_com(int mode)
{
	union me_hfsts1 hfs1;
	hfs1.data = me_read_config32(PCI_ME_HFSTS1);
	return hfs1.fields.operation_mode == mode;
}

bool cse_is_hfs1_cws_normal(void)
{
	union me_hfsts1 hfs1;
	hfs1.data = me_read_config32(PCI_ME_HFSTS1);
	if (hfs1.fields.working_state == ME_HFS1_CWS_NORMAL)
		return true;
	return false;
}

bool cse_is_hfs1_com_normal(void)
{
	return cse_check_hfs1_com(ME_HFS1_COM_NORMAL);
}

bool cse_is_hfs1_com_secover_mei_msg(void)
{
	return cse_check_hfs1_com(ME_HFS1_COM_SECOVER_MEI_MSG);
}

bool cse_is_hfs1_com_soft_temp_disable(void)
{
	return cse_check_hfs1_com(ME_HFS1_COM_SOFT_TEMP_DISABLE);
}

bool cse_is_hfs3_fw_sku_custom(void)
{
	union me_hfsts3 hfs3;
	hfs3.data = me_read_config32(PCI_ME_HFSTS3);
	return hfs3.fields.fw_sku == ME_HFS3_FW_SKU_CUSTOM;
}

/* Makes the host ready to communicate with CSE */
void cse_set_host_ready(void)
{
	uint32_t csr;
	csr = read_host_csr();
	csr &= ~CSR_RESET;
	csr |= (CSR_IG | CSR_READY);
	write_host_csr(csr);
}

/* Polls for ME mode ME_HFS1_COM_SECOVER_MEI_MSG for 15 seconds */
uint8_t cse_wait_sec_override_mode(void)
{
	struct stopwatch sw;
	stopwatch_init_msecs_expire(&sw, HECI_DELAY_READY);
	while (!cse_is_hfs1_com_secover_mei_msg()) {
		udelay(HECI_DELAY);
		if (stopwatch_expired(&sw)) {
			printk(BIOS_ERR, "HECI: Timed out waiting for SEC_OVERRIDE mode!\n");
			return 0;
		}
	}
	printk(BIOS_DEBUG, "HECI: CSE took %lu ms to enter security override mode\n",
			stopwatch_duration_msecs(&sw));
	return 1;
}

/*
 * Polls for CSE's 'Soft Temporary Disable' operation mode. The CSE enters
 * this operation mode when it boots from RO(BP1).
 */
uint8_t cse_wait_com_soft_temp_disable(void)
{
	struct stopwatch sw;
	stopwatch_init_msecs_expire(&sw, CSE_DELAY_BOOT_TO_RO);
	while (!cse_is_hfs1_com_soft_temp_disable()) {
		udelay(HECI_DELAY);
		if (stopwatch_expired(&sw)) {
			printk(BIOS_ERR, "HECI: Timed out waiting for CSE to boot from RO!\n");
			return 0;
		}
	}
	printk(BIOS_SPEW, "HECI: CSE took %lu ms to boot from RO\n",
			stopwatch_duration_msecs(&sw));
	return 1;
}

static int wait_heci_ready(void)
{
	struct stopwatch sw;

	stopwatch_init_msecs_expire(&sw, HECI_DELAY_READY);
	while (!cse_ready()) {
		udelay(HECI_DELAY);
		if (stopwatch_expired(&sw))
			return 0;
	}

	return 1;
}

static void host_gen_interrupt(void)
{
	uint32_t csr;
	csr = read_host_csr();
	csr |= CSR_IG;
	write_host_csr(csr);
}

static size_t hdr_get_length(uint32_t hdr)
{
	return (hdr & MEI_HDR_LENGTH) >> MEI_HDR_LENGTH_START;
}

static int
send_one_message(uint32_t hdr, const void *buff)
{
	size_t pend_len, pend_slots, remainder, i;
	uint32_t tmp;
	const uint32_t *p = buff;

	/* Get space for the header */
	if (!wait_write_slots(1))
		return 0;

	/* First, write header */
	write_slot(hdr);

	pend_len = hdr_get_length(hdr);
	pend_slots = bytes_to_slots(pend_len);

	if (!wait_write_slots(pend_slots))
		return 0;

	/* Write the body in whole slots */
	i = 0;
	while (i < ALIGN_DOWN(pend_len, SLOT_SIZE)) {
		write_slot(*p++);
		i += SLOT_SIZE;
	}

	remainder = pend_len % SLOT_SIZE;
	/* Pad to 4 bytes not touching caller's buffer */
	if (remainder) {
		memcpy(&tmp, p, remainder);
		write_slot(tmp);
	}

	host_gen_interrupt();

	/* Make sure nothing bad happened during transmission */
	if (!cse_ready())
		return 0;

	return pend_len;
}

int
heci_send(const void *msg, size_t len, uint8_t host_addr, uint8_t client_addr)
{
	uint8_t retry;
	uint32_t csr, hdr;
	size_t sent, remaining, cb_size, max_length;
	const uint8_t *p;

	if (!msg || !len)
		return 0;

	clear_int();

	for (retry = 0; retry < MAX_HECI_MESSAGE_RETRY_COUNT; retry++) {
		p = msg;

		if (!wait_heci_ready()) {
			printk(BIOS_ERR, "HECI: not ready\n");
			continue;
		}

		csr = read_host_csr();
		cb_size = ((csr & CSR_CBD) >> CSR_CBD_START) * SLOT_SIZE;
		/*
		 * Reserve one slot for the header. Limit the maximum message
		 * length to the 9 bits available in the header.
		 */
		max_length = MIN(cb_size, (1 << MEI_HDR_LENGTH_SIZE) - 1)
				- SLOT_SIZE;
		remaining = len;

		/*
		 * Fragment the message into smaller messages not exceeding
		 * the useful circular buffer length. Mark the last message
		 * complete.
		 */
		do {
			hdr = MIN(max_length, remaining)
				<< MEI_HDR_LENGTH_START;
			hdr |= client_addr << MEI_HDR_CSE_ADDR_START;
			hdr |= host_addr << MEI_HDR_HOST_ADDR_START;
			hdr |= (MIN(max_length, remaining) == remaining) ?
					MEI_HDR_IS_COMPLETE : 0;
			sent = send_one_message(hdr, p);
			p += sent;
			remaining -= sent;
		} while (remaining > 0 && sent != 0);

		if (!remaining)
			return 1;
	}
	return 0;
}

static size_t
recv_one_message(uint32_t *hdr, void *buff, size_t maxlen)
{
	uint32_t reg, *p = buff;
	size_t recv_slots, recv_len, remainder, i;

	/* first get the header */
	if (!wait_read_slots(1))
		return 0;

	*hdr = read_slot();
	recv_len = hdr_get_length(*hdr);

	if (!recv_len)
		printk(BIOS_WARNING, "HECI: message is zero-sized\n");

	recv_slots = bytes_to_slots(recv_len);

	i = 0;
	if (recv_len > maxlen) {
		printk(BIOS_ERR, "HECI: response is too big\n");
		return 0;
	}

	/* wait for the rest of messages to arrive */
	wait_read_slots(recv_slots);

	/* fetch whole slots first */
	while (i < ALIGN_DOWN(recv_len, SLOT_SIZE)) {
		*p++ = read_slot();
		i += SLOT_SIZE;
	}

	/*
	 * If ME is not ready, something went wrong and
	 * we received junk
	 */
	if (!cse_ready())
		return 0;

	remainder = recv_len % SLOT_SIZE;

	if (remainder) {
		reg = read_slot();
		memcpy(p, &reg, remainder);
	}

	return recv_len;
}

int heci_receive(void *buff, size_t *maxlen)
{
	uint8_t retry;
	size_t left, received;
	uint32_t hdr = 0;
	uint8_t *p;

	if (!buff || !maxlen || !*maxlen)
		return 0;

	clear_int();

	for (retry = 0; retry < MAX_HECI_MESSAGE_RETRY_COUNT; retry++) {
		p = buff;
		left = *maxlen;

		if (!wait_heci_ready()) {
			printk(BIOS_ERR, "HECI: not ready\n");
			continue;
		}

		/*
		 * Receive multiple packets until we meet one marked
		 * complete or we run out of space in caller-provided buffer.
		 */
		do {
			received = recv_one_message(&hdr, p, left);
			if (!received) {
				printk(BIOS_ERR, "HECI: Failed to receive!\n");
				return 0;
			}
			left -= received;
			p += received;
			/* If we read out everything, ping the CSE to send more */
			if (!(hdr & MEI_HDR_IS_COMPLETE) && !cse_filled_slots())
				host_gen_interrupt();
		} while (received && !(hdr & MEI_HDR_IS_COMPLETE) && left > 0);

		if ((hdr & MEI_HDR_IS_COMPLETE) && received) {
			*maxlen = p - (uint8_t *) buff;
			return 1;
		}
	}
	return 0;
}

int heci_send_receive(const void *snd_msg, size_t snd_sz, void *rcv_msg, size_t *rcv_sz)
{
	if (!heci_send(snd_msg, snd_sz, BIOS_HOST_ADDR, HECI_MKHI_ADDR)) {
		printk(BIOS_ERR, "HECI: send Failed\n");
		return 0;
	}

	if (rcv_msg != NULL) {
		if (!heci_receive(rcv_msg, rcv_sz)) {
			printk(BIOS_ERR, "HECI: receive Failed\n");
			return 0;
		}
	}
	return 1;
}
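
/*
 * Illustrative caller pattern (a sketch, not part of this driver): an MKHI
 * request is a packed struct that begins with struct mkhi_hdr, and the reply
 * size is passed by reference so the caller learns how much was written.
 * The group/command values below are placeholders, not real MKHI IDs:
 *
 *	struct example_req {
 *		struct mkhi_hdr hdr;
 *		uint32_t arg;
 *	} __packed req = {
 *		.hdr = { .group_id = 0x0, .command = 0x0 },	// hypothetical IDs
 *		.arg = 0,
 *	};
 *	struct example_resp {
 *		struct mkhi_hdr hdr;
 *		uint32_t value;
 *	} __packed resp;
 *	size_t resp_size = sizeof(resp);
 *
 *	if (heci_send_receive(&req, sizeof(req), &resp, &resp_size))
 *		;	// resp.hdr.result == 0 indicates success, as checked below
 */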

/*
 * Attempt to reset the device. This is useful when host and ME are out
 * of sync during transmission or ME didn't understand the message.
 */
int heci_reset(void)
{
	uint32_t csr;

	/* Send reset request */
	csr = read_host_csr();
	csr |= (CSR_RESET | CSR_IG);
	write_host_csr(csr);

	if (wait_heci_ready()) {
		/* Device is back on its imaginary feet, clear reset */
		cse_set_host_ready();
		return 1;
	}

	printk(BIOS_CRIT, "HECI: reset failed\n");

	return 0;
}

bool is_cse_enabled(void)
{
	const struct device *cse_dev = pcidev_path_on_root(PCH_DEVFN_CSE);

	if (!cse_dev || !cse_dev->enabled) {
		printk(BIOS_WARNING, "HECI: No CSE device\n");
		return false;
	}

	if (pci_read_config16(PCH_DEV_CSE, PCI_VENDOR_ID) == 0xFFFF) {
		printk(BIOS_WARNING, "HECI: CSE device is hidden\n");
		return false;
	}

	return true;
}

uint32_t me_read_config32(int offset)
{
	return pci_read_config32(PCH_DEV_CSE, offset);
}

static bool cse_is_global_reset_allowed(void)
{
	/*
	 * Allow sending GLOBAL_RESET command only if:
	 * - CSE's current working state is Normal and current operation mode is Normal.
	 * - (or) CSE's current working state is Normal and current operation mode is
	 *   Soft Temp Disable or Security Override Mode, provided CSE's Firmware SKU
	 *   is Custom.
	 */
	if (!cse_is_hfs1_cws_normal())
		return false;

	if (cse_is_hfs1_com_normal())
		return true;

	if (cse_is_hfs3_fw_sku_custom()) {
		if (cse_is_hfs1_com_soft_temp_disable() || cse_is_hfs1_com_secover_mei_msg())
			return true;
	}
	return false;
}

/*
 * Sends GLOBAL_RESET_REQ cmd to CSE. The reset type can be
 * GLOBAL_RESET/CSE_RESET_ONLY.
 */
int cse_request_global_reset(enum rst_req_type rst_type)
{
	int status;
	struct mkhi_hdr reply;
	struct reset_message {
		struct mkhi_hdr hdr;
		uint8_t req_origin;
		uint8_t reset_type;
	} __packed;
	struct reset_message msg = {
		.hdr = {
			.group_id = MKHI_GROUP_ID_CBM,
			.command = MKHI_CBM_GLOBAL_RESET_REQ,
		},
		.req_origin = GR_ORIGIN_BIOS_POST,
		.reset_type = rst_type
	};
	size_t reply_size;

	printk(BIOS_DEBUG, "HECI: Global Reset(Type:%d) Command\n", rst_type);

	if (!(rst_type == GLOBAL_RESET || rst_type == CSE_RESET_ONLY)) {
		printk(BIOS_ERR, "HECI: Unsupported reset type is requested\n");
		return 0;
	}

	if (!cse_is_global_reset_allowed()) {
		printk(BIOS_ERR, "HECI: CSE does not meet required prerequisites\n");
		return 0;
	}

	heci_reset();

	reply_size = sizeof(reply);
	memset(&reply, 0, reply_size);

	if (rst_type == CSE_RESET_ONLY)
		status = heci_send(&msg, sizeof(msg), BIOS_HOST_ADDR, HECI_MKHI_ADDR);
	else
		status = heci_send_receive(&msg, sizeof(msg), &reply, &reply_size);

	printk(BIOS_DEBUG, "HECI: Global Reset %s!\n", status ? "success" : "failure");
	return status;
}
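
/*
 * Illustrative usage (a sketch, not taken from a particular board):
 * GLOBAL_RESET requests a platform-wide reset together with the CSE, while
 * CSE_RESET_ONLY resets only the CSE; for the latter the function above does
 * a plain heci_send(), presumably because no MKHI reply is expected.
 *
 *	if (!cse_request_global_reset(GLOBAL_RESET))
 *		printk(BIOS_ERR, "Global reset request failed\n");
 */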

static bool cse_is_hmrfpo_enable_allowed(void)
{
	/*
	 * Allow sending HMRFPO ENABLE command only if:
	 * - CSE's current working state is Normal and current operation mode is Normal.
	 * - (or) CSE's current working state is Normal and current operation mode is
	 *   Soft Temp Disable, provided CSE's Firmware SKU is Custom.
	 */
	if (!cse_is_hfs1_cws_normal())
		return false;

	if (cse_is_hfs1_com_normal())
		return true;

	if (cse_is_hfs3_fw_sku_custom() && cse_is_hfs1_com_soft_temp_disable())
		return true;

	return false;
}

/* Sends HMRFPO Enable command to CSE */
int cse_hmrfpo_enable(void)
{
	struct hmrfpo_enable_msg {
		struct mkhi_hdr hdr;
		uint32_t nonce[2];
	} __packed;

	/* HMRFPO Enable message */
	struct hmrfpo_enable_msg msg = {
		.hdr = {
			.group_id = MKHI_GROUP_ID_HMRFPO,
			.command = MKHI_HMRFPO_ENABLE,
		},
		.nonce = {0},
	};

	/* HMRFPO Enable response */
	struct hmrfpo_enable_resp {
		struct mkhi_hdr hdr;
		/* Base addr for factory data area, not relevant for client SKUs */
		uint32_t fct_base;
		/* Length of factory data area, not relevant for client SKUs */
		uint32_t fct_limit;
		uint8_t status;
		uint8_t reserved[3];
	} __packed;

	struct hmrfpo_enable_resp resp;
	size_t resp_size = sizeof(struct hmrfpo_enable_resp);

	printk(BIOS_DEBUG, "HECI: Send HMRFPO Enable Command\n");

	if (!cse_is_hmrfpo_enable_allowed()) {
		printk(BIOS_ERR, "HECI: CSE does not meet required prerequisites\n");
		return 0;
	}

	if (!heci_send_receive(&msg, sizeof(struct hmrfpo_enable_msg),
				&resp, &resp_size))
		return 0;

	if (resp.hdr.result) {
		printk(BIOS_ERR, "HECI: Resp Failed:%d\n", resp.hdr.result);
		return 0;
	}

	if (resp.status) {
		printk(BIOS_ERR, "HECI: HMRFPO_Enable Failed (resp status: %d)\n", resp.status);
		return 0;
	}

	return 1;
}

/*
 * Sends HMRFPO Get Status command to CSE to get the HMRFPO status.
 * The status can be DISABLED/LOCKED/ENABLED.
 */
int cse_hmrfpo_get_status(void)
{
	struct hmrfpo_get_status_msg {
		struct mkhi_hdr hdr;
	} __packed;

	struct hmrfpo_get_status_resp {
		struct mkhi_hdr hdr;
		uint8_t status;
		uint8_t reserved[3];
	} __packed;

	struct hmrfpo_get_status_msg msg = {
		.hdr = {
			.group_id = MKHI_GROUP_ID_HMRFPO,
			.command = MKHI_HMRFPO_GET_STATUS,
		},
	};
	struct hmrfpo_get_status_resp resp;
	size_t resp_size = sizeof(struct hmrfpo_get_status_resp);

	printk(BIOS_INFO, "HECI: Sending Get HMRFPO Status Command\n");

	if (!cse_is_hfs1_cws_normal()) {
		printk(BIOS_ERR, "HECI: CSE's current working state is not Normal\n");
		return -1;
	}

	if (!heci_send_receive(&msg, sizeof(struct hmrfpo_get_status_msg),
				&resp, &resp_size)) {
		printk(BIOS_ERR, "HECI: HMRFPO send/receive fail\n");
		return -1;
	}

	if (resp.hdr.result) {
		printk(BIOS_ERR, "HECI: HMRFPO Resp Failed:%d\n",
				resp.hdr.result);
		return -1;
	}

	return resp.status;
}

void print_me_fw_version(void *unused)
{
	struct version {
		uint16_t minor;
		uint16_t major;
		uint16_t build;
		uint16_t hotfix;
	} __packed;

	struct fw_ver_resp {
		struct mkhi_hdr hdr;
		struct version code;
		struct version rec;
		struct version fitc;
	} __packed;

	const struct mkhi_hdr fw_ver_msg = {
		.group_id = MKHI_GROUP_ID_GEN,
		.command = MKHI_GEN_GET_FW_VERSION,
	};

	struct fw_ver_resp resp;
	size_t resp_size = sizeof(resp);

	/* Ignore if UART debugging is disabled */
	if (!CONFIG(CONSOLE_SERIAL))
		return;

	/* Ignore if CSE is disabled */
	if (!is_cse_enabled())
		return;

	/*
	 * Ignore if ME Firmware SKU type is custom since
	 * print_boot_partition_info() logs RO(BP1) and RW(BP2) versions.
	 */
	if (cse_is_hfs3_fw_sku_custom())
		return;

	/*
	 * Prerequisites:
	 * 1) HFSTS1 Current Working State is Normal
	 * 2) HFSTS1 Current Operation Mode is Normal
	 * 3) It's after the DRAM INIT DONE message (taken care of by calling it
	 *    during ramstage)
	 */
	if (!cse_is_hfs1_cws_normal() || !cse_is_hfs1_com_normal())
		goto fail;

	heci_reset();

	if (!heci_send_receive(&fw_ver_msg, sizeof(fw_ver_msg), &resp, &resp_size))
		goto fail;

	if (resp.hdr.result)
		goto fail;

	printk(BIOS_DEBUG, "ME: Version: %d.%d.%d.%d\n", resp.code.major,
			resp.code.minor, resp.code.hotfix, resp.code.build);
	return;

fail:
	printk(BIOS_DEBUG, "ME: Version: Unavailable\n");
}

#if ENV_RAMSTAGE

static void update_sec_bar(struct device *dev)
{
	cse.sec_bar = find_resource(dev, PCI_BASE_ADDRESS_0)->base;
}

static void cse_set_resources(struct device *dev)
{
	if (dev->path.pci.devfn == PCH_DEVFN_CSE)
		update_sec_bar(dev);

	pci_dev_set_resources(dev);
}

static struct device_operations cse_ops = {
	.set_resources		= cse_set_resources,
	.read_resources		= pci_dev_read_resources,
	.enable_resources	= pci_dev_enable_resources,
	.init			= pci_dev_init,
	.ops_pci		= &pci_dev_ops_pci,
};

static const unsigned short pci_device_ids[] = {
	PCI_DEVICE_ID_INTEL_APL_CSE0,
	PCI_DEVICE_ID_INTEL_GLK_CSE0,
	PCI_DEVICE_ID_INTEL_CNL_CSE0,
	PCI_DEVICE_ID_INTEL_SKL_CSE0,
	PCI_DEVICE_ID_INTEL_LWB_CSE0,
	PCI_DEVICE_ID_INTEL_LWB_CSE0_SUPER,
	PCI_DEVICE_ID_INTEL_CNP_H_CSE0,
	PCI_DEVICE_ID_INTEL_ICL_CSE0,
	PCI_DEVICE_ID_INTEL_CMP_CSE0,
	PCI_DEVICE_ID_INTEL_CMP_H_CSE0,
	PCI_DEVICE_ID_INTEL_TGL_CSE0,
	PCI_DEVICE_ID_INTEL_MCC_CSE0,
	PCI_DEVICE_ID_INTEL_MCC_CSE1,
	PCI_DEVICE_ID_INTEL_MCC_CSE2,
	PCI_DEVICE_ID_INTEL_MCC_CSE3,
	PCI_DEVICE_ID_INTEL_JSP_CSE0,
	PCI_DEVICE_ID_INTEL_JSP_CSE1,
	PCI_DEVICE_ID_INTEL_JSP_CSE2,
	PCI_DEVICE_ID_INTEL_JSP_CSE3,
	0,
};

static const struct pci_driver cse_driver __pci_driver = {
	.ops		= &cse_ops,
	.vendor		= PCI_VENDOR_ID_INTEL,
	/* SoC/chipset needs to provide PCI device ID */
	.devices	= pci_device_ids
};

#endif