blob: 5877d537f8f1b37b9b8671367781ad86291e1a1f [file] [log] [blame]
Andrey Petrov04a72c42017-03-01 15:51:57 -08001/*
2 * This file is part of the coreboot project.
3 *
praveen hodagatta praneshe26c4a42018-09-20 03:49:45 +08004 * Copyright 2017-2018 Intel Inc.
Andrey Petrov04a72c42017-03-01 15:51:57 -08005 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
Subrata Banik05e06cd2017-11-09 15:04:09 +053016#include <assert.h>
Andrey Petrov04a72c42017-03-01 15:51:57 -080017#include <commonlib/helpers.h>
18#include <console/console.h>
Kyösti Mälkki13f66502019-03-03 08:01:05 +020019#include <device/mmio.h>
Andrey Petrov04a72c42017-03-01 15:51:57 -080020#include <delay.h>
21#include <device/pci.h>
22#include <device/pci_ids.h>
23#include <device/pci_ops.h>
24#include <intelblocks/cse.h>
Subrata Banik05e06cd2017-11-09 15:04:09 +053025#include <soc/iomap.h>
Andrey Petrov04a72c42017-03-01 15:51:57 -080026#include <soc/pci_devs.h>
Sridhar Siricilla8e465452019-09-23 20:59:38 +053027#include <soc/me.h>
Andrey Petrov04a72c42017-03-01 15:51:57 -080028#include <string.h>
29#include <timer.h>
30
Subrata Banik5c08c732017-11-13 14:54:37 +053031#define MAX_HECI_MESSAGE_RETRY_COUNT 5
32
Andrey Petrov04a72c42017-03-01 15:51:57 -080033/* Wait up to 15 sec for HECI to get ready */
34#define HECI_DELAY_READY (15 * 1000)
Jonathan Neuschäfer5268b762018-02-12 12:24:25 +010035/* Wait up to 100 usec between circular buffer polls */
Andrey Petrov04a72c42017-03-01 15:51:57 -080036#define HECI_DELAY 100
37/* Wait up to 5 sec for CSE to chew something we sent */
38#define HECI_SEND_TIMEOUT (5 * 1000)
39/* Wait up to 5 sec for CSE to blurp a reply */
40#define HECI_READ_TIMEOUT (5 * 1000)
41
42#define SLOT_SIZE sizeof(uint32_t)
43
44#define MMIO_CSE_CB_WW 0x00
45#define MMIO_HOST_CSR 0x04
46#define MMIO_CSE_CB_RW 0x08
47#define MMIO_CSE_CSR 0x0c
48
49#define CSR_IE (1 << 0)
50#define CSR_IS (1 << 1)
51#define CSR_IG (1 << 2)
52#define CSR_READY (1 << 3)
53#define CSR_RESET (1 << 4)
54#define CSR_RP_START 8
55#define CSR_RP (((1 << 8) - 1) << CSR_RP_START)
56#define CSR_WP_START 16
57#define CSR_WP (((1 << 8) - 1) << CSR_WP_START)
58#define CSR_CBD_START 24
59#define CSR_CBD (((1 << 8) - 1) << CSR_CBD_START)
60
61#define MEI_HDR_IS_COMPLETE (1 << 31)
62#define MEI_HDR_LENGTH_START 16
63#define MEI_HDR_LENGTH_SIZE 9
64#define MEI_HDR_LENGTH (((1 << MEI_HDR_LENGTH_SIZE) - 1) \
65 << MEI_HDR_LENGTH_START)
66#define MEI_HDR_HOST_ADDR_START 8
67#define MEI_HDR_HOST_ADDR (((1 << 8) - 1) << MEI_HDR_HOST_ADDR_START)
68#define MEI_HDR_CSE_ADDR_START 0
69#define MEI_HDR_CSE_ADDR (((1 << 8) - 1) << MEI_HDR_CSE_ADDR_START)
70
Sridhar Siricilla09ea3712019-11-12 23:35:50 +053071/* Wait up to 5 seconds for CSE to boot from RO(BP1) */
72#define CSE_DELAY_BOOT_TO_RO (5 * 1000)
73
Arthur Heymans3d6ccd02019-05-27 17:25:23 +020074static struct cse_device {
Andrey Petrov04a72c42017-03-01 15:51:57 -080075 uintptr_t sec_bar;
Patrick Georgic9b13592019-11-29 11:47:47 +010076} cse;
Andrey Petrov04a72c42017-03-01 15:51:57 -080077
78/*
79 * Initialize the device with provided temporary BAR. If BAR is 0 use a
80 * default. This is intended for pre-mem usage only where BARs haven't been
81 * assigned yet and devices are not enabled.
82 */
83void heci_init(uintptr_t tempbar)
84{
Elyes HAOUAS68c851b2018-06-12 22:06:09 +020085#if defined(__SIMPLE_DEVICE__)
86 pci_devfn_t dev = PCH_DEV_CSE;
87#else
88 struct device *dev = PCH_DEV_CSE;
89#endif
Andrey Petrov04a72c42017-03-01 15:51:57 -080090 u8 pcireg;
91
92 /* Assume it is already initialized, nothing else to do */
Patrick Georgic9b13592019-11-29 11:47:47 +010093 if (cse.sec_bar)
Andrey Petrov04a72c42017-03-01 15:51:57 -080094 return;
95
96 /* Use default pre-ram bar */
97 if (!tempbar)
98 tempbar = HECI1_BASE_ADDRESS;
99
100 /* Assign Resources to HECI1 */
101 /* Clear BIT 1-2 of Command Register */
102 pcireg = pci_read_config8(dev, PCI_COMMAND);
103 pcireg &= ~(PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY);
104 pci_write_config8(dev, PCI_COMMAND, pcireg);
105
106 /* Program Temporary BAR for HECI1 */
107 pci_write_config32(dev, PCI_BASE_ADDRESS_0, tempbar);
108 pci_write_config32(dev, PCI_BASE_ADDRESS_1, 0x0);
109
110 /* Enable Bus Master and MMIO Space */
111 pcireg = pci_read_config8(dev, PCI_COMMAND);
112 pcireg |= PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
113 pci_write_config8(dev, PCI_COMMAND, pcireg);
114
Patrick Georgic9b13592019-11-29 11:47:47 +0100115 cse.sec_bar = tempbar;
Andrey Petrov04a72c42017-03-01 15:51:57 -0800116}
117
Subrata Banik05e06cd2017-11-09 15:04:09 +0530118/* Get HECI BAR 0 from PCI configuration space */
119static uint32_t get_cse_bar(void)
120{
121 uintptr_t bar;
122
123 bar = pci_read_config32(PCH_DEV_CSE, PCI_BASE_ADDRESS_0);
124 assert(bar != 0);
125 /*
126 * Bits 31-12 are the base address as per EDS for SPI,
127 * Don't care about 0-11 bit
128 */
129 return bar & ~PCI_BASE_ADDRESS_MEM_ATTR_MASK;
130}
131
Andrey Petrov04a72c42017-03-01 15:51:57 -0800132static uint32_t read_bar(uint32_t offset)
133{
Patrick Georgi08c8cf92019-12-02 11:43:20 +0100134 /* Load and cache BAR */
Patrick Georgic9b13592019-11-29 11:47:47 +0100135 if (!cse.sec_bar)
136 cse.sec_bar = get_cse_bar();
137 return read32((void *)(cse.sec_bar + offset));
Andrey Petrov04a72c42017-03-01 15:51:57 -0800138}
139
140static void write_bar(uint32_t offset, uint32_t val)
141{
Patrick Georgi08c8cf92019-12-02 11:43:20 +0100142 /* Load and cache BAR */
Patrick Georgic9b13592019-11-29 11:47:47 +0100143 if (!cse.sec_bar)
144 cse.sec_bar = get_cse_bar();
145 return write32((void *)(cse.sec_bar + offset), val);
Andrey Petrov04a72c42017-03-01 15:51:57 -0800146}
147
/* Read the CSE-side control/status register (CSE CSR) */
static uint32_t read_cse_csr(void)
{
	return read_bar(MMIO_CSE_CSR);
}
152
/* Read the host-side control/status register (host CSR) */
static uint32_t read_host_csr(void)
{
	return read_bar(MMIO_HOST_CSR);
}
157
/* Write the host-side control/status register (host CSR) */
static void write_host_csr(uint32_t data)
{
	write_bar(MMIO_HOST_CSR, data);
}
162
163static size_t filled_slots(uint32_t data)
164{
165 uint8_t wp, rp;
166 rp = data >> CSR_RP_START;
167 wp = data >> CSR_WP_START;
168 return (uint8_t) (wp - rp);
169}
170
/* Number of slots the CSE has written and the host has not yet read */
static size_t cse_filled_slots(void)
{
	return filled_slots(read_cse_csr());
}
175
176static size_t host_empty_slots(void)
177{
178 uint32_t csr;
179 csr = read_host_csr();
180
181 return ((csr & CSR_CBD) >> CSR_CBD_START) - filled_slots(csr);
182}
183
184static void clear_int(void)
185{
186 uint32_t csr;
187 csr = read_host_csr();
188 csr |= CSR_IS;
189 write_host_csr(csr);
190}
191
/* Pop one 4-byte slot from the CSE-to-host circular buffer */
static uint32_t read_slot(void)
{
	return read_bar(MMIO_CSE_CB_RW);
}
196
/* Push one 4-byte slot into the host-to-CSE circular buffer */
static void write_slot(uint32_t val)
{
	write_bar(MMIO_CSE_CB_WW, val);
}
201
202static int wait_write_slots(size_t cnt)
203{
204 struct stopwatch sw;
205
206 stopwatch_init_msecs_expire(&sw, HECI_SEND_TIMEOUT);
207 while (host_empty_slots() < cnt) {
208 udelay(HECI_DELAY);
209 if (stopwatch_expired(&sw)) {
210 printk(BIOS_ERR, "HECI: timeout, buffer not drained\n");
211 return 0;
212 }
213 }
214 return 1;
215}
216
217static int wait_read_slots(size_t cnt)
218{
219 struct stopwatch sw;
220
221 stopwatch_init_msecs_expire(&sw, HECI_READ_TIMEOUT);
222 while (cse_filled_slots() < cnt) {
223 udelay(HECI_DELAY);
224 if (stopwatch_expired(&sw)) {
225 printk(BIOS_ERR, "HECI: timed out reading answer!\n");
226 return 0;
227 }
228 }
229 return 1;
230}
231
232/* get number of full 4-byte slots */
233static size_t bytes_to_slots(size_t bytes)
234{
235 return ALIGN_UP(bytes, SLOT_SIZE) / SLOT_SIZE;
236}
237
238static int cse_ready(void)
239{
240 uint32_t csr;
241 csr = read_cse_csr();
242 return csr & CSR_READY;
243}
244
Sridhar Siricilla8e465452019-09-23 20:59:38 +0530245static bool cse_check_hfs1_com(int mode)
Sridhar Siricillab9d075b2019-08-31 11:38:33 +0530246{
247 union me_hfsts1 hfs1;
248 hfs1.data = me_read_config32(PCI_ME_HFSTS1);
Sridhar Siricilla8e465452019-09-23 20:59:38 +0530249 return hfs1.fields.operation_mode == mode;
250}
251
252bool cse_is_hfs1_cws_normal(void)
253{
254 union me_hfsts1 hfs1;
255 hfs1.data = me_read_config32(PCI_ME_HFSTS1);
256 if (hfs1.fields.working_state == ME_HFS1_CWS_NORMAL)
257 return true;
258 return false;
259}
260
/* True when the CSE operation mode is Normal */
bool cse_is_hfs1_com_normal(void)
{
	return cse_check_hfs1_com(ME_HFS1_COM_NORMAL);
}
265
/* True when the CSE operation mode is Security Override (entered via MEI message) */
bool cse_is_hfs1_com_secover_mei_msg(void)
{
	return cse_check_hfs1_com(ME_HFS1_COM_SECOVER_MEI_MSG);
}
270
/* True when the CSE operation mode is Soft Temporary Disable */
bool cse_is_hfs1_com_soft_temp_disable(void)
{
	return cse_check_hfs1_com(ME_HFS1_COM_SOFT_TEMP_DISABLE);
}
275
Sridhar Siricilla3465d272020-02-06 15:31:04 +0530276bool cse_is_hfs3_fw_sku_custom(void)
277{
278 union me_hfsts3 hfs3;
279 hfs3.data = me_read_config32(PCI_ME_HFSTS3);
280 return hfs3.fields.fw_sku == ME_HFS3_FW_SKU_CUSTOM;
281}
282
Sridhar Siricillab9d075b2019-08-31 11:38:33 +0530283/* Makes the host ready to communicate with CSE */
Sridhar Siricillaff072e62019-11-27 14:55:16 +0530284void cse_set_host_ready(void)
Sridhar Siricillab9d075b2019-08-31 11:38:33 +0530285{
286 uint32_t csr;
287 csr = read_host_csr();
288 csr &= ~CSR_RESET;
289 csr |= (CSR_IG | CSR_READY);
290 write_host_csr(csr);
291}
292
Sridhar Siricillaff072e62019-11-27 14:55:16 +0530293/* Polls for ME mode ME_HFS1_COM_SECOVER_MEI_MSG for 15 seconds */
294uint8_t cse_wait_sec_override_mode(void)
Sridhar Siricillab9d075b2019-08-31 11:38:33 +0530295{
296 struct stopwatch sw;
297 stopwatch_init_msecs_expire(&sw, HECI_DELAY_READY);
Sridhar Siricilla8e465452019-09-23 20:59:38 +0530298 while (!cse_is_hfs1_com_secover_mei_msg()) {
Sridhar Siricillab9d075b2019-08-31 11:38:33 +0530299 udelay(HECI_DELAY);
Sridhar Siricillaff072e62019-11-27 14:55:16 +0530300 if (stopwatch_expired(&sw)) {
301 printk(BIOS_ERR, "HECI: Timed out waiting for SEC_OVERRIDE mode!\n");
Sridhar Siricillab9d075b2019-08-31 11:38:33 +0530302 return 0;
Sridhar Siricillaff072e62019-11-27 14:55:16 +0530303 }
Sridhar Siricillab9d075b2019-08-31 11:38:33 +0530304 }
Sridhar Siricillaff072e62019-11-27 14:55:16 +0530305 printk(BIOS_DEBUG, "HECI: CSE took %lu ms to enter security override mode\n",
306 stopwatch_duration_msecs(&sw));
Sridhar Siricillab9d075b2019-08-31 11:38:33 +0530307 return 1;
308}
309
Sridhar Siricilla09ea3712019-11-12 23:35:50 +0530310/*
311 * Polls for CSE's current operation mode 'Soft Temporary Disable'.
312 * The CSE enters the current operation mode when it boots from RO(BP1).
313 */
314uint8_t cse_wait_com_soft_temp_disable(void)
315{
316 struct stopwatch sw;
317 stopwatch_init_msecs_expire(&sw, CSE_DELAY_BOOT_TO_RO);
318 while (!cse_is_hfs1_com_soft_temp_disable()) {
319 udelay(HECI_DELAY);
320 if (stopwatch_expired(&sw)) {
321 printk(BIOS_ERR, "HECI: Timed out waiting for CSE to boot from RO!\n");
322 return 0;
323 }
324 }
325 printk(BIOS_SPEW, "HECI: CSE took %lu ms to boot from RO\n",
326 stopwatch_duration_msecs(&sw));
327 return 1;
328}
329
Andrey Petrov04a72c42017-03-01 15:51:57 -0800330static int wait_heci_ready(void)
331{
332 struct stopwatch sw;
333
334 stopwatch_init_msecs_expire(&sw, HECI_DELAY_READY);
335 while (!cse_ready()) {
336 udelay(HECI_DELAY);
337 if (stopwatch_expired(&sw))
338 return 0;
339 }
340
341 return 1;
342}
343
344static void host_gen_interrupt(void)
345{
346 uint32_t csr;
347 csr = read_host_csr();
348 csr |= CSR_IG;
349 write_host_csr(csr);
350}
351
/* Extract the 9-bit payload length field from a MEI message header */
static size_t hdr_get_length(uint32_t hdr)
{
	return (hdr & MEI_HDR_LENGTH) >> MEI_HDR_LENGTH_START;
}
356
/*
 * Transmit a single MEI fragment: the 32-bit header 'hdr' followed by the
 * payload in 'buff', whose length is encoded in the header's length field.
 * Returns the number of payload bytes written, or 0 on timeout/CSE failure.
 * Note: return type is int, so callers treat 0 as failure.
 */
static int
send_one_message(uint32_t hdr, const void *buff)
{
	size_t pend_len, pend_slots, remainder, i;
	uint32_t tmp;
	const uint32_t *p = buff;

	/* Get space for the header */
	if (!wait_write_slots(1))
		return 0;

	/* First, write header */
	write_slot(hdr);

	pend_len = hdr_get_length(hdr);
	pend_slots = bytes_to_slots(pend_len);

	/* Wait until the whole payload fits before streaming it out */
	if (!wait_write_slots(pend_slots))
		return 0;

	/* Write the body in whole slots */
	i = 0;
	while (i < ALIGN_DOWN(pend_len, SLOT_SIZE)) {
		write_slot(*p++);
		i += SLOT_SIZE;
	}

	remainder = pend_len % SLOT_SIZE;
	/* Pad to 4 bytes not touching caller's buffer */
	if (remainder) {
		memcpy(&tmp, p, remainder);
		write_slot(tmp);
	}

	/* Tell the CSE the fragment is in the buffer */
	host_gen_interrupt();

	/* Make sure nothing bad happened during transmission */
	if (!cse_ready())
		return 0;

	return pend_len;
}
399
/*
 * Send 'len' bytes of 'msg' to the CSE client at 'client_addr' from host
 * address 'host_addr', fragmenting into MEI messages that fit both the
 * circular buffer and the header's 9-bit length field. The final fragment
 * is marked MEI_HDR_IS_COMPLETE. The whole send is retried up to
 * MAX_HECI_MESSAGE_RETRY_COUNT times. Returns 1 on success, 0 on failure.
 */
int
heci_send(const void *msg, size_t len, uint8_t host_addr, uint8_t client_addr)
{
	uint8_t retry;
	uint32_t csr, hdr;
	size_t sent, remaining, cb_size, max_length;
	const uint8_t *p;

	if (!msg || !len)
		return 0;

	clear_int();

	for (retry = 0; retry < MAX_HECI_MESSAGE_RETRY_COUNT; retry++) {
		/* Each retry restarts the transfer from the beginning */
		p = msg;

		if (!wait_heci_ready()) {
			printk(BIOS_ERR, "HECI: not ready\n");
			continue;
		}

		/* Circular buffer depth (in bytes) from the host CSR */
		csr = read_host_csr();
		cb_size = ((csr & CSR_CBD) >> CSR_CBD_START) * SLOT_SIZE;
		/*
		 * Reserve one slot for the header. Limit max message
		 * length by 9 bits that are available in the header.
		 */
		max_length = MIN(cb_size, (1 << MEI_HDR_LENGTH_SIZE) - 1)
				- SLOT_SIZE;
		remaining = len;

		/*
		 * Fragment the message into smaller messages not exceeding
		 * useful circular buffer length. Mark last message complete.
		 */
		do {
			hdr = MIN(max_length, remaining)
				<< MEI_HDR_LENGTH_START;
			hdr |= client_addr << MEI_HDR_CSE_ADDR_START;
			hdr |= host_addr << MEI_HDR_HOST_ADDR_START;
			hdr |= (MIN(max_length, remaining) == remaining) ?
					MEI_HDR_IS_COMPLETE : 0;
			sent = send_one_message(hdr, p);
			p += sent;
			remaining -= sent;
		} while (remaining > 0 && sent != 0);

		/* sent == 0 means a fragment failed; fall through and retry */
		if (!remaining)
			return 1;
	}
	return 0;
}
452
/*
 * Receive a single MEI fragment into 'buff' (capacity 'maxlen' bytes).
 * The raw 32-bit message header is stored through 'hdr' so the caller can
 * inspect the MEI_HDR_IS_COMPLETE flag. Returns the payload length in
 * bytes, or 0 on timeout, oversized response, or CSE failure.
 */
static size_t
recv_one_message(uint32_t *hdr, void *buff, size_t maxlen)
{
	uint32_t reg, *p = buff;
	size_t recv_slots, recv_len, remainder, i;

	/* first get the header */
	if (!wait_read_slots(1))
		return 0;

	*hdr = read_slot();
	recv_len = hdr_get_length(*hdr);

	if (!recv_len)
		printk(BIOS_WARNING, "HECI: message is zero-sized\n");

	recv_slots = bytes_to_slots(recv_len);

	i = 0;
	if (recv_len > maxlen) {
		printk(BIOS_ERR, "HECI: response is too big\n");
		return 0;
	}

	/* wait for the rest of messages to arrive */
	wait_read_slots(recv_slots);

	/* fetch whole slots first */
	while (i < ALIGN_DOWN(recv_len, SLOT_SIZE)) {
		*p++ = read_slot();
		i += SLOT_SIZE;
	}

	/*
	 * If ME is not ready, something went wrong and
	 * we received junk
	 */
	if (!cse_ready())
		return 0;

	/* Tail bytes: read one more slot and copy only what belongs to us */
	remainder = recv_len % SLOT_SIZE;

	if (remainder) {
		reg = read_slot();
		memcpy(p, &reg, remainder);
	}

	return recv_len;
}
502
/*
 * Receive a complete (possibly multi-fragment) HECI message into 'buff'.
 * On entry *maxlen is the buffer capacity; on success it is updated to the
 * number of bytes actually received. Retries the whole read up to
 * MAX_HECI_MESSAGE_RETRY_COUNT times. Returns 1 on success, 0 on failure.
 */
int heci_receive(void *buff, size_t *maxlen)
{
	uint8_t retry;
	size_t left, received;
	uint32_t hdr = 0;
	uint8_t *p;

	if (!buff || !maxlen || !*maxlen)
		return 0;

	clear_int();

	for (retry = 0; retry < MAX_HECI_MESSAGE_RETRY_COUNT; retry++) {
		/* Each retry restarts at the beginning of the caller's buffer */
		p = buff;
		left = *maxlen;

		if (!wait_heci_ready()) {
			printk(BIOS_ERR, "HECI: not ready\n");
			continue;
		}

		/*
		 * Receive multiple packets until we meet one marked
		 * complete or we run out of space in caller-provided buffer.
		 */
		do {
			received = recv_one_message(&hdr, p, left);
			if (!received) {
				printk(BIOS_ERR, "HECI: Failed to receive!\n");
				return 0;
			}
			left -= received;
			p += received;
			/* If we read out everything ping to send more */
			if (!(hdr & MEI_HDR_IS_COMPLETE) && !cse_filled_slots())
				host_gen_interrupt();
		} while (received && !(hdr & MEI_HDR_IS_COMPLETE) && left > 0);

		if ((hdr & MEI_HDR_IS_COMPLETE) && received) {
			/* Report the number of bytes consumed back to the caller */
			*maxlen = p - (uint8_t *) buff;
			return 1;
		}
	}
	return 0;
}
548
Sridhar Siricillaa5208f52019-08-30 17:10:24 +0530549int heci_send_receive(const void *snd_msg, size_t snd_sz, void *rcv_msg, size_t *rcv_sz)
550{
551 if (!heci_send(snd_msg, snd_sz, BIOS_HOST_ADDR, HECI_MKHI_ADDR)) {
552 printk(BIOS_ERR, "HECI: send Failed\n");
553 return 0;
554 }
555
556 if (rcv_msg != NULL) {
557 if (!heci_receive(rcv_msg, rcv_sz)) {
558 printk(BIOS_ERR, "HECI: receive Failed\n");
559 return 0;
560 }
561 }
562 return 1;
563}
564
Andrey Petrov04a72c42017-03-01 15:51:57 -0800565/*
566 * Attempt to reset the device. This is useful when host and ME are out
567 * of sync during transmission or ME didn't understand the message.
568 */
569int heci_reset(void)
570{
571 uint32_t csr;
572
573 /* Send reset request */
574 csr = read_host_csr();
Sridhar Siricillab9d075b2019-08-31 11:38:33 +0530575 csr |= (CSR_RESET | CSR_IG);
Andrey Petrov04a72c42017-03-01 15:51:57 -0800576 write_host_csr(csr);
577
578 if (wait_heci_ready()) {
579 /* Device is back on its imaginary feet, clear reset */
Sridhar Siricillaff072e62019-11-27 14:55:16 +0530580 cse_set_host_ready();
Andrey Petrov04a72c42017-03-01 15:51:57 -0800581 return 1;
582 }
583
584 printk(BIOS_CRIT, "HECI: reset failed\n");
585
586 return 0;
587}
588
Sridhar Siricilla2cc66912019-08-31 11:20:34 +0530589bool is_cse_enabled(void)
590{
591 const struct device *cse_dev = pcidev_path_on_root(PCH_DEVFN_CSE);
592
593 if (!cse_dev || !cse_dev->enabled) {
594 printk(BIOS_WARNING, "HECI: No CSE device\n");
595 return false;
596 }
597
598 if (pci_read_config16(PCH_DEV_CSE, PCI_VENDOR_ID) == 0xFFFF) {
599 printk(BIOS_WARNING, "HECI: CSE device is hidden\n");
600 return false;
601 }
602
603 return true;
604}
605
/* Read a 32-bit value from the CSE device's PCI configuration space */
uint32_t me_read_config32(int offset)
{
	return pci_read_config32(PCH_DEV_CSE, offset);
}
610
Sridhar Siricillad415c202019-08-31 14:54:57 +0530611/*
612 * Sends GLOBAL_RESET_REQ cmd to CSE.The reset type can be GLOBAL_RESET/
613 * HOST_RESET_ONLY/CSE_RESET_ONLY.
614 */
Sridhar Siricillaf2eb6872019-12-05 19:54:16 +0530615int cse_request_global_reset(enum rst_req_type rst_type)
Sridhar Siricillad415c202019-08-31 14:54:57 +0530616{
617 int status;
618 struct mkhi_hdr reply;
619 struct reset_message {
620 struct mkhi_hdr hdr;
621 uint8_t req_origin;
622 uint8_t reset_type;
623 } __packed;
624 struct reset_message msg = {
625 .hdr = {
626 .group_id = MKHI_GROUP_ID_CBM,
Sridhar Siricillae202e672020-01-07 23:36:40 +0530627 .command = MKHI_CBM_GLOBAL_RESET_REQ,
Sridhar Siricillad415c202019-08-31 14:54:57 +0530628 },
629 .req_origin = GR_ORIGIN_BIOS_POST,
630 .reset_type = rst_type
631 };
632 size_t reply_size;
633
Sridhar Siricillaf2eb6872019-12-05 19:54:16 +0530634 printk(BIOS_DEBUG, "HECI: Global Reset(Type:%d) Command\n", rst_type);
Sridhar Siricillad415c202019-08-31 14:54:57 +0530635 if (!((rst_type == GLOBAL_RESET) ||
Sridhar Siricillaf2eb6872019-12-05 19:54:16 +0530636 (rst_type == HOST_RESET_ONLY) || (rst_type == CSE_RESET_ONLY))) {
637 printk(BIOS_ERR, "HECI: Unsupported reset type is requested\n");
638 return 0;
639 }
Sridhar Siricillad415c202019-08-31 14:54:57 +0530640
641 heci_reset();
642
643 reply_size = sizeof(reply);
644 memset(&reply, 0, reply_size);
645
Sridhar Siricillad415c202019-08-31 14:54:57 +0530646 if (rst_type == CSE_RESET_ONLY)
Sridhar Siricillaf2eb6872019-12-05 19:54:16 +0530647 status = heci_send(&msg, sizeof(msg), BIOS_HOST_ADDR, HECI_MKHI_ADDR);
Sridhar Siricillad415c202019-08-31 14:54:57 +0530648 else
Sridhar Siricillaf2eb6872019-12-05 19:54:16 +0530649 status = heci_send_receive(&msg, sizeof(msg), &reply, &reply_size);
Sridhar Siricillad415c202019-08-31 14:54:57 +0530650
Sridhar Siricillaf2eb6872019-12-05 19:54:16 +0530651 printk(BIOS_DEBUG, "HECI: Global Reset %s!\n", status ? "success" : "failure");
652 return status;
Sridhar Siricillad415c202019-08-31 14:54:57 +0530653}
654
Sridhar Siricillae30a0e62019-08-31 16:12:21 +0530655/* Sends HMRFPO Enable command to CSE */
Sridhar Siricillaff072e62019-11-27 14:55:16 +0530656int cse_hmrfpo_enable(void)
Sridhar Siricillae30a0e62019-08-31 16:12:21 +0530657{
658 struct hmrfpo_enable_msg {
659 struct mkhi_hdr hdr;
660 uint32_t nonce[2];
661 } __packed;
662
663 /* HMRFPO Enable message */
664 struct hmrfpo_enable_msg msg = {
665 .hdr = {
Sridhar Siricillae202e672020-01-07 23:36:40 +0530666 .group_id = MKHI_GROUP_ID_HMRFPO,
Sridhar Siricillae30a0e62019-08-31 16:12:21 +0530667 .command = MKHI_HMRFPO_ENABLE,
668 },
669 .nonce = {0},
670 };
671
672 /* HMRFPO Enable response */
673 struct hmrfpo_enable_resp {
674 struct mkhi_hdr hdr;
Sridhar Siricillae202e672020-01-07 23:36:40 +0530675 /* Base addr for factory data area, not relevant for client SKUs */
Sridhar Siricillae30a0e62019-08-31 16:12:21 +0530676 uint32_t fct_base;
Sridhar Siricillae202e672020-01-07 23:36:40 +0530677 /* Length of factory data area, not relevant for client SKUs */
Sridhar Siricillae30a0e62019-08-31 16:12:21 +0530678 uint32_t fct_limit;
679 uint8_t status;
680 uint8_t padding[3];
681 } __packed;
682
683 struct hmrfpo_enable_resp resp;
684 size_t resp_size = sizeof(struct hmrfpo_enable_resp);
Sridhar Siricillae30a0e62019-08-31 16:12:21 +0530685
686 printk(BIOS_DEBUG, "HECI: Send HMRFPO Enable Command\n");
Sridhar Siricillae30a0e62019-08-31 16:12:21 +0530687 /*
688 * This command can be run only if:
689 * - Working state is normal and
690 * - Operation mode is normal or temporary disable mode.
691 */
Sridhar Siricilla8e465452019-09-23 20:59:38 +0530692 if (!cse_is_hfs1_cws_normal() ||
693 (!cse_is_hfs1_com_normal() && !cse_is_hfs1_com_soft_temp_disable())) {
Sridhar Siricillae30a0e62019-08-31 16:12:21 +0530694 printk(BIOS_ERR, "HECI: ME not in required Mode\n");
695 goto failed;
696 }
697
698 if (!heci_send_receive(&msg, sizeof(struct hmrfpo_enable_msg),
699 &resp, &resp_size))
700 goto failed;
701
702 if (resp.hdr.result) {
703 printk(BIOS_ERR, "HECI: Resp Failed:%d\n", resp.hdr.result);
704 goto failed;
705 }
706 return 1;
707
708failed:
709 return 0;
710}
711
712/*
713 * Sends HMRFPO Get Status command to CSE to get the HMRFPO status.
Sridhar Siricilla63be9182020-01-19 12:38:56 +0530714 * The status can be DISABLED/LOCKED/ENABLED
Sridhar Siricillae30a0e62019-08-31 16:12:21 +0530715 */
Sridhar Siricillaff072e62019-11-27 14:55:16 +0530716int cse_hmrfpo_get_status(void)
Sridhar Siricillae30a0e62019-08-31 16:12:21 +0530717{
718 struct hmrfpo_get_status_msg {
719 struct mkhi_hdr hdr;
720 } __packed;
721
722 struct hmrfpo_get_status_resp {
723 struct mkhi_hdr hdr;
724 uint8_t status;
Sridhar Siricilla63be9182020-01-19 12:38:56 +0530725 uint8_t reserved[3];
Sridhar Siricillae30a0e62019-08-31 16:12:21 +0530726 } __packed;
727
728 struct hmrfpo_get_status_msg msg = {
729 .hdr = {
Sridhar Siricillae202e672020-01-07 23:36:40 +0530730 .group_id = MKHI_GROUP_ID_HMRFPO,
Sridhar Siricillae30a0e62019-08-31 16:12:21 +0530731 .command = MKHI_HMRFPO_GET_STATUS,
732 },
733 };
734 struct hmrfpo_get_status_resp resp;
735 size_t resp_size = sizeof(struct hmrfpo_get_status_resp);
736
737 printk(BIOS_INFO, "HECI: Sending Get HMRFPO Status Command\n");
738
Sridhar Siricilla206905c2020-02-06 18:48:22 +0530739 if (!cse_is_hfs1_cws_normal()) {
740 printk(BIOS_ERR, "HECI: CSE's current working state is not Normal\n");
741 return -1;
742 }
743
Sridhar Siricillae30a0e62019-08-31 16:12:21 +0530744 if (!heci_send_receive(&msg, sizeof(struct hmrfpo_get_status_msg),
745 &resp, &resp_size)) {
746 printk(BIOS_ERR, "HECI: HMRFPO send/receive fail\n");
747 return -1;
748 }
749
750 if (resp.hdr.result) {
751 printk(BIOS_ERR, "HECI: HMRFPO Resp Failed:%d\n",
752 resp.hdr.result);
753 return -1;
754 }
755
756 return resp.status;
757}
758
Andrey Petrov04a72c42017-03-01 15:51:57 -0800759#if ENV_RAMSTAGE
760
/* Refresh the cached MMIO base from the finally-assigned BAR0 resource */
static void update_sec_bar(struct device *dev)
{
	cse.sec_bar = find_resource(dev, PCI_BASE_ADDRESS_0)->base;
}
765
/*
 * Standard PCI set_resources hook; additionally captures the CSE BAR so the
 * driver's cached copy tracks the resource allocator's final assignment.
 */
static void cse_set_resources(struct device *dev)
{
	if (dev->path.pci.devfn == PCH_DEVFN_CSE)
		update_sec_bar(dev);

	pci_dev_set_resources(dev);
}
773
/* Device operations: stock PCI behavior except for the set_resources hook */
static struct device_operations cse_ops = {
	.set_resources		= cse_set_resources,
	.read_resources		= pci_dev_read_resources,
	.enable_resources	= pci_dev_enable_resources,
	.init			= pci_dev_init,
	.ops_pci		= &pci_dev_ops_pci,
};
781
Hannah Williams63142152017-06-12 14:03:18 -0700782static const unsigned short pci_device_ids[] = {
783 PCI_DEVICE_ID_INTEL_APL_CSE0,
784 PCI_DEVICE_ID_INTEL_GLK_CSE0,
Andrey Petrov0405de92017-06-05 13:25:29 -0700785 PCI_DEVICE_ID_INTEL_CNL_CSE0,
Subrata Banikd0586d22017-11-27 13:28:41 +0530786 PCI_DEVICE_ID_INTEL_SKL_CSE0,
Maxim Polyakov571d07d2019-08-22 13:11:32 +0300787 PCI_DEVICE_ID_INTEL_LWB_CSE0,
788 PCI_DEVICE_ID_INTEL_LWB_CSE0_SUPER,
praveen hodagatta praneshe26c4a42018-09-20 03:49:45 +0800789 PCI_DEVICE_ID_INTEL_CNP_H_CSE0,
Aamir Bohra9eac0392018-06-30 12:07:04 +0530790 PCI_DEVICE_ID_INTEL_ICL_CSE0,
Ronak Kanabarda7ffb482019-02-05 01:51:13 +0530791 PCI_DEVICE_ID_INTEL_CMP_CSE0,
Gaggery Tsai12a651c2019-12-05 11:23:20 -0800792 PCI_DEVICE_ID_INTEL_CMP_H_CSE0,
Ravi Sarawadi6b5bf402019-10-21 22:25:04 -0700793 PCI_DEVICE_ID_INTEL_TGL_CSE0,
rkanabar263f1292019-11-28 10:41:45 +0530794 PCI_DEVICE_ID_INTEL_JSP_PRE_PROD_CSE0,
Tan, Lean Sheng26136092020-01-20 19:13:56 -0800795 PCI_DEVICE_ID_INTEL_MCC_CSE0,
796 PCI_DEVICE_ID_INTEL_MCC_CSE1,
797 PCI_DEVICE_ID_INTEL_MCC_CSE2,
798 PCI_DEVICE_ID_INTEL_MCC_CSE3,
Hannah Williams63142152017-06-12 14:03:18 -0700799 0,
800};
801
Andrey Petrov04a72c42017-03-01 15:51:57 -0800802static const struct pci_driver cse_driver __pci_driver = {
803 .ops = &cse_ops,
804 .vendor = PCI_VENDOR_ID_INTEL,
805 /* SoC/chipset needs to provide PCI device ID */
Andrey Petrov0405de92017-06-05 13:25:29 -0700806 .devices = pci_device_ids
Andrey Petrov04a72c42017-03-01 15:51:57 -0800807};
808
809#endif