blob: ffe10a5ea528ccf94692abb01c96aa8804e1014b [file] [log] [blame]
Angel Pons0612b272020-04-05 15:46:56 +02001/* SPDX-License-Identifier: GPL-2.0-only */
Andrey Petrov04a72c42017-03-01 15:51:57 -08002
Subrata Banik05e06cd2017-11-09 15:04:09 +05303#include <assert.h>
Andrey Petrov04a72c42017-03-01 15:51:57 -08004#include <commonlib/helpers.h>
5#include <console/console.h>
Kyösti Mälkki13f66502019-03-03 08:01:05 +02006#include <device/mmio.h>
Andrey Petrov04a72c42017-03-01 15:51:57 -08007#include <delay.h>
8#include <device/pci.h>
9#include <device/pci_ids.h>
10#include <device/pci_ops.h>
11#include <intelblocks/cse.h>
Tim Wawrzynczak09635f42021-06-18 10:08:47 -060012#include <security/vboot/misc.h>
13#include <security/vboot/vboot_common.h>
Subrata Banik05e06cd2017-11-09 15:04:09 +053014#include <soc/iomap.h>
Andrey Petrov04a72c42017-03-01 15:51:57 -080015#include <soc/pci_devs.h>
Sridhar Siricilla8e465452019-09-23 20:59:38 +053016#include <soc/me.h>
Andrey Petrov04a72c42017-03-01 15:51:57 -080017#include <string.h>
18#include <timer.h>
19
Subrata Banik5c08c732017-11-13 14:54:37 +053020#define MAX_HECI_MESSAGE_RETRY_COUNT 5
21
Andrey Petrov04a72c42017-03-01 15:51:57 -080022/* Wait up to 15 sec for HECI to get ready */
Subrata Banik03aef282021-09-28 18:10:24 +053023#define HECI_DELAY_READY_MS (15 * 1000)
Jonathan Neuschäfer5268b762018-02-12 12:24:25 +010024/* Wait up to 100 usec between circular buffer polls */
Subrata Banik03aef282021-09-28 18:10:24 +053025#define HECI_DELAY_US 100
Andrey Petrov04a72c42017-03-01 15:51:57 -080026/* Wait up to 5 sec for CSE to chew something we sent */
Subrata Banik03aef282021-09-28 18:10:24 +053027#define HECI_SEND_TIMEOUT_MS (5 * 1000)
Andrey Petrov04a72c42017-03-01 15:51:57 -080028/* Wait up to 5 sec for CSE to blurp a reply */
Subrata Banik03aef282021-09-28 18:10:24 +053029#define HECI_READ_TIMEOUT_MS (5 * 1000)
Subrata Banika219edb2021-09-25 15:02:37 +053030/* Wait up to 1 ms for CSE CIP */
Subrata Banik03aef282021-09-28 18:10:24 +053031#define HECI_CIP_TIMEOUT_US 1000
Subrata Banikf5765812021-09-30 13:37:10 +053032/* Wait up to 5 seconds for CSE to boot from RO(BP1) */
33#define CSE_DELAY_BOOT_TO_RO_MS (5 * 1000)
Andrey Petrov04a72c42017-03-01 15:51:57 -080034
35#define SLOT_SIZE sizeof(uint32_t)
36
37#define MMIO_CSE_CB_WW 0x00
38#define MMIO_HOST_CSR 0x04
39#define MMIO_CSE_CB_RW 0x08
40#define MMIO_CSE_CSR 0x0c
Subrata Banika219edb2021-09-25 15:02:37 +053041#define MMIO_CSE_DEVIDLE 0x800
42#define CSE_DEV_IDLE (1 << 2)
43#define CSE_DEV_CIP (1 << 0)
Andrey Petrov04a72c42017-03-01 15:51:57 -080044
45#define CSR_IE (1 << 0)
46#define CSR_IS (1 << 1)
47#define CSR_IG (1 << 2)
48#define CSR_READY (1 << 3)
49#define CSR_RESET (1 << 4)
50#define CSR_RP_START 8
51#define CSR_RP (((1 << 8) - 1) << CSR_RP_START)
52#define CSR_WP_START 16
53#define CSR_WP (((1 << 8) - 1) << CSR_WP_START)
54#define CSR_CBD_START 24
55#define CSR_CBD (((1 << 8) - 1) << CSR_CBD_START)
56
57#define MEI_HDR_IS_COMPLETE (1 << 31)
58#define MEI_HDR_LENGTH_START 16
59#define MEI_HDR_LENGTH_SIZE 9
60#define MEI_HDR_LENGTH (((1 << MEI_HDR_LENGTH_SIZE) - 1) \
61 << MEI_HDR_LENGTH_START)
62#define MEI_HDR_HOST_ADDR_START 8
63#define MEI_HDR_HOST_ADDR (((1 << 8) - 1) << MEI_HDR_HOST_ADDR_START)
64#define MEI_HDR_CSE_ADDR_START 0
65#define MEI_HDR_CSE_ADDR (((1 << 8) - 1) << MEI_HDR_CSE_ADDR_START)
66
/* Cached driver state: sec_bar holds the HECI1 MMIO base (BAR0), lazily filled. */
static struct cse_device {
	uintptr_t sec_bar;
} cse;
Andrey Petrov04a72c42017-03-01 15:51:57 -080070
/*
 * Initialize the device with provided temporary BAR. If BAR is 0 use a
 * default. This is intended for pre-mem usage only where BARs haven't been
 * assigned yet and devices are not enabled.
 */
void heci_init(uintptr_t tempbar)
{
	/* Device handle type differs between simple (pre-ram) and full device model */
#if defined(__SIMPLE_DEVICE__)
	pci_devfn_t dev = PCH_DEV_CSE;
#else
	struct device *dev = PCH_DEV_CSE;
#endif
	u16 pcireg;

	/* Assume it is already initialized, nothing else to do */
	if (cse.sec_bar)
		return;

	/* Use default pre-ram bar */
	if (!tempbar)
		tempbar = HECI1_BASE_ADDRESS;

	/* Assign Resources to HECI1 */
	/* Clear BIT 1-2 of Command Register (disable decode while reprogramming BAR) */
	pcireg = pci_read_config16(dev, PCI_COMMAND);
	pcireg &= ~(PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY);
	pci_write_config16(dev, PCI_COMMAND, pcireg);

	/* Program Temporary BAR for HECI1 */
	pci_write_config32(dev, PCI_BASE_ADDRESS_0, tempbar);
	pci_write_config32(dev, PCI_BASE_ADDRESS_1, 0x0);

	/* Enable Bus Master and MMIO Space */
	pci_or_config16(dev, PCI_COMMAND, PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY);

	/* Cache the BAR so read_bar()/write_bar() need not re-read config space */
	cse.sec_bar = tempbar;
}
108
Subrata Banik05e06cd2017-11-09 15:04:09 +0530109/* Get HECI BAR 0 from PCI configuration space */
110static uint32_t get_cse_bar(void)
111{
112 uintptr_t bar;
113
114 bar = pci_read_config32(PCH_DEV_CSE, PCI_BASE_ADDRESS_0);
115 assert(bar != 0);
116 /*
117 * Bits 31-12 are the base address as per EDS for SPI,
118 * Don't care about 0-11 bit
119 */
120 return bar & ~PCI_BASE_ADDRESS_MEM_ATTR_MASK;
121}
122
Andrey Petrov04a72c42017-03-01 15:51:57 -0800123static uint32_t read_bar(uint32_t offset)
124{
Patrick Georgi08c8cf92019-12-02 11:43:20 +0100125 /* Load and cache BAR */
Patrick Georgic9b13592019-11-29 11:47:47 +0100126 if (!cse.sec_bar)
127 cse.sec_bar = get_cse_bar();
128 return read32((void *)(cse.sec_bar + offset));
Andrey Petrov04a72c42017-03-01 15:51:57 -0800129}
130
131static void write_bar(uint32_t offset, uint32_t val)
132{
Patrick Georgi08c8cf92019-12-02 11:43:20 +0100133 /* Load and cache BAR */
Patrick Georgic9b13592019-11-29 11:47:47 +0100134 if (!cse.sec_bar)
135 cse.sec_bar = get_cse_bar();
136 return write32((void *)(cse.sec_bar + offset), val);
Andrey Petrov04a72c42017-03-01 15:51:57 -0800137}
138
139static uint32_t read_cse_csr(void)
140{
141 return read_bar(MMIO_CSE_CSR);
142}
143
144static uint32_t read_host_csr(void)
145{
146 return read_bar(MMIO_HOST_CSR);
147}
148
149static void write_host_csr(uint32_t data)
150{
151 write_bar(MMIO_HOST_CSR, data);
152}
153
154static size_t filled_slots(uint32_t data)
155{
156 uint8_t wp, rp;
157 rp = data >> CSR_RP_START;
158 wp = data >> CSR_WP_START;
159 return (uint8_t) (wp - rp);
160}
161
/* Occupied slots in the CSE-to-host circular buffer. */
static size_t cse_filled_slots(void)
{
	return filled_slots(read_cse_csr());
}
166
167static size_t host_empty_slots(void)
168{
169 uint32_t csr;
170 csr = read_host_csr();
171
172 return ((csr & CSR_CBD) >> CSR_CBD_START) - filled_slots(csr);
173}
174
175static void clear_int(void)
176{
177 uint32_t csr;
178 csr = read_host_csr();
179 csr |= CSR_IS;
180 write_host_csr(csr);
181}
182
183static uint32_t read_slot(void)
184{
185 return read_bar(MMIO_CSE_CB_RW);
186}
187
188static void write_slot(uint32_t val)
189{
190 write_bar(MMIO_CSE_CB_WW, val);
191}
192
193static int wait_write_slots(size_t cnt)
194{
195 struct stopwatch sw;
196
Subrata Banik03aef282021-09-28 18:10:24 +0530197 stopwatch_init_msecs_expire(&sw, HECI_SEND_TIMEOUT_MS);
Andrey Petrov04a72c42017-03-01 15:51:57 -0800198 while (host_empty_slots() < cnt) {
Subrata Banik03aef282021-09-28 18:10:24 +0530199 udelay(HECI_DELAY_US);
Andrey Petrov04a72c42017-03-01 15:51:57 -0800200 if (stopwatch_expired(&sw)) {
201 printk(BIOS_ERR, "HECI: timeout, buffer not drained\n");
202 return 0;
203 }
204 }
205 return 1;
206}
207
208static int wait_read_slots(size_t cnt)
209{
210 struct stopwatch sw;
211
Subrata Banik03aef282021-09-28 18:10:24 +0530212 stopwatch_init_msecs_expire(&sw, HECI_READ_TIMEOUT_MS);
Andrey Petrov04a72c42017-03-01 15:51:57 -0800213 while (cse_filled_slots() < cnt) {
Subrata Banik03aef282021-09-28 18:10:24 +0530214 udelay(HECI_DELAY_US);
Andrey Petrov04a72c42017-03-01 15:51:57 -0800215 if (stopwatch_expired(&sw)) {
216 printk(BIOS_ERR, "HECI: timed out reading answer!\n");
217 return 0;
218 }
219 }
220 return 1;
221}
222
223/* get number of full 4-byte slots */
224static size_t bytes_to_slots(size_t bytes)
225{
226 return ALIGN_UP(bytes, SLOT_SIZE) / SLOT_SIZE;
227}
228
229static int cse_ready(void)
230{
231 uint32_t csr;
232 csr = read_cse_csr();
233 return csr & CSR_READY;
234}
235
Sridhar Siricilla8e465452019-09-23 20:59:38 +0530236static bool cse_check_hfs1_com(int mode)
Sridhar Siricillab9d075b2019-08-31 11:38:33 +0530237{
238 union me_hfsts1 hfs1;
239 hfs1.data = me_read_config32(PCI_ME_HFSTS1);
Sridhar Siricilla8e465452019-09-23 20:59:38 +0530240 return hfs1.fields.operation_mode == mode;
241}
242
243bool cse_is_hfs1_cws_normal(void)
244{
245 union me_hfsts1 hfs1;
246 hfs1.data = me_read_config32(PCI_ME_HFSTS1);
247 if (hfs1.fields.working_state == ME_HFS1_CWS_NORMAL)
248 return true;
249 return false;
250}
251
252bool cse_is_hfs1_com_normal(void)
253{
254 return cse_check_hfs1_com(ME_HFS1_COM_NORMAL);
255}
256
257bool cse_is_hfs1_com_secover_mei_msg(void)
258{
259 return cse_check_hfs1_com(ME_HFS1_COM_SECOVER_MEI_MSG);
260}
261
262bool cse_is_hfs1_com_soft_temp_disable(void)
263{
264 return cse_check_hfs1_com(ME_HFS1_COM_SOFT_TEMP_DISABLE);
Sridhar Siricillab9d075b2019-08-31 11:38:33 +0530265}
266
Sridhar Siricilla99dbca32020-05-12 21:05:04 +0530267bool cse_is_hfs3_fw_sku_lite(void)
Sridhar Siricilla3465d272020-02-06 15:31:04 +0530268{
269 union me_hfsts3 hfs3;
270 hfs3.data = me_read_config32(PCI_ME_HFSTS3);
Sridhar Siricilla99dbca32020-05-12 21:05:04 +0530271 return hfs3.fields.fw_sku == ME_HFS3_FW_SKU_LITE;
Sridhar Siricilla3465d272020-02-06 15:31:04 +0530272}
273
Sridhar Siricillab9d075b2019-08-31 11:38:33 +0530274/* Makes the host ready to communicate with CSE */
Sridhar Siricillaff072e62019-11-27 14:55:16 +0530275void cse_set_host_ready(void)
Sridhar Siricillab9d075b2019-08-31 11:38:33 +0530276{
277 uint32_t csr;
278 csr = read_host_csr();
279 csr &= ~CSR_RESET;
280 csr |= (CSR_IG | CSR_READY);
281 write_host_csr(csr);
282}
283
Sridhar Siricillaff072e62019-11-27 14:55:16 +0530284/* Polls for ME mode ME_HFS1_COM_SECOVER_MEI_MSG for 15 seconds */
285uint8_t cse_wait_sec_override_mode(void)
Sridhar Siricillab9d075b2019-08-31 11:38:33 +0530286{
287 struct stopwatch sw;
Subrata Banik03aef282021-09-28 18:10:24 +0530288 stopwatch_init_msecs_expire(&sw, HECI_DELAY_READY_MS);
Sridhar Siricilla8e465452019-09-23 20:59:38 +0530289 while (!cse_is_hfs1_com_secover_mei_msg()) {
Subrata Banik03aef282021-09-28 18:10:24 +0530290 udelay(HECI_DELAY_US);
Sridhar Siricillaff072e62019-11-27 14:55:16 +0530291 if (stopwatch_expired(&sw)) {
292 printk(BIOS_ERR, "HECI: Timed out waiting for SEC_OVERRIDE mode!\n");
Sridhar Siricillab9d075b2019-08-31 11:38:33 +0530293 return 0;
Sridhar Siricillaff072e62019-11-27 14:55:16 +0530294 }
Sridhar Siricillab9d075b2019-08-31 11:38:33 +0530295 }
Sridhar Siricillaff072e62019-11-27 14:55:16 +0530296 printk(BIOS_DEBUG, "HECI: CSE took %lu ms to enter security override mode\n",
297 stopwatch_duration_msecs(&sw));
Sridhar Siricillab9d075b2019-08-31 11:38:33 +0530298 return 1;
299}
300
Sridhar Siricilla09ea3712019-11-12 23:35:50 +0530301/*
302 * Polls for CSE's current operation mode 'Soft Temporary Disable'.
303 * The CSE enters the current operation mode when it boots from RO(BP1).
304 */
305uint8_t cse_wait_com_soft_temp_disable(void)
306{
307 struct stopwatch sw;
Subrata Banikf5765812021-09-30 13:37:10 +0530308 stopwatch_init_msecs_expire(&sw, CSE_DELAY_BOOT_TO_RO_MS);
Sridhar Siricilla09ea3712019-11-12 23:35:50 +0530309 while (!cse_is_hfs1_com_soft_temp_disable()) {
Subrata Banik03aef282021-09-28 18:10:24 +0530310 udelay(HECI_DELAY_US);
Sridhar Siricilla09ea3712019-11-12 23:35:50 +0530311 if (stopwatch_expired(&sw)) {
312 printk(BIOS_ERR, "HECI: Timed out waiting for CSE to boot from RO!\n");
313 return 0;
314 }
315 }
316 printk(BIOS_SPEW, "HECI: CSE took %lu ms to boot from RO\n",
317 stopwatch_duration_msecs(&sw));
318 return 1;
319}
320
Andrey Petrov04a72c42017-03-01 15:51:57 -0800321static int wait_heci_ready(void)
322{
323 struct stopwatch sw;
324
Subrata Banik03aef282021-09-28 18:10:24 +0530325 stopwatch_init_msecs_expire(&sw, HECI_DELAY_READY_MS);
Andrey Petrov04a72c42017-03-01 15:51:57 -0800326 while (!cse_ready()) {
Subrata Banik03aef282021-09-28 18:10:24 +0530327 udelay(HECI_DELAY_US);
Andrey Petrov04a72c42017-03-01 15:51:57 -0800328 if (stopwatch_expired(&sw))
329 return 0;
330 }
331
332 return 1;
333}
334
335static void host_gen_interrupt(void)
336{
337 uint32_t csr;
338 csr = read_host_csr();
339 csr |= CSR_IG;
340 write_host_csr(csr);
341}
342
343static size_t hdr_get_length(uint32_t hdr)
344{
345 return (hdr & MEI_HDR_LENGTH) >> MEI_HDR_LENGTH_START;
346}
347
/*
 * Transmit a single MEI fragment: the header 'hdr' followed by the payload
 * from 'buff', whose length is encoded in the header's length field.
 * Returns the number of payload bytes written, 0 on timeout or CSE failure.
 */
static int
send_one_message(uint32_t hdr, const void *buff)
{
	size_t pend_len, pend_slots, remainder, i;
	uint32_t tmp;
	const uint32_t *p = buff;

	/* Get space for the header */
	if (!wait_write_slots(1))
		return 0;

	/* First, write header */
	write_slot(hdr);

	pend_len = hdr_get_length(hdr);
	pend_slots = bytes_to_slots(pend_len);

	if (!wait_write_slots(pend_slots))
		return 0;

	/* Write the body in whole slots */
	i = 0;
	while (i < ALIGN_DOWN(pend_len, SLOT_SIZE)) {
		write_slot(*p++);
		i += SLOT_SIZE;
	}

	remainder = pend_len % SLOT_SIZE;
	/* Pad to 4 bytes not touching caller's buffer */
	if (remainder) {
		memcpy(&tmp, p, remainder);
		write_slot(tmp);
	}

	/* Notify the CSE that fresh data is in the circular buffer */
	host_gen_interrupt();

	/* Make sure nothing bad happened during transmission */
	if (!cse_ready())
		return 0;

	return pend_len;
}
390
/*
 * Send message msg of size len to host from host_addr to cse_addr.
 * The message is fragmented to fit the circular buffer and the 9-bit
 * header length field; the final fragment carries MEI_HDR_IS_COMPLETE.
 * The whole transfer is retried up to MAX_HECI_MESSAGE_RETRY_COUNT times.
 * Returns 1 on success and 0 otherwise.
 * In case of error heci_reset() may be required.
 */
static int
heci_send(const void *msg, size_t len, uint8_t host_addr, uint8_t client_addr)
{
	uint8_t retry;
	uint32_t csr, hdr;
	size_t sent, remaining, cb_size, max_length;
	const uint8_t *p;

	if (!msg || !len)
		return 0;

	clear_int();

	for (retry = 0; retry < MAX_HECI_MESSAGE_RETRY_COUNT; retry++) {
		/* Restart from the beginning of the caller's buffer on retry */
		p = msg;

		if (!wait_heci_ready()) {
			printk(BIOS_ERR, "HECI: not ready\n");
			continue;
		}

		/* Circular buffer depth (in bytes) comes from the host CSR */
		csr = read_host_csr();
		cb_size = ((csr & CSR_CBD) >> CSR_CBD_START) * SLOT_SIZE;
		/*
		 * Reserve one slot for the header. Limit max message
		 * length by 9 bits that are available in the header.
		 */
		max_length = MIN(cb_size, (1 << MEI_HDR_LENGTH_SIZE) - 1)
				- SLOT_SIZE;
		remaining = len;

		/*
		 * Fragment the message into smaller messages not exceeding
		 * useful circular buffer length. Mark last message complete.
		 */
		do {
			hdr = MIN(max_length, remaining)
				<< MEI_HDR_LENGTH_START;
			hdr |= client_addr << MEI_HDR_CSE_ADDR_START;
			hdr |= host_addr << MEI_HDR_HOST_ADDR_START;
			hdr |= (MIN(max_length, remaining) == remaining) ?
					MEI_HDR_IS_COMPLETE : 0;
			sent = send_one_message(hdr, p);
			p += sent;
			remaining -= sent;
		} while (remaining > 0 && sent != 0);

		/* All fragments made it out; done */
		if (!remaining)
			return 1;
	}
	return 0;
}
448
/*
 * Receive a single MEI fragment into 'buff' (at most 'maxlen' bytes) and
 * store its header in '*hdr'. Returns the payload length in bytes, or 0
 * on timeout, oversized response or CSE failure.
 */
static size_t
recv_one_message(uint32_t *hdr, void *buff, size_t maxlen)
{
	uint32_t reg, *p = buff;
	size_t recv_slots, recv_len, remainder, i;

	/* first get the header */
	if (!wait_read_slots(1))
		return 0;

	*hdr = read_slot();
	recv_len = hdr_get_length(*hdr);

	if (!recv_len)
		printk(BIOS_WARNING, "HECI: message is zero-sized\n");

	recv_slots = bytes_to_slots(recv_len);

	i = 0;
	if (recv_len > maxlen) {
		printk(BIOS_ERR, "HECI: response is too big\n");
		return 0;
	}

	/*
	 * wait for the rest of messages to arrive
	 * NOTE(review): the return value is ignored here — on timeout the
	 * code appears to rely on the later cse_ready() check to bail out;
	 * confirm a partial read cannot slip through.
	 */
	wait_read_slots(recv_slots);

	/* fetch whole slots first */
	while (i < ALIGN_DOWN(recv_len, SLOT_SIZE)) {
		*p++ = read_slot();
		i += SLOT_SIZE;
	}

	/*
	 * If ME is not ready, something went wrong and
	 * we received junk
	 */
	if (!cse_ready())
		return 0;

	remainder = recv_len % SLOT_SIZE;

	/* Read the trailing partial slot; copy only the valid bytes */
	if (remainder) {
		reg = read_slot();
		memcpy(p, &reg, remainder);
	}

	return recv_len;
}
498
/*
 * Receive message into buff not exceeding maxlen. Message is considered
 * successfully received if a 'complete' indication is read from ME side
 * and there was enough space in the buffer to fit that message. maxlen
 * is updated with size of message that was received. Returns 0 on failure
 * and 1 on success.
 * In case of error heci_reset() may be required.
 */
static int heci_receive(void *buff, size_t *maxlen)
{
	uint8_t retry;
	size_t left, received;
	uint32_t hdr = 0;
	uint8_t *p;

	if (!buff || !maxlen || !*maxlen)
		return 0;

	clear_int();

	for (retry = 0; retry < MAX_HECI_MESSAGE_RETRY_COUNT; retry++) {
		/* Reset the destination cursor and budget on each retry */
		p = buff;
		left = *maxlen;

		if (!wait_heci_ready()) {
			printk(BIOS_ERR, "HECI: not ready\n");
			continue;
		}

		/*
		 * Receive multiple packets until we meet one marked
		 * complete or we run out of space in caller-provided buffer.
		 */
		do {
			received = recv_one_message(&hdr, p, left);
			if (!received) {
				printk(BIOS_ERR, "HECI: Failed to receive!\n");
				return 0;
			}
			left -= received;
			p += received;
			/* If we read out everything ping to send more */
			if (!(hdr & MEI_HDR_IS_COMPLETE) && !cse_filled_slots())
				host_gen_interrupt();
		} while (received && !(hdr & MEI_HDR_IS_COMPLETE) && left > 0);

		/* Complete message landed; report the byte count consumed */
		if ((hdr & MEI_HDR_IS_COMPLETE) && received) {
			*maxlen = p - (uint8_t *) buff;
			return 1;
		}
	}
	return 0;
}
552
Rizwan Qureshi957857d2021-08-30 16:43:57 +0530553int heci_send_receive(const void *snd_msg, size_t snd_sz, void *rcv_msg, size_t *rcv_sz,
554 uint8_t cse_addr)
Sridhar Siricillaa5208f52019-08-30 17:10:24 +0530555{
Rizwan Qureshi957857d2021-08-30 16:43:57 +0530556 if (!heci_send(snd_msg, snd_sz, BIOS_HOST_ADDR, cse_addr)) {
Sridhar Siricillaa5208f52019-08-30 17:10:24 +0530557 printk(BIOS_ERR, "HECI: send Failed\n");
558 return 0;
559 }
560
561 if (rcv_msg != NULL) {
562 if (!heci_receive(rcv_msg, rcv_sz)) {
563 printk(BIOS_ERR, "HECI: receive Failed\n");
564 return 0;
565 }
566 }
567 return 1;
568}
569
Andrey Petrov04a72c42017-03-01 15:51:57 -0800570/*
571 * Attempt to reset the device. This is useful when host and ME are out
572 * of sync during transmission or ME didn't understand the message.
573 */
574int heci_reset(void)
575{
576 uint32_t csr;
577
Duncan Laurie15ca9032020-11-05 10:09:07 -0800578 /* Clear post code to prevent eventlog entry from unknown code. */
579 post_code(0);
580
Andrey Petrov04a72c42017-03-01 15:51:57 -0800581 /* Send reset request */
582 csr = read_host_csr();
Sridhar Siricillab9d075b2019-08-31 11:38:33 +0530583 csr |= (CSR_RESET | CSR_IG);
Andrey Petrov04a72c42017-03-01 15:51:57 -0800584 write_host_csr(csr);
585
586 if (wait_heci_ready()) {
587 /* Device is back on its imaginary feet, clear reset */
Sridhar Siricillaff072e62019-11-27 14:55:16 +0530588 cse_set_host_ready();
Andrey Petrov04a72c42017-03-01 15:51:57 -0800589 return 1;
590 }
591
592 printk(BIOS_CRIT, "HECI: reset failed\n");
593
594 return 0;
595}
596
Sridhar Siricilla2cc66912019-08-31 11:20:34 +0530597bool is_cse_enabled(void)
598{
599 const struct device *cse_dev = pcidev_path_on_root(PCH_DEVFN_CSE);
600
601 if (!cse_dev || !cse_dev->enabled) {
602 printk(BIOS_WARNING, "HECI: No CSE device\n");
603 return false;
604 }
605
606 if (pci_read_config16(PCH_DEV_CSE, PCI_VENDOR_ID) == 0xFFFF) {
607 printk(BIOS_WARNING, "HECI: CSE device is hidden\n");
608 return false;
609 }
610
611 return true;
612}
613
614uint32_t me_read_config32(int offset)
615{
616 return pci_read_config32(PCH_DEV_CSE, offset);
617}
618
/*
 * Allow sending GLOBAL_RESET command only if:
 * - CSE's current working state is Normal and current operation mode is Normal.
 * - (or) CSE's current working state is Normal and current operation mode is
 *   Soft Temp Disable or Security Override Mode, provided the CSE firmware
 *   SKU is Lite.
 */
static bool cse_is_global_reset_allowed(void)
{
	if (!cse_is_hfs1_cws_normal())
		return false;

	if (cse_is_hfs1_com_normal())
		return true;

	return cse_is_hfs3_fw_sku_lite() &&
	       (cse_is_hfs1_com_soft_temp_disable() || cse_is_hfs1_com_secover_mei_msg());
}
640
/*
 * Sends GLOBAL_RESET_REQ cmd to CSE with reset type GLOBAL_RESET or
 * CSE_RESET_ONLY.
 * Returns 0 on failure and 1 on success.
 */
static int cse_request_reset(enum rst_req_type rst_type)
{
	int status;
	struct mkhi_hdr reply;
	/* MKHI GLOBAL_RESET_REQ wire format */
	struct reset_message {
		struct mkhi_hdr hdr;
		uint8_t req_origin;
		uint8_t reset_type;
	} __packed;
	struct reset_message msg = {
		.hdr = {
			.group_id = MKHI_GROUP_ID_CBM,
			.command = MKHI_CBM_GLOBAL_RESET_REQ,
		},
		.req_origin = GR_ORIGIN_BIOS_POST,
		.reset_type = rst_type
	};
	size_t reply_size;

	printk(BIOS_DEBUG, "HECI: Global Reset(Type:%d) Command\n", rst_type);

	if (!(rst_type == GLOBAL_RESET || rst_type == CSE_RESET_ONLY)) {
		printk(BIOS_ERR, "HECI: Unsupported reset type is requested\n");
		return 0;
	}

	if (!cse_is_global_reset_allowed() || !is_cse_enabled()) {
		printk(BIOS_ERR, "HECI: CSE does not meet required prerequisites\n");
		return 0;
	}

	/* Resynchronize the link before issuing the command */
	heci_reset();

	reply_size = sizeof(reply);
	memset(&reply, 0, reply_size);

	/* For CSE_RESET_ONLY only a send is performed — no reply is read */
	if (rst_type == CSE_RESET_ONLY)
		status = heci_send(&msg, sizeof(msg), BIOS_HOST_ADDR, HECI_MKHI_ADDR);
	else
		status = heci_send_receive(&msg, sizeof(msg), &reply, &reply_size,
					   HECI_MKHI_ADDR);

	printk(BIOS_DEBUG, "HECI: Global Reset %s!\n", status ? "success" : "failure");
	return status;
}
690
Subrata Banikf463dc02020-09-14 19:04:03 +0530691int cse_request_global_reset(void)
692{
693 return cse_request_reset(GLOBAL_RESET);
694}
695
/*
 * Allow sending HMRFPO ENABLE command only if:
 * - CSE's current working state is Normal and current operation mode is Normal
 * - (or) CSE's current working state is Normal and current operation mode is
 *   Soft Temp Disable, provided the CSE firmware SKU is Lite.
 */
static bool cse_is_hmrfpo_enable_allowed(void)
{
	if (!cse_is_hfs1_cws_normal())
		return false;

	if (cse_is_hfs1_com_normal())
		return true;

	return cse_is_hfs3_fw_sku_lite() && cse_is_hfs1_com_soft_temp_disable();
}
715
/*
 * Sends HMRFPO Enable command to CSE.
 * Returns 1 on success (command accepted and status OK), 0 otherwise.
 */
int cse_hmrfpo_enable(void)
{
	struct hmrfpo_enable_msg {
		struct mkhi_hdr hdr;
		uint32_t nonce[2];
	} __packed;

	/* HMRFPO Enable message */
	struct hmrfpo_enable_msg msg = {
		.hdr = {
			.group_id = MKHI_GROUP_ID_HMRFPO,
			.command = MKHI_HMRFPO_ENABLE,
		},
		.nonce = {0},
	};

	/* HMRFPO Enable response */
	struct hmrfpo_enable_resp {
		struct mkhi_hdr hdr;
		/* Base addr for factory data area, not relevant for client SKUs */
		uint32_t fct_base;
		/* Length of factory data area, not relevant for client SKUs */
		uint32_t fct_limit;
		uint8_t status;
		uint8_t reserved[3];
	} __packed;

	struct hmrfpo_enable_resp resp;
	size_t resp_size = sizeof(struct hmrfpo_enable_resp);

	printk(BIOS_DEBUG, "HECI: Send HMRFPO Enable Command\n");

	if (!cse_is_hmrfpo_enable_allowed()) {
		printk(BIOS_ERR, "HECI: CSE does not meet required prerequisites\n");
		return 0;
	}

	if (!heci_send_receive(&msg, sizeof(struct hmrfpo_enable_msg),
			       &resp, &resp_size, HECI_MKHI_ADDR))
		return 0;

	/* Non-zero MKHI result indicates the command was rejected */
	if (resp.hdr.result) {
		printk(BIOS_ERR, "HECI: Resp Failed:%d\n", resp.hdr.result);
		return 0;
	}

	/* Non-zero status indicates HMRFPO could not be enabled */
	if (resp.status) {
		printk(BIOS_ERR, "HECI: HMRFPO_Enable Failed (resp status: %d)\n", resp.status);
		return 0;
	}

	return 1;
}
770
/*
 * Sends HMRFPO Get Status command to CSE to get the HMRFPO status.
 * The status can be DISABLED/LOCKED/ENABLED.
 * Returns the status byte on success, -1 on failure.
 */
int cse_hmrfpo_get_status(void)
{
	struct hmrfpo_get_status_msg {
		struct mkhi_hdr hdr;
	} __packed;

	struct hmrfpo_get_status_resp {
		struct mkhi_hdr hdr;
		uint8_t status;
		uint8_t reserved[3];
	} __packed;

	struct hmrfpo_get_status_msg msg = {
		.hdr = {
			.group_id = MKHI_GROUP_ID_HMRFPO,
			.command = MKHI_HMRFPO_GET_STATUS,
		},
	};
	struct hmrfpo_get_status_resp resp;
	size_t resp_size = sizeof(struct hmrfpo_get_status_resp);

	printk(BIOS_INFO, "HECI: Sending Get HMRFPO Status Command\n");

	/* The query is only issued when the CSE working state is Normal */
	if (!cse_is_hfs1_cws_normal()) {
		printk(BIOS_ERR, "HECI: CSE's current working state is not Normal\n");
		return -1;
	}

	if (!heci_send_receive(&msg, sizeof(struct hmrfpo_get_status_msg),
			       &resp, &resp_size, HECI_MKHI_ADDR)) {
		printk(BIOS_ERR, "HECI: HMRFPO send/receive fail\n");
		return -1;
	}

	if (resp.hdr.result) {
		printk(BIOS_ERR, "HECI: HMRFPO Resp Failed:%d\n",
		       resp.hdr.result);
		return -1;
	}

	return resp.status;
}
817
/*
 * Query the CSE for its firmware version and log it to the console.
 * Intended to run in ramstage (after DRAM INIT DONE); 'unused' allows use
 * as a boot-state callback.
 */
void print_me_fw_version(void *unused)
{
	struct version {
		uint16_t minor;
		uint16_t major;
		uint16_t build;
		uint16_t hotfix;
	} __packed;

	struct fw_ver_resp {
		struct mkhi_hdr hdr;
		struct version code;
		struct version rec;
		struct version fitc;
	} __packed;

	const struct mkhi_hdr fw_ver_msg = {
		.group_id = MKHI_GROUP_ID_GEN,
		.command = MKHI_GEN_GET_FW_VERSION,
	};

	struct fw_ver_resp resp;
	size_t resp_size = sizeof(resp);

	/* Ignore if UART debugging is disabled */
	if (!CONFIG(CONSOLE_SERIAL))
		return;

	/* Ignore if CSE is disabled */
	if (!is_cse_enabled())
		return;

	/*
	 * Ignore if ME Firmware SKU type is Lite since
	 * print_boot_partition_info() logs RO(BP1) and RW(BP2) versions.
	 */
	if (cse_is_hfs3_fw_sku_lite())
		return;

	/*
	 * Prerequisites:
	 * 1) HFSTS1 Current Working State is Normal
	 * 2) HFSTS1 Current Operation Mode is Normal
	 * 3) It's after DRAM INIT DONE message (taken care of by calling it
	 *    during ramstage
	 */
	if (!cse_is_hfs1_cws_normal() || !cse_is_hfs1_com_normal())
		goto fail;

	heci_reset();

	if (!heci_send_receive(&fw_ver_msg, sizeof(fw_ver_msg), &resp, &resp_size,
			       HECI_MKHI_ADDR))
		goto fail;

	if (resp.hdr.result)
		goto fail;

	/*
	 * Note the struct declares minor before major (and build before
	 * hotfix), so arguments are deliberately cross-ordered here.
	 */
	printk(BIOS_DEBUG, "ME: Version: %d.%d.%d.%d\n", resp.code.major,
	       resp.code.minor, resp.code.hotfix, resp.code.build);
	return;

fail:
	printk(BIOS_DEBUG, "ME: Version: Unavailable\n");
}
883
Tim Wawrzynczak09635f42021-06-18 10:08:47 -0600884void cse_trigger_vboot_recovery(enum csme_failure_reason reason)
885{
886 printk(BIOS_DEBUG, "cse: CSE status registers: HFSTS1: 0x%x, HFSTS2: 0x%x "
887 "HFSTS3: 0x%x\n", me_read_config32(PCI_ME_HFSTS1),
888 me_read_config32(PCI_ME_HFSTS2), me_read_config32(PCI_ME_HFSTS3));
889
890 if (CONFIG(VBOOT)) {
891 struct vb2_context *ctx = vboot_get_context();
892 if (ctx == NULL)
893 goto failure;
894 vb2api_fail(ctx, VB2_RECOVERY_INTEL_CSE_LITE_SKU, reason);
895 vboot_save_data(ctx);
896 vboot_reboot();
897 }
898failure:
899 die("cse: Failed to trigger recovery mode(recovery subcode:%d)\n", reason);
900}
901
Subrata Banika219edb2021-09-25 15:02:37 +0530902static bool disable_cse_idle(void)
903{
904 struct stopwatch sw;
905 uint32_t dev_idle_ctrl = read_bar(MMIO_CSE_DEVIDLE);
906 dev_idle_ctrl &= ~CSE_DEV_IDLE;
907 write_bar(MMIO_CSE_DEVIDLE, dev_idle_ctrl);
908
Subrata Banik03aef282021-09-28 18:10:24 +0530909 stopwatch_init_usecs_expire(&sw, HECI_CIP_TIMEOUT_US);
Subrata Banika219edb2021-09-25 15:02:37 +0530910 do {
911 dev_idle_ctrl = read_bar(MMIO_CSE_DEVIDLE);
912 if ((dev_idle_ctrl & CSE_DEV_CIP) == CSE_DEV_CIP)
913 return true;
Subrata Banik03aef282021-09-28 18:10:24 +0530914 udelay(HECI_DELAY_US);
Subrata Banika219edb2021-09-25 15:02:37 +0530915 } while (!stopwatch_expired(&sw));
916
917 return false;
918}
919
920static void enable_cse_idle(void)
921{
922 uint32_t dev_idle_ctrl = read_bar(MMIO_CSE_DEVIDLE);
923 dev_idle_ctrl |= CSE_DEV_IDLE;
924 write_bar(MMIO_CSE_DEVIDLE, dev_idle_ctrl);
925}
926
927enum cse_device_state get_cse_device_state(void)
928{
929 uint32_t dev_idle_ctrl = read_bar(MMIO_CSE_DEVIDLE);
930 if ((dev_idle_ctrl & CSE_DEV_IDLE) == CSE_DEV_IDLE)
931 return DEV_IDLE;
932
933 return DEV_ACTIVE;
934}
935
936static enum cse_device_state ensure_cse_active(void)
937{
938 if (!disable_cse_idle())
939 return DEV_IDLE;
940 pci_or_config32(PCH_DEV_CSE, PCI_COMMAND, PCI_COMMAND_MEMORY |
941 PCI_COMMAND_MASTER);
942
943 return DEV_ACTIVE;
944}
945
946static void ensure_cse_idle(void)
947{
948 enable_cse_idle();
949
950 pci_and_config32(PCH_DEV_CSE, PCI_COMMAND, ~(PCI_COMMAND_MEMORY |
951 PCI_COMMAND_MASTER));
952}
953
954bool set_cse_device_state(enum cse_device_state requested_state)
955{
956 enum cse_device_state current_state = get_cse_device_state();
957
958 if (current_state == requested_state)
959 return true;
960
961 if (requested_state == DEV_ACTIVE)
962 return ensure_cse_active() == requested_state;
963 else
964 ensure_cse_idle();
965
966 return true;
967}
968
Andrey Petrov04a72c42017-03-01 15:51:57 -0800969#if ENV_RAMSTAGE
970
/* Cache the device's BAR0 base in the file-scope cse state so later HECI
   register accesses can use it. */
static void update_sec_bar(struct device *dev)
{
	cse.sec_bar = find_resource(dev, PCI_BASE_ADDRESS_0)->base;
}
975
976static void cse_set_resources(struct device *dev)
977{
Subrata Banik2ee54db2017-03-05 12:37:00 +0530978 if (dev->path.pci.devfn == PCH_DEVFN_CSE)
Andrey Petrov04a72c42017-03-01 15:51:57 -0800979 update_sec_bar(dev);
980
981 pci_dev_set_resources(dev);
982}
983
/* Device operations: standard PCI hooks, except set_resources which also
   records the CSE BAR (see cse_set_resources()). */
static struct device_operations cse_ops = {
	.set_resources		= cse_set_resources,
	.read_resources		= pci_dev_read_resources,
	.enable_resources	= pci_dev_enable_resources,
	.init			= pci_dev_init,
	.ops_pci		= &pci_dev_ops_pci,
};
991
Hannah Williams63142152017-06-12 14:03:18 -0700992static const unsigned short pci_device_ids[] = {
993 PCI_DEVICE_ID_INTEL_APL_CSE0,
994 PCI_DEVICE_ID_INTEL_GLK_CSE0,
Andrey Petrov0405de92017-06-05 13:25:29 -0700995 PCI_DEVICE_ID_INTEL_CNL_CSE0,
Subrata Banikd0586d22017-11-27 13:28:41 +0530996 PCI_DEVICE_ID_INTEL_SKL_CSE0,
Maxim Polyakov571d07d2019-08-22 13:11:32 +0300997 PCI_DEVICE_ID_INTEL_LWB_CSE0,
998 PCI_DEVICE_ID_INTEL_LWB_CSE0_SUPER,
praveen hodagatta praneshe26c4a42018-09-20 03:49:45 +0800999 PCI_DEVICE_ID_INTEL_CNP_H_CSE0,
Aamir Bohra9eac0392018-06-30 12:07:04 +05301000 PCI_DEVICE_ID_INTEL_ICL_CSE0,
Ronak Kanabarda7ffb482019-02-05 01:51:13 +05301001 PCI_DEVICE_ID_INTEL_CMP_CSE0,
Gaggery Tsai12a651c2019-12-05 11:23:20 -08001002 PCI_DEVICE_ID_INTEL_CMP_H_CSE0,
Ravi Sarawadi6b5bf402019-10-21 22:25:04 -07001003 PCI_DEVICE_ID_INTEL_TGL_CSE0,
Jeremy Soller191a8d72021-08-10 14:06:51 -06001004 PCI_DEVICE_ID_INTEL_TGL_H_CSE0,
Tan, Lean Sheng26136092020-01-20 19:13:56 -08001005 PCI_DEVICE_ID_INTEL_MCC_CSE0,
1006 PCI_DEVICE_ID_INTEL_MCC_CSE1,
1007 PCI_DEVICE_ID_INTEL_MCC_CSE2,
1008 PCI_DEVICE_ID_INTEL_MCC_CSE3,
Meera Ravindranath3f4af0d2020-02-12 16:01:22 +05301009 PCI_DEVICE_ID_INTEL_JSP_CSE0,
1010 PCI_DEVICE_ID_INTEL_JSP_CSE1,
1011 PCI_DEVICE_ID_INTEL_JSP_CSE2,
1012 PCI_DEVICE_ID_INTEL_JSP_CSE3,
Subrata Banikf672f7f2020-08-03 14:29:25 +05301013 PCI_DEVICE_ID_INTEL_ADP_P_CSE0,
1014 PCI_DEVICE_ID_INTEL_ADP_P_CSE1,
1015 PCI_DEVICE_ID_INTEL_ADP_P_CSE2,
1016 PCI_DEVICE_ID_INTEL_ADP_P_CSE3,
1017 PCI_DEVICE_ID_INTEL_ADP_S_CSE0,
1018 PCI_DEVICE_ID_INTEL_ADP_S_CSE1,
1019 PCI_DEVICE_ID_INTEL_ADP_S_CSE2,
1020 PCI_DEVICE_ID_INTEL_ADP_S_CSE3,
Varshit Pandyaf4d98fdd22021-01-17 18:39:29 +05301021 PCI_DEVICE_ID_INTEL_ADP_M_CSE0,
1022 PCI_DEVICE_ID_INTEL_ADP_M_CSE1,
1023 PCI_DEVICE_ID_INTEL_ADP_M_CSE2,
1024 PCI_DEVICE_ID_INTEL_ADP_M_CSE3,
Hannah Williams63142152017-06-12 14:03:18 -07001025 0,
1026};
1027
Andrey Petrov04a72c42017-03-01 15:51:57 -08001028static const struct pci_driver cse_driver __pci_driver = {
1029 .ops = &cse_ops,
1030 .vendor = PCI_VENDOR_ID_INTEL,
1031 /* SoC/chipset needs to provide PCI device ID */
Andrey Petrov0405de92017-06-05 13:25:29 -07001032 .devices = pci_device_ids
Andrey Petrov04a72c42017-03-01 15:51:57 -08001033};
1034
1035#endif