blob: dacb64399db885e99d244a2210ddcfac9610ca0e [file] [log] [blame]
Michał Żygowski9b0f1692022-05-05 13:21:01 +02001/* SPDX-License-Identifier: GPL-2.0-only */
2
3#define __SIMPLE_DEVICE__
4
5#include <stdlib.h>
6#include <console/console.h>
7#include <device/device.h>
8#include <device/mmio.h>
9#include <device/pci_def.h>
10#include <device/pci_ops.h>
11#include <intelblocks/cse.h>
12#include <intelblocks/systemagent.h>
Michał Żygowskia49945e2022-10-19 09:55:18 +020013#include <intelblocks/vtd.h>
Julius Wernerd96ca242022-08-08 18:08:35 -070014#include <security/vboot/misc.h>
Michał Żygowski9b0f1692022-05-05 13:21:01 +020015#include <soc/hsphy.h>
16#include <soc/iomap.h>
17#include <soc/pci_devs.h>
18#include <vb2_api.h>
19#include <lib.h>
20
21#define HASHALG_SHA1 0x00000001
22#define HASHALG_SHA256 0x00000002
23#define HASHALG_SHA384 0x00000003
24#define HASHALG_SHA512 0x00000004
25
26#define MAX_HASH_SIZE VB2_SHA512_DIGEST_SIZE
27#define GET_IP_FIRMWARE_CMD 0x21
28#define HSPHY_PAYLOAD_SIZE (32*KiB)
29
30#define CPU_PID_PCIE_PHYX16_BROADCAST 0x55
31
/*
 * HSPHY firmware "push model" record: `count` 32-bit words of `data` that
 * are written one by one to the sideband register at `address`. Records
 * are packed back-to-back in the payload; a record with address == 0 and
 * count == 0 terminates the stream (see upload_hsphy_to_cpu_pcie()).
 */
struct ip_push_model {
	uint16_t count;
	uint16_t address;
	uint32_t data[];	/* C99 flexible array member (was GNU-extension data[0]) */
} __packed;
37
38static int heci_get_hsphy_payload(void *buf, uint32_t *buf_size, uint8_t *hash_buf,
39 uint8_t *hash_alg, uint32_t *status)
40{
41 size_t reply_size;
42
43 struct heci_ip_load_request {
44 struct mkhi_hdr hdr;
45 uint32_t version;
46 uint32_t operation;
47 uint32_t dram_base_low;
48 uint32_t dram_base_high;
49 uint32_t memory_size;
50 uint32_t reserved;
51 } __packed msg = {
52 .hdr = {
53 .group_id = MKHI_GROUP_ID_BUP_COMMON,
54 .command = GET_IP_FIRMWARE_CMD,
55 },
56 .version = 1,
57 .operation = 1,
58 .dram_base_low = (uintptr_t)buf,
59 .dram_base_high = 0,
60 .memory_size = *buf_size,
61 .reserved = 0,
62 };
63
64 struct heci_ip_load_response {
65 struct mkhi_hdr hdr;
66 uint32_t payload_size;
67 uint32_t reserved[2];
68 uint32_t status;
69 uint8_t hash_type;
70 uint8_t hash[MAX_HASH_SIZE];
71 } __packed reply;
72
73 if (!buf || !buf_size || !hash_buf || !hash_alg) {
74 printk(BIOS_ERR, "%s: Invalid parameters\n", __func__);
75 return -1;
76 }
77
78 reply_size = sizeof(reply);
79 memset(&reply, 0, reply_size);
80
81 printk(BIOS_DEBUG, "HECI: Sending Get IP firmware command\n");
82
83 if (heci_send_receive(&msg, sizeof(msg), &reply, &reply_size, HECI_MKHI_ADDR)) {
84 printk(BIOS_ERR, "HECI: Get IP firmware failed\n");
85 return -1;
86 }
87
88 if (reply.hdr.result) {
89 printk(BIOS_ERR, "HECI: Get IP firmware response invalid\n");
90 *status = reply.status;
91 printk(BIOS_DEBUG, "HECI response:\n");
92 hexdump(&reply, sizeof(reply));
93 return -1;
94 }
95
96 *buf_size = reply.payload_size;
97 *hash_alg = reply.hash_type;
98 *status = reply.status;
99 memcpy(hash_buf, reply.hash, MAX_HASH_SIZE);
100
101 printk(BIOS_DEBUG, "HECI: Get IP firmware success. Response:\n");
102 printk(BIOS_DEBUG, " Payload size = 0x%x\n", *buf_size);
103 printk(BIOS_DEBUG, " Hash type used for signing payload = 0x%x\n", *hash_alg);
104
105 return 0;
106}
107
108static int verify_hsphy_hash(void *buf, uint32_t buf_size, uint8_t *hash_buf, uint8_t hash_alg)
109{
Julius Wernerd96ca242022-08-08 18:08:35 -0700110 struct vb2_hash hash;
Michał Żygowski9b0f1692022-05-05 13:21:01 +0200111
112 switch (hash_alg) {
113 case HASHALG_SHA256:
Julius Wernerd96ca242022-08-08 18:08:35 -0700114 hash.algo = VB2_HASH_SHA256;
Michał Żygowski9b0f1692022-05-05 13:21:01 +0200115 break;
116 case HASHALG_SHA384:
Julius Wernerd96ca242022-08-08 18:08:35 -0700117 hash.algo = VB2_HASH_SHA384;
Michał Żygowski9b0f1692022-05-05 13:21:01 +0200118 break;
119 case HASHALG_SHA512:
Julius Wernerd96ca242022-08-08 18:08:35 -0700120 hash.algo = VB2_HASH_SHA512;
Michał Żygowski9b0f1692022-05-05 13:21:01 +0200121 break;
122 case HASHALG_SHA1:
123 default:
124 printk(BIOS_ERR, "Hash alg %d not supported, trying SHA384\n", hash_alg);
Julius Wernerd96ca242022-08-08 18:08:35 -0700125 hash.algo = VB2_HASH_SHA384;
Michał Żygowski9b0f1692022-05-05 13:21:01 +0200126 break;
127 }
Julius Wernerd96ca242022-08-08 18:08:35 -0700128 memcpy(hash.raw, hash_buf, vb2_digest_size(hash.algo));
Michał Żygowski9b0f1692022-05-05 13:21:01 +0200129
Julius Wernerd96ca242022-08-08 18:08:35 -0700130 if (vb2_hash_verify(vboot_hwcrypto_allowed(), buf, buf_size, &hash) != VB2_SUCCESS) {
Michał Żygowski9b0f1692022-05-05 13:21:01 +0200131 printk(BIOS_ERR, "HSPHY SHA hashes do not match\n");
Michał Żygowski9b0f1692022-05-05 13:21:01 +0200132 return -1;
133 }
134
135 return 0;
136}
137
138static void upload_hsphy_to_cpu_pcie(void *buf, uint32_t buf_size)
139{
140 uint16_t i = 0, j;
141 struct ip_push_model *push_model = (struct ip_push_model *)buf;
142
143 while (i < buf_size) {
144 i += sizeof(*push_model);
145
146 if ((push_model->address == 0) && (push_model->count == 0))
147 break; // End of file
148
149 for (j = 0; j < push_model->count; j++) {
150 REGBAR32(CPU_PID_PCIE_PHYX16_BROADCAST,
151 push_model->address) = push_model->data[j];
152 i += sizeof(uint32_t);
153 }
154
155 push_model = (struct ip_push_model *)(buf + i);
156 }
157}
158
/*
 * Load the HSPHY firmware: fetch the payload from the CSME over HECI,
 * verify its hash, and push it to the CPU PCIe Gen5 PHY through the
 * sideband broadcast port. On any failure PCIe Gen5 ports will not work;
 * errors are logged and the function returns without retry.
 */
void load_and_init_hsphy(void)
{
	void *hsphy_buf;
	uint8_t hsphy_hash[MAX_HASH_SIZE] = { 0 };
	uint8_t hash_type;
	uint32_t buf_size = HSPHY_PAYLOAD_SIZE;
	size_t dma_buf_size;
	pci_devfn_t dev = PCH_DEV_CSE;
	const uint16_t pci_cmd_bme_mem = PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
	uint32_t status;

	/* Nothing to do when both CPU PCIe Gen5 root ports are disabled. */
	if (!is_devfn_enabled(SA_DEVFN_CPU_PCIE1_0) &&
	    !is_devfn_enabled(SA_DEVFN_CPU_PCIE1_1)) {
		printk(BIOS_DEBUG, "All HSPHY ports disabled, skipping HSPHY loading\n");
		return;
	}

	if (CONFIG(ENABLE_EARLY_DMA_PROTECTION)) {
		/* With early DMA protection the CSME must write into the
		   VT-d DMA buffer, not heap memory. */
		hsphy_buf = vtd_get_dma_buffer(&dma_buf_size);
		if (!hsphy_buf || dma_buf_size < HSPHY_PAYLOAD_SIZE) {
			printk(BIOS_ERR, "DMA protection enabled but DMA buffer does not"
					 " exist or is too small\n");
			printk(BIOS_ERR, "Aborting HSPHY firmware loading, "
					 "PCIe Gen5 won't work.\n");
			return;
		}

		/* Rather impossible scenario, but check alignment anyways */
		/* Only realign when the buffer still fits after rounding up;
		   otherwise proceed with the buffer as-is. */
		if (!IS_ALIGNED((uintptr_t)hsphy_buf, 4 * KiB) &&
		    (HSPHY_PAYLOAD_SIZE + 4 * KiB) <= dma_buf_size)
			hsphy_buf = (void *)ALIGN_UP((uintptr_t)hsphy_buf, 4 * KiB);
	} else {
		/* Align the buffer to page size, otherwise the HECI command will fail */
		hsphy_buf = memalign(4 * KiB, HSPHY_PAYLOAD_SIZE);

		if (!hsphy_buf) {
			printk(BIOS_ERR, "Could not allocate memory for HSPHY blob\n");
			printk(BIOS_ERR, "Aborting HSPHY firmware loading, "
					 "PCIe Gen5 won't work.\n");
			return;
		}
	}

	memset(hsphy_buf, 0, HSPHY_PAYLOAD_SIZE);

	if (!is_cse_enabled()) {
		printk(BIOS_ERR, "%s: CSME not enabled or not visible, but required\n",
		       __func__);
		printk(BIOS_ERR, "Aborting HSPHY firmware loading, PCIe Gen5 won't work.\n");
		goto hsphy_exit;
	}

	/* Ensure BAR, BME and memory space are enabled */
	if ((pci_read_config16(dev, PCI_COMMAND) & pci_cmd_bme_mem) != pci_cmd_bme_mem)
		pci_or_config16(dev, PCI_COMMAND, pci_cmd_bme_mem);

	/* Program a temporary BAR for HECI1 if none is assigned yet; decode
	   is disabled while the BAR is written, then re-enabled. */
	if (pci_read_config32(dev, PCI_BASE_ADDRESS_0) == 0) {
		pci_and_config16(dev, PCI_COMMAND, ~pci_cmd_bme_mem);
		pci_write_config32(dev, PCI_BASE_ADDRESS_0, HECI1_BASE_ADDRESS);
		pci_or_config16(dev, PCI_COMMAND, pci_cmd_bme_mem);
	}

	if (heci_get_hsphy_payload(hsphy_buf, &buf_size, hsphy_hash, &hash_type, &status)) {
		printk(BIOS_ERR, "Aborting HSPHY firmware loading, PCIe Gen5 won't work.\n");
		goto hsphy_exit;
	}

	if (verify_hsphy_hash(hsphy_buf, buf_size, hsphy_hash, hash_type)) {
		printk(BIOS_ERR, "Aborting HSPHY firmware loading, PCIe Gen5 won't work.\n");
		goto hsphy_exit;
	}

	upload_hsphy_to_cpu_pcie(hsphy_buf, buf_size);

hsphy_exit:
	/* The VT-d DMA buffer is not heap-owned; only free the memalign'd buffer. */
	if (!CONFIG(ENABLE_EARLY_DMA_PROTECTION))
		free(hsphy_buf);
}
Michał Żygowski9b0f1692022-05-05 13:21:01 +0200237}