/* SPDX-License-Identifier: GPL-2.0-only */

#define __SIMPLE_DEVICE__

#include <stdlib.h>
#include <console/console.h>
#include <device/device.h>
#include <device/mmio.h>
#include <device/pci_def.h>
#include <device/pci_ops.h>
#include <fmap.h>
#include <intelblocks/cse.h>
#include <intelblocks/systemagent.h>
#include <intelblocks/vtd.h>
#include <security/vboot/misc.h>
#include <soc/hsphy.h>
#include <soc/iomap.h>
#include <soc/pci_devs.h>
#include <vb2_api.h>
#include <lib.h>

#define HASHALG_SHA1 0x00000001
#define HASHALG_SHA256 0x00000002
#define HASHALG_SHA384 0x00000003
#define HASHALG_SHA512 0x00000004

#define MAX_HASH_SIZE VB2_SHA512_DIGEST_SIZE
#define GET_IP_FIRMWARE_CMD 0x21
#define HSPHY_PAYLOAD_SIZE (32*KiB)

#define CPU_PID_PCIE_PHYX16_BROADCAST 0x55

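/*
 * Layout of the HSPHY firmware cache kept in the HSPHY_FW FMAP region:
 * a small metadata header followed immediately by the raw firmware blob.
 */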
struct hsphy_cache {
	uint32_t hsphy_size;
	uint8_t hash_algo;
	uint8_t digest[MAX_HASH_SIZE];
	uint8_t hsphy_fw[0];
} __packed;

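/*
 * The HSPHY payload is a stream of push-model records: each record names
 * a register address and how many 32-bit values to write to it. A record
 * with both address and count equal to zero terminates the stream.
 */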
struct ip_push_model {
	uint16_t count;
	uint16_t address;
	uint32_t data[0];
} __packed;

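/*
 * Request the HSPHY firmware payload from CSME over HECI. On success the
 * payload is copied into buf and its size, signing hash and hash type are
 * returned through the out parameters. Returns 0 on success, -1 on failure.
 */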
static int heci_get_hsphy_payload(void *buf, uint32_t *buf_size, uint8_t *hash_buf,
				  uint8_t *hash_alg, uint32_t *status)
{
	size_t reply_size;

	if (!buf || !buf_size || !hash_buf || !hash_alg || !status) {
		printk(BIOS_ERR, "%s: Invalid parameters\n", __func__);
		return -1;
	}

	struct heci_ip_load_request {
		struct mkhi_hdr hdr;
		uint32_t version;
		uint32_t operation;
		uint32_t dram_base_low;
		uint32_t dram_base_high;
		uint32_t memory_size;
		uint32_t reserved;
	} __packed msg = {
		.hdr = {
			.group_id = MKHI_GROUP_ID_BUP_COMMON,
			.command = GET_IP_FIRMWARE_CMD,
		},
		.version = 1,
		.operation = 1,
		.dram_base_low = (uintptr_t)buf,
		.dram_base_high = 0,
		.memory_size = *buf_size,
		.reserved = 0,
	};

	struct heci_ip_load_response {
		struct mkhi_hdr hdr;
		uint32_t payload_size;
		uint32_t reserved[2];
		uint32_t status;
		uint8_t hash_type;
		uint8_t hash[MAX_HASH_SIZE];
	} __packed reply;

	reply_size = sizeof(reply);
	memset(&reply, 0, reply_size);

	printk(BIOS_DEBUG, "HECI: Sending Get IP firmware command\n");

	if (heci_send_receive(&msg, sizeof(msg), &reply, &reply_size, HECI_MKHI_ADDR)) {
		printk(BIOS_ERR, "HECI: Get IP firmware failed\n");
		return -1;
	}

	if (reply.hdr.result) {
		printk(BIOS_ERR, "HECI: Get IP firmware response invalid\n");
		*status = reply.status;
		printk(BIOS_DEBUG, "HECI response:\n");
		hexdump(&reply, sizeof(reply));
		return -1;
	}

	*buf_size = reply.payload_size;
	*hash_alg = reply.hash_type;
	*status = reply.status;
	memcpy(hash_buf, reply.hash, MAX_HASH_SIZE);

	printk(BIOS_DEBUG, "HECI: Get IP firmware success. Response:\n");
	printk(BIOS_DEBUG, " Payload size = 0x%x\n", *buf_size);
	printk(BIOS_DEBUG, " Hash type used for signing payload = 0x%x\n", *hash_alg);

	return 0;
}

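/* Verify the payload against the hash CSME reported for it. */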
static bool verify_hsphy_hash(void *buf, uint32_t buf_size, uint8_t *hash_buf, uint8_t hash_alg)
{
	struct vb2_hash hash;

	switch (hash_alg) {
	case HASHALG_SHA256:
		hash.algo = VB2_HASH_SHA256;
		break;
	case HASHALG_SHA384:
		hash.algo = VB2_HASH_SHA384;
		break;
	case HASHALG_SHA512:
		hash.algo = VB2_HASH_SHA512;
		break;
	case HASHALG_SHA1:
	default:
		printk(BIOS_ERR, "Hash alg %d not supported, trying SHA384\n", hash_alg);
		hash.algo = VB2_HASH_SHA384;
		break;
	}

	memcpy(hash.raw, hash_buf, vb2_digest_size(hash.algo));

	if (vb2_hash_verify(vboot_hwcrypto_allowed(), buf, buf_size, &hash) != VB2_SUCCESS)
		return false;

	return true;
}

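/*
 * Walk the push-model records in the payload and write each data dword to
 * the x16 PCIe PHY broadcast port through REGBAR.
 */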
static void upload_hsphy_to_cpu_pcie(void *buf, uint32_t buf_size)
{
	uint16_t i = 0, j;
	struct ip_push_model *push_model = (struct ip_push_model *)buf;

	while (i < buf_size) {
		i += sizeof(*push_model);

		if ((push_model->address == 0) && (push_model->count == 0))
			break; // End of file

		for (j = 0; j < push_model->count; j++) {
			REGBAR32(CPU_PID_PCIE_PHYX16_BROADCAST,
				 push_model->address) = push_model->data[j];
			i += sizeof(uint32_t);
		}

		push_model = (struct ip_push_model *)(buf + i);
	}
}

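/*
 * Sanity-check a mapped cache: plausible size and hash algorithm, and a
 * digest that actually matches the cached firmware.
 */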
static bool hsphy_cache_valid(struct hsphy_cache *hsphy_fw_cache)
{
	if (!hsphy_fw_cache) {
		printk(BIOS_WARNING, "Failed to mmap HSPHY cache\n");
		return false;
	}

	if (hsphy_fw_cache->hsphy_size == 0 ||
	    hsphy_fw_cache->hsphy_size > HSPHY_PAYLOAD_SIZE ||
	    hsphy_fw_cache->hash_algo <= HASHALG_SHA1 ||
	    hsphy_fw_cache->hash_algo > HASHALG_SHA512)
		return false;

	if (!verify_hsphy_hash(hsphy_fw_cache->hsphy_fw, hsphy_fw_cache->hsphy_size,
			       hsphy_fw_cache->digest, hsphy_fw_cache->hash_algo))
		return false;

	return true;
}

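/*
 * Map the HSPHY_FW region and, if the cached firmware validates, push it
 * to the PHY. Returns true on success.
 */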
static bool load_hsphy_from_cache(void)
{
	struct region_device rdev;
	struct hsphy_cache *hsphy_fw_cache;

	if (fmap_locate_area_as_rdev("HSPHY_FW", &rdev) < 0) {
		printk(BIOS_ERR, "HSPHY: Cannot find HSPHY_FW region\n");
		return false;
	}

	hsphy_fw_cache = (struct hsphy_cache *)rdev_mmap_full(&rdev);

	if (!hsphy_cache_valid(hsphy_fw_cache)) {
		printk(BIOS_ERR, "HSPHY: HSPHY cache invalid\n");
		if (hsphy_fw_cache)
			rdev_munmap(&rdev, hsphy_fw_cache);
		return false;
	}

	printk(BIOS_INFO, "Loading HSPHY FW from cache\n");
	upload_hsphy_to_cpu_pcie(hsphy_fw_cache->hsphy_fw, hsphy_fw_cache->hsphy_size);

	rdev_munmap(&rdev, hsphy_fw_cache);

	return true;
}

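/*
 * Store the firmware obtained from CSME in the HSPHY_FW flash region,
 * together with its size, hash algorithm and digest, so it can be used on
 * boot paths where CSME is unavailable. The write is skipped when the
 * current cache already matches the payload.
 */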
static void cache_hsphy_fw_in_flash(void *buf, uint32_t buf_size, uint8_t *hash_buf,
				    uint8_t hash_alg)
{
	struct region_device rdev;
	struct hsphy_cache *hsphy_fw_cache;
	size_t ret;

	if (!buf || buf_size == 0 || buf_size > (HSPHY_PAYLOAD_SIZE - sizeof(*hsphy_fw_cache))
	    || !hash_buf || hash_alg <= HASHALG_SHA1 || hash_alg > HASHALG_SHA512) {
		printk(BIOS_ERR, "Invalid parameters, HSPHY will not be cached in flash.\n");
		return;
	}

	/* Locate the area as RO rdev, otherwise mmap will fail */
	if (fmap_locate_area_as_rdev("HSPHY_FW", &rdev) < 0) {
		printk(BIOS_ERR, "HSPHY: Could not find HSPHY_FW region\n");
		printk(BIOS_ERR, "HSPHY will not be cached in flash\n");
		return;
	}

	hsphy_fw_cache = (struct hsphy_cache *)rdev_mmap_full(&rdev);

	if (hsphy_cache_valid(hsphy_fw_cache)) {
		/* If the cache is valid, check the buffer against the cache hash */
		if (verify_hsphy_hash(buf, buf_size, hsphy_fw_cache->digest,
				      hsphy_fw_cache->hash_algo)) {
			printk(BIOS_INFO, "HSPHY: cache does not need update\n");
			rdev_munmap(&rdev, hsphy_fw_cache);
			return;
		} else {
			printk(BIOS_INFO, "HSPHY: cache needs update\n");
		}
	} else {
		printk(BIOS_INFO, "HSPHY: cache invalid, updating\n");
	}

	if (region_device_sz(&rdev) < (buf_size + sizeof(*hsphy_fw_cache))) {
		printk(BIOS_ERR, "HSPHY: HSPHY_FW region too small: %zx < %zx\n",
		       region_device_sz(&rdev), buf_size + sizeof(*hsphy_fw_cache));
		printk(BIOS_ERR, "HSPHY will not be cached in flash\n");
		rdev_munmap(&rdev, hsphy_fw_cache);
		return;
	}

	rdev_munmap(&rdev, hsphy_fw_cache);
	hsphy_fw_cache = malloc(sizeof(*hsphy_fw_cache));

	if (!hsphy_fw_cache) {
		printk(BIOS_ERR, "HSPHY: Could not allocate memory for HSPHY cache buffer\n");
		printk(BIOS_ERR, "HSPHY will not be cached in flash\n");
		return;
	}

	hsphy_fw_cache->hsphy_size = buf_size;
	hsphy_fw_cache->hash_algo = hash_alg;

	switch (hash_alg) {
	case HASHALG_SHA256:
		hash_alg = VB2_HASH_SHA256;
		break;
	case HASHALG_SHA384:
		hash_alg = VB2_HASH_SHA384;
		break;
	case HASHALG_SHA512:
		hash_alg = VB2_HASH_SHA512;
		break;
	}

	memset(hsphy_fw_cache->digest, 0, sizeof(hsphy_fw_cache->digest));
	memcpy(hsphy_fw_cache->digest, hash_buf, vb2_digest_size(hash_alg));

	/* Now that we want to write to flash, locate the area as RW rdev */
	if (fmap_locate_area_as_rdev_rw("HSPHY_FW", &rdev) < 0) {
		printk(BIOS_ERR, "HSPHY: Could not find HSPHY_FW region\n");
		printk(BIOS_ERR, "HSPHY will not be cached in flash\n");
		free(hsphy_fw_cache);
		return;
	}

	if (rdev_eraseat(&rdev, 0, region_device_sz(&rdev)) < 0) {
		printk(BIOS_ERR, "Failed to erase HSPHY cache region\n");
		free(hsphy_fw_cache);
		return;
	}

	ret = rdev_writeat(&rdev, hsphy_fw_cache, 0, sizeof(*hsphy_fw_cache));
	if (ret != sizeof(*hsphy_fw_cache)) {
		printk(BIOS_ERR, "Failed to write HSPHY cache metadata\n");
		free(hsphy_fw_cache);
		return;
	}

	ret = rdev_writeat(&rdev, buf, sizeof(*hsphy_fw_cache), buf_size);
	if (ret != buf_size) {
		printk(BIOS_ERR, "Failed to write HSPHY FW to cache\n");
		free(hsphy_fw_cache);
		return;
	}

	printk(BIOS_INFO, "HSPHY cached to flash successfully\n");

	free(hsphy_fw_cache);
}

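/*
 * Obtain a page-aligned buffer for the HECI transfer: carve it out of the
 * VT-d protected DMA buffer when early DMA protection is enabled, otherwise
 * allocate it from the heap.
 */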
static void *allocate_hsphy_buf(void)
{
	void *hsphy_buf;
	size_t dma_buf_size;

	if (CONFIG(ENABLE_EARLY_DMA_PROTECTION)) {
		hsphy_buf = vtd_get_dma_buffer(&dma_buf_size);
		if (!hsphy_buf || dma_buf_size < HSPHY_PAYLOAD_SIZE) {
			printk(BIOS_ERR, "DMA protection enabled but DMA buffer does not"
					 " exist or is too small\n");
			return NULL;
		}

		/* Rather impossible scenario, but check alignment anyway */
		if (!IS_ALIGNED((uintptr_t)hsphy_buf, 4 * KiB) &&
		    (HSPHY_PAYLOAD_SIZE + 4 * KiB) <= dma_buf_size)
			hsphy_buf = (void *)ALIGN_UP((uintptr_t)hsphy_buf, 4 * KiB);
	} else {
		/* Align the buffer to page size, otherwise the HECI command will fail */
		hsphy_buf = memalign(4 * KiB, HSPHY_PAYLOAD_SIZE);

		if (!hsphy_buf) {
			printk(BIOS_ERR, "Failed to allocate memory for HSPHY blob\n");
			return NULL;
		}
	}

	return hsphy_buf;
}

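/*
 * Fetch the HSPHY firmware from CSME (falling back to the flash cache when
 * CSME or memory is unavailable), verify it and upload it to the CPU PCIe
 * Gen5 PHY.
 */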
void load_and_init_hsphy(void)
{
	void *hsphy_buf;
	uint8_t hsphy_hash[MAX_HASH_SIZE] = { 0 };
	uint8_t hash_type;
	uint32_t buf_size = HSPHY_PAYLOAD_SIZE;
	pci_devfn_t dev = PCH_DEV_CSE;
	const uint16_t pci_cmd_bme_mem = PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
	uint32_t status;

	if (!is_devfn_enabled(SA_DEVFN_CPU_PCIE1_0) &&
	    !is_devfn_enabled(SA_DEVFN_CPU_PCIE1_1)) {
		printk(BIOS_DEBUG, "All HSPHY ports disabled, skipping HSPHY loading\n");
		return;
	}

	/*
	 * Try to get HSPHY payload from CSME first, so we can always keep our
	 * HSPHY cache up to date. If we cannot allocate the buffer for it, the
	 * cache is our last resort.
	 */
	hsphy_buf = allocate_hsphy_buf();
	if (!hsphy_buf) {
		printk(BIOS_ERR, "Could not allocate memory for HSPHY blob\n");
		if (CONFIG(INCLUDE_HSPHY_IN_FMAP)) {
			printk(BIOS_INFO, "Trying to load HSPHY FW from cache\n");
			if (load_hsphy_from_cache()) {
				printk(BIOS_INFO, "Successfully loaded HSPHY FW from cache\n");
				return;
			}
			printk(BIOS_ERR, "Failed to load HSPHY FW from cache\n");
		}
		printk(BIOS_ERR, "Aborting HSPHY FW loading, PCIe Gen5 won't work.\n");
		return;
	}

	memset(hsphy_buf, 0, HSPHY_PAYLOAD_SIZE);

	/*
	 * If CSME is not present, try cached HSPHY FW. We still want to use
	 * CSME just in case CSME is updated along with HSPHY FW, so that we
	 * can update our cache if needed.
	 */
	if (!is_cse_enabled()) {
		if (CONFIG(INCLUDE_HSPHY_IN_FMAP)) {
			printk(BIOS_INFO, "Trying to load HSPHY FW from cache"
					  " because CSME is not enabled or not visible\n");
			if (load_hsphy_from_cache()) {
				printk(BIOS_INFO, "Successfully loaded HSPHY FW from cache\n");
				return;
			}
			printk(BIOS_ERR, "Failed to load HSPHY FW from cache\n");
		}
		printk(BIOS_ERR, "%s: CSME not enabled or not visible, but required\n",
		       __func__);
		printk(BIOS_ERR, "Aborting HSPHY FW loading, PCIe Gen5 won't work.\n");
		if (!CONFIG(ENABLE_EARLY_DMA_PROTECTION))
			free(hsphy_buf);
		return;
	}

	/* Ensure BAR, BME and memory space are enabled */
	if ((pci_read_config16(dev, PCI_COMMAND) & pci_cmd_bme_mem) != pci_cmd_bme_mem)
		pci_or_config16(dev, PCI_COMMAND, pci_cmd_bme_mem);

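	/* If the HECI BAR has not been assigned yet, program the default base address */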
	if (pci_read_config32(dev, PCI_BASE_ADDRESS_0) == 0) {
		pci_and_config16(dev, PCI_COMMAND, ~pci_cmd_bme_mem);
		pci_write_config32(dev, PCI_BASE_ADDRESS_0, HECI1_BASE_ADDRESS);
		pci_or_config16(dev, PCI_COMMAND, pci_cmd_bme_mem);
	}

	/* Try to get HSPHY payload from CSME and cache it if possible. */
	if (!heci_get_hsphy_payload(hsphy_buf, &buf_size, hsphy_hash, &hash_type, &status)) {
		if (verify_hsphy_hash(hsphy_buf, buf_size, hsphy_hash, hash_type)) {
			upload_hsphy_to_cpu_pcie(hsphy_buf, buf_size);
			if (CONFIG(INCLUDE_HSPHY_IN_FMAP))
				cache_hsphy_fw_in_flash(hsphy_buf, buf_size, hsphy_hash,
							hash_type);

			if (!CONFIG(ENABLE_EARLY_DMA_PROTECTION))
				free(hsphy_buf);
			return;
		} else {
			printk(BIOS_ERR, "Failed to verify HSPHY FW hash.\n");
		}
	} else {
		printk(BIOS_ERR, "Failed to get HSPHY FW over HECI.\n");
	}

	if (!CONFIG(ENABLE_EARLY_DMA_PROTECTION))
		free(hsphy_buf);

	/* We failed to get HSPHY payload from CSME, cache is our last chance. */
	if (CONFIG(INCLUDE_HSPHY_IN_FMAP) && load_hsphy_from_cache()) {
		printk(BIOS_INFO, "Successfully loaded HSPHY FW from cache\n");
		return;
	}

	printk(BIOS_ERR, "Failed to load HSPHY FW, PCIe Gen5 won't work.\n");
}