Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 1 | /* |
| 2 | * Copyright 2016 The Chromium OS Authors. All rights reserved. |
| 3 | * Use of this source code is governed by a BSD-style license that can be |
| 4 | * found in the LICENSE file. |
| 5 | * |
| 6 | * This is a driver for a SPI interfaced TPM2 device. |
| 7 | * |
 * It assumes that the required SPI interface has been initialized before the
 * driver is started. A 'struct spi_slave' pointer passed at initialization is
 * used to direct traffic to the correct SPI interface. This driver does not
 * provide a way to instantiate multiple TPM devices. Also, to keep things
 * simple, the driver unconditionally uses TPM locality zero.
| 13 | * |
| 14 | * References to documentation are based on the TCG issued "TPM Profile (PTP) |
| 15 | * Specification Revision 00.43". |
| 16 | */ |
| 17 | |
Furquan Shaikh | 260b297 | 2017-04-07 13:26:01 -0700 | [diff] [blame] | 18 | #include <assert.h> |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 19 | #include <commonlib/endian.h> |
| 20 | #include <console/console.h> |
| 21 | #include <delay.h> |
| 22 | #include <endian.h> |
| 23 | #include <string.h> |
| 24 | #include <timer.h> |
Philipp Deppenwiese | d88fb36 | 2017-10-18 20:26:18 +0200 | [diff] [blame] | 25 | #include <security/tpm/tis.h> |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 26 | |
| 27 | #include "tpm.h" |
| 28 | |
/* Base of the locality 0 register block in the TPM SPI address space. */
#define TPM_LOCALITY_0_SPI_BASE 0x00d40000

/* Assorted TPM2 registers for interface type FIFO. */
#define TPM_ACCESS_REG (TPM_LOCALITY_0_SPI_BASE + 0)
#define TPM_STS_REG (TPM_LOCALITY_0_SPI_BASE + 0x18)
#define TPM_DATA_FIFO_REG (TPM_LOCALITY_0_SPI_BASE + 0x24)
#define TPM_DID_VID_REG (TPM_LOCALITY_0_SPI_BASE + 0xf00)
#define TPM_RID_REG (TPM_LOCALITY_0_SPI_BASE + 0xf04)
/* Vendor register used below to read the Cr50 firmware version string. */
#define TPM_FW_VER (TPM_LOCALITY_0_SPI_BASE + 0xf90)

#define CR50_TIMEOUT_INIT_MS 30000 /* Very long timeout for TPM init */

/* SPI slave structure for TPM device. Set once by tpm2_init(). */
static struct spi_slave spi_slave;

/* Cached TPM device identification. Filled in by tpm2_init(). */
static struct tpm2_info tpm_info;

/*
 * TODO(vbendeb): make CONFIG_DEBUG_TPM an int to allow different level of
 * debug traces. Right now it is either 0 or 1.
 */
static const int debug_level_ = CONFIG_DEBUG_TPM;

/*
 * SPI frame header for TPM transactions is 4 bytes in size, it is described
 * in section "6.4.6 Spi Bit Protocol".
 */
typedef struct {
	unsigned char body[4];
} spi_frame_header;
| 60 | |
| 61 | void tpm2_get_info(struct tpm2_info *info) |
| 62 | { |
Patrick Georgi | c9b1359 | 2019-11-29 11:47:47 +0100 | [diff] [blame] | 63 | *info = tpm_info; |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 64 | } |
| 65 | |
Aaron Durbin | 6403167 | 2018-04-21 14:45:32 -0600 | [diff] [blame] | 66 | __weak int tis_plat_irq_status(void) |
Jeffy Chen | 19e3d33 | 2017-03-03 18:24:02 +0800 | [diff] [blame] | 67 | { |
Arthur Heymans | 0ca944b | 2019-11-20 19:51:06 +0100 | [diff] [blame] | 68 | static int warning_displayed; |
Jeffy Chen | 19e3d33 | 2017-03-03 18:24:02 +0800 | [diff] [blame] | 69 | |
Arthur Heymans | 0ca944b | 2019-11-20 19:51:06 +0100 | [diff] [blame] | 70 | if (!warning_displayed) { |
Jeffy Chen | 19e3d33 | 2017-03-03 18:24:02 +0800 | [diff] [blame] | 71 | printk(BIOS_WARNING, "WARNING: tis_plat_irq_status() not implemented, wasting 10ms to wait on Cr50!\n"); |
Arthur Heymans | 0ca944b | 2019-11-20 19:51:06 +0100 | [diff] [blame] | 72 | warning_displayed = 1; |
Jeffy Chen | 19e3d33 | 2017-03-03 18:24:02 +0800 | [diff] [blame] | 73 | } |
| 74 | mdelay(10); |
| 75 | |
| 76 | return 1; |
| 77 | } |
| 78 | |
| 79 | /* |
Elyes HAOUAS | 6688f46 | 2018-08-29 17:22:44 +0200 | [diff] [blame] | 80 | * TPM may trigger a IRQ after finish processing previous transfer. |
| 81 | * Waiting for this IRQ to sync TPM status. |
Jeffy Chen | 19e3d33 | 2017-03-03 18:24:02 +0800 | [diff] [blame] | 82 | * |
| 83 | * Returns 1 on success, 0 on failure (timeout). |
| 84 | */ |
| 85 | static int tpm_sync(void) |
| 86 | { |
| 87 | struct stopwatch sw; |
| 88 | |
Furquan Shaikh | 260b297 | 2017-04-07 13:26:01 -0700 | [diff] [blame] | 89 | stopwatch_init_msecs_expire(&sw, 10); |
Jeffy Chen | 19e3d33 | 2017-03-03 18:24:02 +0800 | [diff] [blame] | 90 | while (!tis_plat_irq_status()) { |
| 91 | if (stopwatch_expired(&sw)) { |
Elyes HAOUAS | 6688f46 | 2018-08-29 17:22:44 +0200 | [diff] [blame] | 92 | printk(BIOS_ERR, "Timeout wait for TPM IRQ!\n"); |
Jeffy Chen | 19e3d33 | 2017-03-03 18:24:02 +0800 | [diff] [blame] | 93 | return 0; |
| 94 | } |
| 95 | } |
| 96 | return 1; |
| 97 | } |
| 98 | |
/*
 * Each TPM2 SPI transaction starts the same: CS is asserted, the 4 byte
 * header is sent to the TPM, the master waits until TPM is ready to continue.
 *
 * On success the SPI bus is left claimed (CS asserted) for the caller to
 * stream the payload; on failure the bus is released before returning.
 *
 * @param read_write nonzero for a read transaction, zero for a write
 * @param bytes      payload length, at most 64
 * @param addr       TPM register address for the frame header
 *
 * Returns 1 on success, 0 on failure (TPM SPI flow control timeout.)
 */
static int start_transaction(int read_write, size_t bytes, unsigned int addr)
{
	spi_frame_header header, header_resp;
	uint8_t byte;
	int i;
	int ret;
	struct stopwatch sw;
	/* Set after the first transaction of the stage; gates tpm_sync(). */
	static int tpm_sync_needed;
	/* Tracks the Cr50 "stays awake" window between transactions. */
	static struct stopwatch wake_up_sw;

	if (CONFIG(TPM_CR50)) {
		/*
		 * First Cr50 access in each coreboot stage where TPM is used will be
		 * prepended by a wake up pulse on the CS line.
		 */
		int wakeup_needed = 1;

		/* Wait for TPM to finish previous transaction if needed */
		if (tpm_sync_needed) {
			tpm_sync();
			/*
			 * During the first invocation of this function on each stage
			 * this if () clause code does not run (as tpm_sync_needed
			 * value is zero), during all following invocations the
			 * stopwatch below is guaranteed to be started.
			 */
			if (!stopwatch_expired(&wake_up_sw))
				wakeup_needed = 0;
		} else {
			tpm_sync_needed = 1;
		}

		if (wakeup_needed) {
			/* Just in case Cr50 is asleep: pulse CS, then give it
			 * time to wake before the real transaction. */
			spi_claim_bus(&spi_slave);
			udelay(1);
			spi_release_bus(&spi_slave);
			udelay(100);
		}

		/*
		 * The Cr50 on H1 does not go to sleep for 1 second after any
		 * SPI slave activity, let's be conservative and limit the
		 * window to 900 ms.
		 */
		stopwatch_init_msecs_expire(&wake_up_sw, 900);
	}

	/*
	 * The first byte of the frame header encodes the transaction type
	 * (read or write) and transfer size (set to length - 1), limited to
	 * 64 bytes.
	 */
	header.body[0] = (read_write ? 0x80 : 0) | 0x40 | (bytes - 1);

	/* The rest of the frame header is the TPM register address,
	 * most significant byte first. */
	for (i = 0; i < 3; i++)
		header.body[i + 1] = (addr >> (8 * (2 - i))) & 0xff;

	/* CS assert wakes up the slave. */
	spi_claim_bus(&spi_slave);

	/*
	 * The TCG TPM over SPI specification introduces the notion of SPI
	 * flow control (Section "6.4.5 Flow Control").
	 *
	 * Again, the slave (TPM device) expects each transaction to start
	 * with a 4 byte header transmitted by master. The header indicates if
	 * the master needs to read or write a register, and the register
	 * address.
	 *
	 * If the slave needs to stall the transaction (for instance it is not
	 * ready to send the register value to the master), it sets the MOSI
	 * line to 0 during the last clock of the 4 byte header. In this case
	 * the master is supposed to start polling the SPI bus, one byte at
	 * time, until the last bit in the received byte (transferred during
	 * the last clock of the byte) is set to 1.
	 *
	 * Due to some SPI controllers' shortcomings (Rockchip comes to
	 * mind...) we transmit the 4 byte header without checking the byte
	 * transmitted by the TPM during the transaction's last byte.
	 *
	 * We know that cr50 is guaranteed to set the flow control bit to 0
	 * during the header transfer. Real TPM2 are fast enough to not require
	 * to stall the master. They might still use this feature, so test the
	 * last bit after shifting in the address bytes.
	 * crosbug.com/p/52132 has been opened to track this.
	 */

	header_resp.body[3] = 0;
	if (CONFIG(TPM_CR50))
		/* Cr50: full-duplex read-back of the header is not usable. */
		ret = spi_xfer(&spi_slave, header.body, sizeof(header.body), NULL, 0);
	else
		ret = spi_xfer(&spi_slave, header.body, sizeof(header.body),
			       header_resp.body, sizeof(header_resp.body));
	if (ret) {
		printk(BIOS_ERR, "SPI-TPM: transfer error\n");
		spi_release_bus(&spi_slave);
		return 0;
	}

	/* Flow control bit already 1: no stall, transaction may proceed. */
	if (header_resp.body[3] & 1)
		return 1;

	/*
	 * Now poll the bus until TPM removes the stall bit. Give it up to 100
	 * ms to sort it out - it could be saving stuff in nvram at some point.
	 */
	stopwatch_init_msecs_expire(&sw, 100);
	do {
		if (stopwatch_expired(&sw)) {
			printk(BIOS_ERR, "TPM flow control failure\n");
			spi_release_bus(&spi_slave);
			return 0;
		}
		/* NOTE(review): spi_xfer() result is ignored here; a failed
		 * read would test a stale 'byte' - confirm acceptable. */
		spi_xfer(&spi_slave, NULL, 0, &byte, 1);
	} while (!(byte & 1));

	return 1;
}
| 225 | |
| 226 | /* |
| 227 | * Print out the contents of a buffer, if debug is enabled. Skip registers |
| 228 | * other than FIFO, unless debug_level_ is 2. |
| 229 | */ |
| 230 | static void trace_dump(const char *prefix, uint32_t reg, |
| 231 | size_t bytes, const uint8_t *buffer, |
| 232 | int force) |
| 233 | { |
Arthur Heymans | 0ca944b | 2019-11-20 19:51:06 +0100 | [diff] [blame] | 234 | static char prev_prefix; |
| 235 | static unsigned int prev_reg; |
| 236 | static int current_char; |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 237 | const int BYTES_PER_LINE = 32; |
| 238 | |
| 239 | if (!force) { |
| 240 | if (!debug_level_) |
| 241 | return; |
| 242 | |
| 243 | if ((debug_level_ < 2) && (reg != TPM_DATA_FIFO_REG)) |
| 244 | return; |
| 245 | } |
| 246 | |
| 247 | /* |
| 248 | * Do not print register address again if the last dump print was for |
| 249 | * that register. |
| 250 | */ |
Arthur Heymans | 0ca944b | 2019-11-20 19:51:06 +0100 | [diff] [blame] | 251 | if (prev_prefix != *prefix || (prev_reg != reg)) { |
| 252 | prev_prefix = *prefix; |
| 253 | prev_reg = reg; |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 254 | printk(BIOS_DEBUG, "\n%s %2.2x:", prefix, reg); |
Arthur Heymans | 0ca944b | 2019-11-20 19:51:06 +0100 | [diff] [blame] | 255 | current_char = 0; |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 256 | } |
| 257 | |
| 258 | if ((reg != TPM_DATA_FIFO_REG) && (bytes == 4)) { |
| 259 | /* |
| 260 | * This must be a regular register address, print the 32 bit |
| 261 | * value. |
| 262 | */ |
| 263 | printk(BIOS_DEBUG, " %8.8x", *(const uint32_t *)buffer); |
| 264 | } else { |
| 265 | int i; |
| 266 | |
| 267 | /* |
| 268 | * Data read from or written to FIFO or not in 4 byte |
| 269 | * quantiites is printed byte at a time. |
| 270 | */ |
| 271 | for (i = 0; i < bytes; i++) { |
Arthur Heymans | 0ca944b | 2019-11-20 19:51:06 +0100 | [diff] [blame] | 272 | if (current_char && |
| 273 | !(current_char % BYTES_PER_LINE)) { |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 274 | printk(BIOS_DEBUG, "\n "); |
Arthur Heymans | 0ca944b | 2019-11-20 19:51:06 +0100 | [diff] [blame] | 275 | current_char = 0; |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 276 | } |
Arthur Heymans | 0ca944b | 2019-11-20 19:51:06 +0100 | [diff] [blame] | 277 | (current_char)++; |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 278 | printk(BIOS_DEBUG, " %2.2x", buffer[i]); |
| 279 | } |
| 280 | } |
| 281 | } |
| 282 | |
/*
 * Once transaction is initiated and the TPM indicated that it is ready to go,
 * write the actual bytes to the register.
 *
 * CS is still asserted by start_transaction(); the caller releases the bus.
 */
static void write_bytes(const void *buffer, size_t bytes)
{
	spi_xfer(&spi_slave, buffer, bytes, NULL, 0);
}
| 291 | |
/*
 * Once transaction is initiated and the TPM indicated that it is ready to go,
 * read the actual bytes from the register.
 *
 * CS is still asserted by start_transaction(); the caller releases the bus.
 */
static void read_bytes(void *buffer, size_t bytes)
{
	spi_xfer(&spi_slave, NULL, 0, buffer, bytes);
}
| 300 | |
/*
 * To write a register, start transaction, transfer data to the TPM, deassert
 * CS when done.
 *
 * Returns one to indicate success, zero to indicate failure.
 */
static int tpm2_write_reg(unsigned int reg_number, const void *buffer, size_t bytes)
{
	trace_dump("W", reg_number, bytes, buffer, 0);
	/* On failure start_transaction() has already released the bus. */
	if (!start_transaction(false, bytes, reg_number))
		return 0;
	write_bytes(buffer, bytes);
	spi_release_bus(&spi_slave);
	return 1;
}
| 316 | |
/*
 * To read a register, start transaction, transfer data from the TPM, deassert
 * CS when done.
 *
 * Returns one to indicate success, zero to indicate failure. In case of
 * failure zero out the user buffer.
 */
static int tpm2_read_reg(unsigned int reg_number, void *buffer, size_t bytes)
{
	if (!start_transaction(true, bytes, reg_number)) {
		/* Callers rely on a zeroed buffer when the read failed. */
		memset(buffer, 0, bytes);
		return 0;
	}
	read_bytes(buffer, bytes);
	spi_release_bus(&spi_slave);
	trace_dump("R", reg_number, bytes, buffer, 0);
	return 1;
}
| 335 | |
/*
 * Status register is accessed often, wrap reading and writing it into
 * dedicated functions.
 */

/* Read TPM_STS; returns 1 on success, 0 on failure (*status zeroed then). */
static int read_tpm_sts(uint32_t *status)
{
	return tpm2_read_reg(TPM_STS_REG, status, sizeof(*status));
}

/* Write TPM_STS; returns 1 on success, 0 on failure. */
static int write_tpm_sts(uint32_t status)
{
	return tpm2_write_reg(TPM_STS_REG, &status, sizeof(status));
}
| 349 | |
/*
 * The TPM may limit the transaction bytes count (burst count) below the 64
 * bytes max. The current value is available as a field of the status
 * register.
 */
static uint32_t get_burst_count(void)
{
	uint32_t status;

	/*
	 * Read result is deliberately unchecked: on failure tpm2_read_reg()
	 * zeroes 'status', so this returns a burst count of zero and the
	 * caller simply keeps polling.
	 */
	read_tpm_sts(&status);
	return (status & TPM_STS_BURST_COUNT_MASK) >> TPM_STS_BURST_COUNT_SHIFT;
}
| 362 | |
/* Read TPM_ACCESS, masking out the establishment bit. */
static uint8_t tpm2_read_access_reg(void)
{
	uint8_t access;
	tpm2_read_reg(TPM_ACCESS_REG, &access, sizeof(access));
	/* We do not care about access establishment bit state. Ignore it. */
	return access & ~TPM_ACCESS_ESTABLISHMENT;
}
| 370 | |
/* Write a single command bit to TPM_ACCESS. */
static void tpm2_write_access_reg(uint8_t cmd)
{
	/* Writes to access register can set only 1 bit at a time. */
	assert (!(cmd & (cmd - 1)));

	tpm2_write_reg(TPM_ACCESS_REG, &cmd, sizeof(cmd));
}
| 378 | |
/*
 * Claim TPM locality zero, waiting for the device to release it first if a
 * long operation is still in flight.
 *
 * Returns 1 when locality 0 was successfully claimed, 0 on failure (timeout
 * or unexpected access register state after the claim attempt).
 */
static int tpm2_claim_locality(void)
{
	uint8_t access;
	struct stopwatch sw;

	/*
	 * Locality is released by TPM reset.
	 *
	 * If locality is taken at this point, this could be due to the fact
	 * that the TPM is performing a long operation and has not processed
	 * reset request yet. We'll wait up to CR50_TIMEOUT_INIT_MS and see if
	 * it releases locality when reset is processed.
	 */
	stopwatch_init_msecs_expire(&sw, CR50_TIMEOUT_INIT_MS);
	do {
		access = tpm2_read_access_reg();
		if (access & TPM_ACCESS_ACTIVE_LOCALITY) {
			/*
			 * Don't bombard the chip with traffic, let it keep
			 * processing the command.
			 */
			mdelay(2);
			continue;
		}

		/*
		 * Ok, the locality is free, TPM must be reset, let's claim
		 * it.
		 */

		tpm2_write_access_reg(TPM_ACCESS_REQUEST_USE);
		access = tpm2_read_access_reg();
		if (access != (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) {
			/* Claim did not take effect - bail out to failure. */
			break;
		}

		printk(BIOS_INFO, "TPM ready after %ld ms\n",
		       stopwatch_duration_msecs(&sw));

		return 1;
	} while (!stopwatch_expired(&sw));

	printk(BIOS_ERR,
	       "Failed to claim locality 0 after %ld ms, status: %#x\n",
	       stopwatch_duration_msecs(&sw), access);

	return 0;
}
| 427 | |
/* Device/vendor ID values of the TPM devices this driver supports. */
static const uint32_t supported_did_vids[] = {
	0x00281ae0, /* H1 based Cr50 security chip. */
	0x0000104a  /* ST33HTPH2E32 */
};
| 433 | |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 434 | int tpm2_init(struct spi_slave *spi_if) |
| 435 | { |
| 436 | uint32_t did_vid, status; |
| 437 | uint8_t cmd; |
Vadim Bendebury | 9a506d5 | 2017-10-25 15:45:00 -0700 | [diff] [blame] | 438 | int retries; |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 439 | |
Patrick Georgi | c9b1359 | 2019-11-29 11:47:47 +0100 | [diff] [blame] | 440 | memcpy(&spi_slave, spi_if, sizeof(*spi_if)); |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 441 | |
Elyes HAOUAS | 6688f46 | 2018-08-29 17:22:44 +0200 | [diff] [blame] | 442 | /* clear any pending IRQs */ |
Shelley Chen | f2e7b37 | 2017-12-15 15:25:08 -0800 | [diff] [blame] | 443 | tis_plat_irq_status(); |
| 444 | |
Vadim Bendebury | 731ef9b | 2016-12-15 21:49:23 -0800 | [diff] [blame] | 445 | /* |
Vadim Bendebury | 9a506d5 | 2017-10-25 15:45:00 -0700 | [diff] [blame] | 446 | * 150 ms should be enough to synchronize with the TPM even under the |
| 447 | * worst nested reset request conditions. In vast majority of cases |
| 448 | * there would be no wait at all. |
Vadim Bendebury | 731ef9b | 2016-12-15 21:49:23 -0800 | [diff] [blame] | 449 | */ |
Vadim Bendebury | 9a506d5 | 2017-10-25 15:45:00 -0700 | [diff] [blame] | 450 | printk(BIOS_INFO, "Probing TPM: "); |
| 451 | for (retries = 15; retries > 0; retries--) { |
| 452 | int i; |
| 453 | |
Elyes HAOUAS | 6688f46 | 2018-08-29 17:22:44 +0200 | [diff] [blame] | 454 | /* In case of failure to read div_vid is set to zero. */ |
Vadim Bendebury | 9a506d5 | 2017-10-25 15:45:00 -0700 | [diff] [blame] | 455 | tpm2_read_reg(TPM_DID_VID_REG, &did_vid, sizeof(did_vid)); |
| 456 | |
| 457 | for (i = 0; i < ARRAY_SIZE(supported_did_vids); i++) |
| 458 | if (did_vid == supported_did_vids[i]) |
Elyes HAOUAS | 6688f46 | 2018-08-29 17:22:44 +0200 | [diff] [blame] | 459 | break; /* TPM is up and ready. */ |
Vadim Bendebury | 9a506d5 | 2017-10-25 15:45:00 -0700 | [diff] [blame] | 460 | |
| 461 | if (i < ARRAY_SIZE(supported_did_vids)) |
| 462 | break; |
| 463 | |
| 464 | /* TPM might be resetting, let's retry in a bit. */ |
| 465 | mdelay(10); |
| 466 | printk(BIOS_INFO, "."); |
| 467 | } |
| 468 | |
| 469 | if (!retries) { |
| 470 | printk(BIOS_ERR, "\n%s: Failed to connect to the TPM\n", |
| 471 | __func__); |
Vadim Bendebury | 731ef9b | 2016-12-15 21:49:23 -0800 | [diff] [blame] | 472 | return -1; |
Vadim Bendebury | 9a506d5 | 2017-10-25 15:45:00 -0700 | [diff] [blame] | 473 | } |
| 474 | |
| 475 | printk(BIOS_INFO, " done!\n"); |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 476 | |
Patrick Rudolph | 7bcd9a1 | 2020-03-20 09:55:43 +0100 | [diff] [blame^] | 477 | // FIXME: Move this to tpm_setup() |
| 478 | if (ENV_SEPARATE_VERSTAGE || ENV_BOOTBLOCK || !CONFIG(VBOOT)) |
Vadim Bendebury | 8727e64 | 2017-11-16 21:00:41 -0800 | [diff] [blame] | 479 | /* |
| 480 | * Claim locality 0, do it only during the first |
| 481 | * initialization after reset. |
| 482 | */ |
| 483 | if (!tpm2_claim_locality()) |
| 484 | return -1; |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 485 | |
Patrick Rudolph | 7bcd9a1 | 2020-03-20 09:55:43 +0100 | [diff] [blame^] | 486 | if (!read_tpm_sts(&status)) { |
| 487 | printk(BIOS_ERR, "Reading status reg failed\n"); |
| 488 | return -1; |
| 489 | } |
Furquan Shaikh | 260b297 | 2017-04-07 13:26:01 -0700 | [diff] [blame] | 490 | if ((status & TPM_STS_FAMILY_MASK) != TPM_STS_FAMILY_TPM_2_0) { |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 491 | printk(BIOS_ERR, "unexpected TPM family value, status: %#x\n", |
| 492 | status); |
| 493 | return -1; |
| 494 | } |
| 495 | |
| 496 | /* |
| 497 | * Locality claimed, read the revision value and set up the tpm_info |
| 498 | * structure. |
| 499 | */ |
| 500 | tpm2_read_reg(TPM_RID_REG, &cmd, sizeof(cmd)); |
Patrick Georgi | c9b1359 | 2019-11-29 11:47:47 +0100 | [diff] [blame] | 501 | tpm_info.vendor_id = did_vid & 0xffff; |
| 502 | tpm_info.device_id = did_vid >> 16; |
| 503 | tpm_info.revision = cmd; |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 504 | |
| 505 | printk(BIOS_INFO, "Connected to device vid:did:rid of %4.4x:%4.4x:%2.2x\n", |
Patrick Georgi | c9b1359 | 2019-11-29 11:47:47 +0100 | [diff] [blame] | 506 | tpm_info.vendor_id, tpm_info.device_id, tpm_info.revision); |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 507 | |
Vadim Bendebury | 58826fc | 2016-06-23 18:17:33 -0700 | [diff] [blame] | 508 | /* Let's report device FW version if available. */ |
Patrick Georgi | c9b1359 | 2019-11-29 11:47:47 +0100 | [diff] [blame] | 509 | if (tpm_info.vendor_id == 0x1ae0) { |
Vadim Bendebury | 58826fc | 2016-06-23 18:17:33 -0700 | [diff] [blame] | 510 | int chunk_count = 0; |
Vadim Bendebury | 9e561f8 | 2016-07-31 11:19:20 -0700 | [diff] [blame] | 511 | size_t chunk_size; |
| 512 | /* |
| 513 | * let's read 50 bytes at a time; leave room for the trailing |
| 514 | * zero. |
| 515 | */ |
| 516 | char vstr[51]; |
| 517 | |
| 518 | chunk_size = sizeof(vstr) - 1; |
Vadim Bendebury | 58826fc | 2016-06-23 18:17:33 -0700 | [diff] [blame] | 519 | |
| 520 | printk(BIOS_INFO, "Firmware version: "); |
| 521 | |
| 522 | /* |
| 523 | * Does not really matter what's written, this just makes sure |
| 524 | * the version is reported from the beginning. |
| 525 | */ |
Vadim Bendebury | 9e561f8 | 2016-07-31 11:19:20 -0700 | [diff] [blame] | 526 | tpm2_write_reg(TPM_FW_VER, &chunk_size, 1); |
Vadim Bendebury | 58826fc | 2016-06-23 18:17:33 -0700 | [diff] [blame] | 527 | |
Vadim Bendebury | 9e561f8 | 2016-07-31 11:19:20 -0700 | [diff] [blame] | 528 | /* Print it out in sizeof(vstr) - 1 byte chunks. */ |
| 529 | vstr[chunk_size] = 0; |
Vadim Bendebury | 58826fc | 2016-06-23 18:17:33 -0700 | [diff] [blame] | 530 | do { |
Vadim Bendebury | 9e561f8 | 2016-07-31 11:19:20 -0700 | [diff] [blame] | 531 | tpm2_read_reg(TPM_FW_VER, vstr, chunk_size); |
Vadim Bendebury | 58826fc | 2016-06-23 18:17:33 -0700 | [diff] [blame] | 532 | printk(BIOS_INFO, "%s", vstr); |
| 533 | |
| 534 | /* |
Vadim Bendebury | 9e561f8 | 2016-07-31 11:19:20 -0700 | [diff] [blame] | 535 | * While string is not over, and is no longer than 300 |
Vadim Bendebury | 58826fc | 2016-06-23 18:17:33 -0700 | [diff] [blame] | 536 | * characters. |
Vadim Bendebury | 58826fc | 2016-06-23 18:17:33 -0700 | [diff] [blame] | 537 | */ |
Vadim Bendebury | 9e561f8 | 2016-07-31 11:19:20 -0700 | [diff] [blame] | 538 | } while (vstr[chunk_size - 1] && |
| 539 | (chunk_count++ < (300 / chunk_size))); |
Vadim Bendebury | 58826fc | 2016-06-23 18:17:33 -0700 | [diff] [blame] | 540 | |
| 541 | printk(BIOS_INFO, "\n"); |
| 542 | } |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 543 | return 0; |
| 544 | } |
| 545 | |
| 546 | /* |
| 547 | * This is in seconds, certain TPM commands, like key generation, can take |
| 548 | * long time to complete. |
| 549 | * |
| 550 | * Returns one to indicate success, zero (not yet implemented) to indicate |
| 551 | * failure. |
| 552 | */ |
| 553 | #define MAX_STATUS_TIMEOUT 120 |
| 554 | static int wait_for_status(uint32_t status_mask, uint32_t status_expected) |
| 555 | { |
| 556 | uint32_t status; |
| 557 | struct stopwatch sw; |
| 558 | |
| 559 | stopwatch_init_usecs_expire(&sw, MAX_STATUS_TIMEOUT * 1000 * 1000); |
| 560 | do { |
| 561 | udelay(1000); |
| 562 | if (stopwatch_expired(&sw)) { |
| 563 | printk(BIOS_ERR, "failed to get expected status %x\n", |
| 564 | status_expected); |
| 565 | return false; |
| 566 | } |
| 567 | read_tpm_sts(&status); |
| 568 | } while ((status & status_mask) != status_expected); |
| 569 | |
| 570 | return 1; |
| 571 | } |
| 572 | |
| 573 | enum fifo_transfer_direction { |
| 574 | fifo_transmit = 0, |
| 575 | fifo_receive = 1 |
| 576 | }; |
| 577 | |
/*
 * Union of receive/transmit buffer pointers; allows avoiding a cast that
 * would strip 'const' from transmit buffers.
 */
union fifo_transfer_buffer {
	uint8_t *rx_buffer;
	const uint8_t *tx_buffer;
};
| 583 | |
| 584 | /* |
| 585 | * Transfer requested number of bytes to or from TPM FIFO, accounting for the |
| 586 | * current burst count value. |
| 587 | */ |
| 588 | static void fifo_transfer(size_t transfer_size, |
| 589 | union fifo_transfer_buffer buffer, |
| 590 | enum fifo_transfer_direction direction) |
| 591 | { |
| 592 | size_t transaction_size; |
| 593 | size_t burst_count; |
| 594 | size_t handled_so_far = 0; |
| 595 | |
| 596 | do { |
| 597 | do { |
| 598 | /* Could be zero when TPM is busy. */ |
| 599 | burst_count = get_burst_count(); |
| 600 | } while (!burst_count); |
| 601 | |
| 602 | transaction_size = transfer_size - handled_so_far; |
| 603 | transaction_size = MIN(transaction_size, burst_count); |
| 604 | |
| 605 | /* |
| 606 | * The SPI frame header does not allow to pass more than 64 |
| 607 | * bytes. |
| 608 | */ |
| 609 | transaction_size = MIN(transaction_size, 64); |
| 610 | |
| 611 | if (direction == fifo_receive) |
| 612 | tpm2_read_reg(TPM_DATA_FIFO_REG, |
| 613 | buffer.rx_buffer + handled_so_far, |
| 614 | transaction_size); |
| 615 | else |
| 616 | tpm2_write_reg(TPM_DATA_FIFO_REG, |
| 617 | buffer.tx_buffer + handled_so_far, |
| 618 | transaction_size); |
| 619 | |
| 620 | handled_so_far += transaction_size; |
| 621 | |
| 622 | } while (handled_so_far != transfer_size); |
| 623 | } |
| 624 | |
| 625 | size_t tpm2_process_command(const void *tpm2_command, size_t command_size, |
| 626 | void *tpm2_response, size_t max_response) |
| 627 | { |
| 628 | uint32_t status; |
| 629 | uint32_t expected_status_bits; |
| 630 | size_t payload_size; |
| 631 | size_t bytes_to_go; |
| 632 | const uint8_t *cmd_body = tpm2_command; |
| 633 | uint8_t *rsp_body = tpm2_response; |
| 634 | union fifo_transfer_buffer fifo_buffer; |
| 635 | const int HEADER_SIZE = 6; |
| 636 | |
Vadim Bendebury | 731ef9b | 2016-12-15 21:49:23 -0800 | [diff] [blame] | 637 | /* Do not try using an uninitialized TPM. */ |
Patrick Georgi | c9b1359 | 2019-11-29 11:47:47 +0100 | [diff] [blame] | 638 | if (!tpm_info.vendor_id) |
Vadim Bendebury | 731ef9b | 2016-12-15 21:49:23 -0800 | [diff] [blame] | 639 | return 0; |
| 640 | |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 641 | /* Skip the two byte tag, read the size field. */ |
| 642 | payload_size = read_be32(cmd_body + 2); |
| 643 | |
| 644 | /* Sanity check. */ |
| 645 | if (payload_size != command_size) { |
| 646 | printk(BIOS_ERR, |
| 647 | "Command size mismatch: encoded %zd != requested %zd\n", |
| 648 | payload_size, command_size); |
| 649 | trace_dump("W", TPM_DATA_FIFO_REG, command_size, cmd_body, 1); |
| 650 | printk(BIOS_DEBUG, "\n"); |
| 651 | return 0; |
| 652 | } |
| 653 | |
| 654 | /* Let the TPM know that the command is coming. */ |
Furquan Shaikh | 260b297 | 2017-04-07 13:26:01 -0700 | [diff] [blame] | 655 | write_tpm_sts(TPM_STS_COMMAND_READY); |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 656 | |
| 657 | /* |
Elyes HAOUAS | 6688f46 | 2018-08-29 17:22:44 +0200 | [diff] [blame] | 658 | * TPM commands and responses written to and read from the FIFO |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 659 | * register (0x24) are datagrams of variable size, prepended by a 6 |
| 660 | * byte header. |
| 661 | * |
| 662 | * The specification description of the state machine is a bit vague, |
| 663 | * but from experience it looks like there is no need to wait for the |
| 664 | * sts.expect bit to be set, at least with the 9670 and cr50 devices. |
| 665 | * Just write the command into FIFO, making sure not to exceed the |
| 666 | * burst count or the maximum PDU size, whatever is smaller. |
| 667 | */ |
| 668 | fifo_buffer.tx_buffer = cmd_body; |
| 669 | fifo_transfer(command_size, fifo_buffer, fifo_transmit); |
| 670 | |
| 671 | /* Now tell the TPM it can start processing the command. */ |
Furquan Shaikh | 260b297 | 2017-04-07 13:26:01 -0700 | [diff] [blame] | 672 | write_tpm_sts(TPM_STS_GO); |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 673 | |
| 674 | /* Now wait for it to report that the response is ready. */ |
Furquan Shaikh | 260b297 | 2017-04-07 13:26:01 -0700 | [diff] [blame] | 675 | expected_status_bits = TPM_STS_VALID | TPM_STS_DATA_AVAIL; |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 676 | if (!wait_for_status(expected_status_bits, expected_status_bits)) { |
| 677 | /* |
| 678 | * If timed out, which should never happen, let's at least |
| 679 | * print out the offending command. |
| 680 | */ |
| 681 | trace_dump("W", TPM_DATA_FIFO_REG, command_size, cmd_body, 1); |
| 682 | printk(BIOS_DEBUG, "\n"); |
| 683 | return 0; |
| 684 | } |
| 685 | |
| 686 | /* |
| 687 | * The response is ready, let's read it. First we read the FIFO |
| 688 | * payload header, to see how much data to expect. The response header |
| 689 | * size is fixed to six bytes, the total payload size is stored in |
| 690 | * network order in the last four bytes. |
| 691 | */ |
| 692 | tpm2_read_reg(TPM_DATA_FIFO_REG, rsp_body, HEADER_SIZE); |
| 693 | |
| 694 | /* Find out the total payload size, skipping the two byte tag. */ |
| 695 | payload_size = read_be32(rsp_body + 2); |
| 696 | |
| 697 | if (payload_size > max_response) { |
| 698 | /* |
| 699 | * TODO(vbendeb): at least drain the FIFO here or somehow let |
| 700 | * the TPM know that the response can be dropped. |
| 701 | */ |
Elyes HAOUAS | 6688f46 | 2018-08-29 17:22:44 +0200 | [diff] [blame] | 702 | printk(BIOS_ERR, " TPM response too long (%zd bytes)", |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 703 | payload_size); |
| 704 | return 0; |
| 705 | } |
| 706 | |
| 707 | /* |
| 708 | * Now let's read all but the last byte in the FIFO to make sure the |
| 709 | * status register is showing correct flow control bits: 'more data' |
| 710 | * until the last byte and then 'no more data' once the last byte is |
| 711 | * read. |
| 712 | */ |
| 713 | bytes_to_go = payload_size - 1 - HEADER_SIZE; |
| 714 | fifo_buffer.rx_buffer = rsp_body + HEADER_SIZE; |
| 715 | fifo_transfer(bytes_to_go, fifo_buffer, fifo_receive); |
| 716 | |
| 717 | /* Verify that there is still data to read. */ |
| 718 | read_tpm_sts(&status); |
| 719 | if ((status & expected_status_bits) != expected_status_bits) { |
| 720 | printk(BIOS_ERR, "unexpected intermediate status %#x\n", |
| 721 | status); |
| 722 | return 0; |
| 723 | } |
| 724 | |
| 725 | /* Read the last byte of the PDU. */ |
| 726 | tpm2_read_reg(TPM_DATA_FIFO_REG, rsp_body + payload_size - 1, 1); |
| 727 | |
| 728 | /* Terminate the dump, if enabled. */ |
| 729 | if (debug_level_) |
| 730 | printk(BIOS_DEBUG, "\n"); |
| 731 | |
| 732 | /* Verify that 'data available' is not asseretd any more. */ |
| 733 | read_tpm_sts(&status); |
Furquan Shaikh | 260b297 | 2017-04-07 13:26:01 -0700 | [diff] [blame] | 734 | if ((status & expected_status_bits) != TPM_STS_VALID) { |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 735 | printk(BIOS_ERR, "unexpected final status %#x\n", status); |
| 736 | return 0; |
| 737 | } |
| 738 | |
| 739 | /* Move the TPM back to idle state. */ |
Furquan Shaikh | 260b297 | 2017-04-07 13:26:01 -0700 | [diff] [blame] | 740 | write_tpm_sts(TPM_STS_COMMAND_READY); |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 741 | |
| 742 | return payload_size; |
| 743 | } |