Patrick Georgi | 593124d | 2020-05-10 19:44:08 +0200 | [diff] [blame] | 1 | /* SPDX-License-Identifier: BSD-3-Clause */ |
| 2 | /* This is a driver for a SPI interfaced TPM2 device. |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 3 | * |
 * It assumes that the required SPI interface has been initialized before the
 * driver is started. A 'struct spi_slave' pointer passed at initialization is
 * used to direct traffic to the correct SPI interface. This driver does not
 * provide a way to instantiate multiple TPM devices. Also, to keep things
 * simple, the driver unconditionally uses TPM locality zero.
| 9 | * |
| 10 | * References to documentation are based on the TCG issued "TPM Profile (PTP) |
| 11 | * Specification Revision 00.43". |
| 12 | */ |
| 13 | |
Furquan Shaikh | 260b297 | 2017-04-07 13:26:01 -0700 | [diff] [blame] | 14 | #include <assert.h> |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 15 | #include <commonlib/endian.h> |
| 16 | #include <console/console.h> |
| 17 | #include <delay.h> |
| 18 | #include <endian.h> |
| 19 | #include <string.h> |
| 20 | #include <timer.h> |
Philipp Deppenwiese | d88fb36 | 2017-10-18 20:26:18 +0200 | [diff] [blame] | 21 | #include <security/tpm/tis.h> |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 22 | |
| 23 | #include "tpm.h" |
| 24 | |
/* Base of the locality 0 register block in the TPM's SPI address space. */
#define TPM_LOCALITY_0_SPI_BASE 0x00d40000

/* Assorted TPM2 registers for interface type FIFO. */
#define TPM_ACCESS_REG (TPM_LOCALITY_0_SPI_BASE + 0)
#define TPM_STS_REG (TPM_LOCALITY_0_SPI_BASE + 0x18)
#define TPM_DATA_FIFO_REG (TPM_LOCALITY_0_SPI_BASE + 0x24)
#define TPM_DID_VID_REG (TPM_LOCALITY_0_SPI_BASE + 0xf00)
#define TPM_RID_REG (TPM_LOCALITY_0_SPI_BASE + 0xf04)
/* Vendor-specific register exposing a firmware version string (Cr50). */
#define TPM_FW_VER (TPM_LOCALITY_0_SPI_BASE + 0xf90)

#define CR50_TIMEOUT_INIT_MS 30000 /* Very long timeout for TPM init */
| 36 | |
/*
 * SPI slave structure for TPM device. Filled in by tpm2_init(); this driver
 * supports exactly one TPM device.
 */
static struct spi_slave spi_slave;

/* Cached TPM device identification, set up by tpm2_init(). */
static struct tpm2_info tpm_info;

/*
 * TODO(vbendeb): make CONFIG(DEBUG_TPM) an int to allow different level of
 * debug traces. Right now it is either 0 or 1.
 */
static const int debug_level_ = CONFIG_DEBUG_TPM;
| 48 | |
/*
 * SPI frame header for TPM transactions is 4 bytes in size, it is described
 * in section "6.4.6 Spi Bit Protocol".
 *
 * body[0] carries the direction bit and (transfer size - 1); body[1..3]
 * carry the 24 bit register address, MSB first (see start_transaction()).
 */
typedef struct {
	unsigned char body[4];
} spi_frame_header;
| 56 | |
| 57 | void tpm2_get_info(struct tpm2_info *info) |
| 58 | { |
Patrick Georgi | c9b1359 | 2019-11-29 11:47:47 +0100 | [diff] [blame] | 59 | *info = tpm_info; |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 60 | } |
| 61 | |
Aaron Durbin | 6403167 | 2018-04-21 14:45:32 -0600 | [diff] [blame] | 62 | __weak int tis_plat_irq_status(void) |
Jeffy Chen | 19e3d33 | 2017-03-03 18:24:02 +0800 | [diff] [blame] | 63 | { |
Arthur Heymans | 0ca944b | 2019-11-20 19:51:06 +0100 | [diff] [blame] | 64 | static int warning_displayed; |
Jeffy Chen | 19e3d33 | 2017-03-03 18:24:02 +0800 | [diff] [blame] | 65 | |
Arthur Heymans | 0ca944b | 2019-11-20 19:51:06 +0100 | [diff] [blame] | 66 | if (!warning_displayed) { |
Jeffy Chen | 19e3d33 | 2017-03-03 18:24:02 +0800 | [diff] [blame] | 67 | printk(BIOS_WARNING, "WARNING: tis_plat_irq_status() not implemented, wasting 10ms to wait on Cr50!\n"); |
Arthur Heymans | 0ca944b | 2019-11-20 19:51:06 +0100 | [diff] [blame] | 68 | warning_displayed = 1; |
Jeffy Chen | 19e3d33 | 2017-03-03 18:24:02 +0800 | [diff] [blame] | 69 | } |
| 70 | mdelay(10); |
| 71 | |
| 72 | return 1; |
| 73 | } |
| 74 | |
| 75 | /* |
Elyes HAOUAS | 6688f46 | 2018-08-29 17:22:44 +0200 | [diff] [blame] | 76 | * TPM may trigger a IRQ after finish processing previous transfer. |
| 77 | * Waiting for this IRQ to sync TPM status. |
Jeffy Chen | 19e3d33 | 2017-03-03 18:24:02 +0800 | [diff] [blame] | 78 | * |
| 79 | * Returns 1 on success, 0 on failure (timeout). |
| 80 | */ |
| 81 | static int tpm_sync(void) |
| 82 | { |
| 83 | struct stopwatch sw; |
| 84 | |
Furquan Shaikh | 260b297 | 2017-04-07 13:26:01 -0700 | [diff] [blame] | 85 | stopwatch_init_msecs_expire(&sw, 10); |
Jeffy Chen | 19e3d33 | 2017-03-03 18:24:02 +0800 | [diff] [blame] | 86 | while (!tis_plat_irq_status()) { |
| 87 | if (stopwatch_expired(&sw)) { |
Elyes HAOUAS | 6688f46 | 2018-08-29 17:22:44 +0200 | [diff] [blame] | 88 | printk(BIOS_ERR, "Timeout wait for TPM IRQ!\n"); |
Jeffy Chen | 19e3d33 | 2017-03-03 18:24:02 +0800 | [diff] [blame] | 89 | return 0; |
| 90 | } |
| 91 | } |
| 92 | return 1; |
| 93 | } |
| 94 | |
/*
 * Each TPM2 SPI transaction starts the same: CS is asserted, the 4 byte
 * header is sent to the TPM, and the master waits until the TPM is ready to
 * continue. On success the bus is left claimed for the data phase; the
 * caller is responsible for spi_release_bus() after transferring the data.
 *
 * read_write - nonzero for a read transaction, zero for a write
 * bytes      - payload size, 1..64 (encoded as size - 1 in the header)
 * addr       - 24 bit TPM register address
 *
 * Returns 1 on success, 0 on failure (TPM SPI flow control timeout.)
 */
static int start_transaction(int read_write, size_t bytes, unsigned int addr)
{
	spi_frame_header header, header_resp;
	uint8_t byte;
	int i;
	int ret;
	struct stopwatch sw;
	/* Persist across calls: whether a previous transaction may still be
	 * in flight, and how long ago the Cr50 was last kept awake. */
	static int tpm_sync_needed;
	static struct stopwatch wake_up_sw;

	if (CONFIG(TPM_CR50)) {
		/*
		 * First Cr50 access in each coreboot stage where TPM is used will be
		 * prepended by a wake up pulse on the CS line.
		 */
		int wakeup_needed = 1;

		/* Wait for TPM to finish previous transaction if needed */
		if (tpm_sync_needed) {
			tpm_sync();
			/*
			 * During the first invocation of this function on each stage
			 * this if () clause code does not run (as tpm_sync_needed
			 * value is zero), during all following invocations the
			 * stopwatch below is guaranteed to be started.
			 */
			if (!stopwatch_expired(&wake_up_sw))
				wakeup_needed = 0;
		} else {
			tpm_sync_needed = 1;
		}

		if (wakeup_needed) {
			/* Just in case Cr50 is asleep: pulse CS, then give it
			 * time to wake up before the real transaction. */
			spi_claim_bus(&spi_slave);
			udelay(1);
			spi_release_bus(&spi_slave);
			udelay(100);
		}

		/*
		 * The Cr50 on H1 does not go to sleep for 1 second after any
		 * SPI slave activity, let's be conservative and limit the
		 * window to 900 ms.
		 */
		stopwatch_init_msecs_expire(&wake_up_sw, 900);
	}

	/*
	 * The first byte of the frame header encodes the transaction type
	 * (read or write) and transfer size (set to length - 1), limited to
	 * 64 bytes.
	 */
	header.body[0] = (read_write ? 0x80 : 0) | 0x40 | (bytes - 1);

	/* The rest of the frame header is the TPM register address, MSB first. */
	for (i = 0; i < 3; i++)
		header.body[i + 1] = (addr >> (8 * (2 - i))) & 0xff;

	/* CS assert wakes up the slave. */
	spi_claim_bus(&spi_slave);

	/*
	 * The TCG TPM over SPI specification introduces the notion of SPI
	 * flow control (Section "6.4.5 Flow Control").
	 *
	 * Again, the slave (TPM device) expects each transaction to start
	 * with a 4 byte header transmitted by master. The header indicates if
	 * the master needs to read or write a register, and the register
	 * address.
	 *
	 * If the slave needs to stall the transaction (for instance it is not
	 * ready to send the register value to the master), it sets the MOSI
	 * line to 0 during the last clock of the 4 byte header. In this case
	 * the master is supposed to start polling the SPI bus, one byte at
	 * time, until the last bit in the received byte (transferred during
	 * the last clock of the byte) is set to 1.
	 *
	 * Due to some SPI controllers' shortcomings (Rockchip comes to
	 * mind...) we transmit the 4 byte header without checking the byte
	 * transmitted by the TPM during the transaction's last byte.
	 *
	 * We know that cr50 is guaranteed to set the flow control bit to 0
	 * during the header transfer. Real TPM2 are fast enough to not require
	 * to stall the master. They might still use this feature, so test the
	 * last bit after shifting in the address bytes.
	 * crosbug.com/p/52132 has been opened to track this.
	 */

	header_resp.body[3] = 0;
	if (CONFIG(TPM_CR50))
		ret = spi_xfer(&spi_slave, header.body, sizeof(header.body), NULL, 0);
	else
		ret = spi_xfer(&spi_slave, header.body, sizeof(header.body),
			       header_resp.body, sizeof(header_resp.body));
	if (ret) {
		printk(BIOS_ERR, "SPI-TPM: transfer error\n");
		spi_release_bus(&spi_slave);
		return 0;
	}

	/* Flow control bit already set: TPM ready, no stall to wait out. */
	if (header_resp.body[3] & 1)
		return 1;

	/*
	 * Now poll the bus until TPM removes the stall bit. Give it up to 100
	 * ms to sort it out - it could be saving stuff in nvram at some point.
	 */
	stopwatch_init_msecs_expire(&sw, 100);
	do {
		if (stopwatch_expired(&sw)) {
			printk(BIOS_ERR, "TPM flow control failure\n");
			spi_release_bus(&spi_slave);
			return 0;
		}
		spi_xfer(&spi_slave, NULL, 0, &byte, 1);
	} while (!(byte & 1));

	return 1;
}
| 221 | |
/*
 * Print out the contents of a buffer, if debug is enabled. Skip registers
 * other than FIFO, unless debug_level_ is 2.
 *
 * prefix - single-character tag ("W" or "R") printed before the address
 * reg    - TPM register address the data was written to / read from
 * bytes  - number of valid bytes in 'buffer'
 * buffer - the data to dump
 * force  - if nonzero, dump regardless of debug_level_
 */
static void trace_dump(const char *prefix, uint32_t reg,
		       size_t bytes, const uint8_t *buffer,
		       int force)
{
	/* Remembered across calls so consecutive accesses to the same
	 * register continue on the same output line. */
	static char prev_prefix;
	static unsigned int prev_reg;
	static int current_char;
	const int BYTES_PER_LINE = 32;

	if (!force) {
		if (!debug_level_)
			return;

		if ((debug_level_ < 2) && (reg != TPM_DATA_FIFO_REG))
			return;
	}

	/*
	 * Do not print register address again if the last dump print was for
	 * that register.
	 */
	if (prev_prefix != *prefix || (prev_reg != reg)) {
		prev_prefix = *prefix;
		prev_reg = reg;
		printk(BIOS_DEBUG, "\n%s %2.2x:", prefix, reg);
		current_char = 0;
	}

	if ((reg != TPM_DATA_FIFO_REG) && (bytes == 4)) {
		/*
		 * This must be a regular register address, print the 32 bit
		 * value.
		 */
		printk(BIOS_DEBUG, " %8.8x", *(const uint32_t *)buffer);
	} else {
		int i;

		/*
		 * Data read from or written to FIFO or not in 4 byte
		 * quantities is printed byte at a time.
		 */
		for (i = 0; i < bytes; i++) {
			if (current_char &&
			    !(current_char % BYTES_PER_LINE)) {
				printk(BIOS_DEBUG, "\n ");
				current_char = 0;
			}
			(current_char)++;
			printk(BIOS_DEBUG, " %2.2x", buffer[i]);
		}
	}
}
| 278 | |
| 279 | /* |
| 280 | * Once transaction is initiated and the TPM indicated that it is ready to go, |
| 281 | * write the actual bytes to the register. |
| 282 | */ |
| 283 | static void write_bytes(const void *buffer, size_t bytes) |
| 284 | { |
Patrick Georgi | c9b1359 | 2019-11-29 11:47:47 +0100 | [diff] [blame] | 285 | spi_xfer(&spi_slave, buffer, bytes, NULL, 0); |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 286 | } |
| 287 | |
| 288 | /* |
| 289 | * Once transaction is initiated and the TPM indicated that it is ready to go, |
| 290 | * read the actual bytes from the register. |
| 291 | */ |
| 292 | static void read_bytes(void *buffer, size_t bytes) |
| 293 | { |
Patrick Georgi | c9b1359 | 2019-11-29 11:47:47 +0100 | [diff] [blame] | 294 | spi_xfer(&spi_slave, NULL, 0, buffer, bytes); |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 295 | } |
| 296 | |
| 297 | /* |
| 298 | * To write a register, start transaction, transfer data to the TPM, deassert |
| 299 | * CS when done. |
| 300 | * |
Vadim Bendebury | 731ef9b | 2016-12-15 21:49:23 -0800 | [diff] [blame] | 301 | * Returns one to indicate success, zero to indicate failure. |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 302 | */ |
Martin Roth | 38ddbfb | 2019-10-23 21:41:00 -0600 | [diff] [blame] | 303 | static int tpm2_write_reg(unsigned int reg_number, const void *buffer, size_t bytes) |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 304 | { |
| 305 | trace_dump("W", reg_number, bytes, buffer, 0); |
Vadim Bendebury | 731ef9b | 2016-12-15 21:49:23 -0800 | [diff] [blame] | 306 | if (!start_transaction(false, bytes, reg_number)) |
| 307 | return 0; |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 308 | write_bytes(buffer, bytes); |
Patrick Georgi | c9b1359 | 2019-11-29 11:47:47 +0100 | [diff] [blame] | 309 | spi_release_bus(&spi_slave); |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 310 | return 1; |
| 311 | } |
| 312 | |
| 313 | /* |
| 314 | * To read a register, start transaction, transfer data from the TPM, deassert |
| 315 | * CS when done. |
| 316 | * |
Vadim Bendebury | 9a506d5 | 2017-10-25 15:45:00 -0700 | [diff] [blame] | 317 | * Returns one to indicate success, zero to indicate failure. In case of |
| 318 | * failure zero out the user buffer. |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 319 | */ |
Martin Roth | 38ddbfb | 2019-10-23 21:41:00 -0600 | [diff] [blame] | 320 | static int tpm2_read_reg(unsigned int reg_number, void *buffer, size_t bytes) |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 321 | { |
Vadim Bendebury | 731ef9b | 2016-12-15 21:49:23 -0800 | [diff] [blame] | 322 | if (!start_transaction(true, bytes, reg_number)) { |
| 323 | memset(buffer, 0, bytes); |
| 324 | return 0; |
| 325 | } |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 326 | read_bytes(buffer, bytes); |
Patrick Georgi | c9b1359 | 2019-11-29 11:47:47 +0100 | [diff] [blame] | 327 | spi_release_bus(&spi_slave); |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 328 | trace_dump("R", reg_number, bytes, buffer, 0); |
| 329 | return 1; |
| 330 | } |
| 331 | |
| 332 | /* |
| 333 | * Status register is accessed often, wrap reading and writing it into |
| 334 | * dedicated functions. |
| 335 | */ |
| 336 | static int read_tpm_sts(uint32_t *status) |
| 337 | { |
| 338 | return tpm2_read_reg(TPM_STS_REG, status, sizeof(*status)); |
| 339 | } |
| 340 | |
| 341 | static int write_tpm_sts(uint32_t status) |
| 342 | { |
| 343 | return tpm2_write_reg(TPM_STS_REG, &status, sizeof(status)); |
| 344 | } |
| 345 | |
| 346 | /* |
| 347 | * The TPM may limit the transaction bytes count (burst count) below the 64 |
| 348 | * bytes max. The current value is available as a field of the status |
| 349 | * register. |
| 350 | */ |
| 351 | static uint32_t get_burst_count(void) |
| 352 | { |
| 353 | uint32_t status; |
| 354 | |
| 355 | read_tpm_sts(&status); |
Furquan Shaikh | 260b297 | 2017-04-07 13:26:01 -0700 | [diff] [blame] | 356 | return (status & TPM_STS_BURST_COUNT_MASK) >> TPM_STS_BURST_COUNT_SHIFT; |
| 357 | } |
| 358 | |
| 359 | static uint8_t tpm2_read_access_reg(void) |
| 360 | { |
| 361 | uint8_t access; |
| 362 | tpm2_read_reg(TPM_ACCESS_REG, &access, sizeof(access)); |
| 363 | /* We do not care about access establishment bit state. Ignore it. */ |
| 364 | return access & ~TPM_ACCESS_ESTABLISHMENT; |
| 365 | } |
| 366 | |
| 367 | static void tpm2_write_access_reg(uint8_t cmd) |
| 368 | { |
| 369 | /* Writes to access register can set only 1 bit at a time. */ |
| 370 | assert (!(cmd & (cmd - 1))); |
| 371 | |
| 372 | tpm2_write_reg(TPM_ACCESS_REG, &cmd, sizeof(cmd)); |
| 373 | } |
| 374 | |
/*
 * Claim TPM locality zero, retrying for up to CR50_TIMEOUT_INIT_MS while the
 * device finishes processing a pending reset.
 *
 * Returns one to indicate success, zero to indicate failure.
 */
static int tpm2_claim_locality(void)
{
	uint8_t access;
	struct stopwatch sw;

	/*
	 * Locality is released by TPM reset.
	 *
	 * If locality is taken at this point, this could be due to the fact
	 * that the TPM is performing a long operation and has not processed
	 * reset request yet. We'll wait up to CR50_TIMEOUT_INIT_MS and see if
	 * it releases locality when reset is processed.
	 */
	stopwatch_init_msecs_expire(&sw, CR50_TIMEOUT_INIT_MS);
	do {
		access = tpm2_read_access_reg();
		if (access & TPM_ACCESS_ACTIVE_LOCALITY) {
			/*
			 * Don't bombard the chip with traffic, let it keep
			 * processing the command.
			 */
			mdelay(2);
			continue;
		}

		/*
		 * Ok, the locality is free, TPM must be reset, let's claim
		 * it.
		 */

		tpm2_write_access_reg(TPM_ACCESS_REQUEST_USE);
		access = tpm2_read_access_reg();
		if (access != (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) {
			/* Claim did not take effect: drop to the error path. */
			break;
		}

		printk(BIOS_INFO, "TPM ready after %ld ms\n",
		       stopwatch_duration_msecs(&sw));

		return 1;
	} while (!stopwatch_expired(&sw));

	printk(BIOS_ERR,
	       "Failed to claim locality 0 after %ld ms, status: %#x\n",
	       stopwatch_duration_msecs(&sw), access);

	return 0;
}
| 423 | |
/* Device/vendor ID values of the TPM devices this driver supports. */
static const uint32_t supported_did_vids[] = {
	0x00281ae0, /* H1 based Cr50 security chip. */
	0x0000104a /* ST33HTPH2E32 */
};
| 429 | |
/*
 * Initialize the driver: cache the SPI interface, probe the TPM by its
 * DID:VID, claim locality 0 when appropriate, verify the interface reports
 * TPM 2.0 family, cache identification in tpm_info and, for Cr50 parts,
 * report the firmware version string.
 *
 * Returns 0 on success, -1 on failure.
 */
int tpm2_init(struct spi_slave *spi_if)
{
	uint32_t did_vid, status;
	uint8_t cmd;
	int retries;

	memcpy(&spi_slave, spi_if, sizeof(*spi_if));

	/* clear any pending IRQs */
	tis_plat_irq_status();

	/*
	 * 150 ms should be enough to synchronize with the TPM even under the
	 * worst nested reset request conditions. In vast majority of cases
	 * there would be no wait at all.
	 */
	printk(BIOS_INFO, "Probing TPM: ");
	for (retries = 15; retries > 0; retries--) {
		int i;

		/* In case of failure to read did_vid is set to zero. */
		tpm2_read_reg(TPM_DID_VID_REG, &did_vid, sizeof(did_vid));

		for (i = 0; i < ARRAY_SIZE(supported_did_vids); i++)
			if (did_vid == supported_did_vids[i])
				break; /* TPM is up and ready. */

		if (i < ARRAY_SIZE(supported_did_vids))
			break;

		/* TPM might be resetting, let's retry in a bit. */
		mdelay(10);
		printk(BIOS_INFO, ".");
	}

	if (!retries) {
		printk(BIOS_ERR, "\n%s: Failed to connect to the TPM\n",
		       __func__);
		return -1;
	}

	printk(BIOS_INFO, " done!\n");

	// FIXME: Move this to tpm_setup()
	if (ENV_SEPARATE_VERSTAGE || ENV_BOOTBLOCK || !CONFIG(VBOOT))
		/*
		 * Claim locality 0, do it only during the first
		 * initialization after reset.
		 */
		if (!tpm2_claim_locality())
			return -1;

	if (!read_tpm_sts(&status)) {
		printk(BIOS_ERR, "Reading status reg failed\n");
		return -1;
	}
	if ((status & TPM_STS_FAMILY_MASK) != TPM_STS_FAMILY_TPM_2_0) {
		printk(BIOS_ERR, "unexpected TPM family value, status: %#x\n",
		       status);
		return -1;
	}

	/*
	 * Locality claimed, read the revision value and set up the tpm_info
	 * structure.
	 */
	tpm2_read_reg(TPM_RID_REG, &cmd, sizeof(cmd));
	tpm_info.vendor_id = did_vid & 0xffff;
	tpm_info.device_id = did_vid >> 16;
	tpm_info.revision = cmd;

	printk(BIOS_INFO, "Connected to device vid:did:rid of %4.4x:%4.4x:%2.2x\n",
	       tpm_info.vendor_id, tpm_info.device_id, tpm_info.revision);

	/* Let's report device FW version if available (0x1ae0 is Google). */
	if (tpm_info.vendor_id == 0x1ae0) {
		int chunk_count = 0;
		size_t chunk_size;
		/*
		 * let's read 50 bytes at a time; leave room for the trailing
		 * zero.
		 */
		char vstr[51];

		chunk_size = sizeof(vstr) - 1;

		printk(BIOS_INFO, "Firmware version: ");

		/*
		 * Does not really matter what's written, this just makes sure
		 * the version is reported from the beginning.
		 */
		tpm2_write_reg(TPM_FW_VER, &chunk_size, 1);

		/* Print it out in sizeof(vstr) - 1 byte chunks. */
		vstr[chunk_size] = 0;
		do {
			tpm2_read_reg(TPM_FW_VER, vstr, chunk_size);
			printk(BIOS_INFO, "%s", vstr);

			/*
			 * While string is not over, and is no longer than 300
			 * characters.
			 */
		} while (vstr[chunk_size - 1] &&
			 (chunk_count++ < (300 / chunk_size)));

		printk(BIOS_INFO, "\n");
	}
	return 0;
}
| 541 | |
/*
 * Poll the status register until (status & status_mask) == status_expected.
 *
 * The timeout below is in seconds; certain TPM commands, like key
 * generation, can take a long time to complete.
 *
 * Returns one to indicate success, zero to indicate failure (timeout).
 */
#define MAX_STATUS_TIMEOUT 120
static int wait_for_status(uint32_t status_mask, uint32_t status_expected)
{
	uint32_t status;
	struct stopwatch sw;

	stopwatch_init_usecs_expire(&sw, MAX_STATUS_TIMEOUT * 1000 * 1000);
	do {
		/* Poll at ~1 ms intervals to avoid hammering the bus. */
		udelay(1000);
		if (stopwatch_expired(&sw)) {
			printk(BIOS_ERR, "failed to get expected status %x\n",
			       status_expected);
			return false;
		}
		read_tpm_sts(&status);
	} while ((status & status_mask) != status_expected);

	return 1;
}
| 568 | |
/* Direction of a TPM FIFO transfer, from the host's point of view. */
enum fifo_transfer_direction {
	fifo_transmit = 0,	/* Host -> TPM (command bytes). */
	fifo_receive = 1	/* TPM -> host (response bytes). */
};
| 573 | |
/*
 * Union allows to avoid casting away 'const' on transmit buffers: the same
 * argument carries a writable pointer for receives and a read-only pointer
 * for transmits. Which member is valid is implied by the transfer direction.
 */
union fifo_transfer_buffer {
	uint8_t *rx_buffer;		/* Valid for fifo_receive. */
	const uint8_t *tx_buffer;	/* Valid for fifo_transmit. */
};
| 579 | |
| 580 | /* |
| 581 | * Transfer requested number of bytes to or from TPM FIFO, accounting for the |
| 582 | * current burst count value. |
| 583 | */ |
| 584 | static void fifo_transfer(size_t transfer_size, |
| 585 | union fifo_transfer_buffer buffer, |
| 586 | enum fifo_transfer_direction direction) |
| 587 | { |
| 588 | size_t transaction_size; |
| 589 | size_t burst_count; |
| 590 | size_t handled_so_far = 0; |
| 591 | |
| 592 | do { |
| 593 | do { |
| 594 | /* Could be zero when TPM is busy. */ |
| 595 | burst_count = get_burst_count(); |
| 596 | } while (!burst_count); |
| 597 | |
| 598 | transaction_size = transfer_size - handled_so_far; |
| 599 | transaction_size = MIN(transaction_size, burst_count); |
| 600 | |
| 601 | /* |
| 602 | * The SPI frame header does not allow to pass more than 64 |
| 603 | * bytes. |
| 604 | */ |
| 605 | transaction_size = MIN(transaction_size, 64); |
| 606 | |
| 607 | if (direction == fifo_receive) |
| 608 | tpm2_read_reg(TPM_DATA_FIFO_REG, |
| 609 | buffer.rx_buffer + handled_so_far, |
| 610 | transaction_size); |
| 611 | else |
| 612 | tpm2_write_reg(TPM_DATA_FIFO_REG, |
| 613 | buffer.tx_buffer + handled_so_far, |
| 614 | transaction_size); |
| 615 | |
| 616 | handled_so_far += transaction_size; |
| 617 | |
| 618 | } while (handled_so_far != transfer_size); |
| 619 | } |
| 620 | |
| 621 | size_t tpm2_process_command(const void *tpm2_command, size_t command_size, |
| 622 | void *tpm2_response, size_t max_response) |
| 623 | { |
| 624 | uint32_t status; |
| 625 | uint32_t expected_status_bits; |
| 626 | size_t payload_size; |
| 627 | size_t bytes_to_go; |
| 628 | const uint8_t *cmd_body = tpm2_command; |
| 629 | uint8_t *rsp_body = tpm2_response; |
| 630 | union fifo_transfer_buffer fifo_buffer; |
| 631 | const int HEADER_SIZE = 6; |
| 632 | |
Vadim Bendebury | 731ef9b | 2016-12-15 21:49:23 -0800 | [diff] [blame] | 633 | /* Do not try using an uninitialized TPM. */ |
Patrick Georgi | c9b1359 | 2019-11-29 11:47:47 +0100 | [diff] [blame] | 634 | if (!tpm_info.vendor_id) |
Vadim Bendebury | 731ef9b | 2016-12-15 21:49:23 -0800 | [diff] [blame] | 635 | return 0; |
| 636 | |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 637 | /* Skip the two byte tag, read the size field. */ |
| 638 | payload_size = read_be32(cmd_body + 2); |
| 639 | |
| 640 | /* Sanity check. */ |
| 641 | if (payload_size != command_size) { |
| 642 | printk(BIOS_ERR, |
| 643 | "Command size mismatch: encoded %zd != requested %zd\n", |
| 644 | payload_size, command_size); |
| 645 | trace_dump("W", TPM_DATA_FIFO_REG, command_size, cmd_body, 1); |
| 646 | printk(BIOS_DEBUG, "\n"); |
| 647 | return 0; |
| 648 | } |
| 649 | |
| 650 | /* Let the TPM know that the command is coming. */ |
Furquan Shaikh | 260b297 | 2017-04-07 13:26:01 -0700 | [diff] [blame] | 651 | write_tpm_sts(TPM_STS_COMMAND_READY); |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 652 | |
| 653 | /* |
Elyes HAOUAS | 6688f46 | 2018-08-29 17:22:44 +0200 | [diff] [blame] | 654 | * TPM commands and responses written to and read from the FIFO |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 655 | * register (0x24) are datagrams of variable size, prepended by a 6 |
| 656 | * byte header. |
| 657 | * |
| 658 | * The specification description of the state machine is a bit vague, |
| 659 | * but from experience it looks like there is no need to wait for the |
| 660 | * sts.expect bit to be set, at least with the 9670 and cr50 devices. |
| 661 | * Just write the command into FIFO, making sure not to exceed the |
| 662 | * burst count or the maximum PDU size, whatever is smaller. |
| 663 | */ |
| 664 | fifo_buffer.tx_buffer = cmd_body; |
| 665 | fifo_transfer(command_size, fifo_buffer, fifo_transmit); |
| 666 | |
| 667 | /* Now tell the TPM it can start processing the command. */ |
Furquan Shaikh | 260b297 | 2017-04-07 13:26:01 -0700 | [diff] [blame] | 668 | write_tpm_sts(TPM_STS_GO); |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 669 | |
| 670 | /* Now wait for it to report that the response is ready. */ |
Furquan Shaikh | 260b297 | 2017-04-07 13:26:01 -0700 | [diff] [blame] | 671 | expected_status_bits = TPM_STS_VALID | TPM_STS_DATA_AVAIL; |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 672 | if (!wait_for_status(expected_status_bits, expected_status_bits)) { |
| 673 | /* |
| 674 | * If timed out, which should never happen, let's at least |
| 675 | * print out the offending command. |
| 676 | */ |
| 677 | trace_dump("W", TPM_DATA_FIFO_REG, command_size, cmd_body, 1); |
| 678 | printk(BIOS_DEBUG, "\n"); |
| 679 | return 0; |
| 680 | } |
| 681 | |
| 682 | /* |
| 683 | * The response is ready, let's read it. First we read the FIFO |
| 684 | * payload header, to see how much data to expect. The response header |
| 685 | * size is fixed to six bytes, the total payload size is stored in |
| 686 | * network order in the last four bytes. |
| 687 | */ |
| 688 | tpm2_read_reg(TPM_DATA_FIFO_REG, rsp_body, HEADER_SIZE); |
| 689 | |
| 690 | /* Find out the total payload size, skipping the two byte tag. */ |
| 691 | payload_size = read_be32(rsp_body + 2); |
| 692 | |
| 693 | if (payload_size > max_response) { |
| 694 | /* |
| 695 | * TODO(vbendeb): at least drain the FIFO here or somehow let |
| 696 | * the TPM know that the response can be dropped. |
| 697 | */ |
Elyes HAOUAS | 6688f46 | 2018-08-29 17:22:44 +0200 | [diff] [blame] | 698 | printk(BIOS_ERR, " TPM response too long (%zd bytes)", |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 699 | payload_size); |
| 700 | return 0; |
| 701 | } |
| 702 | |
| 703 | /* |
| 704 | * Now let's read all but the last byte in the FIFO to make sure the |
| 705 | * status register is showing correct flow control bits: 'more data' |
| 706 | * until the last byte and then 'no more data' once the last byte is |
| 707 | * read. |
| 708 | */ |
| 709 | bytes_to_go = payload_size - 1 - HEADER_SIZE; |
| 710 | fifo_buffer.rx_buffer = rsp_body + HEADER_SIZE; |
| 711 | fifo_transfer(bytes_to_go, fifo_buffer, fifo_receive); |
| 712 | |
| 713 | /* Verify that there is still data to read. */ |
| 714 | read_tpm_sts(&status); |
| 715 | if ((status & expected_status_bits) != expected_status_bits) { |
| 716 | printk(BIOS_ERR, "unexpected intermediate status %#x\n", |
| 717 | status); |
| 718 | return 0; |
| 719 | } |
| 720 | |
| 721 | /* Read the last byte of the PDU. */ |
| 722 | tpm2_read_reg(TPM_DATA_FIFO_REG, rsp_body + payload_size - 1, 1); |
| 723 | |
| 724 | /* Terminate the dump, if enabled. */ |
| 725 | if (debug_level_) |
| 726 | printk(BIOS_DEBUG, "\n"); |
| 727 | |
| 728 | /* Verify that 'data available' is not asseretd any more. */ |
| 729 | read_tpm_sts(&status); |
Furquan Shaikh | 260b297 | 2017-04-07 13:26:01 -0700 | [diff] [blame] | 730 | if ((status & expected_status_bits) != TPM_STS_VALID) { |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 731 | printk(BIOS_ERR, "unexpected final status %#x\n", status); |
| 732 | return 0; |
| 733 | } |
| 734 | |
| 735 | /* Move the TPM back to idle state. */ |
Furquan Shaikh | 260b297 | 2017-04-07 13:26:01 -0700 | [diff] [blame] | 736 | write_tpm_sts(TPM_STS_COMMAND_READY); |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 737 | |
| 738 | return payload_size; |
| 739 | } |