Patrick Georgi | 593124d | 2020-05-10 19:44:08 +0200 | [diff] [blame] | 1 | /* SPDX-License-Identifier: BSD-3-Clause */ |
| 2 | /* This is a driver for a SPI interfaced TPM2 device. |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 3 | * |
| 4 | * It assumes that the required SPI interface has been initialized before the |
| 5 | * driver is started. A 'sruct spi_slave' pointer passed at initialization is |
Martin Roth | 0949e73 | 2021-10-01 14:28:22 -0600 | [diff] [blame] | 6 | * used to direct traffic to the correct SPI interface. This driver does not |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 7 | * provide a way to instantiate multiple TPM devices. Also, to keep things |
| 8 | * simple, the driver unconditionally uses of TPM locality zero. |
| 9 | * |
| 10 | * References to documentation are based on the TCG issued "TPM Profile (PTP) |
| 11 | * Specification Revision 00.43". |
| 12 | */ |
| 13 | |
Furquan Shaikh | 260b297 | 2017-04-07 13:26:01 -0700 | [diff] [blame] | 14 | #include <assert.h> |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 15 | #include <commonlib/endian.h> |
| 16 | #include <console/console.h> |
| 17 | #include <delay.h> |
Yu-Ping Wu | ae1e702 | 2022-05-17 09:33:18 +0800 | [diff] [blame] | 18 | #include <drivers/tpm/cr50.h> |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 19 | #include <endian.h> |
Tim Wawrzynczak | 591c7eb | 2022-02-15 17:59:58 -0700 | [diff] [blame] | 20 | #include <security/tpm/tis.h> |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 21 | #include <string.h> |
| 22 | #include <timer.h> |
Tim Wawrzynczak | 591c7eb | 2022-02-15 17:59:58 -0700 | [diff] [blame] | 23 | #include <types.h> |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 24 | |
| 25 | #include "tpm.h" |
| 26 | |
/* Assorted TPM2 registers for interface type FIFO. */
#define TPM_ACCESS_REG (TPM_LOCALITY_0_SPI_BASE + 0)
#define TPM_STS_REG (TPM_LOCALITY_0_SPI_BASE + 0x18)
#define TPM_DATA_FIFO_REG (TPM_LOCALITY_0_SPI_BASE + 0x24)
#define TPM_INTF_ID_REG (TPM_LOCALITY_0_SPI_BASE + 0x30)
#define TPM_DID_VID_REG (TPM_LOCALITY_0_SPI_BASE + 0xf00)
#define TPM_RID_REG (TPM_LOCALITY_0_SPI_BASE + 0xf04)
/* Vendor-defined registers (Cr50/Ti50), outside the standard PTP layout. */
#define TPM_FW_VER (TPM_LOCALITY_0_SPI_BASE + 0xf90)
#define CR50_BOARD_CFG (TPM_LOCALITY_0_SPI_BASE + 0xfe0)

#define CR50_TIMEOUT_INIT_MS 30000 /* Very long timeout for TPM init */

/* SPI slave structure for TPM device. Copied from caller in tpm2_init(). */
static struct spi_slave spi_slave;

/* Cached TPM device identification, filled in by tpm2_init(). */
static struct tpm2_info tpm_info;

/*
 * TODO(vbendeb): make CONFIG(DEBUG_TPM) an int to allow different level of
 * debug traces. Right now it is either 0 or 1.
 */
static const int debug_level_ = CONFIG(DEBUG_TPM);

/*
 * SPI frame header for TPM transactions is 4 bytes in size, it is described
 * in section "6.4.6 Spi Bit Protocol".
 */
typedef struct {
	unsigned char body[4];
} spi_frame_header;
| 58 | |
| 59 | void tpm2_get_info(struct tpm2_info *info) |
| 60 | { |
Patrick Georgi | c9b1359 | 2019-11-29 11:47:47 +0100 | [diff] [blame] | 61 | *info = tpm_info; |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 62 | } |
| 63 | |
Jeffy Chen | 19e3d33 | 2017-03-03 18:24:02 +0800 | [diff] [blame] | 64 | /* |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 65 | * Each TPM2 SPI transaction starts the same: CS is asserted, the 4 byte |
| 66 | * header is sent to the TPM, the master waits til TPM is ready to continue. |
| 67 | */ |
Tim Wawrzynczak | 591c7eb | 2022-02-15 17:59:58 -0700 | [diff] [blame] | 68 | static enum cb_err start_transaction(int read_write, size_t bytes, unsigned int addr) |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 69 | { |
Patrick Rudolph | 7bcd9a1 | 2020-03-20 09:55:43 +0100 | [diff] [blame] | 70 | spi_frame_header header, header_resp; |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 71 | uint8_t byte; |
| 72 | int i; |
Patrick Rudolph | 7bcd9a1 | 2020-03-20 09:55:43 +0100 | [diff] [blame] | 73 | int ret; |
Vadim Bendebury | 731ef9b | 2016-12-15 21:49:23 -0800 | [diff] [blame] | 74 | struct stopwatch sw; |
Arthur Heymans | 0ca944b | 2019-11-20 19:51:06 +0100 | [diff] [blame] | 75 | static int tpm_sync_needed; |
| 76 | static struct stopwatch wake_up_sw; |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 77 | |
Jes B. Klinke | c6b041a1 | 2022-04-19 14:00:33 -0700 | [diff] [blame] | 78 | if (CONFIG(TPM_GOOGLE)) { |
Vadim Bendebury | 3b62d6b | 2017-10-30 18:29:03 -0700 | [diff] [blame] | 79 | /* |
Patrick Rudolph | 7bcd9a1 | 2020-03-20 09:55:43 +0100 | [diff] [blame] | 80 | * First Cr50 access in each coreboot stage where TPM is used will be |
| 81 | * prepended by a wake up pulse on the CS line. |
Vadim Bendebury | 3b62d6b | 2017-10-30 18:29:03 -0700 | [diff] [blame] | 82 | */ |
Patrick Rudolph | 7bcd9a1 | 2020-03-20 09:55:43 +0100 | [diff] [blame] | 83 | int wakeup_needed = 1; |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 84 | |
Patrick Rudolph | 7bcd9a1 | 2020-03-20 09:55:43 +0100 | [diff] [blame] | 85 | /* Wait for TPM to finish previous transaction if needed */ |
| 86 | if (tpm_sync_needed) { |
Yu-Ping Wu | ae1e702 | 2022-05-17 09:33:18 +0800 | [diff] [blame] | 87 | if (cr50_wait_tpm_ready() != CB_SUCCESS) |
Tim Wawrzynczak | 591c7eb | 2022-02-15 17:59:58 -0700 | [diff] [blame] | 88 | printk(BIOS_ERR, "Timeout waiting for TPM IRQ!\n"); |
| 89 | |
Patrick Rudolph | 7bcd9a1 | 2020-03-20 09:55:43 +0100 | [diff] [blame] | 90 | /* |
| 91 | * During the first invocation of this function on each stage |
| 92 | * this if () clause code does not run (as tpm_sync_needed |
| 93 | * value is zero), during all following invocations the |
| 94 | * stopwatch below is guaranteed to be started. |
| 95 | */ |
| 96 | if (!stopwatch_expired(&wake_up_sw)) |
| 97 | wakeup_needed = 0; |
| 98 | } else { |
| 99 | tpm_sync_needed = 1; |
| 100 | } |
Vadim Bendebury | 3b62d6b | 2017-10-30 18:29:03 -0700 | [diff] [blame] | 101 | |
Patrick Rudolph | 7bcd9a1 | 2020-03-20 09:55:43 +0100 | [diff] [blame] | 102 | if (wakeup_needed) { |
| 103 | /* Just in case Cr50 is asleep. */ |
| 104 | spi_claim_bus(&spi_slave); |
| 105 | udelay(1); |
| 106 | spi_release_bus(&spi_slave); |
| 107 | udelay(100); |
| 108 | } |
| 109 | |
| 110 | /* |
| 111 | * The Cr50 on H1 does not go to sleep for 1 second after any |
| 112 | * SPI slave activity, let's be conservative and limit the |
| 113 | * window to 900 ms. |
| 114 | */ |
| 115 | stopwatch_init_msecs_expire(&wake_up_sw, 900); |
| 116 | } |
Jeffy Chen | f9a40ea | 2017-03-03 18:24:02 +0800 | [diff] [blame] | 117 | |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 118 | /* |
| 119 | * The first byte of the frame header encodes the transaction type |
Martin Roth | 0949e73 | 2021-10-01 14:28:22 -0600 | [diff] [blame] | 120 | * (read or write) and transfer size (set to length - 1), limited to |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 121 | * 64 bytes. |
| 122 | */ |
| 123 | header.body[0] = (read_write ? 0x80 : 0) | 0x40 | (bytes - 1); |
| 124 | |
| 125 | /* The rest of the frame header is the TPM register address. */ |
| 126 | for (i = 0; i < 3; i++) |
| 127 | header.body[i + 1] = (addr >> (8 * (2 - i))) & 0xff; |
| 128 | |
| 129 | /* CS assert wakes up the slave. */ |
Patrick Georgi | c9b1359 | 2019-11-29 11:47:47 +0100 | [diff] [blame] | 130 | spi_claim_bus(&spi_slave); |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 131 | |
| 132 | /* |
| 133 | * The TCG TPM over SPI specification introduces the notion of SPI |
| 134 | * flow control (Section "6.4.5 Flow Control"). |
| 135 | * |
| 136 | * Again, the slave (TPM device) expects each transaction to start |
| 137 | * with a 4 byte header trasmitted by master. The header indicates if |
| 138 | * the master needs to read or write a register, and the register |
| 139 | * address. |
| 140 | * |
| 141 | * If the slave needs to stall the transaction (for instance it is not |
| 142 | * ready to send the register value to the master), it sets the MOSI |
| 143 | * line to 0 during the last clock of the 4 byte header. In this case |
| 144 | * the master is supposed to start polling the SPI bus, one byte at |
| 145 | * time, until the last bit in the received byte (transferred during |
| 146 | * the last clock of the byte) is set to 1. |
| 147 | * |
| 148 | * Due to some SPI controllers' shortcomings (Rockchip comes to |
Martin Roth | 0949e73 | 2021-10-01 14:28:22 -0600 | [diff] [blame] | 149 | * mind...) we transmit the 4 byte header without checking the byte |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 150 | * transmitted by the TPM during the transaction's last byte. |
| 151 | * |
| 152 | * We know that cr50 is guaranteed to set the flow control bit to 0 |
Patrick Rudolph | 7bcd9a1 | 2020-03-20 09:55:43 +0100 | [diff] [blame] | 153 | * during the header transfer. Real TPM2 are fast enough to not require |
| 154 | * to stall the master. They might still use this feature, so test the |
| 155 | * last bit after shifting in the address bytes. |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 156 | * crosbug.com/p/52132 has been opened to track this. |
| 157 | */ |
Patrick Rudolph | 7bcd9a1 | 2020-03-20 09:55:43 +0100 | [diff] [blame] | 158 | |
| 159 | header_resp.body[3] = 0; |
Jes B. Klinke | c6b041a1 | 2022-04-19 14:00:33 -0700 | [diff] [blame] | 160 | if (CONFIG(TPM_GOOGLE)) |
Patrick Rudolph | 7bcd9a1 | 2020-03-20 09:55:43 +0100 | [diff] [blame] | 161 | ret = spi_xfer(&spi_slave, header.body, sizeof(header.body), NULL, 0); |
| 162 | else |
| 163 | ret = spi_xfer(&spi_slave, header.body, sizeof(header.body), |
| 164 | header_resp.body, sizeof(header_resp.body)); |
| 165 | if (ret) { |
| 166 | printk(BIOS_ERR, "SPI-TPM: transfer error\n"); |
| 167 | spi_release_bus(&spi_slave); |
Tim Wawrzynczak | 591c7eb | 2022-02-15 17:59:58 -0700 | [diff] [blame] | 168 | return CB_ERR; |
Patrick Rudolph | 7bcd9a1 | 2020-03-20 09:55:43 +0100 | [diff] [blame] | 169 | } |
| 170 | |
| 171 | if (header_resp.body[3] & 1) |
Tim Wawrzynczak | 591c7eb | 2022-02-15 17:59:58 -0700 | [diff] [blame] | 172 | return CB_SUCCESS; |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 173 | |
Vadim Bendebury | 731ef9b | 2016-12-15 21:49:23 -0800 | [diff] [blame] | 174 | /* |
| 175 | * Now poll the bus until TPM removes the stall bit. Give it up to 100 |
Patrick Rudolph | 7bcd9a1 | 2020-03-20 09:55:43 +0100 | [diff] [blame] | 176 | * ms to sort it out - it could be saving stuff in nvram at some point. |
Vadim Bendebury | 731ef9b | 2016-12-15 21:49:23 -0800 | [diff] [blame] | 177 | */ |
| 178 | stopwatch_init_msecs_expire(&sw, 100); |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 179 | do { |
Vadim Bendebury | 731ef9b | 2016-12-15 21:49:23 -0800 | [diff] [blame] | 180 | if (stopwatch_expired(&sw)) { |
| 181 | printk(BIOS_ERR, "TPM flow control failure\n"); |
Patrick Georgi | c9b1359 | 2019-11-29 11:47:47 +0100 | [diff] [blame] | 182 | spi_release_bus(&spi_slave); |
Tim Wawrzynczak | 591c7eb | 2022-02-15 17:59:58 -0700 | [diff] [blame] | 183 | return CB_ERR; |
Vadim Bendebury | 731ef9b | 2016-12-15 21:49:23 -0800 | [diff] [blame] | 184 | } |
Patrick Georgi | c9b1359 | 2019-11-29 11:47:47 +0100 | [diff] [blame] | 185 | spi_xfer(&spi_slave, NULL, 0, &byte, 1); |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 186 | } while (!(byte & 1)); |
Patrick Rudolph | 7bcd9a1 | 2020-03-20 09:55:43 +0100 | [diff] [blame] | 187 | |
Tim Wawrzynczak | 591c7eb | 2022-02-15 17:59:58 -0700 | [diff] [blame] | 188 | return CB_SUCCESS; |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 189 | } |
| 190 | |
| 191 | /* |
| 192 | * Print out the contents of a buffer, if debug is enabled. Skip registers |
| 193 | * other than FIFO, unless debug_level_ is 2. |
| 194 | */ |
| 195 | static void trace_dump(const char *prefix, uint32_t reg, |
| 196 | size_t bytes, const uint8_t *buffer, |
| 197 | int force) |
| 198 | { |
Arthur Heymans | 0ca944b | 2019-11-20 19:51:06 +0100 | [diff] [blame] | 199 | static char prev_prefix; |
| 200 | static unsigned int prev_reg; |
| 201 | static int current_char; |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 202 | const int BYTES_PER_LINE = 32; |
| 203 | |
| 204 | if (!force) { |
| 205 | if (!debug_level_) |
| 206 | return; |
| 207 | |
| 208 | if ((debug_level_ < 2) && (reg != TPM_DATA_FIFO_REG)) |
| 209 | return; |
| 210 | } |
| 211 | |
| 212 | /* |
| 213 | * Do not print register address again if the last dump print was for |
| 214 | * that register. |
| 215 | */ |
Arthur Heymans | 0ca944b | 2019-11-20 19:51:06 +0100 | [diff] [blame] | 216 | if (prev_prefix != *prefix || (prev_reg != reg)) { |
| 217 | prev_prefix = *prefix; |
| 218 | prev_reg = reg; |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 219 | printk(BIOS_DEBUG, "\n%s %2.2x:", prefix, reg); |
Arthur Heymans | 0ca944b | 2019-11-20 19:51:06 +0100 | [diff] [blame] | 220 | current_char = 0; |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 221 | } |
| 222 | |
| 223 | if ((reg != TPM_DATA_FIFO_REG) && (bytes == 4)) { |
| 224 | /* |
| 225 | * This must be a regular register address, print the 32 bit |
| 226 | * value. |
| 227 | */ |
| 228 | printk(BIOS_DEBUG, " %8.8x", *(const uint32_t *)buffer); |
| 229 | } else { |
| 230 | int i; |
| 231 | |
| 232 | /* |
| 233 | * Data read from or written to FIFO or not in 4 byte |
| 234 | * quantiites is printed byte at a time. |
| 235 | */ |
| 236 | for (i = 0; i < bytes; i++) { |
Arthur Heymans | 0ca944b | 2019-11-20 19:51:06 +0100 | [diff] [blame] | 237 | if (current_char && |
| 238 | !(current_char % BYTES_PER_LINE)) { |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 239 | printk(BIOS_DEBUG, "\n "); |
Arthur Heymans | 0ca944b | 2019-11-20 19:51:06 +0100 | [diff] [blame] | 240 | current_char = 0; |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 241 | } |
Arthur Heymans | 0ca944b | 2019-11-20 19:51:06 +0100 | [diff] [blame] | 242 | (current_char)++; |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 243 | printk(BIOS_DEBUG, " %2.2x", buffer[i]); |
| 244 | } |
| 245 | } |
| 246 | } |
| 247 | |
/*
 * Once transaction is initiated and the TPM indicated that it is ready to go,
 * write the actual bytes to the register.
 *
 * NOTE(review): the spi_xfer() return value is ignored here, so a failed
 * payload transfer is not reported to the caller.
 */
static void write_bytes(const void *buffer, size_t bytes)
{
	spi_xfer(&spi_slave, buffer, bytes, NULL, 0);
}
| 256 | |
/*
 * Once transaction is initiated and the TPM indicated that it is ready to go,
 * read the actual bytes from the register.
 *
 * NOTE(review): the spi_xfer() return value is ignored here; on failure the
 * caller's buffer may be left unmodified.
 */
static void read_bytes(void *buffer, size_t bytes)
{
	spi_xfer(&spi_slave, NULL, 0, buffer, bytes);
}
| 265 | |
| 266 | /* |
| 267 | * To write a register, start transaction, transfer data to the TPM, deassert |
| 268 | * CS when done. |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 269 | */ |
Tim Wawrzynczak | 591c7eb | 2022-02-15 17:59:58 -0700 | [diff] [blame] | 270 | static enum cb_err tpm2_write_reg(unsigned int reg_number, const void *buffer, size_t bytes) |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 271 | { |
| 272 | trace_dump("W", reg_number, bytes, buffer, 0); |
Tim Wawrzynczak | 591c7eb | 2022-02-15 17:59:58 -0700 | [diff] [blame] | 273 | if (start_transaction(false, bytes, reg_number) != CB_SUCCESS) |
| 274 | return CB_ERR; |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 275 | write_bytes(buffer, bytes); |
Patrick Georgi | c9b1359 | 2019-11-29 11:47:47 +0100 | [diff] [blame] | 276 | spi_release_bus(&spi_slave); |
Tim Wawrzynczak | 591c7eb | 2022-02-15 17:59:58 -0700 | [diff] [blame] | 277 | return CB_SUCCESS; |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 278 | } |
| 279 | |
| 280 | /* |
| 281 | * To read a register, start transaction, transfer data from the TPM, deassert |
| 282 | * CS when done. |
| 283 | * |
Tim Wawrzynczak | 591c7eb | 2022-02-15 17:59:58 -0700 | [diff] [blame] | 284 | * In case of failure zero out the user buffer. |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 285 | */ |
Tim Wawrzynczak | 591c7eb | 2022-02-15 17:59:58 -0700 | [diff] [blame] | 286 | static enum cb_err tpm2_read_reg(unsigned int reg_number, void *buffer, size_t bytes) |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 287 | { |
Tim Wawrzynczak | 591c7eb | 2022-02-15 17:59:58 -0700 | [diff] [blame] | 288 | if (start_transaction(true, bytes, reg_number) != CB_SUCCESS) { |
Vadim Bendebury | 731ef9b | 2016-12-15 21:49:23 -0800 | [diff] [blame] | 289 | memset(buffer, 0, bytes); |
Tim Wawrzynczak | 591c7eb | 2022-02-15 17:59:58 -0700 | [diff] [blame] | 290 | return CB_ERR; |
Vadim Bendebury | 731ef9b | 2016-12-15 21:49:23 -0800 | [diff] [blame] | 291 | } |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 292 | read_bytes(buffer, bytes); |
Patrick Georgi | c9b1359 | 2019-11-29 11:47:47 +0100 | [diff] [blame] | 293 | spi_release_bus(&spi_slave); |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 294 | trace_dump("R", reg_number, bytes, buffer, 0); |
Tim Wawrzynczak | 591c7eb | 2022-02-15 17:59:58 -0700 | [diff] [blame] | 295 | return CB_SUCCESS; |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 296 | } |
| 297 | |
/*
 * Status register is accessed often, wrap reading and writing it into
 * dedicated functions.
 */

/* Read the 32 bit TPM_STS register; *status is zeroed if the read fails. */
static enum cb_err read_tpm_sts(uint32_t *status)
{
	return tpm2_read_reg(TPM_STS_REG, status, sizeof(*status));
}
| 306 | |
/* Write the 32 bit TPM_STS register; callers must check the result. */
static enum cb_err __must_check write_tpm_sts(uint32_t status)
{
	return tpm2_write_reg(TPM_STS_REG, &status, sizeof(status));
}
| 311 | |
| 312 | /* |
| 313 | * The TPM may limit the transaction bytes count (burst count) below the 64 |
| 314 | * bytes max. The current value is available as a field of the status |
| 315 | * register. |
| 316 | */ |
| 317 | static uint32_t get_burst_count(void) |
| 318 | { |
| 319 | uint32_t status; |
| 320 | |
| 321 | read_tpm_sts(&status); |
Furquan Shaikh | 260b297 | 2017-04-07 13:26:01 -0700 | [diff] [blame] | 322 | return (status & TPM_STS_BURST_COUNT_MASK) >> TPM_STS_BURST_COUNT_SHIFT; |
| 323 | } |
| 324 | |
| 325 | static uint8_t tpm2_read_access_reg(void) |
| 326 | { |
| 327 | uint8_t access; |
| 328 | tpm2_read_reg(TPM_ACCESS_REG, &access, sizeof(access)); |
| 329 | /* We do not care about access establishment bit state. Ignore it. */ |
| 330 | return access & ~TPM_ACCESS_ESTABLISHMENT; |
| 331 | } |
| 332 | |
| 333 | static void tpm2_write_access_reg(uint8_t cmd) |
| 334 | { |
| 335 | /* Writes to access register can set only 1 bit at a time. */ |
| 336 | assert (!(cmd & (cmd - 1))); |
| 337 | |
| 338 | tpm2_write_reg(TPM_ACCESS_REG, &cmd, sizeof(cmd)); |
| 339 | } |
| 340 | |
/*
 * Claim TPM locality zero for the host.
 *
 * Returns CB_SUCCESS once locality 0 has been successfully requested and
 * confirmed via the access register, CB_ERR on timeout or if a claim
 * attempt does not produce the expected access register value.
 */
static enum cb_err tpm2_claim_locality(void)
{
	uint8_t access;
	struct stopwatch sw;

	/*
	 * Locality is released by TPM reset.
	 *
	 * If locality is taken at this point, this could be due to the fact
	 * that the TPM is performing a long operation and has not processed
	 * reset request yet. We'll wait up to CR50_TIMEOUT_INIT_MS and see if
	 * it releases locality when reset is processed.
	 */
	stopwatch_init_msecs_expire(&sw, CR50_TIMEOUT_INIT_MS);
	do {
		access = tpm2_read_access_reg();
		if (access & TPM_ACCESS_ACTIVE_LOCALITY) {
			/*
			 * Don't bombard the chip with traffic, let it keep
			 * processing the command.
			 */
			mdelay(2);
			continue;
		}

		/*
		 * Ok, the locality is free, TPM must be reset, let's claim
		 * it.
		 */

		tpm2_write_access_reg(TPM_ACCESS_REQUEST_USE);
		access = tpm2_read_access_reg();
		if (access != (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) {
			/*
			 * Claim attempt failed - fall through to the error
			 * path below rather than retrying.
			 */
			break;
		}

		printk(BIOS_INFO, "TPM ready after %lld ms\n",
		       stopwatch_duration_msecs(&sw));

		return CB_SUCCESS;
	} while (!stopwatch_expired(&sw));

	printk(BIOS_ERR,
	       "Failed to claim locality 0 after %lld ms, status: %#x\n",
	       stopwatch_duration_msecs(&sw), access);

	return CB_ERR;
}
| 389 | |
/* Device/vendor ID values of the TPM devices this driver supports. */
static const uint32_t supported_did_vids[] = {
	0x00281ae0, /* H1 based Cr50 security chip. */
	0x504a6666, /* H1D3C based Ti50 security chip. */
	0x0000104a  /* ST33HTPH2E32 */
};
| 396 | |
/*
 * Probe for and initialize the SPI TPM.
 *
 * Copies the provided SPI slave descriptor, polls DID/VID until a supported
 * device responds, verifies the device is a TPM 2.0, claims locality 0 on
 * the first access of the boot, and caches the identification data that
 * tpm2_get_info() reports.
 *
 * @param spi_if  pre-initialized SPI interface to use for all TPM traffic
 *
 * Returns 0 on success, -1 on any failure.
 */
int tpm2_init(struct spi_slave *spi_if)
{
	uint32_t did_vid, status, intf_id;
	uint8_t cmd;
	int retries;

	memcpy(&spi_slave, spi_if, sizeof(*spi_if));

	/* Clear any pending IRQs. */
	if (CONFIG(TPM_GOOGLE))
		cr50_plat_irq_status();

	/*
	 * 150 ms should be enough to synchronize with the TPM even under the
	 * worst nested reset request conditions. In vast majority of cases
	 * there would be no wait at all.
	 */
	printk(BIOS_INFO, "Probing TPM: ");
	for (retries = 15; retries > 0; retries--) {
		int i;

		/* In case of failure to read, did_vid is set to zero. */
		tpm2_read_reg(TPM_DID_VID_REG, &did_vid, sizeof(did_vid));

		for (i = 0; i < ARRAY_SIZE(supported_did_vids); i++)
			if (did_vid == supported_did_vids[i])
				break; /* TPM is up and ready. */

		if (i < ARRAY_SIZE(supported_did_vids))
			break;

		/* TPM might be resetting, let's retry in a bit. */
		mdelay(10);
		printk(BIOS_INFO, ".");
	}

	if (!retries) {
		printk(BIOS_ERR, "\n%s: Failed to connect to the TPM\n",
		       __func__);
		return -1;
	}

	printk(BIOS_INFO, " done!\n");

	/* Google TPMs haven't always been 100% accurate in reflecting the spec (particularly
	 * on older versions) and are always TPM 2.0. */
	if (!CONFIG(TPM_GOOGLE)) {
		if (tpm2_read_reg(TPM_INTF_ID_REG, &intf_id, sizeof(intf_id)) != CB_SUCCESS) {
			printk(BIOS_ERR, "\n%s: Failed to read interface ID register\n",
			       __func__);
			return -1;
		}
		/* InterfaceType of all-ones means "not a FIFO TPM2 device". */
		if ((be32toh(intf_id) & 0xF) == 0xF) {
			printk(BIOS_DEBUG, "\n%s: Not a TPM2 device\n", __func__);
			return -1;
		}
	}

	// FIXME: Move this to tpm_setup()
	if (tpm_first_access_this_boot())
		/*
		 * Claim locality 0, do it only during the first
		 * initialization after reset.
		 */
		if (tpm2_claim_locality() != CB_SUCCESS)
			return -1;

	if (read_tpm_sts(&status) != CB_SUCCESS) {
		printk(BIOS_ERR, "Reading status reg failed\n");
		return -1;
	}
	if ((status & TPM_STS_FAMILY_MASK) != TPM_STS_FAMILY_TPM_2_0) {
		printk(BIOS_ERR, "unexpected TPM family value, status: %#x\n",
		       status);
		return -1;
	}

	/*
	 * Locality claimed, read the revision value and set up the tpm_info
	 * structure.
	 */
	tpm2_read_reg(TPM_RID_REG, &cmd, sizeof(cmd));
	tpm_info.vendor_id = did_vid & 0xffff;
	tpm_info.device_id = did_vid >> 16;
	tpm_info.revision = cmd;

	printk(BIOS_INFO, "Connected to device vid:did:rid of %4.4x:%4.4x:%2.2x\n",
	       tpm_info.vendor_id, tpm_info.device_id, tpm_info.revision);

	/* Do some GSC-specific things here. */
	if (CONFIG(TPM_GOOGLE)) {
		if (tpm_first_access_this_boot()) {
			/* This is called for the side-effect of printing the firmware version
			   string */
			cr50_get_firmware_version(NULL);
			cr50_set_board_cfg();
		}
	}
	return 0;
}
| 497 | |
| 498 | /* |
| 499 | * This is in seconds, certain TPM commands, like key generation, can take |
| 500 | * long time to complete. |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 501 | */ |
| 502 | #define MAX_STATUS_TIMEOUT 120 |
Tim Wawrzynczak | 591c7eb | 2022-02-15 17:59:58 -0700 | [diff] [blame] | 503 | static enum cb_err wait_for_status(uint32_t status_mask, uint32_t status_expected) |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 504 | { |
| 505 | uint32_t status; |
| 506 | struct stopwatch sw; |
| 507 | |
| 508 | stopwatch_init_usecs_expire(&sw, MAX_STATUS_TIMEOUT * 1000 * 1000); |
| 509 | do { |
| 510 | udelay(1000); |
| 511 | if (stopwatch_expired(&sw)) { |
| 512 | printk(BIOS_ERR, "failed to get expected status %x\n", |
| 513 | status_expected); |
Tim Wawrzynczak | 591c7eb | 2022-02-15 17:59:58 -0700 | [diff] [blame] | 514 | return CB_ERR; |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 515 | } |
| 516 | read_tpm_sts(&status); |
| 517 | } while ((status & status_mask) != status_expected); |
| 518 | |
Tim Wawrzynczak | 591c7eb | 2022-02-15 17:59:58 -0700 | [diff] [blame] | 519 | return CB_SUCCESS; |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 520 | } |
| 521 | |
/* Direction of a TPM FIFO transfer, from the host's point of view. */
enum fifo_transfer_direction {
	fifo_transmit = 0,
	fifo_receive = 1
};
| 526 | |
/* Union avoids having to cast away 'const' on transmit buffers. */
union fifo_transfer_buffer {
	uint8_t *rx_buffer;		/* destination for fifo_receive */
	const uint8_t *tx_buffer;	/* source for fifo_transmit */
};
| 532 | |
| 533 | /* |
| 534 | * Transfer requested number of bytes to or from TPM FIFO, accounting for the |
| 535 | * current burst count value. |
| 536 | */ |
Tim Wawrzynczak | 591c7eb | 2022-02-15 17:59:58 -0700 | [diff] [blame] | 537 | static enum cb_err __must_check fifo_transfer(size_t transfer_size, |
| 538 | union fifo_transfer_buffer buffer, |
| 539 | enum fifo_transfer_direction direction) |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 540 | { |
| 541 | size_t transaction_size; |
| 542 | size_t burst_count; |
| 543 | size_t handled_so_far = 0; |
| 544 | |
| 545 | do { |
| 546 | do { |
| 547 | /* Could be zero when TPM is busy. */ |
| 548 | burst_count = get_burst_count(); |
| 549 | } while (!burst_count); |
| 550 | |
| 551 | transaction_size = transfer_size - handled_so_far; |
| 552 | transaction_size = MIN(transaction_size, burst_count); |
| 553 | |
| 554 | /* |
| 555 | * The SPI frame header does not allow to pass more than 64 |
| 556 | * bytes. |
| 557 | */ |
| 558 | transaction_size = MIN(transaction_size, 64); |
| 559 | |
Caveh Jalali | 8274c29 | 2020-09-10 01:25:29 -0700 | [diff] [blame] | 560 | if (direction == fifo_receive) { |
Tim Wawrzynczak | 591c7eb | 2022-02-15 17:59:58 -0700 | [diff] [blame] | 561 | if (tpm2_read_reg(TPM_DATA_FIFO_REG, |
| 562 | buffer.rx_buffer + handled_so_far, |
| 563 | transaction_size) != CB_SUCCESS) |
| 564 | return CB_ERR; |
Caveh Jalali | 8274c29 | 2020-09-10 01:25:29 -0700 | [diff] [blame] | 565 | } else { |
Tim Wawrzynczak | 591c7eb | 2022-02-15 17:59:58 -0700 | [diff] [blame] | 566 | if (tpm2_write_reg(TPM_DATA_FIFO_REG, |
| 567 | buffer.tx_buffer + handled_so_far, |
| 568 | transaction_size) != CB_SUCCESS) |
| 569 | return CB_ERR; |
Caveh Jalali | 8274c29 | 2020-09-10 01:25:29 -0700 | [diff] [blame] | 570 | } |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 571 | |
| 572 | handled_so_far += transaction_size; |
| 573 | |
| 574 | } while (handled_so_far != transfer_size); |
Caveh Jalali | 8274c29 | 2020-09-10 01:25:29 -0700 | [diff] [blame] | 575 | |
Tim Wawrzynczak | 591c7eb | 2022-02-15 17:59:58 -0700 | [diff] [blame] | 576 | return CB_SUCCESS; |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 577 | } |
| 578 | |
| 579 | size_t tpm2_process_command(const void *tpm2_command, size_t command_size, |
| 580 | void *tpm2_response, size_t max_response) |
| 581 | { |
| 582 | uint32_t status; |
| 583 | uint32_t expected_status_bits; |
| 584 | size_t payload_size; |
| 585 | size_t bytes_to_go; |
| 586 | const uint8_t *cmd_body = tpm2_command; |
| 587 | uint8_t *rsp_body = tpm2_response; |
| 588 | union fifo_transfer_buffer fifo_buffer; |
| 589 | const int HEADER_SIZE = 6; |
| 590 | |
Vadim Bendebury | 731ef9b | 2016-12-15 21:49:23 -0800 | [diff] [blame] | 591 | /* Do not try using an uninitialized TPM. */ |
Patrick Georgi | c9b1359 | 2019-11-29 11:47:47 +0100 | [diff] [blame] | 592 | if (!tpm_info.vendor_id) |
Vadim Bendebury | 731ef9b | 2016-12-15 21:49:23 -0800 | [diff] [blame] | 593 | return 0; |
| 594 | |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 595 | /* Skip the two byte tag, read the size field. */ |
| 596 | payload_size = read_be32(cmd_body + 2); |
| 597 | |
| 598 | /* Sanity check. */ |
| 599 | if (payload_size != command_size) { |
| 600 | printk(BIOS_ERR, |
| 601 | "Command size mismatch: encoded %zd != requested %zd\n", |
| 602 | payload_size, command_size); |
| 603 | trace_dump("W", TPM_DATA_FIFO_REG, command_size, cmd_body, 1); |
| 604 | printk(BIOS_DEBUG, "\n"); |
| 605 | return 0; |
| 606 | } |
| 607 | |
| 608 | /* Let the TPM know that the command is coming. */ |
Tim Wawrzynczak | 591c7eb | 2022-02-15 17:59:58 -0700 | [diff] [blame] | 609 | if (write_tpm_sts(TPM_STS_COMMAND_READY) != CB_SUCCESS) { |
Caveh Jalali | 8274c29 | 2020-09-10 01:25:29 -0700 | [diff] [blame] | 610 | printk(BIOS_ERR, "TPM_STS_COMMAND_READY failed\n"); |
| 611 | return 0; |
| 612 | } |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 613 | |
| 614 | /* |
Elyes HAOUAS | 6688f46 | 2018-08-29 17:22:44 +0200 | [diff] [blame] | 615 | * TPM commands and responses written to and read from the FIFO |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 616 | * register (0x24) are datagrams of variable size, prepended by a 6 |
| 617 | * byte header. |
| 618 | * |
| 619 | * The specification description of the state machine is a bit vague, |
| 620 | * but from experience it looks like there is no need to wait for the |
| 621 | * sts.expect bit to be set, at least with the 9670 and cr50 devices. |
| 622 | * Just write the command into FIFO, making sure not to exceed the |
| 623 | * burst count or the maximum PDU size, whatever is smaller. |
| 624 | */ |
| 625 | fifo_buffer.tx_buffer = cmd_body; |
Tim Wawrzynczak | 591c7eb | 2022-02-15 17:59:58 -0700 | [diff] [blame] | 626 | if (fifo_transfer(command_size, fifo_buffer, fifo_transmit) != CB_SUCCESS) { |
Caveh Jalali | 8274c29 | 2020-09-10 01:25:29 -0700 | [diff] [blame] | 627 | printk(BIOS_ERR, "fifo_transfer %zd command bytes failed\n", |
| 628 | command_size); |
| 629 | return 0; |
| 630 | } |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 631 | |
| 632 | /* Now tell the TPM it can start processing the command. */ |
Tim Wawrzynczak | 591c7eb | 2022-02-15 17:59:58 -0700 | [diff] [blame] | 633 | if (write_tpm_sts(TPM_STS_GO) != CB_SUCCESS) { |
Caveh Jalali | 8274c29 | 2020-09-10 01:25:29 -0700 | [diff] [blame] | 634 | printk(BIOS_ERR, "TPM_STS_GO failed\n"); |
| 635 | return 0; |
| 636 | } |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 637 | |
| 638 | /* Now wait for it to report that the response is ready. */ |
Furquan Shaikh | 260b297 | 2017-04-07 13:26:01 -0700 | [diff] [blame] | 639 | expected_status_bits = TPM_STS_VALID | TPM_STS_DATA_AVAIL; |
Tim Wawrzynczak | 591c7eb | 2022-02-15 17:59:58 -0700 | [diff] [blame] | 640 | if (wait_for_status(expected_status_bits, expected_status_bits) != CB_SUCCESS) { |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 641 | /* |
| 642 | * If timed out, which should never happen, let's at least |
| 643 | * print out the offending command. |
| 644 | */ |
| 645 | trace_dump("W", TPM_DATA_FIFO_REG, command_size, cmd_body, 1); |
| 646 | printk(BIOS_DEBUG, "\n"); |
| 647 | return 0; |
| 648 | } |
| 649 | |
| 650 | /* |
| 651 | * The response is ready, let's read it. First we read the FIFO |
| 652 | * payload header, to see how much data to expect. The response header |
| 653 | * size is fixed to six bytes, the total payload size is stored in |
| 654 | * network order in the last four bytes. |
| 655 | */ |
| 656 | tpm2_read_reg(TPM_DATA_FIFO_REG, rsp_body, HEADER_SIZE); |
| 657 | |
| 658 | /* Find out the total payload size, skipping the two byte tag. */ |
| 659 | payload_size = read_be32(rsp_body + 2); |
| 660 | |
| 661 | if (payload_size > max_response) { |
| 662 | /* |
| 663 | * TODO(vbendeb): at least drain the FIFO here or somehow let |
| 664 | * the TPM know that the response can be dropped. |
| 665 | */ |
Elyes HAOUAS | 6688f46 | 2018-08-29 17:22:44 +0200 | [diff] [blame] | 666 | printk(BIOS_ERR, " TPM response too long (%zd bytes)", |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 667 | payload_size); |
| 668 | return 0; |
| 669 | } |
| 670 | |
| 671 | /* |
| 672 | * Now let's read all but the last byte in the FIFO to make sure the |
| 673 | * status register is showing correct flow control bits: 'more data' |
| 674 | * until the last byte and then 'no more data' once the last byte is |
| 675 | * read. |
| 676 | */ |
| 677 | bytes_to_go = payload_size - 1 - HEADER_SIZE; |
| 678 | fifo_buffer.rx_buffer = rsp_body + HEADER_SIZE; |
Tim Wawrzynczak | 591c7eb | 2022-02-15 17:59:58 -0700 | [diff] [blame] | 679 | if (fifo_transfer(bytes_to_go, fifo_buffer, fifo_receive) != CB_SUCCESS) { |
Caveh Jalali | 8274c29 | 2020-09-10 01:25:29 -0700 | [diff] [blame] | 680 | printk(BIOS_ERR, "fifo_transfer %zd receive bytes failed\n", |
| 681 | bytes_to_go); |
| 682 | return 0; |
| 683 | } |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 684 | |
| 685 | /* Verify that there is still data to read. */ |
| 686 | read_tpm_sts(&status); |
| 687 | if ((status & expected_status_bits) != expected_status_bits) { |
| 688 | printk(BIOS_ERR, "unexpected intermediate status %#x\n", |
| 689 | status); |
| 690 | return 0; |
| 691 | } |
| 692 | |
| 693 | /* Read the last byte of the PDU. */ |
| 694 | tpm2_read_reg(TPM_DATA_FIFO_REG, rsp_body + payload_size - 1, 1); |
| 695 | |
| 696 | /* Terminate the dump, if enabled. */ |
| 697 | if (debug_level_) |
| 698 | printk(BIOS_DEBUG, "\n"); |
| 699 | |
| 700 | /* Verify that 'data available' is not asseretd any more. */ |
| 701 | read_tpm_sts(&status); |
Furquan Shaikh | 260b297 | 2017-04-07 13:26:01 -0700 | [diff] [blame] | 702 | if ((status & expected_status_bits) != TPM_STS_VALID) { |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 703 | printk(BIOS_ERR, "unexpected final status %#x\n", status); |
| 704 | return 0; |
| 705 | } |
| 706 | |
| 707 | /* Move the TPM back to idle state. */ |
Tim Wawrzynczak | 591c7eb | 2022-02-15 17:59:58 -0700 | [diff] [blame] | 708 | if (write_tpm_sts(TPM_STS_COMMAND_READY) != CB_SUCCESS) { |
Caveh Jalali | 8274c29 | 2020-09-10 01:25:29 -0700 | [diff] [blame] | 709 | printk(BIOS_ERR, "TPM_STS_COMMAND_READY failed\n"); |
| 710 | return 0; |
| 711 | } |
Vadim Bendebury | e31d243 | 2016-04-09 18:33:49 -0700 | [diff] [blame] | 712 | |
| 713 | return payload_size; |
| 714 | } |
Karthikeyan Ramasubramanian | 7b58f94 | 2020-08-20 22:53:00 -0600 | [diff] [blame] | 715 | |
Subrata Banik | 60b2ab8 | 2022-03-09 12:55:34 +0530 | [diff] [blame] | 716 | enum cb_err tis_vendor_write(unsigned int addr, const void *buffer, size_t bytes) |
Karthikeyan Ramasubramanian | 7b58f94 | 2020-08-20 22:53:00 -0600 | [diff] [blame] | 717 | { |
Tim Wawrzynczak | 6b8599f | 2022-02-14 16:04:21 -0700 | [diff] [blame] | 718 | return tpm2_write_reg(addr, buffer, bytes); |
| 719 | } |
| 720 | |
Subrata Banik | 60b2ab8 | 2022-03-09 12:55:34 +0530 | [diff] [blame] | 721 | enum cb_err tis_vendor_read(unsigned int addr, void *buffer, size_t bytes) |
Tim Wawrzynczak | 6b8599f | 2022-02-14 16:04:21 -0700 | [diff] [blame] | 722 | { |
| 723 | return tpm2_read_reg(addr, buffer, bytes); |
Karthikeyan Ramasubramanian | 7b58f94 | 2020-08-20 22:53:00 -0600 | [diff] [blame] | 724 | } |