Raul E Rangel | 3ba2180 | 2021-06-24 17:03:35 -0600 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
| 2 | |
Raul E Rangel | d373d5d | 2021-06-25 11:07:23 -0600 | [diff] [blame] | 3 | #include <amdblocks/lpc.h> |
| 4 | #include <amdblocks/spi.h> |
| 5 | #include <assert.h> |
Raul E Rangel | 3ba2180 | 2021-06-24 17:03:35 -0600 | [diff] [blame] | 6 | #include <boot_device.h> |
Raul E Rangel | d373d5d | 2021-06-25 11:07:23 -0600 | [diff] [blame] | 7 | #include <commonlib/bsd/helpers.h> |
Raul E Rangel | 3ba2180 | 2021-06-24 17:03:35 -0600 | [diff] [blame] | 8 | #include <commonlib/region.h> |
Raul E Rangel | d373d5d | 2021-06-25 11:07:23 -0600 | [diff] [blame] | 9 | #include <console/console.h> |
| 10 | #include <delay.h> |
| 11 | #include <device/pci_ops.h> |
| 12 | #include <soc/pci_devs.h> |
Raul E Rangel | 3ba2180 | 2021-06-24 17:03:35 -0600 | [diff] [blame] | 13 | #include <spi_flash.h> |
| 14 | #include <string.h> |
Raul E Rangel | 6f3c901 | 2021-07-12 14:19:43 -0600 | [diff] [blame] | 15 | #include <thread.h> |
Raul E Rangel | 3ba2180 | 2021-06-24 17:03:35 -0600 | [diff] [blame] | 16 | #include <types.h> |
| 17 | |
| 18 | /* The ROM is memory mapped just below 4GiB. Form a pointer for the base. */ |
| 19 | #define rom_base ((void *)(uintptr_t)(0x100000000ULL - CONFIG_ROM_SIZE)) |
| 20 | |
/* Bookkeeping for one in-flight SPI DMA read, advanced one chunk at a time. */
struct spi_dma_transaction {
	uint8_t *destination;	/* Next write position in the caller's buffer */
	size_t source;		/* Next flash offset to read from */
	size_t size;		/* Total requested size of the read */
	size_t transfer_size;	/* Size of the DMA chunk currently in flight */
	size_t remaining;	/* Bytes not yet transferred */
};
| 28 | |
Raul E Rangel | 3ba2180 | 2021-06-24 17:03:35 -0600 | [diff] [blame] | 29 | static void *spi_dma_mmap(const struct region_device *rd, size_t offset, size_t size __unused) |
| 30 | { |
| 31 | const struct mem_region_device *mdev; |
| 32 | |
| 33 | mdev = container_of(rd, __typeof__(*mdev), rdev); |
| 34 | |
| 35 | return &mdev->base[offset]; |
| 36 | } |
| 37 | |
| 38 | static int spi_dma_munmap(const struct region_device *rd __unused, void *mapping __unused) |
| 39 | { |
| 40 | return 0; |
| 41 | } |
| 42 | |
| 43 | static ssize_t spi_dma_readat_mmap(const struct region_device *rd, void *b, size_t offset, |
| 44 | size_t size) |
| 45 | { |
| 46 | const struct mem_region_device *mdev; |
| 47 | |
| 48 | mdev = container_of(rd, __typeof__(*mdev), rdev); |
| 49 | |
| 50 | memcpy(b, &mdev->base[offset], size); |
| 51 | |
| 52 | return size; |
| 53 | } |
| 54 | |
Raul E Rangel | d373d5d | 2021-06-25 11:07:23 -0600 | [diff] [blame] | 55 | static bool spi_dma_is_busy(void) |
| 56 | { |
| 57 | return pci_read_config32(SOC_LPC_DEV, LPC_ROM_DMA_EC_HOST_CONTROL) |
| 58 | & LPC_ROM_DMA_CTRL_START; |
| 59 | } |
| 60 | |
| 61 | static bool spi_dma_has_error(void) |
| 62 | { |
| 63 | return pci_read_config32(SOC_LPC_DEV, LPC_ROM_DMA_EC_HOST_CONTROL) |
| 64 | & LPC_ROM_DMA_CTRL_ERROR; |
| 65 | } |
| 66 | |
| 67 | static bool can_use_dma(void *destination, size_t source, size_t size) |
| 68 | { |
| 69 | /* |
| 70 | * Print a notice if reading more than 1024 bytes using mmap. This makes |
| 71 | * it easier to debug why the SPI DMA wasn't used. |
| 72 | */ |
| 73 | const size_t warning_size = 1024; |
| 74 | |
| 75 | if (size < LPC_ROM_DMA_MIN_ALIGNMENT) |
| 76 | return false; |
| 77 | |
| 78 | if (!IS_ALIGNED((uintptr_t)destination, LPC_ROM_DMA_MIN_ALIGNMENT)) { |
| 79 | if (size > warning_size) |
| 80 | printk(BIOS_DEBUG, "Target %p is unaligned\n", destination); |
| 81 | return false; |
| 82 | } |
| 83 | |
| 84 | if (!IS_ALIGNED(source, LPC_ROM_DMA_MIN_ALIGNMENT)) { |
| 85 | if (size > warning_size) |
| 86 | printk(BIOS_DEBUG, "Source %#zx is unaligned\n", source); |
| 87 | return false; |
| 88 | } |
| 89 | |
| 90 | return true; |
| 91 | } |
| 92 | |
/*
 * Program the LPC ROM DMA engine with the next chunk of `transaction` and set
 * the START bit. The shared SPI controller mutex is taken just before the
 * engine is kicked; continue_spi_dma_transaction() releases it once the
 * engine reports idle.
 */
static void start_spi_dma_transaction(struct spi_dma_transaction *transaction)
{
	uint32_t ctrl;

	printk(BIOS_SPEW, "%s: dest: %p, source: %#zx, remaining: %zu\n", __func__,
	       transaction->destination, transaction->source, transaction->remaining);

	/*
	 * We should have complete control over the DMA controller, so there shouldn't
	 * be any outstanding transactions.
	 */
	assert(!spi_dma_is_busy());
	assert(IS_ALIGNED((uintptr_t)transaction->destination, LPC_ROM_DMA_MIN_ALIGNMENT));
	assert(IS_ALIGNED(transaction->source, LPC_ROM_DMA_MIN_ALIGNMENT));
	assert(transaction->remaining >= LPC_ROM_DMA_MIN_ALIGNMENT);

	pci_write_config32(SOC_LPC_DEV, LPC_ROM_DMA_SRC_ADDR, transaction->source);
	pci_write_config32(SOC_LPC_DEV, LPC_ROM_DMA_DST_ADDR,
			   (uintptr_t)transaction->destination);

	ctrl = pci_read_config32(SOC_LPC_DEV, LPC_ROM_DMA_EC_HOST_CONTROL);
	ctrl &= ~LPC_ROM_DMA_CTRL_DW_COUNT_MASK;

	/*
	 * One kick moves at most LPC_ROM_DMA_CTRL_MAX_BYTES, rounded down to the
	 * engine's minimum alignment; any unaligned tail is finished via mmap in
	 * continue_spi_dma_transaction().
	 */
	transaction->transfer_size =
		MIN(LPC_ROM_DMA_CTRL_MAX_BYTES,
		    ALIGN_DOWN(transaction->remaining, LPC_ROM_DMA_MIN_ALIGNMENT));

	ctrl |= LPC_ROM_DMA_CTRL_DW_COUNT(transaction->transfer_size);
	ctrl |= LPC_ROM_DMA_CTRL_ERROR; /* Clear error */
	ctrl |= LPC_ROM_DMA_CTRL_START;

	/*
	 * Ensure we have exclusive access to the SPI controller before starting the LPC SPI DMA
	 * transaction.
	 */
	thread_mutex_lock(&spi_hw_mutex);

	pci_write_config32(SOC_LPC_DEV, LPC_ROM_DMA_EC_HOST_CONTROL, ctrl);
}
| 132 | |
/*
 * Poll and advance an in-flight DMA transaction. Returns true while the
 * transfer is still in progress (the current chunk is busy, or a new chunk
 * was just started). When a chunk completes, the SPI mutex taken in
 * start_spi_dma_transaction() is released so other SPI users can interleave.
 * On a DMA error this returns false with transaction->remaining left
 * non-zero, which the caller uses to detect the failure.
 */
static bool continue_spi_dma_transaction(const struct region_device *rd,
					 struct spi_dma_transaction *transaction)
{
	/* Verify we are looking at the correct transaction */
	assert(pci_read_config32(SOC_LPC_DEV, LPC_ROM_DMA_SRC_ADDR) == transaction->source);

	if (spi_dma_is_busy())
		return true;

	/*
	 * Unlock the SPI mutex between DMA transactions to allow other users of the SPI
	 * controller to interleave their transactions.
	 */
	thread_mutex_unlock(&spi_hw_mutex);

	if (spi_dma_has_error()) {
		printk(BIOS_ERR,
		       "ERROR: SPI DMA failure: dest: %p, source: %#zx, size: %zu\n",
		       transaction->destination, transaction->source,
		       transaction->transfer_size);
		return false;
	}

	/* Account for the chunk that just finished. */
	transaction->destination += transaction->transfer_size;
	transaction->source += transaction->transfer_size;
	transaction->remaining -= transaction->transfer_size;

	/* Start the next DMA chunk if at least one aligned unit remains. */
	if (transaction->remaining >= LPC_ROM_DMA_MIN_ALIGNMENT) {
		start_spi_dma_transaction(transaction);
		return true;
	}

	if (transaction->remaining > 0) {
		/* Use mmap to finish off the transfer */
		spi_dma_readat_mmap(rd, transaction->destination, transaction->source,
				    transaction->remaining);

		transaction->destination += transaction->remaining;
		transaction->source += transaction->remaining;
		transaction->remaining -= transaction->remaining;
	}

	return false;
}
| 178 | |
/* Serializes use of the LPC ROM DMA engine between cooperative threads. */
static struct thread_mutex spi_dma_hw_mutex;
| 180 | |
/*
 * Read `size` bytes at flash offset `source` into `destination` using the
 * LPC ROM DMA engine, polling with short delays until the transfer finishes.
 * Only one thread drives the DMA engine at a time (spi_dma_hw_mutex), which
 * is distinct from the per-chunk SPI controller mutex held inside
 * start/continue. Returns the number of bytes read, or -1 on DMA error.
 */
static ssize_t spi_dma_readat_dma(const struct region_device *rd, void *destination,
				  size_t source, size_t size)
{
	struct spi_dma_transaction transaction = {
		.destination = destination,
		.source = source,
		.size = size,
		.remaining = size,
	};

	printk(BIOS_SPEW, "%s: start: dest: %p, source: %#zx, size: %zu\n", __func__,
	       destination, source, size);

	thread_mutex_lock(&spi_dma_hw_mutex);

	start_spi_dma_transaction(&transaction);

	do {
		udelay(2);
	} while (continue_spi_dma_transaction(rd, &transaction));

	thread_mutex_unlock(&spi_dma_hw_mutex);

	printk(BIOS_SPEW, "%s: end: dest: %p, source: %#zx, remaining: %zu\n",
	       __func__, destination, source, transaction.remaining);

	/* Allow queued up transaction to continue */
	thread_yield();

	/* A non-zero remainder means the transfer aborted on a DMA error. */
	if (transaction.remaining)
		return -1;

	return transaction.size;
}
| 215 | |
| 216 | static ssize_t spi_dma_readat(const struct region_device *rd, void *b, size_t offset, |
| 217 | size_t size) |
| 218 | { |
| 219 | if (can_use_dma(b, offset, size)) |
| 220 | return spi_dma_readat_dma(rd, b, offset, size); |
| 221 | else |
| 222 | return spi_dma_readat_mmap(rd, b, offset, size); |
| 223 | } |
| 224 | |
/* region_device ops backed by SPI DMA reads with an mmap fallback. */
const struct region_device_ops spi_dma_rdev_ro_ops = {
	.mmap = spi_dma_mmap,
	.munmap = spi_dma_munmap,
	.readat = spi_dma_readat,
};
| 230 | |
/* Single boot device covering the whole ROM mapped just below 4GiB. */
static const struct mem_region_device boot_dev = {
	.base = rom_base,
	.rdev = REGION_DEV_INIT(&spi_dma_rdev_ro_ops, 0, CONFIG_ROM_SIZE),
};
| 235 | |
/* Expose the memory-mapped ROM as the read-only boot device. */
const struct region_device *boot_device_ro(void)
{
	return &boot_dev.rdev;
}
| 240 | |
| 241 | uint32_t spi_flash_get_mmap_windows(struct flash_mmap_window *table) |
| 242 | { |
| 243 | table->flash_base = 0; |
| 244 | table->host_base = (uint32_t)(uintptr_t)rom_base; |
| 245 | table->size = CONFIG_ROM_SIZE; |
| 246 | |
| 247 | return 1; |
| 248 | } |
Raul E Rangel | d373d5d | 2021-06-25 11:07:23 -0600 | [diff] [blame] | 249 | |
| 250 | /* |
| 251 | * Without this magic bit, the SPI DMA controller will write 0s into the destination if an MMAP |
| 252 | * read happens while a DMA transaction is in progress. i.e., PSP is reading from SPI. The bit |
| 253 | * that fixes this was added to Cezanne, Renoir and later SoCs. So the SPI DMA controller is not |
| 254 | * reliable on any prior generations. |
| 255 | */ |
static void spi_dma_fix(void)
{
	/*
	 * Internal only registers: sets bit 6 in SPI controller register 0xfc.
	 * NOTE(review): the meaning of this bit comes from the erratum described
	 * in the comment above; it is not in public documentation — confirm
	 * applicability per SoC generation.
	 */
	uint8_t val = spi_read8(0xfc);
	val |= BIT(6);
	spi_write8(0xfc, val);
}
| 263 | |
/* Boot-device hook: apply the SPI DMA erratum workaround before first use. */
void boot_device_init(void)
{
	spi_dma_fix();
}