/* SPDX-License-Identifier: GPL-2.0-only */

#include <amdblocks/lpc.h>
#include <amdblocks/spi.h>
#include <assert.h>
#include <boot_device.h>
#include <cbfs.h>
#include <commonlib/bsd/helpers.h>
#include <commonlib/region.h>
#include <console/console.h>
#include <delay.h>
#include <device/pci_ops.h>
#include <soc/pci_devs.h>
#include <spi_flash.h>
#include <string.h>
#include <thread.h>
#include <types.h>

/* The ROM is memory mapped just below 4GiB. Form a pointer for the base. */
#define rom_base ((void *)(uintptr_t)(0x100000000ULL - CONFIG_ROM_SIZE))
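
/*
 * For example, with a 16 MiB ROM (CONFIG_ROM_SIZE == 0x1000000), rom_base
 * evaluates to 0xff000000, so flash offset N is readable at rom_base + N.
 * (Illustrative value only; the real size comes from the board's Kconfig.)
 */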

struct spi_dma_transaction {
	uint8_t *destination;	/* Host buffer the DMA engine writes into */
	size_t source;		/* Source offset within the flash */
	size_t size;		/* Total bytes requested by the caller */
	size_t transfer_size;	/* Size of the DMA transfer currently in flight */
	size_t remaining;	/* Bytes not yet transferred */
};

static ssize_t spi_dma_readat_mmap(const struct region_device *rd, void *b, size_t offset,
				   size_t size)
{
	const struct mem_region_device *mdev;

	mdev = container_of(rd, __typeof__(*mdev), rdev);

	memcpy(b, &mdev->base[offset], size);

	return size;
}

static bool spi_dma_is_busy(void)
{
	return pci_read_config32(SOC_LPC_DEV, LPC_ROM_DMA_EC_HOST_CONTROL)
	       & LPC_ROM_DMA_CTRL_START;
}

static bool spi_dma_has_error(void)
{
	return pci_read_config32(SOC_LPC_DEV, LPC_ROM_DMA_EC_HOST_CONTROL)
	       & LPC_ROM_DMA_CTRL_ERROR;
}

static bool can_use_dma(void *destination, size_t source, size_t size)
{
	/*
	 * Print a notice if reading more than 1024 bytes using mmap. This makes
	 * it easier to debug why the SPI DMA wasn't used.
	 */
	const size_t warning_size = 1024;

	if (size < LPC_ROM_DMA_MIN_ALIGNMENT)
		return false;

	if (!IS_ALIGNED((uintptr_t)destination, LPC_ROM_DMA_MIN_ALIGNMENT)) {
		if (size > warning_size)
			printk(BIOS_DEBUG, "Target %p is unaligned\n", destination);
		return false;
	}

	if (!IS_ALIGNED(source, LPC_ROM_DMA_MIN_ALIGNMENT)) {
		if (size > warning_size)
			printk(BIOS_DEBUG, "Source %#zx is unaligned\n", source);
		return false;
	}

	return true;
}
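
/*
 * Illustration of the checks above (assuming LPC_ROM_DMA_MIN_ALIGNMENT is 64):
 * a 4 KiB read from flash offset 0x10000 into a 64-byte-aligned buffer can use
 * DMA, while a 16-byte read, or a read into an unaligned buffer, takes the
 * mmap path instead.
 */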

static void start_spi_dma_transaction(struct spi_dma_transaction *transaction)
{
	uint32_t ctrl;

	printk(BIOS_SPEW, "%s: dest: %p, source: %#zx, remaining: %zu\n", __func__,
	       transaction->destination, transaction->source, transaction->remaining);

	/*
	 * We should have complete control over the DMA controller, so there shouldn't
	 * be any outstanding transactions.
	 */
	assert(!spi_dma_is_busy());
	assert(IS_ALIGNED((uintptr_t)transaction->destination, LPC_ROM_DMA_MIN_ALIGNMENT));
	assert(IS_ALIGNED(transaction->source, LPC_ROM_DMA_MIN_ALIGNMENT));
	assert(transaction->remaining >= LPC_ROM_DMA_MIN_ALIGNMENT);

	pci_write_config32(SOC_LPC_DEV, LPC_ROM_DMA_SRC_ADDR, transaction->source);
	pci_write_config32(SOC_LPC_DEV, LPC_ROM_DMA_DST_ADDR,
			   (uintptr_t)transaction->destination);

	ctrl = pci_read_config32(SOC_LPC_DEV, LPC_ROM_DMA_EC_HOST_CONTROL);
	ctrl &= ~LPC_ROM_DMA_CTRL_DW_COUNT_MASK;

	transaction->transfer_size =
		MIN(LPC_ROM_DMA_CTRL_MAX_BYTES,
		    ALIGN_DOWN(transaction->remaining, LPC_ROM_DMA_MIN_ALIGNMENT));

	ctrl |= LPC_ROM_DMA_CTRL_DW_COUNT(transaction->transfer_size);
	ctrl |= LPC_ROM_DMA_CTRL_ERROR; /* Clear error */
	ctrl |= LPC_ROM_DMA_CTRL_START;

	/*
	 * Ensure we have exclusive access to the SPI controller before starting the
	 * LPC SPI DMA transaction.
	 */
	thread_mutex_lock(&spi_hw_mutex);

	pci_write_config32(SOC_LPC_DEV, LPC_ROM_DMA_EC_HOST_CONTROL, ctrl);
}
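
/*
 * Worked example of the chunking above (sizes assumed for illustration): with
 * a 64 KiB LPC_ROM_DMA_CTRL_MAX_BYTES and 64-byte LPC_ROM_DMA_MIN_ALIGNMENT, a
 * 150000-byte request is issued as 65536 + 65536 + 18880 byte transactions,
 * and the 48-byte tail is finished with an mmap copy in
 * continue_spi_dma_transaction().
 */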

/* Returns true if transaction is still in progress. */
static bool continue_spi_dma_transaction(const struct region_device *rd,
					 struct spi_dma_transaction *transaction)
{
	/* Verify we are looking at the correct transaction */
	assert(pci_read_config32(SOC_LPC_DEV, LPC_ROM_DMA_SRC_ADDR) == transaction->source);

	if (spi_dma_is_busy())
		return true;

	/*
	 * Unlock the SPI mutex between DMA transactions to allow other users of the SPI
	 * controller to interleave their transactions.
	 */
	thread_mutex_unlock(&spi_hw_mutex);

	if (spi_dma_has_error()) {
		printk(BIOS_ERR, "SPI DMA failure: dest: %p, source: %#zx, size: %zu\n",
		       transaction->destination, transaction->source,
		       transaction->transfer_size);
		return false;
	}

	transaction->destination += transaction->transfer_size;
	transaction->source += transaction->transfer_size;
	transaction->remaining -= transaction->transfer_size;

	if (transaction->remaining >= LPC_ROM_DMA_MIN_ALIGNMENT) {
		start_spi_dma_transaction(transaction);
		return true;
	}

	if (transaction->remaining > 0) {
		/* Use mmap to finish off the transfer */
		spi_dma_readat_mmap(rd, transaction->destination, transaction->source,
				    transaction->remaining);

		transaction->destination += transaction->remaining;
		transaction->source += transaction->remaining;
		transaction->remaining -= transaction->remaining;
	}

	return false;
}
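
/*
 * Together, start_spi_dma_transaction() and continue_spi_dma_transaction()
 * form a small state machine: kick off an aligned chunk, poll until the
 * controller clears the start bit, advance the cursors, then either start the
 * next chunk or mop up the unaligned tail with an mmap copy.
 */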

/*
 * Serializes users of the SPI DMA engine. Note this is distinct from
 * spi_hw_mutex, which arbitrates the underlying SPI controller.
 */
static struct thread_mutex spi_dma_hw_mutex;

static ssize_t spi_dma_readat_dma(const struct region_device *rd, void *destination,
				  size_t source, size_t size)
{
	struct spi_dma_transaction transaction = {
		.destination = destination,
		.source = source,
		.size = size,
		.remaining = size,
	};

	printk(BIOS_SPEW, "%s: start: dest: %p, source: %#zx, size: %zu\n", __func__,
	       destination, source, size);

	thread_mutex_lock(&spi_dma_hw_mutex);

	start_spi_dma_transaction(&transaction);

	do {
		udelay(2);
	} while (continue_spi_dma_transaction(rd, &transaction));

	thread_mutex_unlock(&spi_dma_hw_mutex);

	printk(BIOS_SPEW, "%s: end: dest: %p, source: %#zx, remaining: %zu\n",
	       __func__, destination, source, transaction.remaining);

	/* Allow any queued-up transactions to continue */
	thread_yield();

	if (transaction.remaining)
		return -1;

	return transaction.size;
}
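
/*
 * Minimal usage sketch (hypothetical buffer and offset): a read issued through
 * the region device, e.g.
 *
 *	uint8_t buf[256] __aligned(64);
 *	rdev_readat(boot_device_ro(), buf, 0x20000, sizeof(buf));
 *
 * lands in spi_dma_readat() below, which picks the DMA or mmap path per
 * can_use_dma().
 */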

static ssize_t spi_dma_readat(const struct region_device *rd, void *b, size_t offset,
			      size_t size)
{
	if (can_use_dma(b, offset, size))
		return spi_dma_readat_dma(rd, b, offset, size);
	else
		return spi_dma_readat_mmap(rd, b, offset, size);
}

static void *spi_dma_mmap(const struct region_device *rd, size_t offset, size_t size)
{
	const struct mem_region_device *mdev;
	void *mapping;

	mdev = container_of(rd, __typeof__(*mdev), rdev);

	if (!CONFIG_CBFS_CACHE_SIZE)
		return &mdev->base[offset];

	mapping = mem_pool_alloc(&cbfs_cache, size);
	if (!mapping) {
		printk(BIOS_INFO, "%s: Could not allocate %zu bytes from memory pool\n",
		       __func__, size);
		/* Fall back to the memory map */
		return &mdev->base[offset];
	}

	if (spi_dma_readat(rd, mapping, offset, size) != size) {
		printk(BIOS_ERR, "%s: Error reading into mmap buffer\n", __func__);
		mem_pool_free(&cbfs_cache, mapping);
		/* Fall back to a memory-mapped read - not expected to fail, at least for now */
		spi_dma_readat_mmap(rd, mapping, offset, size);
	}

	return mapping;
}
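
/*
 * Design note: when the CBFS cache is enabled, spi_dma_mmap() backs mappings
 * with a cbfs_cache buffer filled via spi_dma_readat(), so map requests can
 * ride the DMA fast path instead of returning a raw pointer into the slower
 * memory-mapped ROM.
 */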

static int spi_dma_munmap(const struct region_device *rd __always_unused, void *mapping)
{
	if (CONFIG_CBFS_CACHE_SIZE)
		mem_pool_free(&cbfs_cache, mapping);
	return 0;
}

const struct region_device_ops spi_dma_rdev_ro_ops = {
	.mmap = spi_dma_mmap,
	.munmap = spi_dma_munmap,
	.readat = spi_dma_readat,
};

static const struct mem_region_device boot_dev = {
	.base = rom_base,
	.rdev = REGION_DEV_INIT(&spi_dma_rdev_ro_ops, 0, CONFIG_ROM_SIZE),
};

const struct region_device *boot_device_ro(void)
{
	return &boot_dev.rdev;
}

uint32_t spi_flash_get_mmap_windows(struct flash_mmap_window *table)
{
	table->flash_base = 0;
	table->host_base = (uint32_t)(uintptr_t)rom_base;
	table->size = CONFIG_ROM_SIZE;

	return 1;
}
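
/*
 * Example: with a 16 MiB ROM, the single window reported above maps flash
 * offsets [0, 0x1000000) to host addresses [0xff000000, 0x100000000).
 */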

/*
 * Without this magic bit, the SPI DMA controller will write 0s into the destination if an
 * MMAP read happens while a DMA transaction is in progress (e.g., the PSP reading from
 * SPI). The bit that fixes this was added to Cezanne, Renoir, and later SoCs, so the SPI
 * DMA controller is not reliable on any prior generations.
 */
static void spi_dma_fix(void)
{
	/* Internal-only register */
	uint8_t val = spi_read8(0xfc);
	val |= BIT(6);
	spi_write8(0xfc, val);
}

void boot_device_init(void)
{
	spi_dma_fix();
}