/* SPDX-License-Identifier: GPL-2.0-only */

#include <amdblocks/lpc.h>
#include <amdblocks/spi.h>
#include <assert.h>
#include <boot_device.h>
#include <commonlib/bsd/helpers.h>
#include <commonlib/region.h>
#include <console/console.h>
#include <delay.h>
#include <device/pci_ops.h>
#include <soc/pci_devs.h>
#include <spi_flash.h>
#include <string.h>
#include <thread.h>
#include <types.h>

/* The ROM is memory mapped just below 4GiB. Form a pointer for the base. */
#define rom_base ((void *)(uintptr_t)(0x100000000ULL - CONFIG_ROM_SIZE))

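/*
 * Bookkeeping for one logical read: 'remaining' counts down as each DMA chunk of
 * 'transfer_size' bytes completes, while 'destination' and 'source' advance in step.
 */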
struct spi_dma_transaction {
	uint8_t *destination;
	size_t source;
	size_t size;
	size_t transfer_size;
	size_t remaining;
};

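/* The ROM is already memory mapped, so mmap simply returns a pointer into the window. */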
static void *spi_dma_mmap(const struct region_device *rd, size_t offset, size_t size __unused)
{
	const struct mem_region_device *mdev;

	mdev = container_of(rd, __typeof__(*mdev), rdev);

	return &mdev->base[offset];
}

static int spi_dma_munmap(const struct region_device *rd __unused, void *mapping __unused)
{
	return 0;
}

static ssize_t spi_dma_readat_mmap(const struct region_device *rd, void *b, size_t offset,
				   size_t size)
{
	const struct mem_region_device *mdev;

	mdev = container_of(rd, __typeof__(*mdev), rdev);

	memcpy(b, &mdev->base[offset], size);

	return size;
}

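/* Both status bits live in the LPC bridge's ROM DMA host control register. */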
static bool spi_dma_is_busy(void)
{
	return pci_read_config32(SOC_LPC_DEV, LPC_ROM_DMA_EC_HOST_CONTROL)
	       & LPC_ROM_DMA_CTRL_START;
}

static bool spi_dma_has_error(void)
{
	return pci_read_config32(SOC_LPC_DEV, LPC_ROM_DMA_EC_HOST_CONTROL)
	       & LPC_ROM_DMA_CTRL_ERROR;
}

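/*
 * The DMA engine can only transfer chunks that are at least LPC_ROM_DMA_MIN_ALIGNMENT
 * bytes long and whose source and destination are aligned to that boundary; anything
 * else falls back to the mmap copy path.
 */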
static bool can_use_dma(void *destination, size_t source, size_t size)
{
	/*
	 * Print a notice if reading more than 1024 bytes using mmap. This makes
	 * it easier to debug why the SPI DMA wasn't used.
	 */
	const size_t warning_size = 1024;

	if (size < LPC_ROM_DMA_MIN_ALIGNMENT)
		return false;

	if (!IS_ALIGNED((uintptr_t)destination, LPC_ROM_DMA_MIN_ALIGNMENT)) {
		if (size > warning_size)
			printk(BIOS_DEBUG, "Target %p is unaligned\n", destination);
		return false;
	}

	if (!IS_ALIGNED(source, LPC_ROM_DMA_MIN_ALIGNMENT)) {
		if (size > warning_size)
			printk(BIOS_DEBUG, "Source %#zx is unaligned\n", source);
		return false;
	}

	return true;
}

static void start_spi_dma_transaction(struct spi_dma_transaction *transaction)
{
	uint32_t ctrl;

	printk(BIOS_SPEW, "%s: dest: %p, source: %#zx, remaining: %zu\n", __func__,
	       transaction->destination, transaction->source, transaction->remaining);

	/*
	 * We should have complete control over the DMA controller, so there shouldn't
	 * be any outstanding transactions.
	 */
	assert(!spi_dma_is_busy());
	assert(IS_ALIGNED((uintptr_t)transaction->destination, LPC_ROM_DMA_MIN_ALIGNMENT));
	assert(IS_ALIGNED(transaction->source, LPC_ROM_DMA_MIN_ALIGNMENT));
	assert(transaction->remaining >= LPC_ROM_DMA_MIN_ALIGNMENT);

	pci_write_config32(SOC_LPC_DEV, LPC_ROM_DMA_SRC_ADDR, transaction->source);
	pci_write_config32(SOC_LPC_DEV, LPC_ROM_DMA_DST_ADDR,
			   (uintptr_t)transaction->destination);

	ctrl = pci_read_config32(SOC_LPC_DEV, LPC_ROM_DMA_EC_HOST_CONTROL);
	ctrl &= ~LPC_ROM_DMA_CTRL_DW_COUNT_MASK;

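	/*
	 * Clamp this chunk to the controller's maximum and keep it DMA-aligned; any
	 * unaligned tail is finished with an mmap copy in continue_spi_dma_transaction().
	 */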
	transaction->transfer_size =
		MIN(LPC_ROM_DMA_CTRL_MAX_BYTES,
		    ALIGN_DOWN(transaction->remaining, LPC_ROM_DMA_MIN_ALIGNMENT));

	ctrl |= LPC_ROM_DMA_CTRL_DW_COUNT(transaction->transfer_size);
	ctrl |= LPC_ROM_DMA_CTRL_ERROR; /* Clear error */
	ctrl |= LPC_ROM_DMA_CTRL_START;

	/*
	 * Ensure we have exclusive access to the SPI controller before starting the LPC SPI DMA
	 * transaction.
	 */
	thread_mutex_lock(&spi_hw_mutex);

	pci_write_config32(SOC_LPC_DEV, LPC_ROM_DMA_EC_HOST_CONTROL, ctrl);
}

/* Returns true if transaction is still in progress. */
static bool continue_spi_dma_transaction(const struct region_device *rd,
					 struct spi_dma_transaction *transaction)
{
	/* Verify we are looking at the correct transaction */
	assert(pci_read_config32(SOC_LPC_DEV, LPC_ROM_DMA_SRC_ADDR) == transaction->source);

	if (spi_dma_is_busy())
		return true;

	/*
	 * Unlock the SPI mutex between DMA transactions to allow other users of the SPI
	 * controller to interleave their transactions.
	 */
	thread_mutex_unlock(&spi_hw_mutex);

	if (spi_dma_has_error()) {
		printk(BIOS_ERR, "SPI DMA failure: dest: %p, source: %#zx, size: %zu\n",
		       transaction->destination, transaction->source,
		       transaction->transfer_size);
		return false;
	}

	transaction->destination += transaction->transfer_size;
	transaction->source += transaction->transfer_size;
	transaction->remaining -= transaction->transfer_size;

	if (transaction->remaining >= LPC_ROM_DMA_MIN_ALIGNMENT) {
		start_spi_dma_transaction(transaction);
		return true;
	}

	if (transaction->remaining > 0) {
		/* Use mmap to finish off the transfer */
		spi_dma_readat_mmap(rd, transaction->destination, transaction->source,
				    transaction->remaining);

		transaction->destination += transaction->remaining;
		transaction->source += transaction->remaining;
		transaction->remaining -= transaction->remaining;
	}

	return false;
}

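/*
 * Serializes DMA users against each other: the controller has a single set of DMA
 * registers, so only one logical read may be in flight at a time. The shared
 * spi_hw_mutex, by contrast, is held only while the hardware is actually busy, so
 * other SPI traffic can interleave between chunks.
 */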
static struct thread_mutex spi_dma_hw_mutex;

static ssize_t spi_dma_readat_dma(const struct region_device *rd, void *destination,
				  size_t source, size_t size)
{
	struct spi_dma_transaction transaction = {
		.destination = destination,
		.source = source,
		.size = size,
		.remaining = size,
	};

	printk(BIOS_SPEW, "%s: start: dest: %p, source: %#zx, size: %zu\n", __func__,
	       destination, source, size);

	thread_mutex_lock(&spi_dma_hw_mutex);

	start_spi_dma_transaction(&transaction);

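	/*
	 * Busy-poll the controller; each pass through the loop kicks off the next
	 * chunk until fewer than LPC_ROM_DMA_MIN_ALIGNMENT bytes remain.
	 */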
	do {
		udelay(2);
	} while (continue_spi_dma_transaction(rd, &transaction));

	thread_mutex_unlock(&spi_dma_hw_mutex);

	printk(BIOS_SPEW, "%s: end: dest: %p, source: %#zx, remaining: %zu\n",
	       __func__, destination, source, transaction.remaining);

	/* Allow queued-up transactions to continue */
	thread_yield();

	if (transaction.remaining)
		return -1;

	return transaction.size;
}

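/*
 * Pick the transfer mechanism: DMA when the alignment and size requirements are met,
 * otherwise a plain mmap copy.
 */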
static ssize_t spi_dma_readat(const struct region_device *rd, void *b, size_t offset,
			      size_t size)
{
	if (can_use_dma(b, offset, size))
		return spi_dma_readat_dma(rd, b, offset, size);
	else
		return spi_dma_readat_mmap(rd, b, offset, size);
}

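/* Expose the memory-mapped ROM as a read-only region device whose readat can use DMA. */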
const struct region_device_ops spi_dma_rdev_ro_ops = {
	.mmap = spi_dma_mmap,
	.munmap = spi_dma_munmap,
	.readat = spi_dma_readat,
};

static const struct mem_region_device boot_dev = {
	.base = rom_base,
	.rdev = REGION_DEV_INIT(&spi_dma_rdev_ro_ops, 0, CONFIG_ROM_SIZE),
};

const struct region_device *boot_device_ro(void)
{
	return &boot_dev.rdev;
}

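/* The entire ROM is exposed through a single MMIO window just below 4GiB. */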
uint32_t spi_flash_get_mmap_windows(struct flash_mmap_window *table)
{
	table->flash_base = 0;
	table->host_base = (uint32_t)(uintptr_t)rom_base;
	table->size = CONFIG_ROM_SIZE;

	return 1;
}

/*
 * Without this magic bit, the SPI DMA controller will write 0s into the destination if an
 * MMAP read happens while a DMA transaction is in progress (e.g., the PSP is reading from
 * SPI). The bit that fixes this was added on Cezanne, Renoir and later SoCs, so the SPI
 * DMA controller is not reliable on any prior generations.
 */
static void spi_dma_fix(void)
{
	/* Internal-only register */
	uint8_t val = spi_read8(0xfc);
	val |= BIT(6);
	spi_write8(0xfc, val);
}

void boot_device_init(void)
{
	spi_dma_fix();
}