/* SPDX-License-Identifier: GPL-2.0-only */

#include <amdblocks/lpc.h>
#include <amdblocks/spi.h>
#include <assert.h>
#include <boot_device.h>
#include <commonlib/bsd/cb_err.h>
#include <commonlib/bsd/helpers.h>
#include <commonlib/region.h>
#include <console/console.h>
#include <delay.h>
#include <device/pci_ops.h>
#include <soc/pci_devs.h>
#include <spi_flash.h>
#include <string.h>
#include <thread.h>
#include <types.h>

/* The ROM is memory mapped just below 4GiB. Form a pointer for the base. */
#define rom_base ((void *)(uintptr_t)(0x100000000ULL - CONFIG_ROM_SIZE))
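/* e.g., a 16 MiB ROM yields rom_base == 0x100000000 - 0x1000000 == 0xff000000. */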
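/*
 * Bookkeeping for one logical read. destination walks through the caller's
 * buffer and source through the flash offset as chunks complete; remaining
 * counts down to zero, while size preserves the original request length for
 * the final return value.
 */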
struct spi_dma_transaction {
	uint8_t *destination;
	size_t source;
	size_t size;
	size_t transfer_size;
	size_t remaining;
};

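/*
 * The ROM is permanently decoded into the address space, so mmap is plain
 * pointer arithmetic into the window and munmap has nothing to tear down.
 */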
static void *spi_dma_mmap(const struct region_device *rd, size_t offset, size_t size __unused)
{
	const struct mem_region_device *mdev;

	mdev = container_of(rd, __typeof__(*mdev), rdev);

	return &mdev->base[offset];
}

static int spi_dma_munmap(const struct region_device *rd __unused, void *mapping __unused)
{
	return 0;
}

static ssize_t spi_dma_readat_mmap(const struct region_device *rd, void *b, size_t offset,
				   size_t size)
{
	const struct mem_region_device *mdev;

	mdev = container_of(rd, __typeof__(*mdev), rdev);

	memcpy(b, &mdev->base[offset], size);

	return size;
}

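/*
 * The controller keeps LPC_ROM_DMA_CTRL_START set for as long as a transfer
 * is in flight, so reading it back doubles as a busy flag.
 */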
static bool spi_dma_is_busy(void)
{
	return pci_read_config32(SOC_LPC_DEV, LPC_ROM_DMA_EC_HOST_CONTROL)
	       & LPC_ROM_DMA_CTRL_START;
}

static bool spi_dma_has_error(void)
{
	return pci_read_config32(SOC_LPC_DEV, LPC_ROM_DMA_EC_HOST_CONTROL)
	       & LPC_ROM_DMA_CTRL_ERROR;
}

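/*
 * DMA is only worthwhile when both addresses meet LPC_ROM_DMA_MIN_ALIGNMENT
 * and at least one minimum-sized chunk can be issued. Everything else,
 * including the unaligned tail of a large read, goes through the mmap path.
 */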
static bool can_use_dma(void *destination, size_t source, size_t size)
{
	/*
	 * Print a notice if reading more than 1024 bytes using mmap. This makes
	 * it easier to debug why the SPI DMA wasn't used.
	 */
	const size_t warning_size = 1024;

	if (size < LPC_ROM_DMA_MIN_ALIGNMENT)
		return false;

	if (!IS_ALIGNED((uintptr_t)destination, LPC_ROM_DMA_MIN_ALIGNMENT)) {
		if (size > warning_size)
			printk(BIOS_DEBUG, "Target %p is unaligned\n", destination);
		return false;
	}

	if (!IS_ALIGNED(source, LPC_ROM_DMA_MIN_ALIGNMENT)) {
		if (size > warning_size)
			printk(BIOS_DEBUG, "Source %#zx is unaligned\n", source);
		return false;
	}

	return true;
}

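/*
 * Program the controller with the next chunk and kick it off. The shared SPI
 * controller mutex is taken here and is released by
 * continue_spi_dma_transaction() once the chunk completes.
 */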
static void start_spi_dma_transaction(struct spi_dma_transaction *transaction)
{
	uint32_t ctrl;

	printk(BIOS_SPEW, "%s: dest: %p, source: %#zx, remaining: %zu\n", __func__,
	       transaction->destination, transaction->source, transaction->remaining);

	/*
	 * We should have complete control over the DMA controller, so there shouldn't
	 * be any outstanding transactions.
	 */
	assert(!spi_dma_is_busy());
	assert(IS_ALIGNED((uintptr_t)transaction->destination, LPC_ROM_DMA_MIN_ALIGNMENT));
	assert(IS_ALIGNED(transaction->source, LPC_ROM_DMA_MIN_ALIGNMENT));
	assert(transaction->remaining >= LPC_ROM_DMA_MIN_ALIGNMENT);

	pci_write_config32(SOC_LPC_DEV, LPC_ROM_DMA_SRC_ADDR, transaction->source);
	pci_write_config32(SOC_LPC_DEV, LPC_ROM_DMA_DST_ADDR,
			   (uintptr_t)transaction->destination);

	ctrl = pci_read_config32(SOC_LPC_DEV, LPC_ROM_DMA_EC_HOST_CONTROL);
	ctrl &= ~LPC_ROM_DMA_CTRL_DW_COUNT_MASK;

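	/*
	 * Clamp this chunk to what the hardware can move in one transaction:
	 * at most LPC_ROM_DMA_CTRL_MAX_BYTES, rounded down to the minimum
	 * alignment. Any sub-alignment tail is later finished via mmap.
	 */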
	transaction->transfer_size =
		MIN(LPC_ROM_DMA_CTRL_MAX_BYTES,
		    ALIGN_DOWN(transaction->remaining, LPC_ROM_DMA_MIN_ALIGNMENT));

	ctrl |= LPC_ROM_DMA_CTRL_DW_COUNT(transaction->transfer_size);
	ctrl |= LPC_ROM_DMA_CTRL_ERROR; /* Clear error */
	ctrl |= LPC_ROM_DMA_CTRL_START;

	/*
	 * Ensure we have exclusive access to the SPI controller before starting the LPC SPI DMA
	 * transaction.
	 */
	thread_mutex_lock(&spi_hw_mutex);

	pci_write_config32(SOC_LPC_DEV, LPC_ROM_DMA_EC_HOST_CONTROL, ctrl);
}

/* Returns true if the transaction is still in progress. */
static bool continue_spi_dma_transaction(const struct region_device *rd,
					 struct spi_dma_transaction *transaction)
{
	/* Verify we are looking at the correct transaction */
	assert(pci_read_config32(SOC_LPC_DEV, LPC_ROM_DMA_SRC_ADDR) == transaction->source);

	if (spi_dma_is_busy())
		return true;

	/*
	 * Unlock the SPI mutex between DMA transactions to allow other users of the SPI
	 * controller to interleave their transactions.
	 */
	thread_mutex_unlock(&spi_hw_mutex);

	if (spi_dma_has_error()) {
		printk(BIOS_ERR,
		       "SPI DMA failure: dest: %p, source: %#zx, size: %zu\n",
		       transaction->destination, transaction->source,
		       transaction->transfer_size);
		return false;
	}

	transaction->destination += transaction->transfer_size;
	transaction->source += transaction->transfer_size;
	transaction->remaining -= transaction->transfer_size;

	if (transaction->remaining >= LPC_ROM_DMA_MIN_ALIGNMENT) {
		start_spi_dma_transaction(transaction);
		return true;
	}

	if (transaction->remaining > 0) {
		/* Use mmap to finish off the transfer */
		spi_dma_readat_mmap(rd, transaction->destination, transaction->source,
				    transaction->remaining);

		transaction->destination += transaction->remaining;
		transaction->source += transaction->remaining;
		transaction->remaining = 0;
	}

	return false;
}

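/*
 * Serializes SPI DMA transactions between coreboot's cooperative threads.
 * Distinct from the shared spi_hw_mutex, which arbitrates the SPI controller
 * itself and is only held while a DMA chunk is actually in flight.
 */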
static struct thread_mutex spi_dma_hw_mutex;

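/*
 * Blocking read via DMA. The read is issued as a series of hardware-sized
 * chunks; returns the requested size on success or -1 if the controller
 * flagged an error part way through.
 */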
static ssize_t spi_dma_readat_dma(const struct region_device *rd, void *destination,
				  size_t source, size_t size)
{
	struct spi_dma_transaction transaction = {
		.destination = destination,
		.source = source,
		.size = size,
		.remaining = size,
	};

	printk(BIOS_SPEW, "%s: start: dest: %p, source: %#zx, size: %zu\n", __func__,
	       destination, source, size);

	thread_mutex_lock(&spi_dma_hw_mutex);

	start_spi_dma_transaction(&transaction);

	do {
		udelay(2);
	} while (continue_spi_dma_transaction(rd, &transaction));

	thread_mutex_unlock(&spi_dma_hw_mutex);

	printk(BIOS_SPEW, "%s: end: dest: %p, source: %#zx, remaining: %zu\n",
	       __func__, destination, source, transaction.remaining);

	/* Allow any queued-up transactions to continue */
	thread_yield();

	if (transaction.remaining)
		return -1;

	return transaction.size;
}

static ssize_t spi_dma_readat(const struct region_device *rd, void *b, size_t offset,
			      size_t size)
{
	if (can_use_dma(b, offset, size))
		return spi_dma_readat_dma(rd, b, offset, size);
	else
		return spi_dma_readat_mmap(rd, b, offset, size);
}

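/*
 * readat picks DMA for large aligned reads and quietly falls back to mmap
 * otherwise, so callers only ever see the generic region API. A minimal
 * sketch (the explicit alignment is illustrative; DMA simply won't engage
 * for buffers that don't meet LPC_ROM_DMA_MIN_ALIGNMENT):
 *
 *	uint8_t buf[4096] __aligned(64);
 *	const struct region_device *boot = boot_device_ro();
 *
 *	if (rdev_readat(boot, buf, 0, sizeof(buf)) != sizeof(buf))
 *		printk(BIOS_ERR, "boot device read failed\n");
 */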
const struct region_device_ops spi_dma_rdev_ro_ops = {
	.mmap = spi_dma_mmap,
	.munmap = spi_dma_munmap,
	.readat = spi_dma_readat,
};

static const struct mem_region_device boot_dev = {
	.base = rom_base,
	.rdev = REGION_DEV_INIT(&spi_dma_rdev_ro_ops, 0, CONFIG_ROM_SIZE),
};

const struct region_device *boot_device_ro(void)
{
	return &boot_dev.rdev;
}

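/* The flash is decoded through a single window just below 4GiB. */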
uint32_t spi_flash_get_mmap_windows(struct flash_mmap_window *table)
{
	table->flash_base = 0;
	table->host_base = (uint32_t)(uintptr_t)rom_base;
	table->size = CONFIG_ROM_SIZE;

	return 1;
}

/*
 * Without this magic bit, the SPI DMA controller will write 0s into the destination if an MMAP
 * read happens while a DMA transaction is in progress (e.g., the PSP reading from SPI). The bit
 * that fixes this was added on Cezanne, Renoir and later SoCs, so the SPI DMA controller is not
 * reliable on any prior generations.
 */
static void spi_dma_fix(void)
{
	/* Internal-only register */
	uint8_t val = spi_read8(0xfc);
	val |= BIT(6);
	spi_write8(0xfc, val);
}

void boot_device_init(void)
{
	spi_dma_fix();
}
268}