blob: 5c697790c8e73838cf65c06c3292c782eb3682b9 [file] [log] [blame]
Raul E Rangel3ba21802021-06-24 17:03:35 -06001/* SPDX-License-Identifier: GPL-2.0-only */
2
Raul E Rangeld373d5d2021-06-25 11:07:23 -06003#include <amdblocks/lpc.h>
4#include <amdblocks/spi.h>
5#include <assert.h>
Raul E Rangel3ba21802021-06-24 17:03:35 -06006#include <boot_device.h>
Raul E Rangeld373d5d2021-06-25 11:07:23 -06007#include <commonlib/bsd/cb_err.h>
8#include <commonlib/bsd/helpers.h>
Raul E Rangel3ba21802021-06-24 17:03:35 -06009#include <commonlib/region.h>
Raul E Rangeld373d5d2021-06-25 11:07:23 -060010#include <console/console.h>
11#include <delay.h>
12#include <device/pci_ops.h>
13#include <soc/pci_devs.h>
Raul E Rangel3ba21802021-06-24 17:03:35 -060014#include <spi_flash.h>
15#include <string.h>
Raul E Rangel6f3c9012021-07-12 14:19:43 -060016#include <thread.h>
Raul E Rangel3ba21802021-06-24 17:03:35 -060017#include <types.h>
18
19/* The ROM is memory mapped just below 4GiB. Form a pointer for the base. */
20#define rom_base ((void *)(uintptr_t)(0x100000000ULL - CONFIG_ROM_SIZE))
21
/*
 * Bookkeeping for one logical DMA read. The request is moved in chunks:
 * `remaining` counts down as chunks complete, while `transfer_size` holds the
 * byte count of the chunk currently programmed into the controller.
 */
struct spi_dma_transaction {
	uint8_t *destination;	/* Host memory address the controller writes to */
	size_t source;		/* Byte offset into the SPI flash to read from */
	size_t size;		/* Total number of bytes requested */
	size_t transfer_size;	/* Size of the chunk currently in flight */
	size_t remaining;	/* Bytes not yet transferred */
};
29
Raul E Rangel3ba21802021-06-24 17:03:35 -060030static void *spi_dma_mmap(const struct region_device *rd, size_t offset, size_t size __unused)
31{
32 const struct mem_region_device *mdev;
33
34 mdev = container_of(rd, __typeof__(*mdev), rdev);
35
36 return &mdev->base[offset];
37}
38
/* Mappings point into the always-present ROM window, so there is nothing to undo. */
static int spi_dma_munmap(const struct region_device *rd __unused, void *mapping __unused)
{
	return 0;
}
43
44static ssize_t spi_dma_readat_mmap(const struct region_device *rd, void *b, size_t offset,
45 size_t size)
46{
47 const struct mem_region_device *mdev;
48
49 mdev = container_of(rd, __typeof__(*mdev), rdev);
50
51 memcpy(b, &mdev->base[offset], size);
52
53 return size;
54}
55
Raul E Rangeld373d5d2021-06-25 11:07:23 -060056static bool spi_dma_is_busy(void)
57{
58 return pci_read_config32(SOC_LPC_DEV, LPC_ROM_DMA_EC_HOST_CONTROL)
59 & LPC_ROM_DMA_CTRL_START;
60}
61
62static bool spi_dma_has_error(void)
63{
64 return pci_read_config32(SOC_LPC_DEV, LPC_ROM_DMA_EC_HOST_CONTROL)
65 & LPC_ROM_DMA_CTRL_ERROR;
66}
67
68static bool can_use_dma(void *destination, size_t source, size_t size)
69{
70 /*
71 * Print a notice if reading more than 1024 bytes using mmap. This makes
72 * it easier to debug why the SPI DMA wasn't used.
73 */
74 const size_t warning_size = 1024;
75
76 if (size < LPC_ROM_DMA_MIN_ALIGNMENT)
77 return false;
78
79 if (!IS_ALIGNED((uintptr_t)destination, LPC_ROM_DMA_MIN_ALIGNMENT)) {
80 if (size > warning_size)
81 printk(BIOS_DEBUG, "Target %p is unaligned\n", destination);
82 return false;
83 }
84
85 if (!IS_ALIGNED(source, LPC_ROM_DMA_MIN_ALIGNMENT)) {
86 if (size > warning_size)
87 printk(BIOS_DEBUG, "Source %#zx is unaligned\n", source);
88 return false;
89 }
90
91 return true;
92}
93
/*
 * Program and kick off one DMA chunk for `transaction`. The chunk is capped
 * at LPC_ROM_DMA_CTRL_MAX_BYTES and rounded down to the controller's minimum
 * alignment; completion is observed later via continue_spi_dma_transaction().
 * Caller must hold exclusive access to the DMA hardware.
 */
static void start_spi_dma_transaction(struct spi_dma_transaction *transaction)
{
	uint32_t ctrl;

	printk(BIOS_SPEW, "%s: dest: %p, source: %#zx, remaining: %zu\n", __func__,
	       transaction->destination, transaction->source, transaction->remaining);

	/*
	 * We should have complete control over the DMA controller, so there shouldn't
	 * be any outstanding transactions.
	 */
	assert(!spi_dma_is_busy());
	assert(IS_ALIGNED((uintptr_t)transaction->destination, LPC_ROM_DMA_MIN_ALIGNMENT));
	assert(IS_ALIGNED(transaction->source, LPC_ROM_DMA_MIN_ALIGNMENT));
	assert(transaction->remaining >= LPC_ROM_DMA_MIN_ALIGNMENT);

	/* Program source (flash offset) and destination (host memory) before starting. */
	pci_write_config32(SOC_LPC_DEV, LPC_ROM_DMA_SRC_ADDR, transaction->source);
	pci_write_config32(SOC_LPC_DEV, LPC_ROM_DMA_DST_ADDR,
			   (uintptr_t)transaction->destination);

	ctrl = pci_read_config32(SOC_LPC_DEV, LPC_ROM_DMA_EC_HOST_CONTROL);
	ctrl &= ~LPC_ROM_DMA_CTRL_DW_COUNT_MASK;

	/* Largest aligned chunk the controller can move in one go. */
	transaction->transfer_size =
		MIN(LPC_ROM_DMA_CTRL_MAX_BYTES,
		    ALIGN_DOWN(transaction->remaining, LPC_ROM_DMA_MIN_ALIGNMENT));

	ctrl |= LPC_ROM_DMA_CTRL_DW_COUNT(transaction->transfer_size);
	ctrl |= LPC_ROM_DMA_CTRL_ERROR; /* Clear error */
	ctrl |= LPC_ROM_DMA_CTRL_START;

	/* This write starts the transfer. */
	pci_write_config32(SOC_LPC_DEV, LPC_ROM_DMA_EC_HOST_CONTROL, ctrl);
}
127
/*
 * Poll/advance the in-flight transaction. Returns true if a transfer is still
 * in progress (either the current chunk is busy, or the next chunk was just
 * started). Returns false when done or on a controller error; on error the
 * caller can detect the failure because transaction->remaining stays nonzero.
 * Any tail shorter than the DMA alignment is finished with an mmap copy.
 */
static bool continue_spi_dma_transaction(const struct region_device *rd,
					 struct spi_dma_transaction *transaction)
{
	/* Verify we are looking at the correct transaction */
	assert(pci_read_config32(SOC_LPC_DEV, LPC_ROM_DMA_SRC_ADDR) == transaction->source);

	if (spi_dma_is_busy())
		return true;

	if (spi_dma_has_error()) {
		printk(BIOS_ERR,
		       "ERROR: SPI DMA failure: dest: %p, source: %#zx, size: %zu\n",
		       transaction->destination, transaction->source,
		       transaction->transfer_size);
		/* Abort without updating remaining, so the caller sees the failure. */
		return false;
	}

	/* The programmed chunk completed; account for it. */
	transaction->destination += transaction->transfer_size;
	transaction->source += transaction->transfer_size;
	transaction->remaining -= transaction->transfer_size;

	if (transaction->remaining >= LPC_ROM_DMA_MIN_ALIGNMENT) {
		start_spi_dma_transaction(transaction);
		return true;
	}

	if (transaction->remaining > 0) {
		/* Use mmap to finish off the transfer */
		spi_dma_readat_mmap(rd, transaction->destination, transaction->source,
				    transaction->remaining);

		transaction->destination += transaction->remaining;
		transaction->source += transaction->remaining;
		transaction->remaining -= transaction->remaining;
	}

	return false;
}
167
/* Serializes use of the single SPI DMA engine across cooperative threads. */
static struct thread_mutex spi_dma_hw_mutex;
169
/*
 * Read `size` bytes from flash offset `source` into `destination` using the
 * DMA engine, holding the hardware mutex for the whole transaction and
 * busy-polling the controller. Returns the number of bytes read, or -1 if
 * the transfer terminated early (controller error left bytes remaining).
 */
static ssize_t spi_dma_readat_dma(const struct region_device *rd, void *destination,
				  size_t source, size_t size)
{
	struct spi_dma_transaction transaction = {
		.destination = destination,
		.source = source,
		.size = size,
		.remaining = size,
	};

	printk(BIOS_SPEW, "%s: start: dest: %p, source: %#zx, size: %zu\n", __func__,
	       destination, source, size);

	thread_mutex_lock(&spi_dma_hw_mutex);

	start_spi_dma_transaction(&transaction);

	/* Poll until no chunk is busy and no further chunk gets started. */
	do {
		udelay(2);
	} while (continue_spi_dma_transaction(rd, &transaction));

	thread_mutex_unlock(&spi_dma_hw_mutex);

	printk(BIOS_SPEW, "%s: end: dest: %p, source: %#zx, remaining: %zu\n",
	       __func__, destination, source, transaction.remaining);

	/* Allow queued up transaction to continue */
	thread_yield();

	/* A nonzero remainder means the DMA engine reported an error mid-stream. */
	if (transaction.remaining)
		return -1;

	return transaction.size;
}
204
/* rdev readat hook: prefer the DMA engine, fall back to mmap when alignment forbids it. */
static ssize_t spi_dma_readat(const struct region_device *rd, void *b, size_t offset,
			      size_t size)
{
	if (!can_use_dma(b, offset, size))
		return spi_dma_readat_mmap(rd, b, offset, size);

	return spi_dma_readat_dma(rd, b, offset, size);
}
213
/* Read-only rdev ops: mmap resolves into the ROM window; readat uses DMA when it can. */
const struct region_device_ops spi_dma_rdev_ro_ops = {
	.mmap = spi_dma_mmap,
	.munmap = spi_dma_munmap,
	.readat = spi_dma_readat,
};
219
/* The boot flash, memory mapped just below 4GiB, covering the whole ROM. */
static const struct mem_region_device boot_dev = {
	.base = rom_base,
	.rdev = REGION_DEV_INIT(&spi_dma_rdev_ro_ops, 0, CONFIG_ROM_SIZE),
};
224
/* Expose the SPI DMA backed flash mapping as the read-only boot device. */
const struct region_device *boot_device_ro(void)
{
	return &boot_dev.rdev;
}
229
230uint32_t spi_flash_get_mmap_windows(struct flash_mmap_window *table)
231{
232 table->flash_base = 0;
233 table->host_base = (uint32_t)(uintptr_t)rom_base;
234 table->size = CONFIG_ROM_SIZE;
235
236 return 1;
237}
Raul E Rangeld373d5d2021-06-25 11:07:23 -0600238
239/*
240 * Without this magic bit, the SPI DMA controller will write 0s into the destination if an MMAP
241 * read happens while a DMA transaction is in progress. i.e., PSP is reading from SPI. The bit
242 * that fixes this was added to Cezanne, Renoir and later SoCs. So the SPI DMA controller is not
243 * reliable on any prior generations.
244 */
static void spi_dma_fix(void)
{
	/* Internal only registers */
	const uint8_t val = spi_read8(0xfc);

	spi_write8(0xfc, val | BIT(6));
}
252
/* Boot-device hook: apply the SPI DMA controller fix before the flash is used. */
void boot_device_init(void)
{
	spi_dma_fix();
}