/* SPDX-License-Identifier: GPL-2.0-only */
/* NVIDIA Tegra SPI controller (T114 and later) */

#include <arch/cache.h>
#include <device/mmio.h>
#include <assert.h>
#include <console/console.h>
#include <delay.h>
#include <soc/addressmap.h>
#include <soc/dma.h>
#include <soc/spi.h>
#include <spi-generic.h>
#include <spi_flash.h>
#include <timer.h>
#include <types.h>

#if defined(CONFIG_DEBUG_SPI) && CONFIG_DEBUG_SPI
# define DEBUG_SPI(x, ...) printk(BIOS_DEBUG, "TEGRA_SPI: " x, ##__VA_ARGS__)
#else
# define DEBUG_SPI(x, ...)
#endif

/*
 * 64 packets in FIFO mode, BLOCK_SIZE packets in DMA mode. Packets can vary
 * in size from 4 to 32 bits. To keep things simple we'll use 8-bit packets.
 */
#define SPI_PACKET_SIZE_BYTES		1
#define SPI_MAX_TRANSFER_BYTES_FIFO	(64 * SPI_PACKET_SIZE_BYTES)
#define SPI_MAX_TRANSFER_BYTES_DMA	(65535 * SPI_PACKET_SIZE_BYTES)

/*
 * This is used to work around an issue where it may take some time for
 * packets to show up in the FIFO after they have been received and the
 * BLOCK_COUNT has been incremented.
 */
#define SPI_FIFO_XFER_TIMEOUT_US	1000

/* COMMAND1 */
#define SPI_CMD1_GO			(1 << 31)
#define SPI_CMD1_M_S			(1 << 30)
#define SPI_CMD1_MODE_MASK		0x3
#define SPI_CMD1_MODE_SHIFT		28
#define SPI_CMD1_CS_SEL_MASK		0x3
#define SPI_CMD1_CS_SEL_SHIFT		26
#define SPI_CMD1_CS_POL_INACTIVE3	(1 << 25)
#define SPI_CMD1_CS_POL_INACTIVE2	(1 << 24)
#define SPI_CMD1_CS_POL_INACTIVE1	(1 << 23)
#define SPI_CMD1_CS_POL_INACTIVE0	(1 << 22)
#define SPI_CMD1_CS_SW_HW		(1 << 21)
#define SPI_CMD1_CS_SW_VAL		(1 << 20)
#define SPI_CMD1_IDLE_SDA_MASK		0x3
#define SPI_CMD1_IDLE_SDA_SHIFT		18
#define SPI_CMD1_BIDIR			(1 << 17)
#define SPI_CMD1_LSBI_FE		(1 << 16)
#define SPI_CMD1_LSBY_FE		(1 << 15)
#define SPI_CMD1_BOTH_EN_BIT		(1 << 14)
#define SPI_CMD1_BOTH_EN_BYTE		(1 << 13)
#define SPI_CMD1_RX_EN			(1 << 12)
#define SPI_CMD1_TX_EN			(1 << 11)
#define SPI_CMD1_PACKED			(1 << 5)
#define SPI_CMD1_BIT_LEN_MASK		0x1f
#define SPI_CMD1_BIT_LEN_SHIFT		0

/* COMMAND2 */
#define SPI_CMD2_TX_CLK_TAP_DELAY	(1 << 6)
#define SPI_CMD2_TX_CLK_TAP_DELAY_MASK	(0x3F << 6)
#define SPI_CMD2_RX_CLK_TAP_DELAY	(1 << 0)
#define SPI_CMD2_RX_CLK_TAP_DELAY_MASK	(0x3F << 0)

/* SPI_TRANS_STATUS */
#define SPI_STATUS_RDY			(1 << 30)
#define SPI_STATUS_SLV_IDLE_COUNT_MASK	0xff
#define SPI_STATUS_SLV_IDLE_COUNT_SHIFT	16
#define SPI_STATUS_BLOCK_COUNT		0xffff
#define SPI_STATUS_BLOCK_COUNT_SHIFT	0

/* SPI_FIFO_STATUS */
#define SPI_FIFO_STATUS_CS_INACTIVE			(1 << 31)
#define SPI_FIFO_STATUS_FRAME_END			(1 << 30)
#define SPI_FIFO_STATUS_RX_FIFO_FULL_COUNT_MASK		0x7f
#define SPI_FIFO_STATUS_RX_FIFO_FULL_COUNT_SHIFT	23
#define SPI_FIFO_STATUS_TX_FIFO_EMPTY_COUNT_MASK	0x7f
#define SPI_FIFO_STATUS_TX_FIFO_EMPTY_COUNT_SHIFT	16
#define SPI_FIFO_STATUS_RX_FIFO_FLUSH			(1 << 15)
#define SPI_FIFO_STATUS_TX_FIFO_FLUSH			(1 << 14)
#define SPI_FIFO_STATUS_ERR				(1 << 8)
#define SPI_FIFO_STATUS_TX_FIFO_OVF			(1 << 7)
#define SPI_FIFO_STATUS_TX_FIFO_UNR			(1 << 6)
#define SPI_FIFO_STATUS_RX_FIFO_OVF			(1 << 5)
#define SPI_FIFO_STATUS_RX_FIFO_UNR			(1 << 4)
#define SPI_FIFO_STATUS_TX_FIFO_FULL			(1 << 3)
#define SPI_FIFO_STATUS_TX_FIFO_EMPTY			(1 << 2)
#define SPI_FIFO_STATUS_RX_FIFO_FULL			(1 << 1)
#define SPI_FIFO_STATUS_RX_FIFO_EMPTY			(1 << 0)

/* SPI_DMA_CTL */
#define SPI_DMA_CTL_DMA			(1 << 31)
#define SPI_DMA_CTL_CONT		(1 << 30)
#define SPI_DMA_CTL_IE_RX		(1 << 29)
#define SPI_DMA_CTL_IE_TX		(1 << 28)
#define SPI_DMA_CTL_RX_TRIG_MASK	0x3
#define SPI_DMA_CTL_RX_TRIG_SHIFT	19
#define SPI_DMA_CTL_TX_TRIG_MASK	0x3
#define SPI_DMA_CTL_TX_TRIG_SHIFT	15

/* SPI_DMA_BLK */
#define SPI_DMA_CTL_BLOCK_SIZE_MASK	0xffff
#define SPI_DMA_CTL_BLOCK_SIZE_SHIFT	0

static struct tegra_spi_channel tegra_spi_channels[] = {
	/*
	 * Note: Tegra pinmux must be set up for the corresponding SPI channel
	 * in order for its registers to be accessible. If pinmux has not been
	 * set up, access to the channel's registers will simply hang.
	 *
	 * TODO(dhendrix): Clarify or remove this comment (is clock setup
	 * necessary first, or just pinmux, or both?)
	 */
	{
		.slave = { .bus = 1, },
		.regs = (struct tegra_spi_regs *)TEGRA_SPI1_BASE,
		.req_sel = APBDMA_SLAVE_SL2B1,
	},
	{
		.slave = { .bus = 2, },
		.regs = (struct tegra_spi_regs *)TEGRA_SPI2_BASE,
		.req_sel = APBDMA_SLAVE_SL2B2,
	},
	{
		.slave = { .bus = 3, },
		.regs = (struct tegra_spi_regs *)TEGRA_SPI3_BASE,
		.req_sel = APBDMA_SLAVE_SL2B3,
	},
	{
		.slave = { .bus = 4, },
		.regs = (struct tegra_spi_regs *)TEGRA_SPI4_BASE,
		.req_sel = APBDMA_SLAVE_SL2B4,
	},
	{
		.slave = { .bus = 5, },
		.regs = (struct tegra_spi_regs *)TEGRA_SPI5_BASE,
		.req_sel = APBDMA_SLAVE_SL2B5,
	},
	{
		.slave = { .bus = 6, },
		.regs = (struct tegra_spi_regs *)TEGRA_SPI6_BASE,
		.req_sel = APBDMA_SLAVE_SL2B6,
	},
};

enum spi_direction {
	SPI_SEND,
	SPI_RECEIVE,
};

struct tegra_spi_channel *tegra_spi_init(unsigned int bus)
{
	int i;
	struct tegra_spi_channel *spi = NULL;

	for (i = 0; i < ARRAY_SIZE(tegra_spi_channels); i++) {
		if (tegra_spi_channels[i].slave.bus == bus) {
			spi = &tegra_spi_channels[i];
			break;
		}
	}
	if (!spi)
		return NULL;

	/* software drives chip-select, set value to high */
	setbits32(&spi->regs->command1,
		  SPI_CMD1_CS_SW_HW | SPI_CMD1_CS_SW_VAL);

	/* 8-bit transfers, unpacked mode, most significant bit first */
	clrbits32(&spi->regs->command1,
		  SPI_CMD1_BIT_LEN_MASK | SPI_CMD1_PACKED);
	setbits32(&spi->regs->command1, 7 << SPI_CMD1_BIT_LEN_SHIFT);

	return spi;
}

static struct tegra_spi_channel * const to_tegra_spi(int bus)
{
	return &tegra_spi_channels[bus - 1];
}

static unsigned int tegra_spi_speed(unsigned int bus)
{
	/* FIXME: implement this properly, for now use max value (50 MHz) */
	return 50000000;
}

static int spi_ctrlr_claim_bus(const struct spi_slave *slave)
{
	struct tegra_spi_regs *regs = to_tegra_spi(slave->bus)->regs;
	u32 val;

	tegra_spi_init(slave->bus);

	val = read32(&regs->command1);

	/* select appropriate chip-select line */
	val &= ~(SPI_CMD1_CS_SEL_MASK << SPI_CMD1_CS_SEL_SHIFT);
	val |= (slave->cs << SPI_CMD1_CS_SEL_SHIFT);

	/* drive chip-select with the inverse of the "inactive" value */
	if (val & (SPI_CMD1_CS_POL_INACTIVE0 << slave->cs))
		val &= ~SPI_CMD1_CS_SW_VAL;
	else
		val |= SPI_CMD1_CS_SW_VAL;

	write32(&regs->command1, val);
	return 0;
}

static void spi_ctrlr_release_bus(const struct spi_slave *slave)
{
	struct tegra_spi_regs *regs = to_tegra_spi(slave->bus)->regs;
	u32 val;

	val = read32(&regs->command1);

	if (val & (SPI_CMD1_CS_POL_INACTIVE0 << slave->cs))
		val |= SPI_CMD1_CS_SW_VAL;
	else
		val &= ~SPI_CMD1_CS_SW_VAL;

	write32(&regs->command1, val);
}

static void dump_fifo_status(struct tegra_spi_channel *spi)
{
	u32 status = read32(&spi->regs->fifo_status);

	printk(BIOS_INFO, "Raw FIFO status: 0x%08x\n", status);
	if (status & SPI_FIFO_STATUS_TX_FIFO_OVF)
		printk(BIOS_INFO, "\tTx overflow detected\n");
	if (status & SPI_FIFO_STATUS_TX_FIFO_UNR)
		printk(BIOS_INFO, "\tTx underrun detected\n");
	if (status & SPI_FIFO_STATUS_RX_FIFO_OVF)
		printk(BIOS_INFO, "\tRx overflow detected\n");
	if (status & SPI_FIFO_STATUS_RX_FIFO_UNR)
		printk(BIOS_INFO, "\tRx underrun detected\n");

	printk(BIOS_INFO, "TX_FIFO: 0x%08x, TX_DATA: 0x%08x\n",
		read32(&spi->regs->tx_fifo), read32(&spi->regs->tx_data));
	printk(BIOS_INFO, "RX_FIFO: 0x%08x, RX_DATA: 0x%08x\n",
		read32(&spi->regs->rx_fifo), read32(&spi->regs->rx_data));
}

static void clear_fifo_status(struct tegra_spi_channel *spi)
{
	clrbits32(&spi->regs->fifo_status,
		  SPI_FIFO_STATUS_ERR |
		  SPI_FIFO_STATUS_TX_FIFO_OVF |
		  SPI_FIFO_STATUS_TX_FIFO_UNR |
		  SPI_FIFO_STATUS_RX_FIFO_OVF |
		  SPI_FIFO_STATUS_RX_FIFO_UNR);
}

static void dump_spi_regs(struct tegra_spi_channel *spi)
{
	printk(BIOS_INFO, "SPI regs:\n"
		"\tdma_blk: 0x%08x\n"
		"\tcommand1: 0x%08x\n"
		"\tdma_ctl: 0x%08x\n"
		"\ttrans_status: 0x%08x\n",
		read32(&spi->regs->dma_blk),
		read32(&spi->regs->command1),
		read32(&spi->regs->dma_ctl),
		read32(&spi->regs->trans_status));
}

static void dump_dma_regs(struct apb_dma_channel *dma)
{
	if (dma == NULL)
		return;

	printk(BIOS_INFO, "DMA regs:\n"
		"\tahb_ptr: 0x%08x\n"
		"\tapb_ptr: 0x%08x\n"
		"\tahb_seq: 0x%08x\n"
		"\tapb_seq: 0x%08x\n"
		"\tcsr: 0x%08x\n"
		"\tcsre: 0x%08x\n"
		"\twcount: 0x%08x\n"
		"\tdma_byte_sta: 0x%08x\n"
		"\tword_transfer: 0x%08x\n",
		read32(&dma->regs->ahb_ptr),
		read32(&dma->regs->apb_ptr),
		read32(&dma->regs->ahb_seq),
		read32(&dma->regs->apb_seq),
		read32(&dma->regs->csr),
		read32(&dma->regs->csre),
		read32(&dma->regs->wcount),
		read32(&dma->regs->dma_byte_sta),
		read32(&dma->regs->word_transfer));
}

static inline unsigned int spi_byte_count(struct tegra_spi_channel *spi)
{
	/* FIXME: Make this take total packet size into account */
	return read32(&spi->regs->trans_status) &
		(SPI_STATUS_BLOCK_COUNT << SPI_STATUS_BLOCK_COUNT_SHIFT);
}

/*
 * This calls udelay() with a calculated value based on the SPI speed and
 * number of bytes remaining to be transferred. It assumes that if the
 * calculated delay period is less than MIN_DELAY_US then it is probably
 * not worth the overhead of yielding.
 */
#define MIN_DELAY_US 250
static void spi_delay(struct tegra_spi_channel *spi,
			unsigned int bytes_remaining)
{
	unsigned int ns_per_byte, delay_us;

	ns_per_byte = 1000000000 / (tegra_spi_speed(spi->slave.bus) / 8);
	delay_us = (ns_per_byte * bytes_remaining) / 1000;

	if (delay_us < MIN_DELAY_US)
		return;

	udelay(delay_us);
}
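
/*
 * Worked example of the heuristic above (illustrative numbers only): at the
 * fixed 50 MHz bus clock reported by tegra_spi_speed(), one 8-bit packet
 * takes 1000000000 / (50000000 / 8) = 160 ns on the wire, so the 250 us
 * MIN_DELAY_US threshold corresponds to roughly 250000 / 160, i.e. ~1560
 * bytes remaining. Transfers with a shorter residue spin without yielding.
 */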

static void tegra_spi_wait(struct tegra_spi_channel *spi)
{
	unsigned int count, dma_blk;

	dma_blk = 1 + (read32(&spi->regs->dma_blk) &
		(SPI_DMA_CTL_BLOCK_SIZE_MASK << SPI_DMA_CTL_BLOCK_SIZE_SHIFT));

	while ((count = spi_byte_count(spi)) != dma_blk)
		spi_delay(spi, dma_blk - count);
}

static int fifo_error(struct tegra_spi_channel *spi)
{
	return read32(&spi->regs->fifo_status) & SPI_FIFO_STATUS_ERR ? 1 : 0;
}

static int tegra_spi_pio_prepare(struct tegra_spi_channel *spi,
			unsigned int bytes, enum spi_direction dir)
{
	u8 *p = spi->out_buf;
	unsigned int todo = MIN(bytes, SPI_MAX_TRANSFER_BYTES_FIFO);
	u32 flush_mask, enable_mask;

	if (dir == SPI_SEND) {
		flush_mask = SPI_FIFO_STATUS_TX_FIFO_FLUSH;
		enable_mask = SPI_CMD1_TX_EN;
	} else {
		flush_mask = SPI_FIFO_STATUS_RX_FIFO_FLUSH;
		enable_mask = SPI_CMD1_RX_EN;
	}

	setbits32(&spi->regs->fifo_status, flush_mask);
	while (read32(&spi->regs->fifo_status) & flush_mask)
		;

	setbits32(&spi->regs->command1, enable_mask);

	/* BLOCK_SIZE in SPI_DMA_BLK register applies to both DMA and
	 * PIO transfers */
	write32(&spi->regs->dma_blk, todo - 1);

	if (dir == SPI_SEND) {
		unsigned int to_fifo = bytes;
		while (to_fifo) {
			write32(&spi->regs->tx_fifo, *p);
			p++;
			to_fifo--;
		}
	}

	return todo;
}

static void tegra_spi_pio_start(struct tegra_spi_channel *spi)
{
	setbits32(&spi->regs->trans_status, SPI_STATUS_RDY);
	setbits32(&spi->regs->command1, SPI_CMD1_GO);
	/* Make sure the write to command1 completes. */
	read32(&spi->regs->command1);
}

static inline u32 rx_fifo_count(struct tegra_spi_channel *spi)
{
	return (read32(&spi->regs->fifo_status) >>
		SPI_FIFO_STATUS_RX_FIFO_FULL_COUNT_SHIFT) &
		SPI_FIFO_STATUS_RX_FIFO_FULL_COUNT_MASK;
}

static int tegra_spi_pio_finish(struct tegra_spi_channel *spi)
{
	u8 *p = spi->in_buf;
	struct stopwatch sw;

	clrbits32(&spi->regs->command1, SPI_CMD1_RX_EN | SPI_CMD1_TX_EN);

	/*
	 * Allow some time in case the Rx FIFO does not yet have
	 * all packets pushed into it. See chrome-os-partner:24215.
	 */
	stopwatch_init_usecs_expire(&sw, SPI_FIFO_XFER_TIMEOUT_US);
	do {
		if (rx_fifo_count(spi) == spi_byte_count(spi))
			break;
	} while (!stopwatch_expired(&sw));

	while (!(read32(&spi->regs->fifo_status) &
			SPI_FIFO_STATUS_RX_FIFO_EMPTY)) {
		*p = read8(&spi->regs->rx_fifo);
		p++;
	}

	if (fifo_error(spi)) {
		printk(BIOS_ERR, "%s: ERROR:\n", __func__);
		dump_spi_regs(spi);
		dump_fifo_status(spi);
		return -1;
	}

	return 0;
}

static void setup_dma_params(struct tegra_spi_channel *spi,
				struct apb_dma_channel *dma)
{
	/* APB bus width = 8-bits, address wrap for each word */
	clrbits32(&dma->regs->apb_seq,
		  APB_BUS_WIDTH_MASK << APB_BUS_WIDTH_SHIFT);
	/* AHB 1 word burst, bus width = 32 bits (fixed in hardware),
	 * no address wrapping */
	clrsetbits32(&dma->regs->ahb_seq,
		     (AHB_BURST_MASK << AHB_BURST_SHIFT),
		     4 << AHB_BURST_SHIFT);

	/* Set ONCE mode to transfer one "block" at a time (64KB) and enable
	 * flow control. */
	clrbits32(&dma->regs->csr,
		  APB_CSR_REQ_SEL_MASK << APB_CSR_REQ_SEL_SHIFT);
	setbits32(&dma->regs->csr, APB_CSR_ONCE | APB_CSR_FLOW |
		  (spi->req_sel << APB_CSR_REQ_SEL_SHIFT));
}

static int tegra_spi_dma_prepare(struct tegra_spi_channel *spi,
			unsigned int bytes, enum spi_direction dir)
{
	unsigned int todo, wcount;

	/*
	 * For DMA we need to think of things in terms of word count.
	 * AHB width is fixed at 32-bits. To avoid overrunning
	 * the in/out buffers we must align down. (Note: lowest 2-bits
	 * in WCOUNT register are ignored, and WCOUNT seems to count
	 * words starting at n-1)
	 *
	 * Example: If "bytes" is 7 and we are transferring 1-byte at a time,
	 * WCOUNT should be 4. The remaining 3 bytes must be transferred
	 * using PIO.
	 */
	todo = MIN(bytes, SPI_MAX_TRANSFER_BYTES_DMA - TEGRA_DMA_ALIGN_BYTES);
	todo = ALIGN_DOWN(todo, TEGRA_DMA_ALIGN_BYTES);
	wcount = ALIGN_DOWN(todo - TEGRA_DMA_ALIGN_BYTES, TEGRA_DMA_ALIGN_BYTES);
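
	/*
	 * Tracing the example from the comment above (and assuming
	 * TEGRA_DMA_ALIGN_BYTES == 4 on this SoC family): bytes = 7 gives
	 * todo = ALIGN_DOWN(MIN(7, 65531), 4) = 4, so 4 bytes go out via
	 * DMA and the caller must move the remaining 3 bytes by PIO.
	 * wcount = ALIGN_DOWN(4 - 4, 4) = 0, which in the controller's
	 * n-1 word encoding requests a single 32-bit word.
	 */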

	if (dir == SPI_SEND) {
		spi->dma_out = dma_claim();
		if (!spi->dma_out)
			return -1;

		/* ensure bytes to send will be visible to DMA controller */
		dcache_clean_by_mva(spi->out_buf, bytes);

		write32(&spi->dma_out->regs->apb_ptr,
			(u32)&spi->regs->tx_fifo);
		write32(&spi->dma_out->regs->ahb_ptr, (u32)spi->out_buf);
		setbits32(&spi->dma_out->regs->csr, APB_CSR_DIR);
		setup_dma_params(spi, spi->dma_out);
		write32(&spi->dma_out->regs->wcount, wcount);
	} else {
		spi->dma_in = dma_claim();
		if (!spi->dma_in)
			return -1;

		/* avoid data collisions */
		dcache_clean_invalidate_by_mva(spi->in_buf, bytes);

		write32(&spi->dma_in->regs->apb_ptr, (u32)&spi->regs->rx_fifo);
		write32(&spi->dma_in->regs->ahb_ptr, (u32)spi->in_buf);
		clrbits32(&spi->dma_in->regs->csr, APB_CSR_DIR);
		setup_dma_params(spi, spi->dma_in);
		write32(&spi->dma_in->regs->wcount, wcount);
	}

	/* BLOCK_SIZE starts at n-1 */
	write32(&spi->regs->dma_blk, todo - 1);
	return todo;
}

static void tegra_spi_dma_start(struct tegra_spi_channel *spi)
{
	/*
	 * The RDY bit in SPI_TRANS_STATUS needs to be cleared manually
	 * (set bit to clear) between each transaction. Otherwise the next
	 * transaction does not start.
	 */
	setbits32(&spi->regs->trans_status, SPI_STATUS_RDY);

	if (spi->dma_out)
		setbits32(&spi->regs->command1, SPI_CMD1_TX_EN);
	if (spi->dma_in)
		setbits32(&spi->regs->command1, SPI_CMD1_RX_EN);

	/*
	 * To avoid underrun conditions, enable APB DMA before SPI DMA for
	 * Tx and enable SPI DMA before APB DMA for Rx.
	 */
	if (spi->dma_out)
		dma_start(spi->dma_out);
	setbits32(&spi->regs->dma_ctl, SPI_DMA_CTL_DMA);
	if (spi->dma_in)
		dma_start(spi->dma_in);
}

static int tegra_spi_dma_finish(struct tegra_spi_channel *spi)
{
	int ret;
	unsigned int todo;

	if (spi->dma_in) {
		todo = read32(&spi->dma_in->regs->wcount);

		while ((read32(&spi->dma_in->regs->dma_byte_sta) < todo) ||
				dma_busy(spi->dma_in))
			;	/* this shouldn't take long, no udelay */
		dma_stop(spi->dma_in);
		clrbits32(&spi->regs->command1, SPI_CMD1_RX_EN);
		dma_release(spi->dma_in);
	}

	if (spi->dma_out) {
		todo = read32(&spi->dma_out->regs->wcount);

		while ((read32(&spi->dma_out->regs->dma_byte_sta) < todo) ||
				dma_busy(spi->dma_out)) {
			spi_delay(spi, todo - spi_byte_count(spi));
		}
		clrbits32(&spi->regs->command1, SPI_CMD1_TX_EN);
		dma_stop(spi->dma_out);
		dma_release(spi->dma_out);
	}

	if (fifo_error(spi)) {
		printk(BIOS_ERR, "%s: ERROR:\n", __func__);
		dump_dma_regs(spi->dma_out);
		dump_dma_regs(spi->dma_in);
		dump_spi_regs(spi);
		dump_fifo_status(spi);
		ret = -1;
		goto done;
	}

	ret = 0;
done:
	spi->dma_in = NULL;
	spi->dma_out = NULL;
	return ret;
}

/*
 * xfer_setup() prepares a transfer. It does sanity checking, alignment, and
 * sets the transfer mode used by this channel (if not set already).
 *
 * A few caveats to watch out for:
 * - The number of bytes which can be transferred may be smaller than the
 *   number of bytes the caller specifies. The number of bytes ready for
 *   a transfer will be returned (unless an error occurs).
 *
 * - Only one mode can be used for both RX and TX. The transfer mode of the
 *   SPI channel (spi->xfer_mode) is checked each time this function is called.
 *   If conflicting modes are detected, spi->xfer_mode will be set to
 *   XFER_MODE_NONE and an error will be returned.
 *
 * Returns bytes ready for transfer if successful, <0 to indicate error.
 */
static int xfer_setup(struct tegra_spi_channel *spi, void *buf,
			unsigned int bytes, enum spi_direction dir)
{
	unsigned int line_size = dcache_line_bytes();
	unsigned int align;
	int ret = -1;

	if (!bytes)
		return 0;

	if (dir == SPI_SEND)
		spi->out_buf = buf;
	else if (dir == SPI_RECEIVE)
		spi->in_buf = buf;

	/*
	 * Alignment considerations:
	 * When we enable caching we'll need to clean/invalidate portions of
	 * memory. So we need to be careful about memory alignment. Also, DMA
	 * likes to operate on 4-bytes at a time on the AHB side. So for
	 * example, if we only want to receive 1 byte, 4 bytes will be
	 * written in memory even if those extra 3 bytes are beyond the length
	 * we want.
	 *
	 * For now we'll use PIO to send/receive unaligned bytes. We may
	 * consider setting aside some space for a kind of bounce buffer to
	 * stay in DMA mode once we have a chance to benchmark the two
	 * approaches.
	 */

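	/*
	 * Illustrative walk-through (hypothetical numbers, assuming a
	 * 64-byte cache line): receiving 200 bytes into a buffer that sits
	 * 48 bytes past an aligned boundary, the first call takes the
	 * "head" path below and returns 16 PIO bytes. On the next call the
	 * buffer is aligned and 184 bytes remain: (buf + 184) % 64 = 56, so
	 * the DMA path moves 184 - 56 = 128 bytes. A final call moves the
	 * 56-byte tail by PIO, since 56 < line_size. The caller's loop
	 * stitches the three segments together.
	 */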
	if (bytes < line_size) {
		if (spi->xfer_mode == XFER_MODE_DMA) {
			spi->xfer_mode = XFER_MODE_NONE;
			ret = -1;
		} else {
			spi->xfer_mode = XFER_MODE_PIO;
			ret = tegra_spi_pio_prepare(spi, bytes, dir);
		}
		goto done;
	}

	/* transfer bytes before the aligned boundary */
	align = line_size - ((uintptr_t)buf % line_size);
	if ((align != 0) && (align != line_size)) {
		if (spi->xfer_mode == XFER_MODE_DMA) {
			spi->xfer_mode = XFER_MODE_NONE;
			ret = -1;
		} else {
			spi->xfer_mode = XFER_MODE_PIO;
			ret = tegra_spi_pio_prepare(spi, align, dir);
		}
		goto done;
	}

	/* do aligned DMA transfer */
	align = (((uintptr_t)buf + bytes) % line_size);
	if (bytes - align > 0) {
		unsigned int dma_bytes = bytes - align;

		if (spi->xfer_mode == XFER_MODE_PIO) {
			spi->xfer_mode = XFER_MODE_NONE;
			ret = -1;
		} else {
			spi->xfer_mode = XFER_MODE_DMA;
			ret = tegra_spi_dma_prepare(spi, dma_bytes, dir);
		}

		goto done;
	}

	/* transfer any remaining unaligned bytes */
	if (align) {
		if (spi->xfer_mode == XFER_MODE_DMA) {
			spi->xfer_mode = XFER_MODE_NONE;
			ret = -1;
		} else {
			spi->xfer_mode = XFER_MODE_PIO;
			ret = tegra_spi_pio_prepare(spi, align, dir);
		}
		goto done;
	}

done:
	return ret;
}

static void xfer_start(struct tegra_spi_channel *spi)
{
	if (spi->xfer_mode == XFER_MODE_DMA)
		tegra_spi_dma_start(spi);
	else
		tegra_spi_pio_start(spi);
}

static void xfer_wait(struct tegra_spi_channel *spi)
{
	tegra_spi_wait(spi);
}

static int xfer_finish(struct tegra_spi_channel *spi)
{
	int ret;

	if (spi->xfer_mode == XFER_MODE_DMA)
		ret = tegra_spi_dma_finish(spi);
	else
		ret = tegra_spi_pio_finish(spi);

	spi->xfer_mode = XFER_MODE_NONE;
	return ret;
}

static int spi_ctrlr_xfer(const struct spi_slave *slave, const void *dout,
			size_t out_bytes, void *din, size_t in_bytes)
{
	struct tegra_spi_channel *spi = to_tegra_spi(slave->bus);
	u8 *out_buf = (u8 *)dout;
	u8 *in_buf = (u8 *)din;
	size_t todo;
	int ret = 0;

	/* tegra bus numbers start at 1 */
	ASSERT(slave->bus >= 1 && slave->bus <= ARRAY_SIZE(tegra_spi_channels));

	while (out_bytes || in_bytes) {
		int x = 0;

		if (out_bytes == 0)
			todo = in_bytes;
		else if (in_bytes == 0)
			todo = out_bytes;
		else
			todo = MIN(out_bytes, in_bytes);

		if (out_bytes) {
			x = xfer_setup(spi, out_buf, todo, SPI_SEND);
			if (x < 0) {
				if (spi->xfer_mode == XFER_MODE_NONE) {
					spi->xfer_mode = XFER_MODE_PIO;
					continue;
				} else {
					ret = -1;
					break;
				}
			}
		}
		if (in_bytes) {
			x = xfer_setup(spi, in_buf, todo, SPI_RECEIVE);
			if (x < 0) {
				if (spi->xfer_mode == XFER_MODE_NONE) {
					spi->xfer_mode = XFER_MODE_PIO;
					continue;
				} else {
					ret = -1;
					break;
				}
			}
		}

		/*
		 * Note: Some devices (such as Chrome EC) are sensitive to
		 * delays, so be careful when adding debug prints not to
		 * cause timeouts between transfers.
		 */
		xfer_start(spi);
		xfer_wait(spi);
		if (xfer_finish(spi)) {
			ret = -1;
			break;
		}

		/* Post-processing. */
		if (out_bytes) {
			out_bytes -= x;
			out_buf += x;
		}
		if (in_bytes) {
			in_bytes -= x;
			in_buf += x;
		}
	}

	if (ret < 0) {
		printk(BIOS_ERR, "%s: Error detected\n", __func__);
		printk(BIOS_ERR, "Transaction size: %zu, bytes remaining: "
			"%zu out / %zu in\n", todo, out_bytes, in_bytes);
		clear_fifo_status(spi);
	}
	return ret;
}

static const struct spi_ctrlr spi_ctrlr = {
	.claim_bus = spi_ctrlr_claim_bus,
	.release_bus = spi_ctrlr_release_bus,
	.xfer = spi_ctrlr_xfer,
	.max_xfer_size = SPI_CTRLR_DEFAULT_MAX_XFER_SIZE,
};

const struct spi_ctrlr_buses spi_ctrlr_bus_map[] = {
	{
		.ctrlr = &spi_ctrlr,
		.bus_start = 1,
		.bus_end = ARRAY_SIZE(tegra_spi_channels)
	},
};

const size_t spi_ctrlr_bus_map_count = ARRAY_SIZE(spi_ctrlr_bus_map);
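
/*
 * Usage sketch (not part of this driver): callers reach this controller
 * through coreboot's generic SPI API from <spi-generic.h>, which routes
 * bus numbers 1..6 here via spi_ctrlr_bus_map. The bus/cs values and the
 * opcode below are illustrative only.
 *
 *	struct spi_slave slave;
 *	u8 cmd = 0x9f;	// e.g. JEDEC read-ID opcode
 *	u8 id[3];
 *
 *	if (spi_setup_slave(4, 0, &slave))
 *		return;
 *	spi_claim_bus(&slave);
 *	spi_xfer(&slave, &cmd, sizeof(cmd), id, sizeof(id));
 *	spi_release_bus(&slave);
 */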