/*
 * NVIDIA Tegra SPI controller (T114 and later)
 *
 * Copyright (c) 2010-2013 NVIDIA Corporation
 * Copyright (C) 2013 Google Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <arch/cache.h>
#include <arch/io.h>
#include <assert.h>
#include <boot_device.h>
#include <console/console.h>
#include <cbfs.h>
#include <delay.h>
#include <inttypes.h>
#include <soc/addressmap.h>
#include <soc/dma.h>
#include <soc/spi.h>
#include <spi-generic.h>
#include <spi_flash.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <symbols.h>
#include <timer.h>

#if defined(CONFIG_DEBUG_SPI) && CONFIG_DEBUG_SPI
# define DEBUG_SPI(x, ...) printk(BIOS_DEBUG, "TEGRA_SPI: " x, ##__VA_ARGS__)
#else
# define DEBUG_SPI(x, ...)
#endif

/*
 * 64 packets in FIFO mode, BLOCK_SIZE packets in DMA mode. Packets can vary
 * in size from 4 to 32 bits. To keep things simple we'll use 8-bit packets.
 */
#define SPI_PACKET_SIZE_BYTES           1
#define SPI_MAX_TRANSFER_BYTES_FIFO     (64 * SPI_PACKET_SIZE_BYTES)
#define SPI_MAX_TRANSFER_BYTES_DMA      (65535 * SPI_PACKET_SIZE_BYTES)

/*
 * This is used to work around an issue seen where it may take some time for
 * packets to show up in the FIFO after they have been received and the
 * BLOCK_COUNT has been incremented.
 */
#define SPI_FIFO_XFER_TIMEOUT_US        1000

/* COMMAND1 */
#define SPI_CMD1_GO                     (1 << 31)
#define SPI_CMD1_M_S                    (1 << 30)
#define SPI_CMD1_MODE_MASK              0x3
#define SPI_CMD1_MODE_SHIFT             28
#define SPI_CMD1_CS_SEL_MASK            0x3
#define SPI_CMD1_CS_SEL_SHIFT           26
#define SPI_CMD1_CS_POL_INACTIVE3       (1 << 25)
#define SPI_CMD1_CS_POL_INACTIVE2       (1 << 24)
#define SPI_CMD1_CS_POL_INACTIVE1       (1 << 23)
#define SPI_CMD1_CS_POL_INACTIVE0       (1 << 22)
#define SPI_CMD1_CS_SW_HW               (1 << 21)
#define SPI_CMD1_CS_SW_VAL              (1 << 20)
#define SPI_CMD1_IDLE_SDA_MASK          0x3
#define SPI_CMD1_IDLE_SDA_SHIFT         18
#define SPI_CMD1_BIDIR                  (1 << 17)
#define SPI_CMD1_LSBI_FE                (1 << 16)
#define SPI_CMD1_LSBY_FE                (1 << 15)
#define SPI_CMD1_BOTH_EN_BIT            (1 << 14)
#define SPI_CMD1_BOTH_EN_BYTE           (1 << 13)
#define SPI_CMD1_RX_EN                  (1 << 12)
#define SPI_CMD1_TX_EN                  (1 << 11)
#define SPI_CMD1_PACKED                 (1 << 5)
#define SPI_CMD1_BIT_LEN_MASK           0x1f
#define SPI_CMD1_BIT_LEN_SHIFT          0

/* COMMAND2 */
#define SPI_CMD2_TX_CLK_TAP_DELAY       (1 << 6)
#define SPI_CMD2_TX_CLK_TAP_DELAY_MASK  (0x3F << 6)
#define SPI_CMD2_RX_CLK_TAP_DELAY       (1 << 0)
#define SPI_CMD2_RX_CLK_TAP_DELAY_MASK  (0x3F << 0)

/* SPI_TRANS_STATUS */
#define SPI_STATUS_RDY                  (1 << 30)
#define SPI_STATUS_SLV_IDLE_COUNT_MASK  0xff
#define SPI_STATUS_SLV_IDLE_COUNT_SHIFT 16
#define SPI_STATUS_BLOCK_COUNT          0xffff
#define SPI_STATUS_BLOCK_COUNT_SHIFT    0

/* SPI_FIFO_STATUS */
#define SPI_FIFO_STATUS_CS_INACTIVE               (1 << 31)
#define SPI_FIFO_STATUS_FRAME_END                 (1 << 30)
#define SPI_FIFO_STATUS_RX_FIFO_FULL_COUNT_MASK   0x7f
#define SPI_FIFO_STATUS_RX_FIFO_FULL_COUNT_SHIFT  23
#define SPI_FIFO_STATUS_TX_FIFO_EMPTY_COUNT_MASK  0x7f
#define SPI_FIFO_STATUS_TX_FIFO_EMPTY_COUNT_SHIFT 16
#define SPI_FIFO_STATUS_RX_FIFO_FLUSH             (1 << 15)
#define SPI_FIFO_STATUS_TX_FIFO_FLUSH             (1 << 14)
#define SPI_FIFO_STATUS_ERR                       (1 << 8)
#define SPI_FIFO_STATUS_TX_FIFO_OVF               (1 << 7)
#define SPI_FIFO_STATUS_TX_FIFO_UNR               (1 << 6)
#define SPI_FIFO_STATUS_RX_FIFO_OVF               (1 << 5)
#define SPI_FIFO_STATUS_RX_FIFO_UNR               (1 << 4)
#define SPI_FIFO_STATUS_TX_FIFO_FULL              (1 << 3)
#define SPI_FIFO_STATUS_TX_FIFO_EMPTY             (1 << 2)
#define SPI_FIFO_STATUS_RX_FIFO_FULL              (1 << 1)
#define SPI_FIFO_STATUS_RX_FIFO_EMPTY             (1 << 0)

/* SPI_DMA_CTL */
#define SPI_DMA_CTL_DMA                 (1 << 31)
#define SPI_DMA_CTL_CONT                (1 << 30)
#define SPI_DMA_CTL_IE_RX               (1 << 29)
#define SPI_DMA_CTL_IE_TX               (1 << 28)
#define SPI_DMA_CTL_RX_TRIG_MASK        0x3
#define SPI_DMA_CTL_RX_TRIG_SHIFT       19
#define SPI_DMA_CTL_TX_TRIG_MASK        0x3
#define SPI_DMA_CTL_TX_TRIG_SHIFT       15

/* SPI_DMA_BLK */
#define SPI_DMA_CTL_BLOCK_SIZE_MASK     0xffff
#define SPI_DMA_CTL_BLOCK_SIZE_SHIFT    0

static struct tegra_spi_channel tegra_spi_channels[] = {
        /*
         * Note: Tegra pinmux must be set up for the corresponding SPI channel
         * in order for its registers to be accessible. If pinmux has not been
         * set up, access to the channel's registers will simply hang.
         *
         * TODO(dhendrix): Clarify or remove this comment (is clock setup
         * necessary first, or just pinmux, or both?)
         */
        {
                .slave = { .bus = 1, },
                .regs = (struct tegra_spi_regs *)TEGRA_SPI1_BASE,
                .req_sel = APBDMA_SLAVE_SL2B1,
        },
        {
                .slave = { .bus = 2, },
                .regs = (struct tegra_spi_regs *)TEGRA_SPI2_BASE,
                .req_sel = APBDMA_SLAVE_SL2B2,
        },
        {
                .slave = { .bus = 3, },
                .regs = (struct tegra_spi_regs *)TEGRA_SPI3_BASE,
                .req_sel = APBDMA_SLAVE_SL2B3,
        },
        {
                .slave = { .bus = 4, },
                .regs = (struct tegra_spi_regs *)TEGRA_SPI4_BASE,
                .req_sel = APBDMA_SLAVE_SL2B4,
        },
        {
                .slave = { .bus = 5, },
                .regs = (struct tegra_spi_regs *)TEGRA_SPI5_BASE,
                .req_sel = APBDMA_SLAVE_SL2B5,
        },
        {
                .slave = { .bus = 6, },
                .regs = (struct tegra_spi_regs *)TEGRA_SPI6_BASE,
                .req_sel = APBDMA_SLAVE_SL2B6,
        },
};

enum spi_direction {
        SPI_SEND,
        SPI_RECEIVE,
};

struct tegra_spi_channel *tegra_spi_init(unsigned int bus)
{
        int i;
        struct tegra_spi_channel *spi = NULL;

        for (i = 0; i < ARRAY_SIZE(tegra_spi_channels); i++) {
                if (tegra_spi_channels[i].slave.bus == bus) {
                        spi = &tegra_spi_channels[i];
                        break;
                }
        }
        if (!spi)
                return NULL;

        /* software drives chip-select, set value to high */
        setbits_le32(&spi->regs->command1,
                     SPI_CMD1_CS_SW_HW | SPI_CMD1_CS_SW_VAL);

        /* 8-bit transfers, unpacked mode, most significant bit first */
        clrbits_le32(&spi->regs->command1,
                     SPI_CMD1_BIT_LEN_MASK | SPI_CMD1_PACKED);
        setbits_le32(&spi->regs->command1, 7 << SPI_CMD1_BIT_LEN_SHIFT);

        return spi;
}

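/* Map a 1-based Tegra SPI bus number to its channel descriptor. */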
static struct tegra_spi_channel * const to_tegra_spi(int bus)
{
        return &tegra_spi_channels[bus - 1];
}

static unsigned int tegra_spi_speed(unsigned int bus)
{
        /* FIXME: implement this properly, for now use max value (50MHz) */
        return 50000000;
}

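/*
 * Select the slave's chip-select line and assert it (the software-controlled
 * CS value is driven to the inverse of that line's "inactive" polarity).
 */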
int spi_claim_bus(struct spi_slave *slave)
{
        struct tegra_spi_regs *regs = to_tegra_spi(slave->bus)->regs;
        u32 val;

        tegra_spi_init(slave->bus);

        val = read32(&regs->command1);

        /* select appropriate chip-select line */
        val &= ~(SPI_CMD1_CS_SEL_MASK << SPI_CMD1_CS_SEL_SHIFT);
        val |= (slave->cs << SPI_CMD1_CS_SEL_SHIFT);

        /* drive chip-select with the inverse of the "inactive" value */
        if (val & (SPI_CMD1_CS_POL_INACTIVE0 << slave->cs))
                val &= ~SPI_CMD1_CS_SW_VAL;
        else
                val |= SPI_CMD1_CS_SW_VAL;

        write32(&regs->command1, val);
        return 0;
}

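/* De-assert chip-select by returning the line to its inactive level. */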
void spi_release_bus(struct spi_slave *slave)
{
        struct tegra_spi_regs *regs = to_tegra_spi(slave->bus)->regs;
        u32 val;

        val = read32(&regs->command1);

        if (val & (SPI_CMD1_CS_POL_INACTIVE0 << slave->cs))
                val |= SPI_CMD1_CS_SW_VAL;
        else
                val &= ~SPI_CMD1_CS_SW_VAL;

        write32(&regs->command1, val);
}

static void dump_fifo_status(struct tegra_spi_channel *spi)
{
        u32 status = read32(&spi->regs->fifo_status);

        printk(BIOS_INFO, "Raw FIFO status: 0x%08x\n", status);
        if (status & SPI_FIFO_STATUS_TX_FIFO_OVF)
                printk(BIOS_INFO, "\tTx overflow detected\n");
        if (status & SPI_FIFO_STATUS_TX_FIFO_UNR)
                printk(BIOS_INFO, "\tTx underrun detected\n");
        if (status & SPI_FIFO_STATUS_RX_FIFO_OVF)
                printk(BIOS_INFO, "\tRx overflow detected\n");
        if (status & SPI_FIFO_STATUS_RX_FIFO_UNR)
                printk(BIOS_INFO, "\tRx underrun detected\n");

        printk(BIOS_INFO, "TX_FIFO: 0x%08x, TX_DATA: 0x%08x\n",
               read32(&spi->regs->tx_fifo), read32(&spi->regs->tx_data));
        printk(BIOS_INFO, "RX_FIFO: 0x%08x, RX_DATA: 0x%08x\n",
               read32(&spi->regs->rx_fifo), read32(&spi->regs->rx_data));
}

static void clear_fifo_status(struct tegra_spi_channel *spi)
{
        clrbits_le32(&spi->regs->fifo_status,
                     SPI_FIFO_STATUS_ERR |
                     SPI_FIFO_STATUS_TX_FIFO_OVF |
                     SPI_FIFO_STATUS_TX_FIFO_UNR |
                     SPI_FIFO_STATUS_RX_FIFO_OVF |
                     SPI_FIFO_STATUS_RX_FIFO_UNR);
}

static void dump_spi_regs(struct tegra_spi_channel *spi)
{
        printk(BIOS_INFO, "SPI regs:\n"
               "\tdma_blk: 0x%08x\n"
               "\tcommand1: 0x%08x\n"
               "\tdma_ctl: 0x%08x\n"
               "\ttrans_status: 0x%08x\n",
               read32(&spi->regs->dma_blk),
               read32(&spi->regs->command1),
               read32(&spi->regs->dma_ctl),
               read32(&spi->regs->trans_status));
}

static void dump_dma_regs(struct apb_dma_channel *dma)
{
        /* The error path may pass a NULL channel when only one direction
         * was active; don't dereference it. */
        if (dma == NULL)
                return;

        printk(BIOS_INFO, "DMA regs:\n"
               "\tahb_ptr: 0x%08x\n"
               "\tapb_ptr: 0x%08x\n"
               "\tahb_seq: 0x%08x\n"
               "\tapb_seq: 0x%08x\n"
               "\tcsr: 0x%08x\n"
               "\tcsre: 0x%08x\n"
               "\twcount: 0x%08x\n"
               "\tdma_byte_sta: 0x%08x\n"
               "\tword_transfer: 0x%08x\n",
               read32(&dma->regs->ahb_ptr),
               read32(&dma->regs->apb_ptr),
               read32(&dma->regs->ahb_seq),
               read32(&dma->regs->apb_seq),
               read32(&dma->regs->csr),
               read32(&dma->regs->csre),
               read32(&dma->regs->wcount),
               read32(&dma->regs->dma_byte_sta),
               read32(&dma->regs->word_transfer));
}

static inline unsigned int spi_byte_count(struct tegra_spi_channel *spi)
{
        /* FIXME: Make this take total packet size into account */
        return read32(&spi->regs->trans_status) &
               (SPI_STATUS_BLOCK_COUNT << SPI_STATUS_BLOCK_COUNT_SHIFT);
}

/*
 * This calls udelay() with a calculated value based on the SPI speed and
 * number of bytes remaining to be transferred. It assumes that if the
 * calculated delay period is less than MIN_DELAY_US then it is probably
 * not worth the overhead of yielding.
 */
#define MIN_DELAY_US 250
static void spi_delay(struct tegra_spi_channel *spi,
                      unsigned int bytes_remaining)
{
        unsigned int ns_per_byte, delay_us;

        ns_per_byte = 1000000000 / (tegra_spi_speed(spi->slave.bus) / 8);
        delay_us = (ns_per_byte * bytes_remaining) / 1000;

        if (delay_us < MIN_DELAY_US)
                return;

        udelay(delay_us);
}

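/*
 * Poll until BLOCK_COUNT reaches the packet count programmed into
 * SPI_DMA_BLK, i.e. until the current transfer has completed.
 */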
static void tegra_spi_wait(struct tegra_spi_channel *spi)
{
        unsigned int count, dma_blk;

        dma_blk = 1 + (read32(&spi->regs->dma_blk) &
                 (SPI_DMA_CTL_BLOCK_SIZE_MASK << SPI_DMA_CTL_BLOCK_SIZE_SHIFT));

        while ((count = spi_byte_count(spi)) != dma_blk)
                spi_delay(spi, dma_blk - count);
}

static int fifo_error(struct tegra_spi_channel *spi)
{
        return read32(&spi->regs->fifo_status) & SPI_FIFO_STATUS_ERR ? 1 : 0;
}

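/*
 * Set up a PIO transfer: flush the FIFO for the requested direction, enable
 * that direction, program the packet count, and (for sends) preload the TX
 * FIFO. Returns the number of bytes this PIO transfer will cover.
 */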
static int tegra_spi_pio_prepare(struct tegra_spi_channel *spi,
                                 unsigned int bytes, enum spi_direction dir)
{
        u8 *p = spi->out_buf;
        unsigned int todo = MIN(bytes, SPI_MAX_TRANSFER_BYTES_FIFO);
        u32 flush_mask, enable_mask;

        if (dir == SPI_SEND) {
                flush_mask = SPI_FIFO_STATUS_TX_FIFO_FLUSH;
                enable_mask = SPI_CMD1_TX_EN;
        } else {
                flush_mask = SPI_FIFO_STATUS_RX_FIFO_FLUSH;
                enable_mask = SPI_CMD1_RX_EN;
        }

        setbits_le32(&spi->regs->fifo_status, flush_mask);
        while (read32(&spi->regs->fifo_status) & flush_mask)
                ;

        setbits_le32(&spi->regs->command1, enable_mask);

        /* BLOCK_SIZE in SPI_DMA_BLK register applies to both DMA and
         * PIO transfers */
        write32(&spi->regs->dma_blk, todo - 1);

        if (dir == SPI_SEND) {
                /* only preload as many bytes as this transfer will move */
                unsigned int to_fifo = todo;
                while (to_fifo) {
                        write32(&spi->regs->tx_fifo, *p);
                        p++;
                        to_fifo--;
                }
        }

        return todo;
}

static void tegra_spi_pio_start(struct tegra_spi_channel *spi)
{
        setbits_le32(&spi->regs->trans_status, SPI_STATUS_RDY);
        setbits_le32(&spi->regs->command1, SPI_CMD1_GO);
        /* Make sure the write to command1 completes. */
        read32(&spi->regs->command1);
}

static inline u32 rx_fifo_count(struct tegra_spi_channel *spi)
{
        return (read32(&spi->regs->fifo_status) >>
                SPI_FIFO_STATUS_RX_FIFO_FULL_COUNT_SHIFT) &
                SPI_FIFO_STATUS_RX_FIFO_FULL_COUNT_MASK;
}

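/*
 * Complete a PIO transfer: drain any received packets from the RX FIFO into
 * the caller's buffer and report FIFO errors.
 */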
static int tegra_spi_pio_finish(struct tegra_spi_channel *spi)
{
        u8 *p = spi->in_buf;
        struct stopwatch sw;

        clrbits_le32(&spi->regs->command1, SPI_CMD1_RX_EN | SPI_CMD1_TX_EN);

        /*
         * Allow some time in case the Rx FIFO does not yet have
         * all packets pushed into it. See chrome-os-partner:24215.
         */
        stopwatch_init_usecs_expire(&sw, SPI_FIFO_XFER_TIMEOUT_US);
        do {
                if (rx_fifo_count(spi) == spi_byte_count(spi))
                        break;
        } while (!stopwatch_expired(&sw));

        while (!(read32(&spi->regs->fifo_status) &
                 SPI_FIFO_STATUS_RX_FIFO_EMPTY)) {
                *p = read8(&spi->regs->rx_fifo);
                p++;
        }

        if (fifo_error(spi)) {
                printk(BIOS_ERR, "%s: ERROR:\n", __func__);
                dump_spi_regs(spi);
                dump_fifo_status(spi);
                return -1;
        }

        return 0;
}

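/*
 * Common APB DMA channel setup: 8-bit APB side, 32-bit AHB side with 1-word
 * bursts, one-shot ("ONCE") transfers with flow control tied to this SPI
 * channel's DMA request line.
 */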
static void setup_dma_params(struct tegra_spi_channel *spi,
                             struct apb_dma_channel *dma)
{
        /* APB bus width = 8-bits, address wrap for each word */
        clrbits_le32(&dma->regs->apb_seq,
                     APB_BUS_WIDTH_MASK << APB_BUS_WIDTH_SHIFT);
        /* AHB 1 word burst, bus width = 32 bits (fixed in hardware),
         * no address wrapping */
        clrsetbits_le32(&dma->regs->ahb_seq,
                        (AHB_BURST_MASK << AHB_BURST_SHIFT),
                        4 << AHB_BURST_SHIFT);

        /* Set ONCE mode to transfer one "block" at a time (64KB) and enable
         * flow control. */
        clrbits_le32(&dma->regs->csr,
                     APB_CSR_REQ_SEL_MASK << APB_CSR_REQ_SEL_SHIFT);
        setbits_le32(&dma->regs->csr, APB_CSR_ONCE | APB_CSR_FLOW |
                     (spi->req_sel << APB_CSR_REQ_SEL_SHIFT));
}

static int tegra_spi_dma_prepare(struct tegra_spi_channel *spi,
                                 unsigned int bytes, enum spi_direction dir)
{
        unsigned int todo, wcount;

        /*
         * For DMA we need to think of things in terms of word count.
         * AHB width is fixed at 32-bits. To avoid overrunning
         * the in/out buffers we must align down. (Note: lowest 2-bits
         * in WCOUNT register are ignored, and WCOUNT seems to count
         * words starting at n-1)
         *
         * Example: If "bytes" is 7 and we are transferring 1-byte at a time,
         * WCOUNT should be 4. The remaining 3 bytes must be transferred
         * using PIO.
         */
        todo = MIN(bytes, SPI_MAX_TRANSFER_BYTES_DMA - TEGRA_DMA_ALIGN_BYTES);
        todo = ALIGN_DOWN(todo, TEGRA_DMA_ALIGN_BYTES);
        wcount = ALIGN_DOWN(todo - TEGRA_DMA_ALIGN_BYTES, TEGRA_DMA_ALIGN_BYTES);

        if (dir == SPI_SEND) {
                spi->dma_out = dma_claim();
                if (!spi->dma_out)
                        return -1;

                /* ensure bytes to send will be visible to DMA controller */
                dcache_clean_by_mva(spi->out_buf, bytes);

                write32(&spi->dma_out->regs->apb_ptr,
                        (u32)&spi->regs->tx_fifo);
                write32(&spi->dma_out->regs->ahb_ptr, (u32)spi->out_buf);
                setbits_le32(&spi->dma_out->regs->csr, APB_CSR_DIR);
                setup_dma_params(spi, spi->dma_out);
                write32(&spi->dma_out->regs->wcount, wcount);
        } else {
                spi->dma_in = dma_claim();
                if (!spi->dma_in)
                        return -1;

                /* avoid data collisions */
                dcache_clean_invalidate_by_mva(spi->in_buf, bytes);

                write32(&spi->dma_in->regs->apb_ptr, (u32)&spi->regs->rx_fifo);
                write32(&spi->dma_in->regs->ahb_ptr, (u32)spi->in_buf);
                clrbits_le32(&spi->dma_in->regs->csr, APB_CSR_DIR);
                setup_dma_params(spi, spi->dma_in);
                write32(&spi->dma_in->regs->wcount, wcount);
        }

        /* BLOCK_SIZE starts at n-1 */
        write32(&spi->regs->dma_blk, todo - 1);
        return todo;
}

static void tegra_spi_dma_start(struct tegra_spi_channel *spi)
{
        /*
         * The RDY bit in SPI_TRANS_STATUS needs to be cleared manually
         * (set bit to clear) between each transaction. Otherwise the next
         * transaction does not start.
         */
        setbits_le32(&spi->regs->trans_status, SPI_STATUS_RDY);

        if (spi->dma_out)
                setbits_le32(&spi->regs->command1, SPI_CMD1_TX_EN);
        if (spi->dma_in)
                setbits_le32(&spi->regs->command1, SPI_CMD1_RX_EN);

        /*
         * To avoid underrun conditions, enable APB DMA before SPI DMA for
         * Tx and enable SPI DMA before APB DMA for Rx.
         */
        if (spi->dma_out)
                dma_start(spi->dma_out);
        setbits_le32(&spi->regs->dma_ctl, SPI_DMA_CTL_DMA);
        if (spi->dma_in)
                dma_start(spi->dma_in);
}

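/*
 * Complete a DMA transfer: wait for the APB DMA engine(s) to move all bytes,
 * stop them, disable the SPI DMA paths and release the claimed channels.
 */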
static int tegra_spi_dma_finish(struct tegra_spi_channel *spi)
{
        int ret;
        unsigned int todo;

        if (spi->dma_in) {
                todo = read32(&spi->dma_in->regs->wcount);
                while ((read32(&spi->dma_in->regs->dma_byte_sta) < todo) ||
                       dma_busy(spi->dma_in))
                        ; /* this shouldn't take long, no udelay */
                dma_stop(spi->dma_in);
                clrbits_le32(&spi->regs->command1, SPI_CMD1_RX_EN);
                dma_release(spi->dma_in);
        }

        if (spi->dma_out) {
                todo = read32(&spi->dma_out->regs->wcount);
                while ((read32(&spi->dma_out->regs->dma_byte_sta) < todo) ||
                       dma_busy(spi->dma_out))
                        spi_delay(spi, todo - spi_byte_count(spi));
                clrbits_le32(&spi->regs->command1, SPI_CMD1_TX_EN);
                dma_stop(spi->dma_out);
                dma_release(spi->dma_out);
        }

        if (fifo_error(spi)) {
                printk(BIOS_ERR, "%s: ERROR:\n", __func__);
                dump_dma_regs(spi->dma_out);
                dump_dma_regs(spi->dma_in);
                dump_spi_regs(spi);
                dump_fifo_status(spi);
                ret = -1;
                goto done;
        }

        ret = 0;
done:
        spi->dma_in = NULL;
        spi->dma_out = NULL;
        return ret;
}

/*
 * xfer_setup() prepares a transfer. It does sanity checking, alignment, and
 * sets the transfer mode used by this channel (if not set already).
 *
 * A few caveats to watch out for:
 * - The number of bytes which can be transferred may be smaller than the
 *   number of bytes the caller specifies. The number of bytes ready for
 *   a transfer will be returned (unless an error occurs).
 *
 * - Only one mode can be used for both RX and TX. The transfer mode of the
 *   SPI channel (spi->xfer_mode) is checked each time this function is called.
 *   If conflicting modes are detected, spi->xfer_mode will be set to
 *   XFER_MODE_NONE and an error will be returned.
 *
 * Returns bytes ready for transfer if successful, <0 to indicate error.
 */
static int xfer_setup(struct tegra_spi_channel *spi, void *buf,
                      unsigned int bytes, enum spi_direction dir)
{
        unsigned int line_size = dcache_line_bytes();
        unsigned int align;
        int ret = -1;

        if (!bytes)
                return 0;

        if (dir == SPI_SEND)
                spi->out_buf = buf;
        else if (dir == SPI_RECEIVE)
                spi->in_buf = buf;

        /*
         * Alignment considerations:
         * When we enable caching we'll need to clean/invalidate portions of
         * memory. So we need to be careful about memory alignment. Also, DMA
         * likes to operate on 4-bytes at a time on the AHB side. So for
         * example, if we only want to receive 1 byte, 4 bytes will be
         * written in memory even if those extra 3 bytes are beyond the length
         * we want.
         *
         * For now we'll use PIO to send/receive unaligned bytes. We may
         * consider setting aside some space for a kind of bounce buffer to
         * stay in DMA mode once we have a chance to benchmark the two
         * approaches.
         */

        if (bytes < line_size) {
                if (spi->xfer_mode == XFER_MODE_DMA) {
                        spi->xfer_mode = XFER_MODE_NONE;
                        ret = -1;
                } else {
                        spi->xfer_mode = XFER_MODE_PIO;
                        ret = tegra_spi_pio_prepare(spi, bytes, dir);
                }
                goto done;
        }

        /* transfer bytes before the aligned boundary */
        align = line_size - ((uintptr_t)buf % line_size);
        if ((align != 0) && (align != line_size)) {
                if (spi->xfer_mode == XFER_MODE_DMA) {
                        spi->xfer_mode = XFER_MODE_NONE;
                        ret = -1;
                } else {
                        spi->xfer_mode = XFER_MODE_PIO;
                        ret = tegra_spi_pio_prepare(spi, align, dir);
                }
                goto done;
        }

        /* do aligned DMA transfer */
        align = (((uintptr_t)buf + bytes) % line_size);
        if (bytes - align > 0) {
                unsigned int dma_bytes = bytes - align;

                if (spi->xfer_mode == XFER_MODE_PIO) {
                        spi->xfer_mode = XFER_MODE_NONE;
                        ret = -1;
                } else {
                        spi->xfer_mode = XFER_MODE_DMA;
                        ret = tegra_spi_dma_prepare(spi, dma_bytes, dir);
                }

                goto done;
        }

        /* transfer any remaining unaligned bytes */
        if (align) {
                if (spi->xfer_mode == XFER_MODE_DMA) {
                        spi->xfer_mode = XFER_MODE_NONE;
                        ret = -1;
                } else {
                        spi->xfer_mode = XFER_MODE_PIO;
                        ret = tegra_spi_pio_prepare(spi, align, dir);
                }
                goto done;
        }

done:
        return ret;
}

static void xfer_start(struct tegra_spi_channel *spi)
{
        if (spi->xfer_mode == XFER_MODE_DMA)
                tegra_spi_dma_start(spi);
        else
                tegra_spi_pio_start(spi);
}

static void xfer_wait(struct tegra_spi_channel *spi)
{
        tegra_spi_wait(spi);
}

static int xfer_finish(struct tegra_spi_channel *spi)
{
        int ret;

        if (spi->xfer_mode == XFER_MODE_DMA)
                ret = tegra_spi_dma_finish(spi);
        else
                ret = tegra_spi_pio_finish(spi);

        spi->xfer_mode = XFER_MODE_NONE;
        return ret;
}

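/*
 * No cropping needed: spi_xfer() below already splits large requests into
 * FIFO- or DMA-sized chunks internally.
 */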
unsigned int spi_crop_chunk(unsigned int cmd_len, unsigned int buf_len)
{
        return buf_len;
}

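/*
 * Process the transfer in chunks: each pass through the loop sets up the Tx
 * and/or Rx side for PIO or DMA (based on size and cache-line alignment),
 * starts the transfer and waits for it to finish.
 */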
int spi_xfer(struct spi_slave *slave, const void *dout,
             unsigned int out_bytes, void *din, unsigned int in_bytes)
{
        struct tegra_spi_channel *spi = to_tegra_spi(slave->bus);
        u8 *out_buf = (u8 *)dout;
        u8 *in_buf = (u8 *)din;
        unsigned int todo;
        int ret = 0;

        /* tegra bus numbers start at 1 */
        ASSERT(slave->bus >= 1 && slave->bus <= ARRAY_SIZE(tegra_spi_channels));

        while (out_bytes || in_bytes) {
                int x = 0;

                if (out_bytes == 0)
                        todo = in_bytes;
                else if (in_bytes == 0)
                        todo = out_bytes;
                else
                        todo = MIN(out_bytes, in_bytes);

                if (out_bytes) {
                        x = xfer_setup(spi, out_buf, todo, SPI_SEND);
                        if (x < 0) {
                                if (spi->xfer_mode == XFER_MODE_NONE) {
                                        spi->xfer_mode = XFER_MODE_PIO;
                                        continue;
                                } else {
                                        ret = -1;
                                        break;
                                }
                        }
                }
                if (in_bytes) {
                        x = xfer_setup(spi, in_buf, todo, SPI_RECEIVE);
                        if (x < 0) {
                                if (spi->xfer_mode == XFER_MODE_NONE) {
                                        spi->xfer_mode = XFER_MODE_PIO;
                                        continue;
                                } else {
                                        ret = -1;
                                        break;
                                }
                        }
                }

                /*
                 * Note: Some devices (such as Chrome EC) are sensitive to
                 * delays, so be careful when adding debug prints not to
                 * cause timeouts between transfers.
                 */
                xfer_start(spi);
                xfer_wait(spi);
                if (xfer_finish(spi)) {
                        ret = -1;
                        break;
                }

                /* Post-processing. */
                if (out_bytes) {
                        out_bytes -= x;
                        out_buf += x;
                }
                if (in_bytes) {
                        in_bytes -= x;
                        in_buf += x;
                }
        }

        if (ret < 0) {
                printk(BIOS_ERR, "%s: Error detected\n", __func__);
                printk(BIOS_ERR, "Transaction size: %u, bytes remaining: "
                       "%u out / %u in\n", todo, out_bytes, in_bytes);
                clear_fifo_status(spi);
        }
        return ret;
}

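/*
 * JEDEC serial-flash read opcodes. OUTSIZE is the command length: opcode plus
 * a 3-byte address, plus one dummy byte for the dual-output fast read.
 */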
#define JEDEC_READ                      0x03
#define JEDEC_READ_OUTSIZE              0x04
#define JEDEC_FAST_READ_DUAL            0x3b
#define JEDEC_FAST_READ_DUAL_OUTSIZE    0x05

static struct spi_slave *boot_slave;

static ssize_t tegra_spi_readat(const struct region_device *rdev, void *dest,
                                size_t offset, size_t count)
{
        u8 spi_read_cmd[JEDEC_FAST_READ_DUAL_OUTSIZE];
        unsigned int read_cmd_bytes;
        int ret = count;
        struct tegra_spi_channel *channel;

        channel = to_tegra_spi(boot_slave->bus);

        if (channel->dual_mode) {
                /*
                 * Command 0x3b will interleave data only, command 0xbb will
                 * interleave the address as well. It's nice to see the address
                 * plainly when debugging, and we're mostly concerned with
                 * large transfers so the optimization of using 0xbb isn't
                 * really worthwhile.
                 */
                spi_read_cmd[0] = JEDEC_FAST_READ_DUAL;
                spi_read_cmd[4] = 0x00; /* dummy byte */
                read_cmd_bytes = JEDEC_FAST_READ_DUAL_OUTSIZE;
        } else {
                spi_read_cmd[0] = JEDEC_READ;
                read_cmd_bytes = JEDEC_READ_OUTSIZE;
        }
        spi_read_cmd[1] = (offset >> 16) & 0xff;
        spi_read_cmd[2] = (offset >> 8) & 0xff;
        spi_read_cmd[3] = offset & 0xff;

        spi_claim_bus(boot_slave);

        if (spi_xfer(boot_slave, spi_read_cmd,
                     read_cmd_bytes, NULL, 0) < 0) {
                ret = -1;
                printk(BIOS_ERR, "%s: Failed to transfer %u bytes\n",
                       __func__, read_cmd_bytes);
                goto tegra_spi_cbfs_read_exit;
        }

        if (channel->dual_mode) {
                setbits_le32(&channel->regs->command1, SPI_CMD1_BOTH_EN_BIT);
        }
        if (spi_xfer(boot_slave, NULL, 0, dest, count)) {
                ret = -1;
                printk(BIOS_ERR, "%s: Failed to transfer %zu bytes\n",
                       __func__, count);
        }
        if (channel->dual_mode)
                clrbits_le32(&channel->regs->command1, SPI_CMD1_BOTH_EN_BIT);

tegra_spi_cbfs_read_exit:
        /* de-assert /CS */
        spi_release_bus(boot_slave);
        return ret;
}

struct spi_slave *spi_setup_slave(unsigned int bus, unsigned int cs)
{
        struct tegra_spi_channel *channel = to_tegra_spi(bus);
        if (!channel)
                return NULL;

        return &channel->slave;
}

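/*
 * Expose the SPI flash as coreboot's read-only boot device; reads go through
 * tegra_spi_readat() and are memory-mapped via the CBFS cache mmap helper.
 */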
static const struct region_device_ops tegra_spi_ops = {
        .mmap = mmap_helper_rdev_mmap,
        .munmap = mmap_helper_rdev_munmap,
        .readat = tegra_spi_readat,
};

static struct mmap_helper_region_device mdev =
        MMAP_HELPER_REGION_INIT(&tegra_spi_ops, 0, CONFIG_ROM_SIZE);

const struct region_device *boot_device_ro(void)
{
        return &mdev.rdev;
}

void boot_device_init(void)
{
        struct tegra_spi_channel *boot_chan;

        boot_chan = &tegra_spi_channels[CONFIG_BOOT_MEDIA_SPI_BUS - 1];
        boot_chan->slave.cs = CONFIG_BOOT_MEDIA_SPI_CHIP_SELECT;

#if CONFIG_SPI_FLASH_FAST_READ_DUAL_OUTPUT_3B == 1
        boot_chan->dual_mode = 1;
#endif
        boot_slave = &boot_chan->slave;

        mmap_helper_device_init(&mdev, _cbfs_cache, _cbfs_cache_size);
}