/* SPDX-License-Identifier: GPL-2.0-only */

#include <spi-generic.h>
#include <spi_flash.h>
#include <arch/cache.h>
#include <device/mmio.h>
#include <soc/addressmap.h>
#include <soc/qspi_common.h>
#include <soc/gpio.h>
#include <soc/clock.h>
#include <symbols.h>
#include <assert.h>
#include <gpio.h>
#include <string.h>

#define CACHE_LINE_SIZE 64

static int curr_desc_idx = -1;

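/*
 * Descriptor for one segment of a DMA command chain. The fields above
 * the separator form the descriptor consumed by the QSPI controller;
 * bounce_src/bounce_dst/bounce_length are software-only bookkeeping
 * used by flush_chain() to copy read data out of the bounce buffer.
 * The padding keeps each descriptor exactly one cache line in size.
 */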
struct cmd_desc {
	uint32_t data_address;
	uint32_t next_descriptor;
	uint32_t direction:1;
	uint32_t multi_io_mode:3;
	uint32_t reserved1:4;
	uint32_t fragment:1;
	uint32_t reserved2:7;
	uint32_t length:16;
	//------------------------//
	uint32_t bounce_src;
	uint32_t bounce_dst;
	uint32_t bounce_length;
	uint64_t padding[5];
};

enum qspi_mode {
	SDR_1BIT = 1,
	SDR_2BIT = 2,
	SDR_4BIT = 3,
	DDR_1BIT = 5,
	DDR_2BIT = 6,
	DDR_4BIT = 7,
};

enum cs_state {
	CS_DEASSERT,
	CS_ASSERT
};

struct xfer_cfg {
	enum qspi_mode mode;
};

enum bus_xfer_direction {
	MASTER_READ = 0,
	MASTER_WRITE = 1,
};

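/*
 * Descriptor chain and per-descriptor bounce buffers, placed in the
 * cache-coherent DMA region so the controller and the CPU agree on
 * their contents without extra cache maintenance.
 */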
struct {
	struct cmd_desc descriptors[3];
	uint8_t buffers[3][CACHE_LINE_SIZE];
} *dma = (void *)_dma_coherent;

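/*
 * Hand a descriptor chain to the controller and busy-wait until it
 * reports DMA_CHAIN_DONE.
 */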
static void dma_transfer_chain(struct cmd_desc *chain)
{
	uint32_t mstr_int_status;

	write32(&qcom_qspi->mstr_int_sts, 0xFFFFFFFF);
	write32(&qcom_qspi->next_dma_desc_addr, (uint32_t)(uintptr_t) chain);

	while (1) {
		mstr_int_status = read32(&qcom_qspi->mstr_int_sts);
		if (mstr_int_status & DMA_CHAIN_DONE)
			break;
	}
}

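/*
 * Run the queued descriptor chain, then walk it: direct reads get
 * their target cache lines invalidated again (speculation may have
 * refetched them during the DMA), and bounced reads are copied from
 * the bounce buffer to their final destination. Finally reset the
 * allocator so the next transaction starts a fresh chain.
 */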
static void flush_chain(void)
{
	struct cmd_desc *desc = &dma->descriptors[0];
	uint8_t *src;
	uint8_t *dst;

	dma_transfer_chain(desc);

	while (desc) {
		if (desc->direction == MASTER_READ) {
			if (desc->bounce_length == 0)
				dcache_invalidate_by_mva(
					(void *)(uintptr_t) desc->data_address,
					desc->length);
			else {
				src = (void *)(uintptr_t) desc->bounce_src;
				dst = (void *)(uintptr_t) desc->bounce_dst;
				memcpy(dst, src, desc->bounce_length);
			}
		}
		desc = (void *)(uintptr_t) desc->next_descriptor;
	}
	curr_desc_idx = -1;
}

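/*
 * Take the next free descriptor from the pool, initialize it with safe
 * defaults, and link it to the end of the current chain. Note that the
 * pool holds only 3 descriptors: queue_data() generates at most one
 * prolog, one aligned, and one epilog segment per transfer.
 */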
static struct cmd_desc *allocate_descriptor(void)
{
	struct cmd_desc *current;
	struct cmd_desc *next;
	uint8_t index;

	current = (curr_desc_idx == -1) ?
		NULL : &dma->descriptors[curr_desc_idx];

	index = ++curr_desc_idx;
	next = &dma->descriptors[index];

	next->data_address = (uint32_t)(uintptr_t) dma->buffers[index];

	next->next_descriptor = 0;
	next->direction = MASTER_READ;
	next->multi_io_mode = 0;
	next->reserved1 = 0;
	/*
	 * The QSPI controller doesn't support transfers that start with a
	 * read segment. So to support read transfers that are not preceded
	 * by a write, set the transfer fragment bit = 1.
	 */
	next->fragment = 1;
	next->reserved2 = 0;
	next->length = 0;
	next->bounce_src = 0;
	next->bounce_dst = 0;
	next->bounce_length = 0;

	if (current)
		current->next_descriptor = (uint32_t)(uintptr_t) next;

	return next;
}

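/* Chip select is active-low: drive the pin high to deassert. */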
static void cs_change(enum cs_state state)
{
	gpio_set(QSPI_CS, state == CS_DEASSERT);
}

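/*
 * Mux the QSPI data and clock pads to their controller functions; the
 * chip select stays a regular GPIO, driven manually by cs_change().
 */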
static void configure_gpios(void)
{
	gpio_output(QSPI_CS, 1);

	gpio_configure(QSPI_DATA_0, GPIO_FUNC_QSPI_DATA_0,
		       GPIO_NO_PULL, GPIO_8MA, GPIO_OUTPUT);

	gpio_configure(QSPI_DATA_1, GPIO_FUNC_QSPI_DATA_1,
		       GPIO_NO_PULL, GPIO_8MA, GPIO_OUTPUT);

	gpio_configure(QSPI_CLK, GPIO_FUNC_QSPI_CLK,
		       GPIO_NO_PULL, GPIO_8MA, GPIO_OUTPUT);
}

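/*
 * Queue a segment that goes through a cache-line-sized bounce buffer
 * in the coherent DMA region. Writes are copied into the buffer now;
 * reads record a pending copy-out that flush_chain() performs once
 * the transfer has finished.
 */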
static void queue_bounce_data(uint8_t *data, uint32_t data_bytes,
			      enum qspi_mode data_mode, bool write)
{
	struct cmd_desc *desc;
	uint8_t *ptr;

	desc = allocate_descriptor();
	desc->direction = write;
	desc->multi_io_mode = data_mode;
	ptr = (void *)(uintptr_t) desc->data_address;

	if (write) {
		memcpy(ptr, data, data_bytes);
	} else {
		desc->bounce_src = (uint32_t)(uintptr_t) ptr;
		desc->bounce_dst = (uint32_t)(uintptr_t) data;
		desc->bounce_length = data_bytes;
	}

	desc->length = data_bytes;
}

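/*
 * Queue a segment that DMAs directly to/from the caller's buffer,
 * which must be cache-line aligned. Clean the cache before writes so
 * the controller sees current data; invalidate before reads so stale
 * lines aren't served after the DMA.
 */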
static void queue_direct_data(uint8_t *data, uint32_t data_bytes,
			      enum qspi_mode data_mode, bool write)
{
	struct cmd_desc *desc;

	desc = allocate_descriptor();
	desc->direction = write;
	desc->multi_io_mode = data_mode;
	desc->data_address = (uint32_t)(uintptr_t) data;
	desc->length = data_bytes;

	if (write)
		dcache_clean_by_mva(data, data_bytes);
	else
		dcache_invalidate_by_mva(data, data_bytes);
}

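/*
 * Split a transfer at cache-line boundaries: the unaligned prolog and
 * epilog are bounced through coherent buffers, while the aligned bulk
 * in the middle is DMAed directly with explicit cache maintenance.
 */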
static void queue_data(uint8_t *data, uint32_t data_bytes,
		       enum qspi_mode data_mode, bool write)
{
	uint8_t *aligned_ptr;
	uint8_t *epilog_ptr;
	uint32_t prolog_bytes, aligned_bytes, epilog_bytes;

	if (data_bytes == 0)
		return;

	aligned_ptr = (uint8_t *)ALIGN_UP((uintptr_t)data, CACHE_LINE_SIZE);

	prolog_bytes = MIN(data_bytes, aligned_ptr - data);
	aligned_bytes = ALIGN_DOWN(data_bytes - prolog_bytes, CACHE_LINE_SIZE);
	epilog_bytes = data_bytes - prolog_bytes - aligned_bytes;

	epilog_ptr = data + prolog_bytes + aligned_bytes;

	if (prolog_bytes)
		queue_bounce_data(data, prolog_bytes, data_mode, write);
	if (aligned_bytes)
		queue_direct_data(aligned_ptr, aligned_bytes, data_mode, write);
	if (epilog_bytes)
		queue_bounce_data(epilog_ptr, epilog_bytes, data_mode, write);
}

/*
 * The way to encode the sampling delay is:
 *
 * QSPI_SAMPLE_CLK_CONFIG	delay (cycle)
 * ----------------------------------------
 * 0xFFF = 1111 1111 1111b	7/8
 * 0xDB6 = 1101 1011 0110b	6/8
 * 0xB6D = 1011 0110 1101b	5/8
 * 0x924 = 1001 0010 0100b	4/8
 * 0x6DB = 0110 1101 1011b	3/8
 * 0x492 = 0100 1001 0010b	2/8
 * 0x249 = 0010 0100 1001b	1/8
 * 0x000 = 0000 0000 0000b	None
 *
 * Each 12-bit value above is the same 3-bit pattern repeated in all
 * four fields, so the register value is built below by replicating
 * sdelay four times.
 */
static void reg_init(uint32_t sdelay)
{
	uint32_t spi_mode;
	uint32_t tx_data_oe_delay, tx_data_delay;
	uint32_t mstr_config;
	uint32_t sampling_delay;

	spi_mode = 0;

	tx_data_oe_delay = 0;
	tx_data_delay = 0;

	mstr_config = (tx_data_oe_delay << TX_DATA_OE_DELAY_SHIFT) |
		      (tx_data_delay << TX_DATA_DELAY_SHIFT) | (SBL_EN) |
		      (spi_mode << SPI_MODE_SHIFT) |
		      (PIN_HOLDN) |
		      (DMA_ENABLE) |
		      (FULL_CYCLE_MODE);

	write32(&qcom_qspi->mstr_cfg, mstr_config);
	write32(&qcom_qspi->ahb_mstr_cfg, 0xA42);
	write32(&qcom_qspi->mstr_int_en, 0x0);
	write32(&qcom_qspi->mstr_int_sts, 0xFFFFFFFF);
	write32(&qcom_qspi->rd_fifo_cfg, 0x0);
	write32(&qcom_qspi->rd_fifo_rst, RESET_FIFO);
	sampling_delay = sdelay << 9 | sdelay << 6 | sdelay << 3 | sdelay << 0;
	write32(&qcom_qspi->sampling_clk_cfg, sampling_delay);
}

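/*
 * hz is the desired SPI interface clock. The core clock is requested
 * at 4x that rate, which appears to account for the controller's
 * internal division of its input clock.
 */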
void quadspi_init(uint32_t hz, uint32_t sdelay)
{
	assert(dcache_line_bytes() == CACHE_LINE_SIZE);
	clock_configure_qspi(hz * 4);
	configure_gpios();
	reg_init(sdelay);
}

int qspi_claim_bus(const struct spi_slave *slave)
{
	cs_change(CS_ASSERT);
	return 0;
}

void qspi_release_bus(const struct spi_slave *slave)
{
	cs_change(CS_DEASSERT);
}

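/*
 * The controller is used half-duplex here: a call may either write or
 * read, never both, so exactly one of out_bytes/in_bytes is non-zero
 * and `in_bytes | out_bytes` is simply the transfer length.
 */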
static int xfer(enum qspi_mode mode, const void *dout, size_t out_bytes,
		void *din, size_t in_bytes)
{
	if ((out_bytes && !dout) || (in_bytes && !din) ||
	    (in_bytes && out_bytes)) {
		return -1;
	}

	/* Nothing to do; don't hand the controller a stale descriptor chain. */
	if (!out_bytes && !in_bytes)
		return 0;

	queue_data((uint8_t *) (out_bytes ? dout : din),
		   in_bytes | out_bytes, mode, !!out_bytes);

	flush_chain();

	return 0;
}

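/*
 * Bus entry points: standard transfers use a single data line
 * (SDR_1BIT); qspi_xfer_dual uses two data lines (SDR_2BIT).
 */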
int qspi_xfer(const struct spi_slave *slave, const void *dout,
	      size_t out_bytes, void *din, size_t in_bytes)
{
	return xfer(SDR_1BIT, dout, out_bytes, din, in_bytes);
}

int qspi_xfer_dual(const struct spi_slave *slave, const void *dout,
		   size_t out_bytes, void *din, size_t in_bytes)
{
	return xfer(SDR_2BIT, dout, out_bytes, din, in_bytes);
}