blob: e9a7ead18b125c15f5c7e9031fffe78599c536c4 [file] [log] [blame]
/*
 *
 * Copyright (C) 2010 Patrick Georgi
 * Copyright (C) 2013 secunet Security Networks AG
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
29
//#define XHCI_SPEW_DEBUG

#include <inttypes.h>
#include <arch/virtual.h>
#include "xhci_private.h"
#include "xhci.h"

/*
 * Forward declarations for the hci_t operation callbacks installed by
 * xhci_init(). All are file-local; the public entry points are
 * xhci_init()/xhci_pci_init().
 */
static void xhci_start(hci_t *controller);
static void xhci_stop(hci_t *controller);
static void xhci_reset(hci_t *controller);
static void xhci_reinit(hci_t *controller);
static void xhci_shutdown(hci_t *controller);
static int xhci_bulk(endpoint_t *ep, int size, u8 *data, int finalize);
static int xhci_control(usbdev_t *dev, direction_t dir, int drlen, void *devreq,
			int dalen, u8 *data);
static void* xhci_create_intr_queue(endpoint_t *ep, int reqsize, int reqcount, int reqtiming);
static void xhci_destroy_intr_queue(endpoint_t *ep, void *queue);
static u8* xhci_poll_intr_queue(void *queue);
Nico Huber90292652013-06-13 14:37:15 +020049/*
50 * Some structures must not cross page boundaries. To get this,
51 * we align them by their size (or the next greater power of 2).
52 */
53void *
54xhci_align(const size_t min_align, const size_t size)
Patrick Georgi6615ef32010-08-13 09:18:58 +000055{
Nico Huber90292652013-06-13 14:37:15 +020056 size_t align;
57 if (!(size & (size - 1)))
58 align = size; /* It's a power of 2 */
59 else
60 align = 1 << ((sizeof(unsigned) << 3) - __builtin_clz(size));
61 if (align < min_align)
62 align = min_align;
63 xhci_spew("Aligning %zu to %zu\n", size, align);
Julius Werner1f864342013-09-03 17:15:31 -070064 return dma_memalign(align, size);
Nico Huber90292652013-06-13 14:37:15 +020065}
66
67void
68xhci_clear_trb(trb_t *const trb, const int pcs)
69{
70 trb->ptr_low = 0;
71 trb->ptr_high = 0;
72 trb->status = 0;
73 trb->control = !pcs;
74}
75
76void
77xhci_init_cycle_ring(transfer_ring_t *const tr, const size_t ring_size)
78{
79 memset((void *)tr->ring, 0, ring_size * sizeof(*tr->ring));
80 TRB_SET(TT, &tr->ring[ring_size - 1], TRB_LINK);
81 TRB_SET(TC, &tr->ring[ring_size - 1], 1);
82 /* only one segment that points to itself */
83 tr->ring[ring_size - 1].ptr_low = virt_to_phys(tr->ring);
84
85 tr->pcs = 1;
86 tr->cur = tr->ring;
87}
88
89/* On Panther Point: switch ports shared with EHCI to xHCI */
Julius Wernereab2a292019-03-05 16:55:15 -080090#if CONFIG(LP_USB_PCI)
Nico Huber90292652013-06-13 14:37:15 +020091static void
92xhci_switch_ppt_ports(pcidev_t addr)
93{
94 if (pci_read_config32(addr, 0x00) == 0x1e318086) {
95 u32 reg32 = pci_read_config32(addr, 0xdc) & 0xf;
96 xhci_debug("Ports capable of SuperSpeed: 0x%"PRIx32"\n", reg32);
97
98 /* For now, do not enable SuperSpeed on any ports */
99 //pci_write_config32(addr, 0xd8, reg32);
100 pci_write_config32(addr, 0xd8, 0x00000000);
101 reg32 = pci_read_config32(addr, 0xd8) & 0xf;
102 xhci_debug("Configured for SuperSpeed: 0x%"PRIx32"\n", reg32);
103
104 reg32 = pci_read_config32(addr, 0xd4) & 0xf;
105 xhci_debug("Trying to switch over: 0x%"PRIx32"\n", reg32);
106
107 pci_write_config32(addr, 0xd0, reg32);
108 reg32 = pci_read_config32(addr, 0xd0) & 0xf;
109 xhci_debug("Actually switched over: 0x%"PRIx32"\n", reg32);
110 }
111}
Marc Jones86127c72014-12-29 22:07:04 -0700112#endif
Nico Huber90292652013-06-13 14:37:15 +0200113
#if CONFIG(LP_USB_PCI)
/* On Panther Point: switch all ports back to EHCI */
static void
xhci_switchback_ppt_ports(pcidev_t addr)
{
	/* Undo xhci_switch_ppt_ports(): writing 0 to the routing register
	 * at 0xd0 hands the shared ports back to the EHCI controller.
	 * Called from xhci_shutdown() so firmware leaves the ports in the
	 * state legacy software expects. */
	if (pci_read_config32(addr, 0x00) == 0x1e318086) {
		u32 reg32 = pci_read_config32(addr, 0xd0) & 0xf;
		xhci_debug("Switching ports back: 0x%"PRIx32"\n", reg32);
		pci_write_config32(addr, 0xd0, 0x00000000);
		reg32 = pci_read_config32(addr, 0xd0) & 0xf;
		xhci_debug("Still switched to xHCI: 0x%"PRIx32"\n", reg32);
	}
}
#endif
Nico Huberc3714422013-07-19 14:03:47 +0200128
Nico Huber90292652013-06-13 14:37:15 +0200129static long
130xhci_handshake(volatile u32 *const reg, u32 mask, u32 wait_for, long timeout_us)
131{
Caveh Jalali96f231a2020-05-28 18:19:36 -0700132 if (timeout_us <= 0)
133 return 0;
134 while ((*reg & mask) != wait_for && timeout_us != 0) {
135 --timeout_us;
136 udelay(1);
137 }
Nico Huber90292652013-06-13 14:37:15 +0200138 return timeout_us;
139}
140
141static int
142xhci_wait_ready(xhci_t *const xhci)
143{
144 xhci_debug("Waiting for controller to be ready... ");
145 if (!xhci_handshake(&xhci->opreg->usbsts, USBSTS_CNR, 0, 100000L)) {
146 usb_debug("timeout!\n");
147 return -1;
148 }
149 usb_debug("ok.\n");
150 return 0;
151}
152
/*
 * Create and initialize an xHCI controller instance for the MMIO region
 * at physical_bar. Allocates all host-side data structures (command ring,
 * event ring, ERST, DCBAA, scratchpad pages, optional DMA bounce buffer)
 * before touching the hardware, then resets/reinits the controller and
 * initializes the root hub.
 *
 * Returns the new hci_t on success, NULL on any failure (all partial
 * allocations are released via the goto cleanup chain at the bottom).
 */
hci_t *
xhci_init(unsigned long physical_bar)
{
	int i;

	if (!physical_bar)
		goto _exit_xhci;

	/* First, allocate and initialize static controller structures */

	hci_t *const controller = new_controller();
	controller->type = XHCI;
	controller->start = xhci_start;
	controller->stop = xhci_stop;
	controller->reset = xhci_reset;
	controller->init = xhci_reinit;
	controller->shutdown = xhci_shutdown;
	controller->bulk = xhci_bulk;
	controller->control = xhci_control;
	controller->set_address = xhci_set_address;
	controller->finish_device_config = xhci_finish_device_config;
	controller->destroy_device = xhci_destroy_dev;
	controller->create_intr_queue = xhci_create_intr_queue;
	controller->destroy_intr_queue = xhci_destroy_intr_queue;
	controller->poll_intr_queue = xhci_poll_intr_queue;
	controller->pcidev = 0;	/* filled in by xhci_pci_init() if on PCI */

	controller->reg_base = (uintptr_t)physical_bar;
	controller->instance = xzalloc(sizeof(xhci_t));
	xhci_t *const xhci = (xhci_t *)controller->instance;

	/* devices[0] doubles as the root hub device. */
	init_device_entry(controller, 0);
	xhci->roothub = controller->devices[0];
	/* Rings and the ERST must be 64-byte aligned and must not cross
	   page boundaries; xhci_align() guarantees both. */
	xhci->cr.ring = xhci_align(64, COMMAND_RING_SIZE * sizeof(trb_t));
	xhci->er.ring = xhci_align(64, EVENT_RING_SIZE * sizeof(trb_t));
	xhci->ev_ring_table = xhci_align(64, sizeof(erst_entry_t));
	if (!xhci->roothub || !xhci->cr.ring ||
	    !xhci->er.ring || !xhci->ev_ring_table) {
		xhci_debug("Out of memory\n");
		goto _free_xhci;
	}

	/* Locate the operational, runtime and doorbell register sets
	   relative to the capability registers at the BAR base. */
	xhci->capreg = phys_to_virt(physical_bar);
	xhci->opreg = phys_to_virt(physical_bar) + CAP_GET(CAPLEN, xhci->capreg);
	xhci->hcrreg = phys_to_virt(physical_bar) + xhci->capreg->rtsoff;
	xhci->dbreg = phys_to_virt(physical_bar) + xhci->capreg->dboff;

	xhci_debug("regbase: 0x%"PRIxPTR"\n", physical_bar);
	xhci_debug("caplen:  0x%"PRIx32"\n", CAP_GET(CAPLEN, xhci->capreg));
	xhci_debug("rtsoff:  0x%"PRIx32"\n", xhci->capreg->rtsoff);
	xhci_debug("dboff:   0x%"PRIx32"\n", xhci->capreg->dboff);

	xhci_debug("hciversion: %"PRIx8".%"PRIx8"\n",
		   CAP_GET(CAPVER_HI, xhci->capreg), CAP_GET(CAPVER_LO, xhci->capreg));
	/* Accept interface versions 0.96 through 1.20 (BCD-coded). */
	if ((CAP_GET(CAPVER, xhci->capreg) < 0x96) ||
	    (CAP_GET(CAPVER, xhci->capreg) > 0x120)) {
		xhci_debug("Unsupported xHCI version\n");
		goto _free_xhci;
	}

	xhci_debug("context size: %dB\n", CTXSIZE(xhci));
	xhci_debug("maxslots: 0x%02"PRIx32"\n", CAP_GET(MAXSLOTS, xhci->capreg));
	xhci_debug("maxports: 0x%02"PRIx32"\n", CAP_GET(MAXPORTS, xhci->capreg));
	/* PAGESIZE register is in 4KiB units. */
	const unsigned pagesize = xhci->opreg->pagesize << 12;
	xhci_debug("pagesize: 0x%04x\n", pagesize);

	/*
	 * We haven't touched the hardware yet. So we allocate all dynamic
	 * structures at first and can still chicken out easily if we run out
	 * of memory.
	 */
	xhci->max_slots_en = CAP_GET(MAXSLOTS, xhci->capreg) &
		CONFIG_LP_MASK_MaxSlotsEn;
	/* DCBAA entry 0 is reserved for the scratchpad array (below). */
	xhci->dcbaa = xhci_align(64, (xhci->max_slots_en + 1) * sizeof(u64));
	xhci->dev = malloc((xhci->max_slots_en + 1) * sizeof(*xhci->dev));
	if (!xhci->dcbaa || !xhci->dev) {
		xhci_debug("Out of memory\n");
		goto _free_xhci;
	}
	memset(xhci->dcbaa, 0x00, (xhci->max_slots_en + 1) * sizeof(u64));
	memset(xhci->dev, 0x00, (xhci->max_slots_en + 1) * sizeof(*xhci->dev));

	/*
	 * Let dcbaa[0] point to another array of pointers, sp_ptrs.
	 * The pointers therein point to scratchpad buffers (pages).
	 */
	const size_t max_sp_bufs =
		CAP_GET(MAX_SCRATCH_BUFS_HI, xhci->capreg) << 5 |
		CAP_GET(MAX_SCRATCH_BUFS_LO, xhci->capreg);
	xhci_debug("max scratchpad bufs: 0x%zx\n", max_sp_bufs);
	if (max_sp_bufs) {
		const size_t sp_ptrs_size = max_sp_bufs * sizeof(u64);
		xhci->sp_ptrs = xhci_align(64, sp_ptrs_size);
		if (!xhci->sp_ptrs) {
			xhci_debug("Out of memory\n");
			goto _free_xhci_structs;
		}
		memset(xhci->sp_ptrs, 0x00, sp_ptrs_size);
		for (i = 0; i < max_sp_bufs; ++i) {
			/* Could use mmap() here if we had it.
			   Maybe there is another way. */
			void *const page = memalign(pagesize, pagesize);
			if (!page) {
				xhci_debug("Out of memory\n");
				goto _free_xhci_structs;
			}
			xhci->sp_ptrs[i] = virt_to_phys(page);
		}
		xhci->dcbaa[0] = virt_to_phys(xhci->sp_ptrs);
	}

	/* Bounce buffer for callers whose data is not DMA-coherent. */
	if (dma_initialized()) {
		xhci->dma_buffer = dma_memalign(64 * 1024, DMA_SIZE);
		if (!xhci->dma_buffer) {
			xhci_debug("Not enough memory for DMA bounce buffer\n");
			goto _free_xhci_structs;
		}
	}

	/* Now start working on the hardware */
	if (xhci_wait_ready(xhci))
		goto _free_xhci_structs;

	/* TODO: Check if BIOS claims ownership (and hand over) */

	xhci_reset(controller);
	xhci_reinit(controller);

	xhci->roothub->controller = controller;
	xhci->roothub->init = xhci_rh_init;
	xhci->roothub->init(xhci->roothub);

	return controller;

_free_xhci_structs:
	/* free(NULL) is a no-op, so partially-filled state is fine here. */
	free(xhci->dma_buffer);
	if (xhci->sp_ptrs) {
		for (i = 0; i < max_sp_bufs; ++i) {
			if (xhci->sp_ptrs[i])
				free(phys_to_virt(xhci->sp_ptrs[i]));
		}
	}
	free(xhci->sp_ptrs);
	free(xhci->dcbaa);
_free_xhci:
	free((void *)xhci->ev_ring_table);
	free((void *)xhci->er.ring);
	free((void *)xhci->cr.ring);
	free(xhci->roothub);
	free(xhci->dev);
	free(xhci);
/* _free_controller: */
	detach_controller(controller);
	free(controller);
_exit_xhci:
	return NULL;
}
310
#if CONFIG(LP_USB_PCI)
/*
 * PCI front-end for xhci_init(): reads BAR0 to find the MMIO base,
 * rejects 64-bit BARs (BAR1 upper half non-zero), and applies
 * PCI-specific setup (Panther Point port switching, root-hub quirks).
 * Returns the controller or NULL if xhci_init() failed.
 */
hci_t *
xhci_pci_init(pcidev_t addr)
{
	u32 reg_addr;
	hci_t *controller;

	reg_addr = pci_read_config32(addr, PCI_BASE_ADDRESS_0) &
		   PCI_BASE_ADDRESS_MEM_MASK;
	if (pci_read_config32(addr, PCI_BASE_ADDRESS_1) > 0)
		fatal("We don't do 64bit addressing.\n");

	controller = xhci_init((unsigned long)reg_addr);
	if (controller) {
		xhci_t *xhci = controller->instance;
		controller->pcidev = addr;

		xhci_switch_ppt_ports(addr);

		/* Set up any quirks for controller root hub */
		xhci->roothub->quirks = pci_quirk_check(addr);
	}

	return controller;
}
#endif
337
/*
 * Halt the controller, then issue a Host Controller Reset (USBCMD.HCRST)
 * and wait (up to 1s) for the controller to clear the bit again.
 */
static void
xhci_reset(hci_t *const controller)
{
	xhci_t *const xhci = XHCI_INST(controller);

	/* HCRST may only be set on a halted controller. */
	xhci_stop(controller);

	xhci->opreg->usbcmd |= USBCMD_HCRST;

	/* Existing Intel xHCI controllers require a delay of 1 ms,
	 * after setting the CMD_RESET bit, and before accessing any
	 * HC registers. This allows the HC to complete the
	 * reset operation and be ready for HC register access.
	 * Without this delay, the subsequent HC register access,
	 * may result in a system hang very rarely.
	 */
	if (CONFIG(LP_ARCH_X86))
		mdelay(1);

	xhci_debug("Resetting controller... ");
	/* Hardware clears HCRST when the reset has completed. */
	if (!xhci_handshake(&xhci->opreg->usbcmd, USBCMD_HCRST, 0, 1000000L))
		usb_debug("timeout!\n");
	else
		usb_debug("ok.\n");
}
363
/*
 * Program a (freshly reset) controller with our previously allocated
 * structures — slot count, DCBAA, command ring, event ring and primary
 * interrupter — then start it. With USB_DEBUG, sanity-checks the command
 * ring by running 32 NOOP commands.
 */
static void
xhci_reinit(hci_t *controller)
{
	xhci_t *const xhci = XHCI_INST(controller);

	if (xhci_wait_ready(xhci))
		return;

	/* Enable all available slots */
	xhci->opreg->config = xhci->max_slots_en;

	/* Set DCBAA */
	xhci->opreg->dcbaap_lo = virt_to_phys(xhci->dcbaa);
	xhci->opreg->dcbaap_hi = 0;

	/* Initialize command ring */
	xhci_init_cycle_ring(&xhci->cr, COMMAND_RING_SIZE);
	xhci_debug("command ring @%p (0x%08"PRIxPTR")\n",
		   xhci->cr.ring, virt_to_phys(xhci->cr.ring));
	/* CRCR also carries the initial Ring Cycle State bit. */
	xhci->opreg->crcr_lo = virt_to_phys(xhci->cr.ring) | CRCR_RCS;
	xhci->opreg->crcr_hi = 0;

	/* Make sure interrupts are disabled */
	xhci->opreg->usbcmd &= ~USBCMD_INTE;

	/* Initialize event ring */
	xhci_reset_event_ring(&xhci->er);
	xhci_debug("event ring @%p (0x%08"PRIxPTR")\n",
		   xhci->er.ring, virt_to_phys(xhci->er.ring));
	xhci_debug("ERST Max: 0x%"PRIx32" -> 0x%x entries\n",
		   CAP_GET(ERST_MAX, xhci->capreg),
		   1 << CAP_GET(ERST_MAX, xhci->capreg));
	/* Single-entry event ring segment table. */
	memset((void*)xhci->ev_ring_table, 0x00, sizeof(erst_entry_t));
	xhci->ev_ring_table[0].seg_base_lo = virt_to_phys(xhci->er.ring);
	xhci->ev_ring_table[0].seg_base_hi = 0;
	xhci->ev_ring_table[0].seg_size = EVENT_RING_SIZE;

	/* pass event ring table to hardware */
	wmb();
	/* Initialize primary interrupter */
	xhci->hcrreg->intrrs[0].erstsz = 1;
	xhci_update_event_dq(xhci);
	/* erstba has to be written at last */
	xhci->hcrreg->intrrs[0].erstba_lo = virt_to_phys(xhci->ev_ring_table);
	xhci->hcrreg->intrrs[0].erstba_hi = 0;

	xhci_start(controller);

#ifdef USB_DEBUG
	int i;
	for (i = 0; i < 32; ++i) {
		xhci_debug("NOOP run #%d\n", i);
		trb_t *const cmd = xhci_next_command_trb(xhci);
		TRB_SET(TT, cmd, TRB_CMD_NOOP);

		xhci_post_command(xhci);

		/* Wait for result in event ring */
		int cc = xhci_wait_for_command_done(xhci, cmd, 1);

		xhci_debug("Command ring is %srunning: cc: %d\n",
			   (xhci->opreg->crcr_lo & CRCR_CRR) ? "" : "not ", cc);
		if (cc != CC_SUCCESS)
			xhci_debug("noop command failed.\n");
	}
#endif
}
431
/*
 * Tear down a controller created by xhci_init(): detach it from the USB
 * core, halt the hardware, hand shared ports back to EHCI (PCI only),
 * and free every structure allocated in xhci_init().
 */
static void
xhci_shutdown(hci_t *const controller)
{
	int i;

	if (controller == 0)
		return;

	detach_controller(controller);

	xhci_t *const xhci = XHCI_INST(controller);
	/* Halt before freeing memory the controller may still DMA into. */
	xhci_stop(controller);

#if CONFIG(LP_USB_PCI)
	if (controller->pcidev)
		xhci_switchback_ppt_ports(controller->pcidev);
#endif

	if (xhci->sp_ptrs) {
		/* Recompute the scratchpad count from the capability
		   registers, same as in xhci_init(). */
		const size_t max_sp_bufs =
			CAP_GET(MAX_SCRATCH_BUFS_HI, xhci->capreg) << 5 |
			CAP_GET(MAX_SCRATCH_BUFS_LO, xhci->capreg);
		for (i = 0; i < max_sp_bufs; ++i) {
			if (xhci->sp_ptrs[i])
				free(phys_to_virt(xhci->sp_ptrs[i]));
		}
	}
	free(xhci->sp_ptrs);
	free(xhci->dma_buffer);
	free(xhci->dcbaa);
	free(xhci->dev);
	free((void *)xhci->ev_ring_table);
	free((void *)xhci->er.ring);
	free((void *)xhci->cr.ring);
	free(xhci);
	free(controller);
}
469
470static void
Yidi Lin5ef258b2022-08-10 14:59:18 +0800471xhci_start(hci_t *controller)
Patrick Georgi6615ef32010-08-13 09:18:58 +0000472{
Nico Huber90292652013-06-13 14:37:15 +0200473 xhci_t *const xhci = XHCI_INST(controller);
474
475 xhci->opreg->usbcmd |= USBCMD_RS;
476 if (!xhci_handshake(&xhci->opreg->usbsts, USBSTS_HCH, 0, 1000000L))
477 xhci_debug("Controller didn't start within 1s\n");
Patrick Georgi6615ef32010-08-13 09:18:58 +0000478}
479
480static void
Yidi Lin5ef258b2022-08-10 14:59:18 +0800481xhci_stop(hci_t *controller)
Patrick Georgi6615ef32010-08-13 09:18:58 +0000482{
Nico Huber90292652013-06-13 14:37:15 +0200483 xhci_t *const xhci = XHCI_INST(controller);
484
485 xhci->opreg->usbcmd &= ~USBCMD_RS;
486 if (!xhci_handshake(&xhci->opreg->usbsts,
487 USBSTS_HCH, USBSTS_HCH, 1000000L))
488 xhci_debug("Controller didn't halt within 1s\n");
Patrick Georgi6615ef32010-08-13 09:18:58 +0000489}
490
/*
 * Recover an endpoint after an error. ep == NULL means the default
 * control endpoint (EP ID 1). Issues a Reset Endpoint command if the
 * endpoint is Halted, then a Set TR Dequeue Pointer command and a ring
 * re-init if it ended up Stopped or in Error.
 *
 * Returns 0 on success, 1 if either command fails.
 */
static int
xhci_reset_endpoint(usbdev_t *const dev, endpoint_t *const ep)
{
	xhci_t *const xhci = XHCI_INST(dev->controller);
	const int slot_id = dev->address;
	const int ep_id = ep ? xhci_ep_id(ep) : 1;
	epctx_t *const epctx = xhci->dev[slot_id].ctx.ep[ep_id];

	xhci_debug("Resetting ID %d EP %d (ep state: %d)\n",
		   slot_id, ep_id, EC_GET(STATE, epctx));

	/* Run Reset Endpoint Command if the EP is in Halted state */
	/* (EP state 2 == Halted per the xHCI endpoint context spec) */
	if (EC_GET(STATE, epctx) == 2) {
		const int cc = xhci_cmd_reset_endpoint(xhci, slot_id, ep_id);
		if (cc != CC_SUCCESS) {
			xhci_debug("Reset Endpoint Command failed: %d\n", cc);
			return 1;
		}
	}

	/* Clear TT buffer for bulk and control endpoints behind a TT */
	const int hub = dev->hub;
	if (hub && dev->speed < HIGH_SPEED &&
			dev->controller->devices[hub]->speed == HIGH_SPEED)
		/* TODO */;	/* not implemented yet */

	/* Reset transfer ring if the endpoint is in the right state */
	/* (3 == Stopped, 4 == Error; dequeue pointer may only be set then) */
	const unsigned ep_state = EC_GET(STATE, epctx);
	if (ep_state == 3 || ep_state == 4) {
		transfer_ring_t *const tr =
				xhci->dev[slot_id].transfer_rings[ep_id];
		const int cc = xhci_cmd_set_tr_dq(xhci, slot_id, ep_id,
						  tr->ring, 1);
		if (cc != CC_SUCCESS) {
			xhci_debug("Set TR Dequeue Command failed: %d\n", cc);
			return 1;
		}
		xhci_init_cycle_ring(tr, TRANSFER_RING_SIZE);
	}

	xhci_debug("Finished resetting ID %d EP %d (ep state: %d)\n",
		   slot_id, ep_id, EC_GET(STATE, epctx));

	return 0;
}
536
/*
 * Hand the TRB at tr->cur over to the controller by writing its cycle
 * bit last, then advance the enqueue pointer — following any LINK TRB
 * back to the start of the ring (toggling the producer cycle state when
 * the LINK's Toggle Cycle bit is set). The chain bit of the consumed
 * TRB is propagated onto the LINK TRB so a multi-TRB TD stays chained
 * across the wrap.
 */
static void
xhci_enqueue_trb(transfer_ring_t *const tr)
{
	const int chain = TRB_GET(CH, tr->cur);
	TRB_SET(C, tr->cur, tr->pcs);
	++tr->cur;

	while (TRB_GET(TT, tr->cur) == TRB_LINK) {
		xhci_spew("Handling LINK pointer\n");
		const int tc = TRB_GET(TC, tr->cur);
		TRB_SET(CH, tr->cur, chain);
		/* Make sure the chain bit is visible before the cycle bit
		   flips ownership of the LINK TRB to the controller. */
		wmb();
		TRB_SET(C, tr->cur, tr->pcs);
		tr->cur = phys_to_virt(tr->cur->ptr_low);
		if (tc)
			tr->pcs ^= 1;
	}
}
555
556static void
Liangfeng Wu8c7e4162016-05-24 19:40:46 +0800557xhci_ring_doorbell(endpoint_t *const ep)
558{
559 /* Ensure all TRB changes are written to memory. */
560 wmb();
561 XHCI_INST(ep->dev->controller)->dbreg[ep->dev->address] =
562 xhci_ep_id(ep);
563}
564
/*
 * Enqueue one TD (transfer descriptor) of dalen bytes starting at data,
 * split into TRBs so that no single TRB crosses a 64KiB boundary, and
 * terminated by an Event Data TRB with IOC set (the only TRB that
 * generates a transfer event). ep == 1 marks the control endpoint, for
 * which the first TRB becomes a Data Stage TRB carrying `dir`.
 * `mps` is the endpoint's max packet size, used for the TD Size field.
 */
static void
xhci_enqueue_td(transfer_ring_t *const tr, const int ep, const size_t mps,
		const int dalen, void *const data, const int dir)
{
	trb_t *trb = NULL;				/* cur TRB */
	u8 *cur_start = data;				/* cur data pointer */
	size_t length = dalen;				/* remaining bytes */
	size_t packets = (length + mps - 1) / mps;	/* remaining packets */
	size_t residue = 0;				/* residue from last TRB */
	size_t trb_count = 0;				/* TRBs added so far */

	while (length || !trb_count /* enqueue at least one */) {
		/* Clamp this TRB at the next 64KiB boundary after cur_start. */
		const size_t cur_end = ((size_t)cur_start + 0x10000) & ~0xffff;
		size_t cur_length = cur_end - (size_t)cur_start;
		if (length < cur_length) {
			cur_length = length;
			packets = 0;
			length = 0;
		} else if (!CONFIG(LP_USB_XHCI_MTK_QUIRK)) {
			/* Standard TDS: packets remaining *including* none of
			   this TRB's payload once it has been accounted. */
			packets -= (residue + cur_length) / mps;
			residue = (residue + cur_length) % mps;
			length -= cur_length;
		}

		trb = tr->cur;
		xhci_clear_trb(trb, tr->pcs);
		trb->ptr_low = virt_to_phys(cur_start);
		TRB_SET(TL, trb, cur_length);
		TRB_SET(TDS, trb, MIN(TRB_MAX_TD_SIZE, packets));
		TRB_SET(CH, trb, 1);	/* chain into one TD with the Event Data TRB */

		if (length && CONFIG(LP_USB_XHCI_MTK_QUIRK)) {
			/*
			 * For MTK's xHCI controller, TDS defines a number of
			 * packets that remain to be transferred for a TD after
			 * processing all Max packets in all previous TRBs, that
			 * means don't include the current TRB's.
			 */
			packets -= (residue + cur_length) / mps;
			residue = (residue + cur_length) % mps;
			length -= cur_length;
		}

		/* Check for first, data stage TRB */
		if (!trb_count && ep == 1) {
			TRB_SET(DIR, trb, dir);
			TRB_SET(TT, trb, TRB_DATA_STAGE);
		} else {
			TRB_SET(TT, trb, TRB_NORMAL);
		}
		/*
		 * This is a workaround for Synopsys DWC3. If the ENT flag is
		 * not set for the Normal and Data Stage TRBs. We get Event TRB
		 * with length 0x20d from the controller when we enqueue a TRB
		 * for the IN endpoint with length 0x200.
		 */
		if (!length)
			TRB_SET(ENT, trb, 1);

		xhci_enqueue_trb(tr);

		cur_start += cur_length;
		++trb_count;
	}

	/* Terminating Event Data TRB: generates the transfer event. */
	trb = tr->cur;
	xhci_clear_trb(trb, tr->pcs);
	trb->ptr_low = virt_to_phys(trb);	/* for easier debugging only */
	TRB_SET(TT, trb, TRB_EVENT_DATA);
	TRB_SET(IOC, trb, 1);

	xhci_enqueue_trb(tr);
}
638
/*
 * Run a control transfer on the default endpoint: Setup stage, optional
 * Data stage (dalen bytes to/from src, per dir), and Status stage.
 * Non-DMA-coherent buffers are bounced through xhci->dma_buffer.
 *
 * Returns the number of bytes transferred, or a negative error code
 * (from xhci_wait_for_transfer(), e.g. TIMEOUT) on failure.
 */
static int
xhci_control(usbdev_t *const dev, const direction_t dir,
	     const int drlen, void *const devreq,
	     const int dalen, unsigned char *const src)
{
	unsigned char *data = src;
	xhci_t *const xhci = XHCI_INST(dev->controller);
	epctx_t *const epctx = xhci->dev[dev->address].ctx.ep0;
	transfer_ring_t *const tr = xhci->dev[dev->address].transfer_rings[1];

	/* Each 64KiB-crossing needs its own TRB; make sure the whole TD
	   (plus setup/status/event TRBs) fits on the ring. */
	const size_t off = (size_t)data & 0xffff;
	if ((off + dalen) > ((TRANSFER_RING_SIZE - 4) << 16)) {
		xhci_debug("Unsupported transfer size\n");
		return -1;
	}

	/* Reset endpoint if it's not running */
	const unsigned ep_state = EC_GET(STATE, epctx);
	if (ep_state > 1) {
		if (xhci_reset_endpoint(dev, NULL))
			return -1;
	}

	/* Bounce non-coherent data through the pre-allocated DMA buffer. */
	if (dalen && !dma_coherent(src)) {
		data = xhci->dma_buffer;
		if (dalen > DMA_SIZE) {
			xhci_debug("Control transfer too large: %d\n", dalen);
			return -1;
		}
		if (dir == OUT)
			memcpy(data, src, dalen);
	}

	/* Fill and enqueue setup TRB */
	trb_t *const setup = tr->cur;
	xhci_clear_trb(setup, tr->pcs);
	/* The 8-byte SETUP packet is carried immediately in the TRB (IDT). */
	setup->ptr_low = ((u32 *)devreq)[0];
	setup->ptr_high = ((u32 *)devreq)[1];
	TRB_SET(TL, setup, 8);
	TRB_SET(TRT, setup, (dalen)
			? ((dir == OUT) ? TRB_TRT_OUT_DATA : TRB_TRT_IN_DATA)
			: TRB_TRT_NO_DATA);
	TRB_SET(TT, setup, TRB_SETUP_STAGE);
	TRB_SET(IDT, setup, 1);
	TRB_SET(IOC, setup, 1);
	xhci_enqueue_trb(tr);

	/* Fill and enqueue data TRBs (if any) */
	if (dalen) {
		const unsigned mps = EC_GET(MPS, epctx);
		const unsigned dt_dir = (dir == OUT) ? TRB_DIR_OUT : TRB_DIR_IN;
		xhci_enqueue_td(tr, 1, mps, dalen, data, dt_dir);
	}

	/* Fill status TRB */
	trb_t *const status = tr->cur;
	xhci_clear_trb(status, tr->pcs);
	/* Status stage runs opposite to the data direction. */
	TRB_SET(DIR, status, (dir == OUT) ? TRB_DIR_IN : TRB_DIR_OUT);
	TRB_SET(TT, status, TRB_STATUS_STAGE);
	TRB_SET(IOC, status, 1);
	xhci_enqueue_trb(tr);

	/* Ring doorbell for EP0 */
	xhci_ring_doorbell(&dev->endpoints[0]);

	/* Wait for transfer events */
	int i, transferred = 0;
	const int n_stages = 2 + !!dalen;
	for (i = 0; i < n_stages; ++i) {
		const int ret = xhci_wait_for_transfer(xhci, dev->address, 1);
		transferred += ret;
		if (ret < 0) {
			if (ret == TIMEOUT) {
				xhci_debug("Stopping ID %d EP 1\n",
					   dev->address);
				xhci_cmd_stop_endpoint(xhci, dev->address, 1);
			}
			xhci_debug("Stage %d/%d failed: %d\n"
				   " trb ring: @%p\n"
				   " setup trb: @%p\n"
				   " status trb: @%p\n"
				   " ep state: %d -> %d\n"
				   " usbsts: 0x%08"PRIx32"\n",
				   i, n_stages, ret,
				   tr->ring, setup, status,
				   ep_state, EC_GET(STATE, epctx),
				   xhci->opreg->usbsts);
			return ret;
		}
	}

	/* Copy IN data back out of the bounce buffer, if one was used. */
	if (dir == IN && data != src)
		memcpy(src, data, transferred);
	return transferred;
}
734
/* finalize == 1: if data is of packet aligned size, add a zero length packet */
/*
 * Run a bulk transfer on the given endpoint and wait for its completion.
 *
 * ep:       bulk endpoint; transfer direction is taken from ep->direction
 * size:     number of bytes to transfer
 * src:      caller's buffer; source for OUT, destination for IN
 * finalize: request for a trailing zero-length packet (ignored, see below)
 *
 * Returns the number of bytes actually transferred, or a negative value
 * (-1, TIMEOUT or a controller completion code) on failure.
 */
static int
xhci_bulk(endpoint_t *const ep, const int size, u8 *const src,
	  const int finalize)
{
	/* finalize: Hopefully the xHCI controller always does this.
	   We have no control over the packets. */

	u8 *data = src;
	xhci_t *const xhci = XHCI_INST(ep->dev->controller);
	const int slot_id = ep->dev->address;
	const int ep_id = xhci_ep_id(ep);
	epctx_t *const epctx = xhci->dev[slot_id].ctx.ep[ep_id];
	transfer_ring_t *const tr = xhci->dev[slot_id].transfer_rings[ep_id];

	/* Each TRB moves at most 64KiB and the TD is split on 64KiB
	   boundaries of the buffer, so at most (ring size - 2) usable
	   TRBs limit the total transfer length (minus the page offset). */
	const size_t off = (size_t)data & 0xffff;
	if ((off + size) > ((TRANSFER_RING_SIZE - 2) << 16)) {
		xhci_debug("Unsupported transfer size\n");
		return -1;
	}

	/* Bounce through the driver's DMA buffer if the caller's buffer
	   isn't DMA coherent; IN data is copied back after completion. */
	if (!dma_coherent(src)) {
		data = xhci->dma_buffer;
		if (size > DMA_SIZE) {
			xhci_debug("Bulk transfer too large: %d\n", size);
			return -1;
		}
		if (ep->direction == OUT)
			memcpy(data, src, size);
	}

	/* Reset endpoint if it's not running */
	/* NOTE(review): states > 1 presumably cover halted/stopped/error;
	   state 1 should be "running" — confirm against the xHCI spec. */
	const unsigned ep_state = EC_GET(STATE, epctx);
	if (ep_state > 1) {
		if (xhci_reset_endpoint(ep->dev, ep))
			return -1;
	}

	/* Enqueue transfer and ring doorbell */
	const unsigned mps = EC_GET(MPS, epctx);
	const unsigned dir = (ep->direction == OUT) ? TRB_DIR_OUT : TRB_DIR_IN;
	xhci_enqueue_td(tr, ep_id, mps, size, data, dir);
	xhci_ring_doorbell(ep);

	/* Wait for transfer event */
	const int ret = xhci_wait_for_transfer(xhci, ep->dev->address, ep_id);
	if (ret < 0) {
		/* On timeout, stop the endpoint so the controller stops
		   processing the now-abandoned TD. */
		if (ret == TIMEOUT) {
			xhci_debug("Stopping ID %d EP %d\n",
				   ep->dev->address, ep_id);
			xhci_cmd_stop_endpoint(xhci, ep->dev->address, ep_id);
		}
		xhci_debug("Bulk transfer failed: %d\n"
			   "  ep state: %d -> %d\n"
			   "  usbsts:   0x%08"PRIx32"\n",
			   ret, ep_state,
			   EC_GET(STATE, epctx),
			   xhci->opreg->usbsts);
		return ret;
	}

	/* Copy IN data out of the bounce buffer, if one was used */
	if (ep->direction == IN && data != src)
		memcpy(src, data, ret);
	return ret;
}
800
801static trb_t *
802xhci_next_trb(trb_t *cur, int *const pcs)
803{
804 ++cur;
805 while (TRB_GET(TT, cur) == TRB_LINK) {
806 if (pcs && TRB_GET(TC, cur))
807 *pcs ^= 1;
808 cur = phys_to_virt(cur->ptr_low);
809 }
810 return cur;
Patrick Georgi6615ef32010-08-13 09:18:58 +0000811}
812
/* create and hook-up an intr queue into device schedule */
/*
 * Pre-populates the endpoint's transfer ring with reqcount normal TRBs,
 * each pointing at a freshly allocated reqsize-byte buffer, then enqueues
 * all but the last one and rings the doorbell. The last prepared TRB is
 * kept as a spare that xhci_poll_intr_queue() hands to the controller
 * when it recycles a completed buffer.
 *
 * Returns an opaque queue handle (intrq_t *), or NULL on any failure
 * (limits exceeded, queue already present, allocation failure, or not
 * enough free TRBs on the ring).
 */
static void *
xhci_create_intr_queue(endpoint_t *const ep,
		       const int reqsize, const int reqcount,
		       const int reqtiming)
{
	/* reqtiming: We ignore it and use the interval from the
		      endpoint descriptor configured earlier. */

	xhci_t *const xhci = XHCI_INST(ep->dev->controller);
	const int slot_id = ep->dev->address;
	const int ep_id = xhci_ep_id(ep);
	transfer_ring_t *const tr = xhci->dev[slot_id].transfer_rings[ep_id];

	/* Two TRBs are reserved: one link TRB and one spare. */
	if (reqcount > (TRANSFER_RING_SIZE - 2)) {
		xhci_debug("reqcount is too high, at most %d supported\n",
			   TRANSFER_RING_SIZE - 2);
		return NULL;
	}
	/* A single TRB's Transfer Length field caps each request at 64KiB. */
	if (reqsize > 0x10000) {
		xhci_debug("reqsize is too large, at most 64KiB supported\n");
		return NULL;
	}
	if (xhci->dev[slot_id].interrupt_queues[ep_id]) {
		xhci_debug("Only one interrupt queue per endpoint supported\n");
		return NULL;
	}

	/* Allocate intrq structure and reqdata chunks */

	intrq_t *const intrq = malloc(sizeof(*intrq));
	if (!intrq) {
		xhci_debug("Out of memory\n");
		return NULL;
	}

	/* Walk the ring from the current enqueue point, filling (but not
	   yet publishing) one TRB per request. pcs shadows the producer
	   cycle state across link TRBs without touching tr->pcs. */
	int i;
	int pcs = tr->pcs;
	trb_t *cur = tr->cur;
	for (i = 0; i < reqcount; ++i) {
		/* Cycle bit equal to producer state means the TRB is still
		   owned by the controller, i.e. the ring is too full. */
		if (TRB_GET(C, cur) == pcs) {
			xhci_debug("Not enough empty TRBs\n");
			goto _free_return;
		}
		void *const reqdata = xhci_align(1, reqsize);
		if (!reqdata) {
			xhci_debug("Out of memory\n");
			goto _free_return;
		}
		xhci_clear_trb(cur, pcs);
		cur->ptr_low = virt_to_phys(reqdata);
		cur->ptr_high = 0;
		TRB_SET(TL, cur, reqsize);
		TRB_SET(TT, cur, TRB_NORMAL);
		TRB_SET(ISP, cur, 1);	/* interrupt on short packet */
		TRB_SET(IOC, cur, 1);	/* interrupt on completion */

		cur = xhci_next_trb(cur, &pcs);
	}

	/* Publish the queue before handing TRBs to the controller, so the
	   event handler can attribute completions to it. */
	intrq->size = reqsize;
	intrq->count = reqcount;
	intrq->next = tr->cur;
	intrq->ready = NULL;
	intrq->ep = ep;
	xhci->dev[slot_id].interrupt_queues[ep_id] = intrq;

	/* Now enqueue all the prepared TRBs but the last
	   and ring the doorbell. */
	for (i = 0; i < (reqcount - 1); ++i)
		xhci_enqueue_trb(tr);
	xhci_ring_doorbell(ep);

	return intrq;

_free_return:
	/* Unwind: free the i buffers allocated so far, then the queue. */
	cur = tr->cur;
	for (--i; i >= 0; --i) {
		free(phys_to_virt(cur->ptr_low));
		cur = xhci_next_trb(cur, NULL);
	}
	free(intrq);
	return NULL;
}
897
/* remove queue from device schedule, dropping all data that came in */
/*
 * Tears down an interrupt queue created by xhci_create_intr_queue():
 * stops the endpoint if it is running, drains pending transfer events,
 * frees every request buffer and the queue itself, then resets the
 * controller's dequeue pointer and reinitializes the transfer ring.
 * q must be the handle returned for this very endpoint.
 */
static void
xhci_destroy_intr_queue(endpoint_t *const ep, void *const q)
{
	xhci_t *const xhci = XHCI_INST(ep->dev->controller);
	const int slot_id = ep->dev->address;
	const int ep_id = xhci_ep_id(ep);
	transfer_ring_t *const tr = xhci->dev[slot_id].transfer_rings[ep_id];

	intrq_t *const intrq = (intrq_t *)q;

	/* Make sure the endpoint is stopped */
	/* NOTE(review): state 1 presumably is "running" — confirm against
	   the endpoint context state encoding in the xHCI spec. */
	if (EC_GET(STATE, xhci->dev[slot_id].ctx.ep[ep_id]) == 1) {
		const int cc = xhci_cmd_stop_endpoint(xhci, slot_id, ep_id);
		if (cc != CC_SUCCESS)
			xhci_debug("Warning: Failed to stop endpoint\n");
	}

	/* Process all remaining transfer events */
	xhci_handle_events(xhci);

	/* Free all pending transfers and the interrupt queue structure */
	int i;
	for (i = 0; i < intrq->count; ++i) {
		free(phys_to_virt(intrq->next->ptr_low));
		intrq->next = xhci_next_trb(intrq->next, NULL);
	}
	/* Unpublish before freeing so event handling can't see a stale queue */
	xhci->dev[slot_id].interrupt_queues[ep_id] = NULL;
	free((void *)intrq);

	/* Reset the controller's dequeue pointer and reinitialize the ring */
	xhci_cmd_set_tr_dq(xhci, slot_id, ep_id, tr->ring, 1);
	xhci_init_cycle_ring(tr, TRANSFER_RING_SIZE);
}
932
/* read one intr-packet from queue, if available. extend the queue for new input.
   return NULL if nothing new available.
   Recommended use: while (data=poll_intr_queue(q)) process(data);
 */
/*
 * Non-blocking poll of an interrupt queue. Processes pending controller
 * events, then — if a completed request is ready — returns its buffer
 * after re-arming the ring: the spare TRB is handed to the controller
 * and the returned buffer becomes the new spare. Zero-length completions
 * are skipped (their buffer is recycled and the loop tries the next one).
 */
static u8 *
xhci_poll_intr_queue(void *const q)
{
	if (!q)
		return NULL;

	intrq_t *const intrq = (intrq_t *)q;
	endpoint_t *const ep = intrq->ep;
	xhci_t *const xhci = XHCI_INST(ep->dev->controller);

	/* TODO: Reset interrupt queue if it gets halted? */

	xhci_handle_events(xhci);

	u8 *reqdata = NULL;
	/* intrq->ready marks the last completed TRB; intrq->next is the
	   oldest outstanding one. Loop until we find a non-empty buffer
	   or run out of completed TRBs. */
	while (!reqdata && intrq->ready) {
		const int ep_id = xhci_ep_id(ep);
		transfer_ring_t *const tr =
			xhci->dev[ep->dev->address].transfer_rings[ep_id];

		/* Fetch the request's buffer */
		reqdata = phys_to_virt(intrq->next->ptr_low);

		/* Enqueue the last (spare) TRB and ring doorbell */
		xhci_enqueue_trb(tr);
		xhci_ring_doorbell(ep);

		/* Reuse the current buffer for the next spare TRB */
		xhci_clear_trb(tr->cur, tr->pcs);
		tr->cur->ptr_low = virt_to_phys(reqdata);
		tr->cur->ptr_high = 0;
		TRB_SET(TL, tr->cur, intrq->size);
		TRB_SET(TT, tr->cur, TRB_NORMAL);
		TRB_SET(ISP, tr->cur, 1);
		TRB_SET(IOC, tr->cur, 1);

		/* Check if anything was transferred */
		/* NOTE(review): assumes the transfer-event handler rewrites
		   the completed TRB's TL field to the number of bytes
		   actually transferred — confirm in xhci_handle_events(). */
		const size_t read = TRB_GET(TL, intrq->next);
		if (!read)
			reqdata = NULL;
		else if (read < intrq->size)
			/* At least zero it, poll interface is rather limited */
			memset(reqdata + read, 0x00, intrq->size - read);

		/* Advance the interrupt queue */
		if (intrq->ready == intrq->next)
			/* This was last TRB being ready */
			intrq->ready = NULL;
		intrq->next = xhci_next_trb(intrq->next, NULL);
	}

	return reqdata;
}