blob: 2e70316ac7cb3778985212cf51d3cddba0071049 [file] [log] [blame]
Patrick Georgi6615ef32010-08-13 09:18:58 +00001/*
2 * This file is part of the libpayload project.
3 *
4 * Copyright (C) 2010 Patrick Georgi
Nico Huber90292652013-06-13 14:37:15 +02005 * Copyright (C) 2013 secunet Security Networks AG
Patrick Georgi6615ef32010-08-13 09:18:58 +00006 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 */
30
Nico Huber90292652013-06-13 14:37:15 +020031//#define XHCI_SPEW_DEBUG
Patrick Georgi6615ef32010-08-13 09:18:58 +000032
Nico Huber90292652013-06-13 14:37:15 +020033#include <inttypes.h>
Patrick Georgi6615ef32010-08-13 09:18:58 +000034#include <arch/virtual.h>
Patrick Georgi6615ef32010-08-13 09:18:58 +000035#include "xhci_private.h"
Nico Huber90292652013-06-13 14:37:15 +020036#include "xhci.h"
Patrick Georgi6615ef32010-08-13 09:18:58 +000037
38static void xhci_start (hci_t *controller);
39static void xhci_stop (hci_t *controller);
40static void xhci_reset (hci_t *controller);
Nico Huber90292652013-06-13 14:37:15 +020041static void xhci_reinit (hci_t *controller);
Patrick Georgi6615ef32010-08-13 09:18:58 +000042static void xhci_shutdown (hci_t *controller);
43static int xhci_bulk (endpoint_t *ep, int size, u8 *data, int finalize);
44static int xhci_control (usbdev_t *dev, direction_t dir, int drlen, void *devreq,
45 int dalen, u8 *data);
46static void* xhci_create_intr_queue (endpoint_t *ep, int reqsize, int reqcount, int reqtiming);
47static void xhci_destroy_intr_queue (endpoint_t *ep, void *queue);
48static u8* xhci_poll_intr_queue (void *queue);
49
Nico Huber90292652013-06-13 14:37:15 +020050/*
51 * Some structures must not cross page boundaries. To get this,
52 * we align them by their size (or the next greater power of 2).
53 */
/*
 * Allocate DMA memory such that the object cannot straddle a page
 * boundary: the alignment used is the smallest power of two that is
 * >= size, bumped up to min_align if that is larger.
 */
void *
xhci_align(const size_t min_align, const size_t size)
{
	size_t align;

	if (size & (size - 1)) {
		/* Not a power of two: round up to the next one */
		align = 1 << ((sizeof(unsigned) << 3) - __builtin_clz(size));
	} else {
		/* Already a power of two (or zero), use it directly */
		align = size;
	}
	if (align < min_align)
		align = min_align;
	xhci_spew("Aligning %zu to %zu\n", size, align);
	return dma_memalign(align, size);
}
67
68void
69xhci_clear_trb(trb_t *const trb, const int pcs)
70{
71 trb->ptr_low = 0;
72 trb->ptr_high = 0;
73 trb->status = 0;
74 trb->control = !pcs;
75}
76
/*
 * (Re)initialize a transfer ring as a single segment that links back to
 * itself, and reset producer state to the start of the ring.
 *
 * @param tr        Transfer ring to initialize; tr->ring must already be
 *                  allocated and hold ring_size TRBs.
 * @param ring_size Number of TRBs in the ring, including the link TRB.
 */
void
xhci_init_cycle_ring(transfer_ring_t *const tr, const size_t ring_size)
{
	memset((void *)tr->ring, 0, ring_size * sizeof(*tr->ring));
	/* Last TRB is a link TRB pointing back to the first TRB... */
	TRB_SET(TT, &tr->ring[ring_size - 1], TRB_LINK);
	/* ...with the Toggle Cycle flag set, since following it wraps. */
	TRB_SET(TC, &tr->ring[ring_size - 1], 1);
	/* only one segment that points to itself */
	tr->ring[ring_size - 1].ptr_low = virt_to_phys(tr->ring);

	/* Producer cycle state starts at 1; enqueue pointer at the start. */
	tr->pcs = 1;
	tr->cur = tr->ring;
}
89
/* On Panther Point: switch ports shared with EHCI to xHCI */
#if IS_ENABLED(CONFIG_LP_USB_PCI)
static void
xhci_switch_ppt_ports(pcidev_t addr)
{
	/* 0x1e318086: vendor 8086 (Intel), device 1e31 (Panther Point xHCI).
	 * The config registers 0xd0..0xdc below are Intel vendor-specific
	 * port mux/SuperSpeed enable registers — NOTE(review): presumably
	 * XUSB2PR/USB3PSSEN/USB3PRM per the Intel 7-Series PCH datasheet;
	 * verify against that document. */
	if (pci_read_config32(addr, 0x00) == 0x1e318086) {
		u32 reg32 = pci_read_config32(addr, 0xdc) & 0xf;
		xhci_debug("Ports capable of SuperSpeed: 0x%"PRIx32"\n", reg32);

		/* For now, do not enable SuperSpeed on any ports */
		//pci_write_config32(addr, 0xd8, reg32);
		pci_write_config32(addr, 0xd8, 0x00000000);
		reg32 = pci_read_config32(addr, 0xd8) & 0xf;
		xhci_debug("Configured for SuperSpeed: 0x%"PRIx32"\n", reg32);

		/* Read the mask of switchable ports, then request the
		 * switch-over and read back what actually stuck. */
		reg32 = pci_read_config32(addr, 0xd4) & 0xf;
		xhci_debug("Trying to switch over: 0x%"PRIx32"\n", reg32);

		pci_write_config32(addr, 0xd0, reg32);
		reg32 = pci_read_config32(addr, 0xd0) & 0xf;
		xhci_debug("Actually switched over: 0x%"PRIx32"\n", reg32);
	}
}
#endif
Nico Huber90292652013-06-13 14:37:15 +0200114
#if IS_ENABLED(CONFIG_LP_USB_PCI)
/* On Panther Point: switch all ports back to EHCI */
static void
xhci_switchback_ppt_ports(pcidev_t addr)
{
	/* 0x1e318086: Intel Panther Point xHCI; register 0xd0 is the
	 * vendor-specific port mux (writing 0 hands all shared ports
	 * back to the EHCI controller). Inverse of
	 * xhci_switch_ppt_ports(), used on shutdown. */
	if (pci_read_config32(addr, 0x00) == 0x1e318086) {
		u32 reg32 = pci_read_config32(addr, 0xd0) & 0xf;
		xhci_debug("Switching ports back: 0x%"PRIx32"\n", reg32);
		pci_write_config32(addr, 0xd0, 0x00000000);
		reg32 = pci_read_config32(addr, 0xd0) & 0xf;
		xhci_debug("Still switched to xHCI: 0x%"PRIx32"\n", reg32);
	}
}
#endif
Nico Huberc3714422013-07-19 14:03:47 +0200129
Nico Huber90292652013-06-13 14:37:15 +0200130static long
131xhci_handshake(volatile u32 *const reg, u32 mask, u32 wait_for, long timeout_us)
132{
133 while ((*reg & mask) != wait_for && timeout_us--) udelay(1);
134 return timeout_us;
135}
136
137static int
138xhci_wait_ready(xhci_t *const xhci)
139{
140 xhci_debug("Waiting for controller to be ready... ");
141 if (!xhci_handshake(&xhci->opreg->usbsts, USBSTS_CNR, 0, 100000L)) {
142 usb_debug("timeout!\n");
143 return -1;
144 }
145 usb_debug("ok.\n");
146 return 0;
147}
148
/*
 * Allocate and initialize an xHCI controller instance for the MMIO
 * register window at physical_bar.
 *
 * Order matters: all host-memory structures (rings, DCBAA, scratchpad
 * buffers, bounce buffer) are allocated *before* any hardware register
 * is touched, so an out-of-memory condition can bail out cleanly via
 * the goto-cleanup chain below. Returns the new hci_t, or NULL on any
 * failure (allocation, unsupported controller, controller never ready).
 */
hci_t *
xhci_init (unsigned long physical_bar)
{
	int i;

	/* First, allocate and initialize static controller structures */

	hci_t *const controller = new_controller();
	controller->type = XHCI;
	controller->start = xhci_start;
	controller->stop = xhci_stop;
	controller->reset = xhci_reset;
	controller->init = xhci_reinit;
	controller->shutdown = xhci_shutdown;
	controller->bulk = xhci_bulk;
	controller->control = xhci_control;
	controller->set_address = xhci_set_address;
	controller->finish_device_config= xhci_finish_device_config;
	controller->destroy_device = xhci_destroy_dev;
	controller->create_intr_queue = xhci_create_intr_queue;
	controller->destroy_intr_queue = xhci_destroy_intr_queue;
	controller->poll_intr_queue = xhci_poll_intr_queue;
	controller->pcidev = 0;	/* set later by xhci_pci_init(), if any */

	controller->reg_base = (uintptr_t)physical_bar;
	controller->instance = xzalloc(sizeof(xhci_t));
	xhci_t *const xhci = (xhci_t *)controller->instance;

	/* Device 0 is the root hub; rings must be 64-byte aligned and
	 * must not cross page boundaries (hence xhci_align). */
	init_device_entry(controller, 0);
	xhci->roothub = controller->devices[0];
	xhci->cr.ring = xhci_align(64, COMMAND_RING_SIZE * sizeof(trb_t));
	xhci->er.ring = xhci_align(64, EVENT_RING_SIZE * sizeof(trb_t));
	xhci->ev_ring_table = xhci_align(64, sizeof(erst_entry_t));
	if (!xhci->roothub || !xhci->cr.ring ||
			!xhci->er.ring || !xhci->ev_ring_table) {
		xhci_debug("Out of memory\n");
		goto _free_xhci;
	}

	/* Locate the operational, runtime and doorbell register sets
	 * relative to the capability registers. */
	xhci->capreg = phys_to_virt(physical_bar);
	xhci->opreg = ((void *)xhci->capreg) + xhci->capreg->caplength;
	xhci->hcrreg = ((void *)xhci->capreg) + xhci->capreg->rtsoff;
	xhci->dbreg = ((void *)xhci->capreg) + xhci->capreg->dboff;
	/* NOTE(review): physical_bar is unsigned long but printed with
	 * PRIx32 — mismatched on LP64 targets; consider %lx. */
	xhci_debug("regbase: 0x%"PRIx32"\n", physical_bar);
	xhci_debug("caplen:  0x%"PRIx32"\n", xhci->capreg->caplength);
	xhci_debug("rtsoff:  0x%"PRIx32"\n", xhci->capreg->rtsoff);
	xhci_debug("dboff:   0x%"PRIx32"\n", xhci->capreg->dboff);

	/* Accept xHCI interface versions 0.96 through 1.10 only. */
	xhci_debug("hciversion: %"PRIx8".%"PRIx8"\n",
		   xhci->capreg->hciver_hi, xhci->capreg->hciver_lo);
	if ((xhci->capreg->hciversion < 0x96) ||
			(xhci->capreg->hciversion > 0x110)) {
		xhci_debug("Unsupported xHCI version\n");
		goto _free_xhci;
	}

	xhci_debug("context size: %dB\n", CTXSIZE(xhci));
	xhci_debug("maxslots: 0x%02lx\n", xhci->capreg->MaxSlots);
	xhci_debug("maxports: 0x%02lx\n", xhci->capreg->MaxPorts);
	/* PAGESIZE register: bit n set => page size 2^(n+12) */
	const unsigned pagesize = xhci->opreg->pagesize << 12;
	xhci_debug("pagesize: 0x%04x\n", pagesize);

	/*
	 * We haven't touched the hardware yet. So we allocate all dynamic
	 * structures at first and can still chicken out easily if we run out
	 * of memory.
	 */
	xhci->max_slots_en = xhci->capreg->MaxSlots & CONFIG_LP_MASK_MaxSlotsEn;
	/* +1 throughout: slot IDs are 1-based, entry 0 is reserved
	 * (dcbaa[0] holds the scratchpad array pointer, see below). */
	xhci->dcbaa = xhci_align(64, (xhci->max_slots_en + 1) * sizeof(u64));
	xhci->dev = malloc((xhci->max_slots_en + 1) * sizeof(*xhci->dev));
	if (!xhci->dcbaa || !xhci->dev) {
		xhci_debug("Out of memory\n");
		goto _free_xhci;
	}
	memset(xhci->dcbaa, 0x00, (xhci->max_slots_en + 1) * sizeof(u64));
	memset(xhci->dev, 0x00, (xhci->max_slots_en + 1) * sizeof(*xhci->dev));

	/*
	 * Let dcbaa[0] point to another array of pointers, sp_ptrs.
	 * The pointers therein point to scratchpad buffers (pages).
	 */
	const size_t max_sp_bufs = xhci->capreg->Max_Scratchpad_Bufs_Hi << 5 |
				   xhci->capreg->Max_Scratchpad_Bufs_Lo;
	xhci_debug("max scratchpad bufs: 0x%zx\n", max_sp_bufs);
	if (max_sp_bufs) {
		const size_t sp_ptrs_size = max_sp_bufs * sizeof(u64);
		xhci->sp_ptrs = xhci_align(64, sp_ptrs_size);
		if (!xhci->sp_ptrs) {
			xhci_debug("Out of memory\n");
			goto _free_xhci_structs;
		}
		memset(xhci->sp_ptrs, 0x00, sp_ptrs_size);
		for (i = 0; i < max_sp_bufs; ++i) {
			/* Could use mmap() here if we had it.
			   Maybe there is another way. */
			void *const page = memalign(pagesize, pagesize);
			if (!page) {
				xhci_debug("Out of memory\n");
				goto _free_xhci_structs;
			}
			xhci->sp_ptrs[i] = virt_to_phys(page);
		}
		xhci->dcbaa[0] = virt_to_phys(xhci->sp_ptrs);
	}

	/* Bounce buffer for callers whose data is not DMA-coherent;
	 * only possible once the DMA allocator is up. */
	if (dma_initialized()) {
		xhci->dma_buffer = dma_memalign(64 * 1024, DMA_SIZE);
		if (!xhci->dma_buffer) {
			xhci_debug("Not enough memory for DMA bounce buffer\n");
			goto _free_xhci_structs;
		}
	}

	/* Now start working on the hardware */
	if (xhci_wait_ready(xhci))
		goto _free_xhci_structs;

	/* TODO: Check if BIOS claims ownership (and hand over) */

	xhci_reset(controller);
	xhci_reinit(controller);

	/* Bring up the root hub last, after the controller is running. */
	xhci->roothub->controller = controller;
	xhci->roothub->init = xhci_rh_init;
	xhci->roothub->init(xhci->roothub);

	return controller;

	/* Cleanup chain: later labels free what earlier stages allocated;
	 * free(NULL) is a no-op, so partially-initialized state is fine. */
_free_xhci_structs:
	free(xhci->dma_buffer);
	if (xhci->sp_ptrs) {
		for (i = 0; i < max_sp_bufs; ++i) {
			if (xhci->sp_ptrs[i])
				free(phys_to_virt(xhci->sp_ptrs[i]));
		}
	}
	free(xhci->sp_ptrs);
	free(xhci->dcbaa);
_free_xhci:
	free((void *)xhci->ev_ring_table);
	free((void *)xhci->er.ring);
	free((void *)xhci->cr.ring);
	free(xhci->roothub);
	free(xhci->dev);
	free(xhci);
/* _free_controller: */
	detach_controller(controller);
	free(controller);
	return NULL;
}
299
#if IS_ENABLED(CONFIG_LP_USB_PCI)
/*
 * Initialize the xHCI controller behind the given PCI device: read its
 * MMIO BAR, hand it to xhci_init(), and apply the Panther Point port
 * switch-over. Returns the controller, or NULL if xhci_init() failed.
 */
hci_t *
xhci_pci_init (pcidev_t addr)
{
	u32 reg_addr;
	hci_t *controller;

	/* BAR0 at config offset 0x10; mask off the low type/flag bits. */
	reg_addr = pci_read_config32 (addr, 0x10) & ~0xf;
	/* A nonzero upper BAR dword (offset 0x14) means a 64-bit MMIO
	 * address, which this driver does not support. */
	if (pci_read_config32 (addr, 0x14) > 0) {
		fatal("We don't do 64bit addressing.\n");
	}

	controller = xhci_init((unsigned long)reg_addr);
	if (controller) {
		controller->pcidev = addr;

		xhci_switch_ppt_ports(addr);
	}

	return controller;
}
#endif
322
/*
 * Perform a Host Controller Reset: halt the controller, set
 * USBCMD.HCRST and wait (up to 1s) for the controller to clear it
 * again. Ordering is significant: HCRST may only be set on a halted
 * controller.
 */
static void
xhci_reset(hci_t *const controller)
{
	xhci_t *const xhci = XHCI_INST(controller);

	xhci_stop(controller);

	xhci->opreg->usbcmd |= USBCMD_HCRST;

	/* Existing Intel xHCI controllers require a delay of 1 ms,
	 * after setting the CMD_RESET bit, and before accessing any
	 * HC registers. This allows the HC to complete the
	 * reset operation and be ready for HC register access.
	 * Without this delay, the subsequent HC register access,
	 * may result in a system hang very rarely.
	 */
	if (IS_ENABLED(CONFIG_LP_ARCH_X86))
		mdelay(1);

	xhci_debug("Resetting controller... ");
	/* The controller clears HCRST when the reset has completed. */
	if (!xhci_handshake(&xhci->opreg->usbcmd, USBCMD_HCRST, 0, 1000000L))
		usb_debug("timeout!\n");
	else
		usb_debug("ok.\n");
}
348
/*
 * Program the (freshly reset) controller with our pre-allocated host
 * memory structures — DCBAA, command ring, event ring / ERST — and
 * start it. The register write order below is deliberate and must not
 * be changed (e.g. erstba written last, wmb() before handing the event
 * ring table to hardware).
 */
static void
xhci_reinit (hci_t *controller)
{
	xhci_t *const xhci = XHCI_INST(controller);

	if (xhci_wait_ready(xhci))
		return;

	/* Enable all available slots */
	xhci->opreg->config = xhci->max_slots_en;

	/* Set DCBAA */
	xhci->opreg->dcbaap_lo = virt_to_phys(xhci->dcbaa);
	xhci->opreg->dcbaap_hi = 0;

	/* Initialize command ring; CRCR_RCS = initial Ring Cycle State,
	 * matching tr->pcs == 1 from xhci_init_cycle_ring(). */
	xhci_init_cycle_ring(&xhci->cr, COMMAND_RING_SIZE);
	xhci_debug("command ring @%p (0x%08x)\n",
		   xhci->cr.ring, virt_to_phys(xhci->cr.ring));
	xhci->opreg->crcr_lo = virt_to_phys(xhci->cr.ring) | CRCR_RCS;
	xhci->opreg->crcr_hi = 0;

	/* Make sure interrupts are disabled */
	xhci->opreg->usbcmd &= ~USBCMD_INTE;

	/* Initialize event ring: single segment described by one ERST
	 * entry. */
	xhci_reset_event_ring(&xhci->er);
	xhci_debug("event ring @%p (0x%08x)\n",
		   xhci->er.ring, virt_to_phys(xhci->er.ring));
	xhci_debug("ERST Max: 0x%lx -> 0x%lx entries\n",
		   xhci->capreg->ERST_Max, 1 << xhci->capreg->ERST_Max);
	memset((void*)xhci->ev_ring_table, 0x00, sizeof(erst_entry_t));
	xhci->ev_ring_table[0].seg_base_lo = virt_to_phys(xhci->er.ring);
	xhci->ev_ring_table[0].seg_base_hi = 0;
	xhci->ev_ring_table[0].seg_size = EVENT_RING_SIZE;

	/* pass event ring table to hardware */
	wmb();
	/* Initialize primary interrupter */
	xhci->hcrreg->intrrs[0].erstsz = 1;
	xhci_update_event_dq(xhci);
	/* erstba has to be written at last */
	xhci->hcrreg->intrrs[0].erstba_lo = virt_to_phys(xhci->ev_ring_table);
	xhci->hcrreg->intrrs[0].erstba_hi = 0;

	xhci_start(controller);

#ifdef USB_DEBUG
	/* Smoke test: post NOOP commands and check that the command ring
	 * keeps running (debug builds only). */
	int i;
	for (i = 0; i < 32; ++i) {
		xhci_debug("NOOP run #%d\n", i);
		trb_t *const cmd = xhci_next_command_trb(xhci);
		TRB_SET(TT, cmd, TRB_CMD_NOOP);

		xhci_post_command(xhci);

		/* Wait for result in event ring */
		xhci_wait_for_command_done(xhci, cmd, 1);
		xhci_debug("Command ring is %srunning\n",
			   (xhci->opreg->crcr_lo & CRCR_CRR) ? "" : "not ");
	}
#endif
}
412
/*
 * Stop the controller and release every resource xhci_init() acquired:
 * scratchpad pages, pointer arrays, DMA bounce buffer, rings and the
 * controller structures themselves. Safe to call with NULL. On PCI
 * Panther Point systems, hands shared ports back to EHCI first.
 */
static void
xhci_shutdown(hci_t *const controller)
{
	int i;

	if (controller == 0)
		return;

	detach_controller(controller);

	xhci_t *const xhci = XHCI_INST(controller);
	/* Halt the controller before freeing memory it may still DMA to. */
	xhci_stop(controller);

#if IS_ENABLED(CONFIG_LP_USB_PCI)
	if (controller->pcidev)
		xhci_switchback_ppt_ports(controller->pcidev);
#endif

	if (xhci->sp_ptrs) {
		/* Recompute the scratchpad count from the capability
		 * registers, same formula as in xhci_init(). */
		size_t max_sp_bufs = xhci->capreg->Max_Scratchpad_Bufs_Hi << 5 |
				     xhci->capreg->Max_Scratchpad_Bufs_Lo;
		for (i = 0; i < max_sp_bufs; ++i) {
			if (xhci->sp_ptrs[i])
				free(phys_to_virt(xhci->sp_ptrs[i]));
		}
	}
	free(xhci->sp_ptrs);
	free(xhci->dma_buffer);
	free(xhci->dcbaa);
	free(xhci->dev);
	free((void *)xhci->ev_ring_table);
	free((void *)xhci->er.ring);
	free((void *)xhci->cr.ring);
	free(xhci);
	free(controller);
}
449
450static void
451xhci_start (hci_t *controller)
452{
Nico Huber90292652013-06-13 14:37:15 +0200453 xhci_t *const xhci = XHCI_INST(controller);
454
455 xhci->opreg->usbcmd |= USBCMD_RS;
456 if (!xhci_handshake(&xhci->opreg->usbsts, USBSTS_HCH, 0, 1000000L))
457 xhci_debug("Controller didn't start within 1s\n");
Patrick Georgi6615ef32010-08-13 09:18:58 +0000458}
459
460static void
461xhci_stop (hci_t *controller)
462{
Nico Huber90292652013-06-13 14:37:15 +0200463 xhci_t *const xhci = XHCI_INST(controller);
464
465 xhci->opreg->usbcmd &= ~USBCMD_RS;
466 if (!xhci_handshake(&xhci->opreg->usbsts,
467 USBSTS_HCH, USBSTS_HCH, 1000000L))
468 xhci_debug("Controller didn't halt within 1s\n");
Patrick Georgi6615ef32010-08-13 09:18:58 +0000469}
470
/*
 * Recover an endpoint that is no longer in the Running state.
 *
 * @param dev The device owning the endpoint.
 * @param ep  The endpoint to reset, or NULL for the default control
 *            endpoint (EP0, xHCI endpoint ID 1).
 * @return 0 on success, 1 if a controller command failed.
 *
 * Endpoint state values read via EC_GET(STATE, ...): 2 is Halted;
 * 3 and 4 are handled below as states that allow Set TR Dequeue
 * (NOTE(review): presumably Stopped and Error per the xHCI spec —
 * confirm against xhci_private.h / the spec).
 */
static int
xhci_reset_endpoint(usbdev_t *const dev, endpoint_t *const ep)
{
	xhci_t *const xhci = XHCI_INST(dev->controller);
	const int slot_id = dev->address;
	const int ep_id = ep ? xhci_ep_id(ep) : 1;
	epctx_t *const epctx = xhci->dev[slot_id].ctx.ep[ep_id];

	xhci_debug("Resetting ID %d EP %d (ep state: %d)\n",
		   slot_id, ep_id, EC_GET(STATE, epctx));

	/* Run Reset Endpoint Command if the EP is in Halted state */
	if (EC_GET(STATE, epctx) == 2) {
		const int cc = xhci_cmd_reset_endpoint(xhci, slot_id, ep_id);
		if (cc != CC_SUCCESS) {
			xhci_debug("Reset Endpoint Command failed: %d\n", cc);
			return 1;
		}
	}

	/* Clear TT buffer for bulk and control endpoints behind a TT */
	const int hub = dev->hub;
	if (hub && dev->speed < HIGH_SPEED &&
			dev->controller->devices[hub]->speed == HIGH_SPEED)
		/* TODO */;

	/* Reset transfer ring if the endpoint is in the right state */
	const unsigned ep_state = EC_GET(STATE, epctx);
	if (ep_state == 3 || ep_state == 4) {
		transfer_ring_t *const tr =
				xhci->dev[slot_id].transfer_rings[ep_id];
		/* Point the hardware dequeue pointer back to the ring
		 * start (cycle state 1), then reinitialize our producer
		 * side to match. */
		const int cc = xhci_cmd_set_tr_dq(xhci, slot_id, ep_id,
						  tr->ring, 1);
		if (cc != CC_SUCCESS) {
			xhci_debug("Set TR Dequeue Command failed: %d\n", cc);
			return 1;
		}
		xhci_init_cycle_ring(tr, TRANSFER_RING_SIZE);
	}

	xhci_debug("Finished resetting ID %d EP %d (ep state: %d)\n",
		   slot_id, ep_id, EC_GET(STATE, epctx));

	return 0;
}
516
/*
 * Hand the TRB at tr->cur over to the controller by writing its cycle
 * bit, then advance the enqueue pointer, following any link TRB at the
 * new position (propagating the chain bit into the link TRB and
 * toggling the producer cycle state when the link's Toggle Cycle flag
 * is set). The caller must have fully filled in tr->cur beforehand.
 */
static void
xhci_enqueue_trb(transfer_ring_t *const tr)
{
	const int chain = TRB_GET(CH, tr->cur);
	/* Writing the cycle bit is what makes the TRB visible to the
	 * controller, so it must be the last field written. */
	TRB_SET(C, tr->cur, tr->pcs);
	++tr->cur;

	while (TRB_GET(TT, tr->cur) == TRB_LINK) {
		xhci_spew("Handling LINK pointer\n");
		const int tc = TRB_GET(TC, tr->cur);
		TRB_SET(CH, tr->cur, chain);
		/* Barrier: the chain bit must reach memory before the
		 * cycle bit hands the link TRB to the controller. */
		wmb();
		TRB_SET(C, tr->cur, tr->pcs);
		tr->cur = phys_to_virt(tr->cur->ptr_low);
		if (tc)
			tr->pcs ^= 1;
	}
}
535
536static void
Liangfeng Wu8c7e4162016-05-24 19:40:46 +0800537xhci_ring_doorbell(endpoint_t *const ep)
538{
539 /* Ensure all TRB changes are written to memory. */
540 wmb();
541 XHCI_INST(ep->dev->controller)->dbreg[ep->dev->address] =
542 xhci_ep_id(ep);
543}
544
/*
 * Enqueue one transfer descriptor (TD) for dalen bytes at data: a chain
 * of Normal (or, for EP0, one leading Data Stage) TRBs, split so that
 * no single TRB crosses a 64KiB boundary, terminated by an Event Data
 * TRB with IOC set that produces the completion event.
 *
 * @param tr    Transfer ring to enqueue on.
 * @param ep    xHCI endpoint ID; 1 means the default control endpoint,
 *              which makes the first TRB a Data Stage TRB.
 * @param mps   The endpoint's max packet size (for the TD Size field).
 * @param dalen Total transfer length in bytes; 0 still enqueues one TRB.
 * @param data  Buffer (virtual address, must be DMA-reachable).
 * @param dir   TRB_DIR_IN or TRB_DIR_OUT (used for the Data Stage TRB).
 */
static void
xhci_enqueue_td(transfer_ring_t *const tr, const int ep, const size_t mps,
		const int dalen, void *const data, const int dir)
{
	trb_t *trb = NULL;				/* cur TRB */
	u8 *cur_start = data;				/* cur data pointer */
	size_t length = dalen;				/* remaining bytes */
	size_t packets = (length + mps - 1) / mps;	/* remaining packets */
	size_t residue = 0;				/* residue from last TRB */
	size_t trb_count = 0;				/* TRBs added so far */

	while (length || !trb_count /* enqueue at least one */) {
		/* Clamp this TRB at the next 64KiB boundary after
		 * cur_start, per the xHCI data buffer rules. */
		const size_t cur_end = ((size_t)cur_start + 0x10000) & ~0xffff;
		size_t cur_length = cur_end - (size_t)cur_start;
		if (length < cur_length) {
			/* Last chunk: take the remainder. */
			cur_length = length;
			packets = 0;
			length = 0;
		} else if (!IS_ENABLED(CONFIG_LP_USB_XHCI_MTK_QUIRK)) {
			/* Standard TDS accounting: subtract this TRB's
			 * packets before filling in the TRB. */
			packets -= (residue + cur_length) / mps;
			residue = (residue + cur_length) % mps;
			length -= cur_length;
		}

		trb = tr->cur;
		xhci_clear_trb(trb, tr->pcs);
		trb->ptr_low = virt_to_phys(cur_start);
		TRB_SET(TL, trb, cur_length);
		TRB_SET(TDS, trb, MIN(TRB_MAX_TD_SIZE, packets));
		/* Chain all TRBs of the TD up to the Event Data TRB. */
		TRB_SET(CH, trb, 1);

		if (length && IS_ENABLED(CONFIG_LP_USB_XHCI_MTK_QUIRK)) {
			/*
			 * For MTK's xHCI controller, TDS defines a number of
			 * packets that remain to be transferred for a TD after
			 * processing all Max packets in all previous TRBs, that
			 * means don't include the current TRB's.
			 */
			packets -= (residue + cur_length) / mps;
			residue = (residue + cur_length) % mps;
			length -= cur_length;
		}

		/* Check for first, data stage TRB */
		if (!trb_count && ep == 1) {
			TRB_SET(DIR, trb, dir);
			TRB_SET(TT, trb, TRB_DATA_STAGE);
		} else {
			TRB_SET(TT, trb, TRB_NORMAL);
		}
		/*
		 * This is a workaround for Synopsys DWC3. If the ENT flag is
		 * not set for the Normal and Data Stage TRBs. We get Event TRB
		 * with length 0x20d from the controller when we enqueue a TRB
		 * for the IN endpoint with length 0x200.
		 */
		if (!length)
			TRB_SET(ENT, trb, 1);

		xhci_enqueue_trb(tr);

		cur_start += cur_length;
		++trb_count;
	}

	/* Terminate the TD with an Event Data TRB; its IOC bit is what
	 * generates the transfer completion event we wait for. */
	trb = tr->cur;
	xhci_clear_trb(trb, tr->pcs);
	trb->ptr_low = virt_to_phys(trb);	/* for easier debugging only */
	TRB_SET(TT, trb, TRB_EVENT_DATA);
	TRB_SET(IOC, trb, 1);

	xhci_enqueue_trb(tr);
}
618
/*
 * Perform a control transfer on the default endpoint (EP0): Setup
 * stage, optional Data stage, Status stage, then wait for one transfer
 * event per stage.
 *
 * @param dev    Target device.
 * @param dir    Direction of the data stage (IN/OUT).
 * @param drlen  Length of the device request — unused here; the Setup
 *               TRB always carries the 8-byte request immediately (IDT).
 * @param devreq The 8-byte USB device request, copied into the TRB.
 * @param dalen  Data stage length in bytes (0 for no data stage).
 * @param src    Data buffer; bounced through xhci->dma_buffer when not
 *               DMA-coherent.
 * @return Number of bytes transferred, or a negative error code.
 */
static int
xhci_control(usbdev_t *const dev, const direction_t dir,
	     const int drlen, void *const devreq,
	     const int dalen, unsigned char *const src)
{
	unsigned char *data = src;
	xhci_t *const xhci = XHCI_INST(dev->controller);
	epctx_t *const epctx = xhci->dev[dev->address].ctx.ep0;
	transfer_ring_t *const tr = xhci->dev[dev->address].transfer_rings[1];

	/* Worst-case TRB count: one per 64KiB boundary crossed; the ring
	 * must also hold setup + status + event data TRBs (the "- 4"). */
	const size_t off = (size_t)data & 0xffff;
	if ((off + dalen) > ((TRANSFER_RING_SIZE - 4) << 16)) {
		xhci_debug("Unsupported transfer size\n");
		return -1;
	}

	/* Reset endpoint if it's not running */
	const unsigned ep_state = EC_GET(STATE, epctx);
	if (ep_state > 1) {
		if (xhci_reset_endpoint(dev, NULL))
			return -1;
	}

	/* Bounce non-coherent buffers through the driver's DMA buffer. */
	if (dalen && !dma_coherent(src)) {
		data = xhci->dma_buffer;
		if (dalen > DMA_SIZE) {
			xhci_debug("Control transfer too large: %d\n", dalen);
			return -1;
		}
		if (dir == OUT)
			memcpy(data, src, dalen);
	}

	/* Fill and enqueue setup TRB; IDT means the 8 request bytes live
	 * in the TRB's pointer field itself rather than being pointed to. */
	trb_t *const setup = tr->cur;
	xhci_clear_trb(setup, tr->pcs);
	setup->ptr_low = ((u32 *)devreq)[0];
	setup->ptr_high = ((u32 *)devreq)[1];
	TRB_SET(TL, setup, 8);
	TRB_SET(TRT, setup, (dalen)
			? ((dir == OUT) ? TRB_TRT_OUT_DATA : TRB_TRT_IN_DATA)
			: TRB_TRT_NO_DATA);
	TRB_SET(TT, setup, TRB_SETUP_STAGE);
	TRB_SET(IDT, setup, 1);
	TRB_SET(IOC, setup, 1);
	xhci_enqueue_trb(tr);

	/* Fill and enqueue data TRBs (if any) */
	if (dalen) {
		const unsigned mps = EC_GET(MPS, epctx);
		const unsigned dt_dir = (dir == OUT) ? TRB_DIR_OUT : TRB_DIR_IN;
		xhci_enqueue_td(tr, 1, mps, dalen, data, dt_dir);
	}

	/* Fill status TRB; status stage runs opposite to the data stage
	 * direction. */
	trb_t *const status = tr->cur;
	xhci_clear_trb(status, tr->pcs);
	TRB_SET(DIR, status, (dir == OUT) ? TRB_DIR_IN : TRB_DIR_OUT);
	TRB_SET(TT, status, TRB_STATUS_STAGE);
	TRB_SET(IOC, status, 1);
	xhci_enqueue_trb(tr);

	/* Ring doorbell for EP0 */
	xhci_ring_doorbell(&dev->endpoints[0]);

	/* Wait for transfer events, one per stage; on timeout, stop the
	 * endpoint so the controller abandons the stuck TD. */
	int i, transferred = 0;
	const int n_stages = 2 + !!dalen;
	for (i = 0; i < n_stages; ++i) {
		const int ret = xhci_wait_for_transfer(xhci, dev->address, 1);
		transferred += ret;
		if (ret < 0) {
			if (ret == TIMEOUT) {
				xhci_debug("Stopping ID %d EP 1\n",
					   dev->address);
				xhci_cmd_stop_endpoint(xhci, dev->address, 1);
			}
			xhci_debug("Stage %d/%d failed: %d\n"
				   " trb ring: @%p\n"
				   " setup trb: @%p\n"
				   " status trb: @%p\n"
				   " ep state: %d -> %d\n"
				   " usbsts: 0x%08"PRIx32"\n",
				   i, n_stages, ret,
				   tr->ring, setup, status,
				   ep_state, EC_GET(STATE, epctx),
				   xhci->opreg->usbsts);
			return ret;
		}
	}

	/* Copy IN data back out of the bounce buffer, if one was used. */
	if (dir == IN && data != src)
		memcpy(src, data, transferred);
	return transferred;
}
714
/*
 * Run a bulk transfer on the given endpoint and wait for its completion.
 *
 * finalize == 1: if data is of packet aligned size, add a zero length packet
 * (ignored here; see the comment in the body).
 *
 * On success, returns the number of bytes actually transferred; on failure,
 * returns a negative error code (-1 or the code from the transfer event,
 * e.g. TIMEOUT).
 */
static int
xhci_bulk(endpoint_t *const ep, const int size, u8 *const src,
	  const int finalize)
{
	/* finalize: Hopefully the xHCI controller always does this.
	   We have no control over the packets. */

	u8 *data = src;
	xhci_t *const xhci = XHCI_INST(ep->dev->controller);
	const int slot_id = ep->dev->address;
	const int ep_id = xhci_ep_id(ep);
	epctx_t *const epctx = xhci->dev[slot_id].ctx.ep[ep_id];
	transfer_ring_t *const tr = xhci->dev[slot_id].transfer_rings[ep_id];

	/* The transfer plus the buffer's misalignment (low 16 address bits)
	   must fit into the ring's usable TRBs at 64KiB (1 << 16) each;
	   two TRBs are kept in reserve. */
	const size_t off = (size_t)data & 0xffff;
	if ((off + size) > ((TRANSFER_RING_SIZE - 2) << 16)) {
		xhci_debug("Unsupported transfer size\n");
		return -1;
	}

	/* Bounce through the controller's DMA buffer if the caller's buffer
	   is not DMA-coherent; OUT data is copied in before the transfer. */
	if (!dma_coherent(src)) {
		data = xhci->dma_buffer;
		if (size > DMA_SIZE) {
			xhci_debug("Bulk transfer too large: %d\n", size);
			return -1;
		}
		if (ep->direction == OUT)
			memcpy(data, src, size);
	}

	/* Reset endpoint if it's not running (state > 1, i.e. neither
	   disabled nor running) */
	const unsigned ep_state = EC_GET(STATE, epctx);
	if (ep_state > 1) {
		if (xhci_reset_endpoint(ep->dev, ep))
			return -1;
	}

	/* Enqueue transfer and ring doorbell */
	const unsigned mps = EC_GET(MPS, epctx);
	const unsigned dir = (ep->direction == OUT) ? TRB_DIR_OUT : TRB_DIR_IN;
	xhci_enqueue_td(tr, ep_id, mps, size, data, dir);
	xhci_ring_doorbell(ep);

	/* Wait for transfer event */
	const int ret = xhci_wait_for_transfer(xhci, ep->dev->address, ep_id);
	if (ret < 0) {
		/* On timeout, stop the endpoint so the controller won't
		   keep writing into our buffers after we return. */
		if (ret == TIMEOUT) {
			xhci_debug("Stopping ID %d EP %d\n",
				   ep->dev->address, ep_id);
			xhci_cmd_stop_endpoint(xhci, ep->dev->address, ep_id);
		}
		xhci_debug("Bulk transfer failed: %d\n"
			   "  ep state: %d -> %d\n"
			   "  usbsts:   0x%08"PRIx32"\n",
			   ret, ep_state,
			   EC_GET(STATE, epctx),
			   xhci->opreg->usbsts);
		return ret;
	}

	/* If an IN transfer went through the bounce buffer, copy the
	   received bytes back to the caller's buffer. */
	if (ep->direction == IN && data != src)
		memcpy(src, data, ret);
	return ret;
}
780
781static trb_t *
782xhci_next_trb(trb_t *cur, int *const pcs)
783{
784 ++cur;
785 while (TRB_GET(TT, cur) == TRB_LINK) {
786 if (pcs && TRB_GET(TC, cur))
787 *pcs ^= 1;
788 cur = phys_to_virt(cur->ptr_low);
789 }
790 return cur;
Patrick Georgi6615ef32010-08-13 09:18:58 +0000791}
792
/*
 * Create and hook-up an intr queue into the device schedule.
 *
 * Prepares `reqcount` Normal TRBs of `reqsize` bytes each on the
 * endpoint's transfer ring, enqueues all but the last one and rings the
 * doorbell. Returns an opaque queue handle (intrq_t *) for use with
 * xhci_poll_intr_queue()/xhci_destroy_intr_queue(), or NULL on error.
 */
static void *
xhci_create_intr_queue(endpoint_t *const ep,
		       const int reqsize, const int reqcount,
		       const int reqtiming)
{
	/* reqtiming: We ignore it and use the interval from the
	   endpoint descriptor configured earlier. */

	xhci_t *const xhci = XHCI_INST(ep->dev->controller);
	const int slot_id = ep->dev->address;
	const int ep_id = xhci_ep_id(ep);
	transfer_ring_t *const tr = xhci->dev[slot_id].transfer_rings[ep_id];

	/* Validate limits: requests must fit on the ring (two TRBs are
	   reserved), each one in a single TRB of at most 64KiB. */
	if (reqcount > (TRANSFER_RING_SIZE - 2)) {
		xhci_debug("reqcount is too high, at most %d supported\n",
			   TRANSFER_RING_SIZE - 2);
		return NULL;
	}
	if (reqsize > 0x10000) {
		xhci_debug("reqsize is too large, at most 64KiB supported\n");
		return NULL;
	}
	if (xhci->dev[slot_id].interrupt_queues[ep_id]) {
		xhci_debug("Only one interrupt queue per endpoint supported\n");
		return NULL;
	}

	/* Allocate intrq structure and reqdata chunks */

	intrq_t *const intrq = malloc(sizeof(*intrq));
	if (!intrq) {
		xhci_debug("Out of memory\n");
		return NULL;
	}

	/* Fill the free TRB slots with one Normal TRB per request buffer.
	   ISP and IOC are set so each (possibly short) completion raises a
	   transfer event. Slots whose cycle bit already equals the producer
	   cycle state are owned by the controller, i.e. not free. */
	int i;
	int pcs = tr->pcs;
	trb_t *cur = tr->cur;
	for (i = 0; i < reqcount; ++i) {
		if (TRB_GET(C, cur) == pcs) {
			xhci_debug("Not enough empty TRBs\n");
			goto _free_return;
		}
		void *const reqdata = xhci_align(1, reqsize);
		if (!reqdata) {
			xhci_debug("Out of memory\n");
			goto _free_return;
		}
		xhci_clear_trb(cur, pcs);
		cur->ptr_low = virt_to_phys(reqdata);
		cur->ptr_high = 0;
		TRB_SET(TL, cur, reqsize);
		TRB_SET(TT, cur, TRB_NORMAL);
		TRB_SET(ISP, cur, 1);
		TRB_SET(IOC, cur, 1);

		cur = xhci_next_trb(cur, &pcs);
	}

	intrq->size = reqsize;
	intrq->count = reqcount;
	intrq->next = tr->cur;	/* oldest outstanding request */
	intrq->ready = NULL;	/* no completions seen yet */
	intrq->ep = ep;
	xhci->dev[slot_id].interrupt_queues[ep_id] = intrq;

	/* Now enqueue all the prepared TRBs but the last
	   and ring the doorbell. */
	for (i = 0; i < (reqcount - 1); ++i)
		xhci_enqueue_trb(tr);
	xhci_ring_doorbell(ep);

	return intrq;

_free_return:
	/* Error path: free the i request buffers allocated so far,
	   walking the ring from where we started, then the queue itself. */
	cur = tr->cur;
	for (--i; i >= 0; --i) {
		free(phys_to_virt(cur->ptr_low));
		cur = xhci_next_trb(cur, NULL);
	}
	free(intrq);
	return NULL;
}
877
878/* remove queue from device schedule, dropping all data that came in */
879static void
Nico Huber90292652013-06-13 14:37:15 +0200880xhci_destroy_intr_queue(endpoint_t *const ep, void *const q)
Patrick Georgi6615ef32010-08-13 09:18:58 +0000881{
Nico Huber90292652013-06-13 14:37:15 +0200882 xhci_t *const xhci = XHCI_INST(ep->dev->controller);
Julius Werner1f864342013-09-03 17:15:31 -0700883 const int slot_id = ep->dev->address;
Nico Huber90292652013-06-13 14:37:15 +0200884 const int ep_id = xhci_ep_id(ep);
Julius Werner1f864342013-09-03 17:15:31 -0700885 transfer_ring_t *const tr = xhci->dev[slot_id].transfer_rings[ep_id];
Nico Huber90292652013-06-13 14:37:15 +0200886
887 intrq_t *const intrq = (intrq_t *)q;
888
889 /* Make sure the endpoint is stopped */
Julius Werner1f864342013-09-03 17:15:31 -0700890 if (EC_GET(STATE, xhci->dev[slot_id].ctx.ep[ep_id]) == 1) {
891 const int cc = xhci_cmd_stop_endpoint(xhci, slot_id, ep_id);
Nico Huber90292652013-06-13 14:37:15 +0200892 if (cc != CC_SUCCESS)
893 xhci_debug("Warning: Failed to stop endpoint\n");
894 }
895
896 /* Process all remaining transfer events */
897 xhci_handle_events(xhci);
898
899 /* Free all pending transfers and the interrupt queue structure */
900 int i;
901 for (i = 0; i < intrq->count; ++i) {
902 free(phys_to_virt(intrq->next->ptr_low));
903 intrq->next = xhci_next_trb(intrq->next, NULL);
904 }
Julius Werner1f864342013-09-03 17:15:31 -0700905 xhci->dev[slot_id].interrupt_queues[ep_id] = NULL;
Nico Huber90292652013-06-13 14:37:15 +0200906 free((void *)intrq);
907
908 /* Reset the controller's dequeue pointer and reinitialize the ring */
Julius Werner1f864342013-09-03 17:15:31 -0700909 xhci_cmd_set_tr_dq(xhci, slot_id, ep_id, tr->ring, 1);
Nico Huber90292652013-06-13 14:37:15 +0200910 xhci_init_cycle_ring(tr, TRANSFER_RING_SIZE);
Patrick Georgi6615ef32010-08-13 09:18:58 +0000911}
912
/* read one intr-packet from queue, if available. extend the queue for new input.
   return NULL if nothing new available.
   Recommended use: while (data=poll_intr_queue(q)) process(data);

   Returns a pointer to the request buffer holding the received data (owned
   by the queue, valid until the next poll/destroy call), or NULL. */
static u8 *
xhci_poll_intr_queue(void *const q)
{
	if (!q)
		return NULL;

	intrq_t *const intrq = (intrq_t *)q;
	endpoint_t *const ep = intrq->ep;
	xhci_t *const xhci = XHCI_INST(ep->dev->controller);

	/* TODO: Reset interrupt queue if it gets halted? */

	/* Pick up new transfer events; this may advance intrq->ready. */
	xhci_handle_events(xhci);

	/* Loop until a completion with actual data is found (zero-length
	   completions are recycled and skipped) or none are ready. */
	u8 *reqdata = NULL;
	while (!reqdata && intrq->ready) {
		const int ep_id = xhci_ep_id(ep);
		transfer_ring_t *const tr =
			xhci->dev[ep->dev->address].transfer_rings[ep_id];

		/* Fetch the request's buffer */
		reqdata = phys_to_virt(intrq->next->ptr_low);

		/* Enqueue the last (spare) TRB and ring doorbell */
		xhci_enqueue_trb(tr);
		xhci_ring_doorbell(ep);

		/* Reuse the current buffer for the next spare TRB */
		xhci_clear_trb(tr->cur, tr->pcs);
		tr->cur->ptr_low = virt_to_phys(reqdata);
		tr->cur->ptr_high = 0;
		TRB_SET(TL, tr->cur, intrq->size);
		TRB_SET(TT, tr->cur, TRB_NORMAL);
		TRB_SET(ISP, tr->cur, 1);
		TRB_SET(IOC, tr->cur, 1);

		/* Check if anything was transferred; TL of the completed
		   TRB holds the received length at this point. */
		const size_t read = TRB_GET(TL, intrq->next);
		if (!read)
			reqdata = NULL;
		else if (read < intrq->size)
			/* At least zero it, poll interface is rather limited */
			memset(reqdata + read, 0x00, intrq->size - read);

		/* Advance the interrupt queue */
		if (intrq->ready == intrq->next)
			/* This was last TRB being ready */
			intrq->ready = NULL;
		intrq->next = xhci_next_trb(intrq->next, NULL);
	}

	return reqdata;
}