/*
 * This file is part of the libpayload project.
 *
 * Copyright (C) 2010 Patrick Georgi
 * Copyright (C) 2013 secunet Security Networks AG
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

//#define XHCI_SPEW_DEBUG

#include <inttypes.h>
#include <arch/virtual.h>
#include "xhci_private.h"
#include "xhci.h"

static void xhci_start (hci_t *controller);
static void xhci_stop (hci_t *controller);
static void xhci_reset (hci_t *controller);
static void xhci_reinit (hci_t *controller);
static void xhci_shutdown (hci_t *controller);
static int xhci_bulk (endpoint_t *ep, int size, u8 *data, int finalize);
static int xhci_control (usbdev_t *dev, direction_t dir, int drlen, void *devreq,
			 int dalen, u8 *data);
static void* xhci_create_intr_queue (endpoint_t *ep, int reqsize, int reqcount, int reqtiming);
static void xhci_destroy_intr_queue (endpoint_t *ep, void *queue);
static u8* xhci_poll_intr_queue (void *queue);

/*
 * Some structures must not cross page boundaries. To ensure this,
 * we align them to their size (or the next greater power of 2).
 */
void *
xhci_align(const size_t min_align, const size_t size)
{
	size_t align;
	if (!(size & (size - 1)))
		align = size; /* It's a power of 2 */
	else
		align = 1 << ((sizeof(unsigned) << 3) - __builtin_clz(size));
	if (align < min_align)
		align = min_align;
	xhci_spew("Aligning %zu to %zu\n", size, align);
	return memalign(align, size);
}

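/* Clear all fields of a TRB and set its cycle bit to the inverse of the
   given producer cycle state, so the controller won't consider it enqueued. */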
void
xhci_clear_trb(trb_t *const trb, const int pcs)
{
	trb->ptr_low = 0;
	trb->ptr_high = 0;
	trb->status = 0;
	trb->control = !pcs;
}

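/* Zero a transfer ring and make its last TRB a link TRB that points back to
   the ring's start with the toggle-cycle bit set (we use a single segment),
   then reset the producer cycle state and the enqueue pointer. */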
void
xhci_init_cycle_ring(transfer_ring_t *const tr, const size_t ring_size)
{
	memset((void *)tr->ring, 0, ring_size * sizeof(*tr->ring));
	TRB_SET(TT, &tr->ring[ring_size - 1], TRB_LINK);
	TRB_SET(TC, &tr->ring[ring_size - 1], 1);
	/* only one segment that points to itself */
	tr->ring[ring_size - 1].ptr_low = virt_to_phys(tr->ring);

	tr->pcs = 1;
	tr->cur = tr->ring;
}

/* On Panther Point: switch ports shared with EHCI to xHCI */
static void
xhci_switch_ppt_ports(pcidev_t addr)
{
	if (pci_read_config32(addr, 0x00) == 0x1e318086) {
		u32 reg32 = pci_read_config32(addr, 0xdc) & 0xf;
		xhci_debug("Ports capable of SuperSpeed: 0x%"PRIx32"\n", reg32);

		/* For now, do not enable SuperSpeed on any ports */
		//pci_write_config32(addr, 0xd8, reg32);
		pci_write_config32(addr, 0xd8, 0x00000000);
		reg32 = pci_read_config32(addr, 0xd8) & 0xf;
		xhci_debug("Configured for SuperSpeed: 0x%"PRIx32"\n", reg32);

		reg32 = pci_read_config32(addr, 0xd4) & 0xf;
		xhci_debug("Trying to switch over: 0x%"PRIx32"\n", reg32);

		pci_write_config32(addr, 0xd0, reg32);
		reg32 = pci_read_config32(addr, 0xd0) & 0xf;
		xhci_debug("Actually switched over: 0x%"PRIx32"\n", reg32);
	}
}

/* On Panther Point: switch all ports back to EHCI */
static void
xhci_switchback_ppt_ports(pcidev_t addr)
{
	if (pci_read_config32(addr, 0x00) == 0x1e318086) {
		u32 reg32 = pci_read_config32(addr, 0xd0) & 0xf;
		xhci_debug("Switching ports back: 0x%"PRIx32"\n", reg32);
		pci_write_config32(addr, 0xd0, 0x00000000);
		reg32 = pci_read_config32(addr, 0xd0) & 0xf;
		xhci_debug("Still switched to xHCI: 0x%"PRIx32"\n", reg32);
	}
}

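/* Poll a register until (*reg & mask) == wait_for or timeout_us microseconds
   have passed. Returns the remaining time, i.e. 0 if the wait timed out. */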
static long
xhci_handshake(volatile u32 *const reg, u32 mask, u32 wait_for, long timeout_us)
{
	while ((*reg & mask) != wait_for && --timeout_us > 0)
		udelay(1);
	return timeout_us;
}

static int
xhci_wait_ready(xhci_t *const xhci)
{
	xhci_debug("Waiting for controller to be ready... ");
	if (!xhci_handshake(&xhci->opreg->usbsts, USBSTS_CNR, 0, 100000L)) {
		usb_debug("timeout!\n");
		return -1;
	}
	usb_debug("ok.\n");
	return 0;
}

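/*
 * Bring up an xHCI controller at the given MMIO base address: allocate the
 * controller instance, command and event rings, DCBAA and scratchpad buffers,
 * then reset and reinitialize the hardware and initialize the root hub.
 * Returns NULL if anything fails.
 */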
hci_t *
xhci_init (const void *bar)
{
	int i;

	/* First, allocate and initialize static controller structures */

	hci_t *const controller = new_controller();
	if (!controller) {
		xhci_debug("Could not create USB controller instance\n");
		return controller;
	}

	controller->type = XHCI;
	controller->start = xhci_start;
	controller->stop = xhci_stop;
	controller->reset = xhci_reset;
	controller->init = xhci_reinit;
	controller->shutdown = xhci_shutdown;
	controller->bulk = xhci_bulk;
	controller->control = xhci_control;
	controller->set_address = xhci_set_address;
	controller->finish_device_config = xhci_finish_device_config;
	controller->destroy_device = xhci_destroy_dev;
	controller->create_intr_queue = xhci_create_intr_queue;
	controller->destroy_intr_queue = xhci_destroy_intr_queue;
	controller->poll_intr_queue = xhci_poll_intr_queue;
	for (i = 0; i < 128; ++i) {
		controller->devices[i] = NULL;
	}

	controller->instance = malloc(sizeof(xhci_t));
	if (!controller->instance) {
		xhci_debug("Out of memory creating xHCI controller instance\n");
		goto _free_controller;
	}
	xhci_t *const xhci = (xhci_t *)controller->instance;
	memset(xhci, 0x00, sizeof(*xhci));

	init_device_entry(controller, 0);
	xhci->roothub = controller->devices[0];
	xhci->cr.ring = xhci_align(64, COMMAND_RING_SIZE * sizeof(trb_t));
	xhci->er.ring = xhci_align(64, EVENT_RING_SIZE * sizeof(trb_t));
	xhci->ev_ring_table = xhci_align(64, sizeof(erst_entry_t));
	if (!xhci->roothub || !xhci->cr.ring ||
	    !xhci->er.ring || !xhci->ev_ring_table) {
		xhci_debug("Out of memory\n");
		goto _free_xhci;
	}

	controller->reg_base = (u32)(unsigned long)bar;

	xhci->capreg = phys_to_virt(controller->reg_base);
	xhci->opreg = ((void *)xhci->capreg) + xhci->capreg->caplength;
	xhci->hcrreg = ((void *)xhci->capreg) + xhci->capreg->rtsoff;
	xhci->dbreg = ((void *)xhci->capreg) + xhci->capreg->dboff;
	xhci_debug("regbase: 0x%"PRIx32"\n", controller->reg_base);
	xhci_debug("caplen:  0x%"PRIx32"\n", xhci->capreg->caplength);
	xhci_debug("rtsoff:  0x%"PRIx32"\n", xhci->capreg->rtsoff);
	xhci_debug("dboff:   0x%"PRIx32"\n", xhci->capreg->dboff);

	xhci_debug("hciversion: %"PRIx8".%"PRIx8"\n",
		   xhci->capreg->hciver_hi, xhci->capreg->hciver_lo);
	if ((xhci->capreg->hciversion < 0x96) ||
	    (xhci->capreg->hciversion > 0x100)) {
		xhci_debug("Unsupported xHCI version\n");
		goto _free_xhci;
	}

	xhci_debug("context size: %dB\n", xhci->capreg->csz ? 64 : 32);
	if (xhci->capreg->csz) {
		xhci_debug("Only 32B contexts are supported\n");
		goto _free_xhci;
	}

	xhci_debug("maxslots: 0x%02lx\n", xhci->capreg->MaxSlots);
	xhci_debug("maxports: 0x%02lx\n", xhci->capreg->MaxPorts);
	const unsigned pagesize = xhci->opreg->pagesize << 12;
	xhci_debug("pagesize: 0x%04x\n", pagesize);

	/*
	 * We haven't touched the hardware yet. So we allocate all dynamic
	 * structures at first and can still chicken out easily if we run out
	 * of memory.
	 */
	const size_t dcbaa_size = (xhci->capreg->MaxSlots + 1) * sizeof(u64);
	xhci->dcbaa = xhci_align(64, dcbaa_size);
	if (!xhci->dcbaa) {
		xhci_debug("Out of memory\n");
		goto _free_xhci;
	}
	memset((void*)xhci->dcbaa, 0x00, dcbaa_size);

	/*
	 * Let dcbaa[0] point to another array of pointers, sp_ptrs.
	 * The pointers therein point to scratchpad buffers (pages).
	 */
	const size_t max_sp_bufs = xhci->capreg->Max_Scratchpad_Bufs;
	xhci_debug("max scratchpad bufs: 0x%zx\n", max_sp_bufs);
	if (max_sp_bufs) {
		const size_t sp_ptrs_size = max_sp_bufs * sizeof(u64);
		xhci->sp_ptrs = xhci_align(64, sp_ptrs_size);
		if (!xhci->sp_ptrs) {
			xhci_debug("Out of memory\n");
			goto _free_xhci_structs;
		}
		memset(xhci->sp_ptrs, 0x00, sp_ptrs_size);
		for (i = 0; i < max_sp_bufs; ++i) {
			/* Could use mmap() here if we had it.
			   Maybe there is another way. */
			void *const page = memalign(pagesize, pagesize);
			if (!page) {
				xhci_debug("Out of memory\n");
				goto _free_xhci_structs;
			}
			xhci->sp_ptrs[i] = virt_to_phys(page);
		}
		xhci->dcbaa[0] = virt_to_phys(xhci->sp_ptrs);
	}

	/* Now start working on the hardware */
	if (xhci_wait_ready(xhci))
		goto _free_xhci_structs;

	/* TODO: Check if BIOS claims ownership (and hand over) */

	xhci_reset(controller);
	xhci_reinit(controller);

	xhci->roothub->controller = controller;
	xhci->roothub->init = xhci_rh_init;
	xhci->roothub->init(xhci->roothub);

	return controller;

_free_xhci_structs:
	if (xhci->sp_ptrs) {
		for (i = 0; i < max_sp_bufs; ++i) {
			if (xhci->sp_ptrs[i])
				free(phys_to_virt(xhci->sp_ptrs[i]));
		}
	}
	free(xhci->sp_ptrs);
	free(xhci->dcbaa);
_free_xhci:
	free((void *)xhci->ev_ring_table);
	free((void *)xhci->er.ring);
	free((void *)xhci->cr.ring);
	free(xhci->roothub);
	free(xhci);
_free_controller:
	detach_controller(controller);
	free(controller);
	return NULL;
}

#ifdef CONFIG_USB_PCI
hci_t *
xhci_pci_init (pcidev_t addr)
{
	u32 reg_addr;
	hci_t *controller;

	reg_addr = (u32)phys_to_virt(pci_read_config32 (addr, 0x10) & ~0xf);
	//controller->reg_base = pci_read_config32 (addr, 0x14) & ~0xf;
	if (pci_read_config32 (addr, 0x14) > 0) {
		fatal("We don't do 64bit addressing.\n");
	}

	controller = xhci_init((void *)(unsigned long)reg_addr);
	controller->bus_address = addr;

	xhci_switch_ppt_ports(addr);

	return controller;
}
#endif

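/* Halt the controller, then request a Host Controller Reset and wait up to
   1s for the HCRST bit to clear. */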
static void
xhci_reset(hci_t *const controller)
{
	xhci_t *const xhci = XHCI_INST(controller);

	xhci_stop(controller);

	xhci->opreg->usbcmd |= USBCMD_HCRST;
	xhci_debug("Resetting controller... ");
	if (!xhci_handshake(&xhci->opreg->usbcmd, USBCMD_HCRST, 0, 1000000L))
		usb_debug("timeout!\n");
	else
		usb_debug("ok.\n");
}

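/*
 * Program the halted controller from scratch: enable all available device
 * slots, set the DCBAA pointer, initialize the command ring, the event ring
 * and the primary interrupter, then start the controller. With USB_DEBUG,
 * a few No Op commands are run to verify that the command ring works.
 */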
static void
xhci_reinit (hci_t *controller)
{
	xhci_t *const xhci = XHCI_INST(controller);

	if (xhci_wait_ready(xhci))
		return;

	/* Enable all available slots */
	xhci->opreg->config = xhci->capreg->MaxSlots & CONFIG_MASK_MaxSlotsEn;
	xhci->max_slots_en = xhci->capreg->MaxSlots & CONFIG_MASK_MaxSlotsEn;

	/* Set DCBAA */
	xhci->opreg->dcbaap_lo = virt_to_phys(xhci->dcbaa);
	xhci->opreg->dcbaap_hi = 0;

	/* Initialize command ring */
	xhci_init_cycle_ring(&xhci->cr, COMMAND_RING_SIZE);
	xhci_debug("command ring @%p (0x%08x)\n",
		   xhci->cr.ring, virt_to_phys(xhci->cr.ring));
	xhci->opreg->crcr_lo = virt_to_phys(xhci->cr.ring) | CRCR_RCS;
	xhci->opreg->crcr_hi = 0;

	/* Make sure interrupts are disabled */
	xhci->opreg->usbcmd &= ~USBCMD_INTE;

	/* Initialize event ring */
	xhci_reset_event_ring(&xhci->er);
	xhci_debug("event ring @%p (0x%08x)\n",
		   xhci->er.ring, virt_to_phys(xhci->er.ring));
	xhci_debug("ERST Max: 0x%lx -> 0x%lx entries\n",
		   xhci->capreg->ERST_Max, 1 << xhci->capreg->ERST_Max);
	memset((void*)xhci->ev_ring_table, 0x00, sizeof(erst_entry_t));
	xhci->ev_ring_table[0].seg_base_lo = virt_to_phys(xhci->er.ring);
	xhci->ev_ring_table[0].seg_base_hi = 0;
	xhci->ev_ring_table[0].seg_size = EVENT_RING_SIZE;

	/* Initialize primary interrupter */
	xhci->hcrreg->intrrs[0].erstsz = 1;
	xhci_update_event_dq(xhci);
	/* erstba has to be written last */
	xhci->hcrreg->intrrs[0].erstba_lo = virt_to_phys(xhci->ev_ring_table);
	xhci->hcrreg->intrrs[0].erstba_hi = 0;

	xhci_start(controller);

#ifdef USB_DEBUG
	int i;
	for (i = 0; i < 32; ++i) {
		xhci_debug("NOOP run #%d\n", i);
		trb_t *const cmd = xhci_next_command_trb(xhci);
		TRB_SET(TT, cmd, TRB_CMD_NOOP);

		xhci_post_command(xhci);

		/* Wait for result in event ring */
		xhci_wait_for_command_done(xhci, cmd, 1);
		xhci_debug("Command ring is %srunning\n",
			   (xhci->opreg->crcr_lo & CRCR_CRR) ? "" : "not ");
	}
#endif
}

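/* Detach the controller and its device hierarchy, halt the hardware, switch
   Panther Point ports back to EHCI and free all controller structures. */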
static void
xhci_shutdown(hci_t *const controller)
{
	int i;

	if (controller == 0)
		return;
	xhci_t *const xhci = XHCI_INST(controller);

	detach_controller(controller);

	/* Detach device hierarchy (starting at root hub) */
	usb_detach_device(controller, 0);

	xhci_stop(controller);

	xhci_switchback_ppt_ports(controller->bus_address);

	if (xhci->sp_ptrs) {
		const size_t max_sp_bufs = xhci->capreg->Max_Scratchpad_Bufs;
		for (i = 0; i < max_sp_bufs; ++i) {
			if (xhci->sp_ptrs[i])
				free(phys_to_virt(xhci->sp_ptrs[i]));
		}
	}
	free(xhci->sp_ptrs);
	free(xhci->dcbaa);
	free((void *)xhci->ev_ring_table);
	free((void *)xhci->er.ring);
	free((void *)xhci->cr.ring);
	free(xhci);
	free(controller);
}

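/* Set the Run/Stop bit and wait for the controller to leave the halted state. */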
static void
xhci_start (hci_t *controller)
{
	xhci_t *const xhci = XHCI_INST(controller);

	xhci->opreg->usbcmd |= USBCMD_RS;
	if (!xhci_handshake(&xhci->opreg->usbsts, USBSTS_HCH, 0, 1000000L))
		xhci_debug("Controller didn't start within 1s\n");
}

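/* Clear the Run/Stop bit and wait for the controller to halt. */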
static void
xhci_stop (hci_t *controller)
{
	xhci_t *const xhci = XHCI_INST(controller);

	xhci->opreg->usbcmd &= ~USBCMD_RS;
	if (!xhci_handshake(&xhci->opreg->usbsts,
			    USBSTS_HCH, USBSTS_HCH, 1000000L))
		xhci_debug("Controller didn't halt within 1s\n");
}

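/*
 * Recover an endpoint: run a Reset Endpoint command if the endpoint is
 * halted, optionally clear the stall condition on the device, and reset the
 * transfer ring's dequeue pointer if the endpoint state allows it.
 */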
static int
xhci_reset_endpoint(usbdev_t *const dev, endpoint_t *const ep,
		    const int clear_halt)
{
	xhci_t *const xhci = XHCI_INST(dev->controller);
	devinfo_t *const di = DEVINFO_FROM_XHCI(xhci, dev->address);
	const int slot_id = dev->address;
	const int ep_id = ep ? xhci_ep_id(ep) : 1;

	xhci_debug("Resetting ID %d EP %d (ep state: %d)\n",
		   slot_id, ep_id, EC_GET(STATE, di->devctx.eps[ep_id]));

	/* Run Reset Endpoint Command if the EP is in Halted state */
	if (EC_GET(STATE, di->devctx.eps[ep_id]) == 2) {
		const int cc = xhci_cmd_reset_endpoint(xhci, slot_id, ep_id);
		if (cc != CC_SUCCESS) {
			xhci_debug("Reset Endpoint Command failed: %d\n", cc);
			return 1;
		}
	}

	/* Clear TT buffer for bulk and control endpoints behind a TT */
	const int hub = dev->hub;
	if (hub && dev->speed < HIGH_SPEED &&
	    dev->controller->devices[hub]->speed == HIGH_SPEED)
		/* TODO */;

	/* Try clearing the device's halt condition on non-control endpoints */
	if (clear_halt && ep)
		clear_stall(ep);

	/* Reset transfer ring if the endpoint is in the right state */
	const unsigned ep_state = EC_GET(STATE, di->devctx.eps[ep_id]);
	if (ep_state == 3 || ep_state == 4) {
		transfer_ring_t *const tr = di->transfer_rings[ep_id];
		const int cc = xhci_cmd_set_tr_dq(xhci, slot_id, ep_id,
						  tr->ring, 1);
		if (cc != CC_SUCCESS) {
			xhci_debug("Set TR Dequeue Command failed: %d\n", cc);
			return 1;
		}
		xhci_init_cycle_ring(tr, TRANSFER_RING_SIZE);
	}

	xhci_debug("Finished resetting ID %d EP %d (ep state: %d)\n",
		   slot_id, ep_id, EC_GET(STATE, di->devctx.eps[ep_id]));

	return 0;
}

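/* Hand the TRB at the enqueue pointer to the controller by setting its cycle
   bit, then advance the pointer, carrying the chain bit over link TRBs and
   toggling the producer cycle state at a toggle-cycle link. */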
static void
xhci_enqueue_trb(transfer_ring_t *const tr)
{
	const int chain = TRB_GET(CH, tr->cur);
	TRB_SET(C, tr->cur, tr->pcs);
	++tr->cur;

	while (TRB_GET(TT, tr->cur) == TRB_LINK) {
		xhci_spew("Handling LINK pointer\n");
		const int tc = TRB_GET(TC, tr->cur);
		TRB_SET(CH, tr->cur, chain);
		TRB_SET(C, tr->cur, tr->pcs);
		tr->cur = phys_to_virt(tr->cur->ptr_low);
		if (tc)
			tr->pcs ^= 1;
	}
}

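/*
 * Enqueue a single transfer descriptor: split the buffer into TRBs at 64KiB
 * boundaries, keep the TD-size (packets remaining) field up to date, chain
 * all but the last TRB and request an interrupt on completion of the last
 * one. For the control endpoint (ep == 1) the first TRB is a Data Stage TRB.
 */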
static void
xhci_enqueue_td(transfer_ring_t *const tr, const int ep, const size_t mps,
		const int dalen, void *const data, const int dir)
{
	trb_t *trb = NULL;				/* cur TRB */
	u8 *cur_start = data;				/* cur data pointer */
	size_t length = dalen;				/* remaining bytes */
	size_t packets = (length + mps - 1) / mps;	/* remaining packets */
	size_t residue = 0;				/* residue from last TRB */
	size_t trb_count = 0;				/* TRBs added so far */

	while (length || !trb_count /* enqueue at least one */) {
		const size_t cur_end = ((size_t)cur_start + 0x10000) & ~0xffff;
		size_t cur_length = cur_end - (size_t)cur_start;
		if (length < cur_length) {
			cur_length = length;
			packets = 0;
			length = 0;
		} else {
			packets -= (residue + cur_length) / mps;
			residue = (residue + cur_length) % mps;
			length -= cur_length;
		}

		trb = tr->cur;
		xhci_clear_trb(trb, tr->pcs);
		trb->ptr_low = virt_to_phys(cur_start);
		TRB_SET(TL, trb, cur_length);
		TRB_SET(TDS, trb, packets);

		/* Check for first, data stage TRB */
		if (!trb_count && ep == 1) {
			TRB_SET(DIR, trb, dir);
			TRB_SET(TT, trb, TRB_DATA_STAGE);
		} else {
			TRB_SET(TT, trb, TRB_NORMAL);
		}

		/* Check for last TRB */
		if (!length)
			TRB_SET(IOC, trb, 1);
		else
			TRB_SET(CH, trb, 1);

		xhci_enqueue_trb(tr);

		cur_start += cur_length;
		++trb_count;
	}
}

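/*
 * Run a control transfer: enqueue the setup stage, optional data stage and
 * status stage TRBs on the default endpoint's transfer ring, ring the
 * doorbell and wait for one transfer event per stage. Returns 0 on success.
 */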
static int
xhci_control(usbdev_t *const dev, const direction_t dir,
	     const int drlen, void *const devreq,
	     const int dalen, unsigned char *const data)
{
	xhci_t *const xhci = XHCI_INST(dev->controller);
	devinfo_t *const di = DEVINFO_FROM_XHCI(xhci, dev->address);
	transfer_ring_t *const tr = di->transfer_rings[1];

	const size_t off = (size_t)data & 0xffff;
	if ((off + dalen) > ((TRANSFER_RING_SIZE - 3) << 16)) {
		xhci_debug("Unsupported transfer size\n");
		return 1;
	}

	/* Reset endpoint if it's halted */
	const unsigned ep_state = EC_GET(STATE, di->devctx.ep0);
	if (ep_state == 2 || ep_state == 4) {
		if (xhci_reset_endpoint(dev, NULL, 0))
			return 1;
	}

	/* Fill and enqueue setup TRB */
	trb_t *const setup = tr->cur;
	xhci_clear_trb(setup, tr->pcs);
	setup->ptr_low = ((u32 *)devreq)[0];
	setup->ptr_high = ((u32 *)devreq)[1];
	TRB_SET(TL, setup, 8);
	TRB_SET(TRT, setup, (dalen)
			? ((dir == OUT) ? TRB_TRT_OUT_DATA : TRB_TRT_IN_DATA)
			: TRB_TRT_NO_DATA);
	TRB_SET(TT, setup, TRB_SETUP_STAGE);
	TRB_SET(IDT, setup, 1);
	TRB_SET(IOC, setup, 1);
	xhci_enqueue_trb(tr);

	/* Fill and enqueue data TRBs (if any) */
	if (dalen) {
		const unsigned mps = EC_GET(MPS, di->devctx.ep0);
		const unsigned dt_dir = (dir == OUT) ? TRB_DIR_OUT : TRB_DIR_IN;
		xhci_enqueue_td(tr, 1, mps, dalen, data, dt_dir);
	}

	/* Fill status TRB */
	trb_t *const status = tr->cur;
	xhci_clear_trb(status, tr->pcs);
	TRB_SET(DIR, status, (dir == OUT) ? TRB_DIR_IN : TRB_DIR_OUT);
	TRB_SET(TT, status, TRB_STATUS_STAGE);
	TRB_SET(IOC, status, 1);
	xhci_enqueue_trb(tr);

	/* Ring doorbell for EP0 */
	xhci->dbreg[dev->address] = 1;

	/* Wait for transfer events */
	int i;
	const int n_stages = 2 + !!dalen;
	for (i = 0; i < n_stages; ++i) {
		const int ret = xhci_wait_for_transfer(xhci, dev->address, 1);
		if (ret != CC_SUCCESS) {
			if (ret == TIMEOUT) {
				xhci_debug("Stopping ID %d EP 1\n",
					   dev->address);
				xhci_cmd_stop_endpoint(xhci, dev->address, 1);
			}
			xhci_debug("Stage %d/%d failed: %d\n"
				   "  trb ring:   @%p\n"
				   "  setup trb:  @%p\n"
				   "  status trb: @%p\n"
				   "  ep state:   %d -> %d\n"
				   "  usbsts:     0x%08"PRIx32"\n",
				   i, n_stages, ret,
				   tr->ring, setup, status,
				   ep_state, EC_GET(STATE, di->devctx.ep0),
				   xhci->opreg->usbsts);
			return 1;
		}
	}

	return 0;
}

/* finalize == 1: if data is of packet aligned size, add a zero length packet */
static int
xhci_bulk(endpoint_t *const ep,
	  const int size, u8 *const data,
	  const int finalize)
{
	/* finalize: Hopefully the xHCI controller always does this.
		     We have no control over the packets. */

	xhci_t *const xhci = XHCI_INST(ep->dev->controller);
	const int ep_id = xhci_ep_id(ep);
	devinfo_t *const di = DEVINFO_FROM_XHCI(xhci, ep->dev->address);
	transfer_ring_t *const tr = di->transfer_rings[ep_id];

	const size_t off = (size_t)data & 0xffff;
	if ((off + size) > ((TRANSFER_RING_SIZE - 1) << 16)) {
		xhci_debug("Unsupported transfer size\n");
		return 1;
	}

	/* Reset endpoint if it's halted */
	const unsigned ep_state = EC_GET(STATE, di->devctx.eps[ep_id]);
	if (ep_state == 2 || ep_state == 4) {
		if (xhci_reset_endpoint(ep->dev, ep, 0))
			return 1;
	}

	/* Enqueue transfer and ring doorbell */
	const unsigned mps = EC_GET(MPS, di->devctx.eps[ep_id]);
	const unsigned dir = (ep->direction == OUT) ? TRB_DIR_OUT : TRB_DIR_IN;
	xhci_enqueue_td(tr, ep_id, mps, size, data, dir);
	xhci->dbreg[ep->dev->address] = ep_id;

	/* Wait for transfer event */
	const int ret = xhci_wait_for_transfer(xhci, ep->dev->address, ep_id);
	if (ret != CC_SUCCESS) {
		if (ret == TIMEOUT) {
			xhci_debug("Stopping ID %d EP %d\n",
				   ep->dev->address, ep_id);
			xhci_cmd_stop_endpoint(xhci, ep->dev->address, ep_id);
		} else if (ret == CC_STALL_ERROR) {
			xhci_reset_endpoint(ep->dev, ep, 1);
		}
		xhci_debug("Bulk transfer failed: %d\n"
			   "  ep state: %d -> %d\n"
			   "  usbsts:   0x%08"PRIx32"\n",
			   ret, ep_state,
			   EC_GET(STATE, di->devctx.eps[ep_id]),
			   xhci->opreg->usbsts);
		return 1;
	}

	return 0;
}

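/* Return the TRB following cur, skipping link TRBs; if pcs is non-NULL, the
   cycle state is toggled whenever a toggle-cycle link TRB is crossed. */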
static trb_t *
xhci_next_trb(trb_t *cur, int *const pcs)
{
	++cur;
	while (TRB_GET(TT, cur) == TRB_LINK) {
		if (pcs && TRB_GET(TC, cur))
			*pcs ^= 1;
		cur = phys_to_virt(cur->ptr_low);
	}
	return cur;
}

/* create and hook-up an intr queue into device schedule */
static void *
xhci_create_intr_queue(endpoint_t *const ep,
		       const int reqsize, const int reqcount,
		       const int reqtiming)
{
	/* reqtiming: We ignore it and use the interval from the
		      endpoint descriptor configured earlier. */

	xhci_t *const xhci = XHCI_INST(ep->dev->controller);
	const int ep_id = xhci_ep_id(ep);
	devinfo_t *const di = DEVINFO_FROM_XHCI(xhci, ep->dev->address);
	transfer_ring_t *const tr = di->transfer_rings[ep_id];

	if (reqcount > (TRANSFER_RING_SIZE - 2)) {
		xhci_debug("reqcount is too high, at most %d supported\n",
			   TRANSFER_RING_SIZE - 2);
		return NULL;
	}
	if (reqsize > 0x10000) {
		xhci_debug("reqsize is too large, at most 64KiB supported\n");
		return NULL;
	}
	if (di->interrupt_queues[ep_id]) {
		xhci_debug("Only one interrupt queue per endpoint supported\n");
		return NULL;
	}

	/* Allocate intrq structure and reqdata chunks */

	intrq_t *const intrq = malloc(sizeof(*intrq));
	if (!intrq) {
		xhci_debug("Out of memory\n");
		return NULL;
	}

	int i;
	int pcs = tr->pcs;
	trb_t *cur = tr->cur;
	for (i = 0; i < reqcount; ++i) {
		if (TRB_GET(C, cur) == pcs) {
			xhci_debug("Not enough empty TRBs\n");
			goto _free_return;
		}
		void *const reqdata = xhci_align(1, reqsize);
		if (!reqdata) {
			xhci_debug("Out of memory\n");
			goto _free_return;
		}
		xhci_clear_trb(cur, pcs);
		cur->ptr_low = virt_to_phys(reqdata);
		cur->ptr_high = 0;
		TRB_SET(TL, cur, reqsize);
		TRB_SET(TT, cur, TRB_NORMAL);
		TRB_SET(ISP, cur, 1);
		TRB_SET(IOC, cur, 1);

		cur = xhci_next_trb(cur, &pcs);
	}

	intrq->size = reqsize;
	intrq->count = reqcount;
	intrq->next = tr->cur;
	intrq->ready = NULL;
	intrq->ep = ep;
	di->interrupt_queues[ep_id] = intrq;

	/* Now enqueue all the prepared TRBs but the last
	   and ring the doorbell. */
	for (i = 0; i < (reqcount - 1); ++i)
		xhci_enqueue_trb(tr);
	xhci->dbreg[ep->dev->address] = ep_id;

	return intrq;

_free_return:
	cur = tr->cur;
	for (--i; i >= 0; --i) {
		free(phys_to_virt(cur->ptr_low));
		cur = xhci_next_trb(cur, NULL);
	}
	free(intrq);
	return NULL;
}

/* remove queue from device schedule, dropping all data that came in */
static void
xhci_destroy_intr_queue(endpoint_t *const ep, void *const q)
{
	xhci_t *const xhci = XHCI_INST(ep->dev->controller);
	const int ep_id = xhci_ep_id(ep);
	devinfo_t *const di = DEVINFO_FROM_XHCI(xhci, ep->dev->address);
	transfer_ring_t *const tr = di->transfer_rings[ep_id];

	intrq_t *const intrq = (intrq_t *)q;

	/* Make sure the endpoint is stopped */
	if (EC_GET(STATE, di->devctx.eps[ep_id]) == 1) {
		const int cc = xhci_cmd_stop_endpoint(
				xhci, ep->dev->address, ep_id);
		if (cc != CC_SUCCESS)
			xhci_debug("Warning: Failed to stop endpoint\n");
	}

	/* Process all remaining transfer events */
	xhci_handle_events(xhci);

	/* Free all pending transfers and the interrupt queue structure */
	int i;
	for (i = 0; i < intrq->count; ++i) {
		free(phys_to_virt(intrq->next->ptr_low));
		intrq->next = xhci_next_trb(intrq->next, NULL);
	}
	di->interrupt_queues[ep_id] = NULL;
	free((void *)intrq);

	/* Reset the controller's dequeue pointer and reinitialize the ring */
	xhci_cmd_set_tr_dq(xhci, ep->dev->address, ep_id, tr->ring, 1);
	xhci_init_cycle_ring(tr, TRANSFER_RING_SIZE);
}

/* read one intr-packet from queue, if available. extend the queue for new input.
   return NULL if nothing new is available.
   Recommended use: while ((data = poll_intr_queue(q))) process(data);
 */
static u8 *
xhci_poll_intr_queue(void *const q)
{
	if (!q)
		return NULL;

	intrq_t *const intrq = (intrq_t *)q;
	endpoint_t *const ep = intrq->ep;
	xhci_t *const xhci = XHCI_INST(ep->dev->controller);

	/* TODO: Reset interrupt queue if it gets halted? */

	xhci_handle_events(xhci);

	u8 *reqdata = NULL;
	while (!reqdata && intrq->ready) {
		const int ep_id = xhci_ep_id(ep);
		devinfo_t *const di = DEVINFO_FROM_XHCI(xhci, ep->dev->address);
		transfer_ring_t *const tr = di->transfer_rings[ep_id];

		/* Fetch the request's buffer */
		reqdata = phys_to_virt(intrq->next->ptr_low);

		/* Enqueue the last (spare) TRB and ring doorbell */
		xhci_enqueue_trb(tr);
		xhci->dbreg[ep->dev->address] = ep_id;

		/* Reuse the current buffer for the next spare TRB */
		xhci_clear_trb(tr->cur, tr->pcs);
		tr->cur->ptr_low = virt_to_phys(reqdata);
		tr->cur->ptr_high = 0;
		TRB_SET(TL, tr->cur, intrq->size);
		TRB_SET(TT, tr->cur, TRB_NORMAL);
		TRB_SET(ISP, tr->cur, 1);
		TRB_SET(IOC, tr->cur, 1);

		/* Check if anything was transferred */
		const size_t read = TRB_GET(TL, intrq->next);
		if (!read)
			reqdata = NULL;
		else if (read < intrq->size)
			/* At least zero it, poll interface is rather limited */
			memset(reqdata + read, 0x00, intrq->size - read);

		/* Advance the interrupt queue */
		if (intrq->ready == intrq->next)
			/* This was the last TRB being ready */
			intrq->ready = NULL;
		intrq->next = xhci_next_trb(intrq->next, NULL);
	}

	return reqdata;
}