/*
 * This file is part of the libpayload project.
 *
 * Copyright (C) 2010 Patrick Georgi
 * Copyright (C) 2013 secunet Security Networks AG
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

//#define XHCI_SPEW_DEBUG

#include <inttypes.h>
#include <arch/virtual.h>
#include "xhci_private.h"
#include "xhci.h"

static void xhci_start (hci_t *controller);
static void xhci_stop (hci_t *controller);
static void xhci_reset (hci_t *controller);
static void xhci_reinit (hci_t *controller);
static void xhci_shutdown (hci_t *controller);
static int xhci_bulk (endpoint_t *ep, int size, u8 *data, int finalize);
static int xhci_control (usbdev_t *dev, direction_t dir, int drlen, void *devreq,
                         int dalen, u8 *data);
static void* xhci_create_intr_queue (endpoint_t *ep, int reqsize, int reqcount, int reqtiming);
static void xhci_destroy_intr_queue (endpoint_t *ep, void *queue);
static u8* xhci_poll_intr_queue (void *queue);

/*
 * Some structures must not cross page boundaries. To get this,
 * we align them by their size (or the next greater power of 2).
 */
void *
xhci_align(const size_t min_align, const size_t size)
{
        size_t align;
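        /* If size is not already a power of two, round the alignment up to
           the next power of two (one bit above size's highest set bit). */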
        if (!(size & (size - 1)))
                align = size; /* It's a power of 2 */
        else
                align = 1 << ((sizeof(unsigned) << 3) - __builtin_clz(size));
        if (align < min_align)
                align = min_align;
        xhci_spew("Aligning %zu to %zu\n", size, align);
        return dma_memalign(align, size);
}

void
xhci_clear_trb(trb_t *const trb, const int pcs)
{
        trb->ptr_low = 0;
        trb->ptr_high = 0;
        trb->status = 0;
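        /* Writing the inverse of the producer cycle state marks this TRB
           as not yet owned by the controller. */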
        trb->control = !pcs;
}

void
xhci_init_cycle_ring(transfer_ring_t *const tr, const size_t ring_size)
{
        memset((void *)tr->ring, 0, ring_size * sizeof(*tr->ring));
        TRB_SET(TT, &tr->ring[ring_size - 1], TRB_LINK);
        TRB_SET(TC, &tr->ring[ring_size - 1], 1);
        /* only one segment that points to itself */
        tr->ring[ring_size - 1].ptr_low = virt_to_phys(tr->ring);

        tr->pcs = 1;
        tr->cur = tr->ring;
}

/* On Panther Point: switch ports shared with EHCI to xHCI */
static void
xhci_switch_ppt_ports(pcidev_t addr)
{
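        /* PCI config dword 0x00 reads device ID << 16 | vendor ID;
           8086:1e31 is Intel's Panther Point xHCI controller. */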
        if (pci_read_config32(addr, 0x00) == 0x1e318086) {
                u32 reg32 = pci_read_config32(addr, 0xdc) & 0xf;
                xhci_debug("Ports capable of SuperSpeed: 0x%"PRIx32"\n", reg32);

                /* For now, do not enable SuperSpeed on any ports */
                //pci_write_config32(addr, 0xd8, reg32);
                pci_write_config32(addr, 0xd8, 0x00000000);
                reg32 = pci_read_config32(addr, 0xd8) & 0xf;
                xhci_debug("Configured for SuperSpeed: 0x%"PRIx32"\n", reg32);

                reg32 = pci_read_config32(addr, 0xd4) & 0xf;
                xhci_debug("Trying to switch over: 0x%"PRIx32"\n", reg32);

                pci_write_config32(addr, 0xd0, reg32);
                reg32 = pci_read_config32(addr, 0xd0) & 0xf;
                xhci_debug("Actually switched over: 0x%"PRIx32"\n", reg32);
        }
}

/* On Panther Point: switch all ports back to EHCI */
static void
xhci_switchback_ppt_ports(pcidev_t addr)
{
        if (pci_read_config32(addr, 0x00) == 0x1e318086) {
                u32 reg32 = pci_read_config32(addr, 0xd0) & 0xf;
                xhci_debug("Switching ports back: 0x%"PRIx32"\n", reg32);
                pci_write_config32(addr, 0xd0, 0x00000000);
                reg32 = pci_read_config32(addr, 0xd0) & 0xf;
                xhci_debug("Still switched to xHCI: 0x%"PRIx32"\n", reg32);
        }
}

static long
xhci_handshake(volatile u32 *const reg, u32 mask, u32 wait_for, long timeout_us)
{
        /* Poll until (*reg & mask) == wait_for. Returns 0 if the timeout
           expired, otherwise roughly the number of microseconds left. */
        while ((*reg & mask) != wait_for && --timeout_us > 0)
                udelay(1);
        return timeout_us;
}

static int
xhci_wait_ready(xhci_t *const xhci)
{
        xhci_debug("Waiting for controller to be ready... ");
        if (!xhci_handshake(&xhci->opreg->usbsts, USBSTS_CNR, 0, 100000L)) {
                usb_debug("timeout!\n");
                return -1;
        }
        usb_debug("ok.\n");
        return 0;
}

hci_t *
xhci_init (unsigned long physical_bar)
{
        int i;

        /* First, allocate and initialize static controller structures */

        hci_t *const controller = new_controller();
        if (!controller) {
                xhci_debug("Could not create USB controller instance\n");
                return controller;
        }

        controller->type = XHCI;
        controller->start = xhci_start;
        controller->stop = xhci_stop;
        controller->reset = xhci_reset;
        controller->init = xhci_reinit;
        controller->shutdown = xhci_shutdown;
        controller->bulk = xhci_bulk;
        controller->control = xhci_control;
        controller->set_address = xhci_set_address;
        controller->finish_device_config = xhci_finish_device_config;
        controller->destroy_device = xhci_destroy_dev;
        controller->create_intr_queue = xhci_create_intr_queue;
        controller->destroy_intr_queue = xhci_destroy_intr_queue;
        controller->poll_intr_queue = xhci_poll_intr_queue;
        controller->pcidev = 0;
        for (i = 0; i < 128; ++i) {
                controller->devices[i] = NULL;
        }

        controller->instance = malloc(sizeof(xhci_t));
        if (!controller->instance) {
                xhci_debug("Out of memory creating xHCI controller instance\n");
                goto _free_controller;
        }
        xhci_t *const xhci = (xhci_t *)controller->instance;
        memset(xhci, 0x00, sizeof(*xhci));

        init_device_entry(controller, 0);
        xhci->roothub = controller->devices[0];
        xhci->cr.ring = xhci_align(64, COMMAND_RING_SIZE * sizeof(trb_t));
        xhci->er.ring = xhci_align(64, EVENT_RING_SIZE * sizeof(trb_t));
        xhci->ev_ring_table = xhci_align(64, sizeof(erst_entry_t));
        if (!xhci->roothub || !xhci->cr.ring ||
            !xhci->er.ring || !xhci->ev_ring_table) {
                xhci_debug("Out of memory\n");
                goto _free_xhci;
        }

        xhci->capreg = phys_to_virt(physical_bar);
        xhci->opreg = ((void *)xhci->capreg) + xhci->capreg->caplength;
        xhci->hcrreg = ((void *)xhci->capreg) + xhci->capreg->rtsoff;
        xhci->dbreg = ((void *)xhci->capreg) + xhci->capreg->dboff;
        xhci_debug("regbase: 0x%lx\n", physical_bar);
        xhci_debug("caplen: 0x%"PRIx32"\n", xhci->capreg->caplength);
        xhci_debug("rtsoff: 0x%"PRIx32"\n", xhci->capreg->rtsoff);
        xhci_debug("dboff: 0x%"PRIx32"\n", xhci->capreg->dboff);

        xhci_debug("hciversion: %"PRIx8".%"PRIx8"\n",
                   xhci->capreg->hciver_hi, xhci->capreg->hciver_lo);
        if ((xhci->capreg->hciversion < 0x96) ||
            (xhci->capreg->hciversion > 0x100)) {
                xhci_debug("Unsupported xHCI version\n");
                goto _free_xhci;
        }

        xhci_debug("context size: %dB\n", CTXSIZE(xhci));
        xhci_debug("maxslots: 0x%02lx\n", xhci->capreg->MaxSlots);
        xhci_debug("maxports: 0x%02lx\n", xhci->capreg->MaxPorts);
        const unsigned pagesize = xhci->opreg->pagesize << 12;
        xhci_debug("pagesize: 0x%04x\n", pagesize);

        /*
         * We haven't touched the hardware yet. So we allocate all dynamic
         * structures at first and can still chicken out easily if we run out
         * of memory.
         */
        xhci->max_slots_en = xhci->capreg->MaxSlots & CONFIG_LP_MASK_MaxSlotsEn;
        xhci->dcbaa = xhci_align(64, (xhci->max_slots_en + 1) * sizeof(u64));
        xhci->dev = malloc((xhci->max_slots_en + 1) * sizeof(*xhci->dev));
        if (!xhci->dcbaa || !xhci->dev) {
                xhci_debug("Out of memory\n");
                goto _free_xhci;
        }
        memset(xhci->dcbaa, 0x00, (xhci->max_slots_en + 1) * sizeof(u64));
        memset(xhci->dev, 0x00, (xhci->max_slots_en + 1) * sizeof(*xhci->dev));

        /*
         * Let dcbaa[0] point to another array of pointers, sp_ptrs.
         * The pointers therein point to scratchpad buffers (pages).
         */
        const size_t max_sp_bufs = xhci->capreg->Max_Scratchpad_Bufs;
        xhci_debug("max scratchpad bufs: 0x%zx\n", max_sp_bufs);
        if (max_sp_bufs) {
                const size_t sp_ptrs_size = max_sp_bufs * sizeof(u64);
                xhci->sp_ptrs = xhci_align(64, sp_ptrs_size);
                if (!xhci->sp_ptrs) {
                        xhci_debug("Out of memory\n");
                        goto _free_xhci_structs;
                }
                memset(xhci->sp_ptrs, 0x00, sp_ptrs_size);
                for (i = 0; i < max_sp_bufs; ++i) {
                        /* Could use mmap() here if we had it.
                           Maybe there is another way. */
                        void *const page = memalign(pagesize, pagesize);
                        if (!page) {
                                xhci_debug("Out of memory\n");
                                goto _free_xhci_structs;
                        }
                        xhci->sp_ptrs[i] = virt_to_phys(page);
                }
                xhci->dcbaa[0] = virt_to_phys(xhci->sp_ptrs);
        }

        if (dma_initialized()) {
                xhci->dma_buffer = dma_memalign(64 * 1024, DMA_SIZE);
                if (!xhci->dma_buffer) {
                        xhci_debug("Not enough memory for DMA bounce buffer\n");
                        goto _free_xhci_structs;
                }
        }

        /* Now start working on the hardware */
        if (xhci_wait_ready(xhci))
                goto _free_xhci_structs;

        /* TODO: Check if BIOS claims ownership (and hand over) */

        xhci_reset(controller);
        xhci_reinit(controller);

        xhci->roothub->controller = controller;
        xhci->roothub->init = xhci_rh_init;
        xhci->roothub->init(xhci->roothub);

        return controller;

_free_xhci_structs:
        if (xhci->sp_ptrs) {
                for (i = 0; i < max_sp_bufs; ++i) {
                        if (xhci->sp_ptrs[i])
                                free(phys_to_virt(xhci->sp_ptrs[i]));
                }
        }
        free(xhci->sp_ptrs);
        free(xhci->dcbaa);
_free_xhci:
        free((void *)xhci->ev_ring_table);
        free((void *)xhci->er.ring);
        free((void *)xhci->cr.ring);
        free(xhci->roothub);
        free(xhci->dev);
        free(xhci);
_free_controller:
        detach_controller(controller);
        free(controller);
        return NULL;
}

#ifdef CONFIG_LP_USB_PCI
hci_t *
xhci_pci_init (pcidev_t addr)
{
        u32 reg_addr;
        hci_t *controller;

        reg_addr = pci_read_config32 (addr, 0x10) & ~0xf;
        if (pci_read_config32 (addr, 0x14) > 0) {
                fatal("We don't do 64bit addressing.\n");
        }

        controller = xhci_init((unsigned long)reg_addr);
        if (controller) {
                controller->pcidev = addr;

                xhci_switch_ppt_ports(addr);
        }

        return controller;
}
#endif

static void
xhci_reset(hci_t *const controller)
{
        xhci_t *const xhci = XHCI_INST(controller);

        xhci_stop(controller);

        xhci->opreg->usbcmd |= USBCMD_HCRST;
        xhci_debug("Resetting controller... ");
        if (!xhci_handshake(&xhci->opreg->usbcmd, USBCMD_HCRST, 0, 1000000L))
                usb_debug("timeout!\n");
        else
                usb_debug("ok.\n");
}

static void
xhci_reinit (hci_t *controller)
{
        xhci_t *const xhci = XHCI_INST(controller);

        if (xhci_wait_ready(xhci))
                return;

        /* Enable all available slots */
        xhci->opreg->config = xhci->max_slots_en;

        /* Set DCBAA */
        xhci->opreg->dcbaap_lo = virt_to_phys(xhci->dcbaa);
        xhci->opreg->dcbaap_hi = 0;

        /* Initialize command ring */
        xhci_init_cycle_ring(&xhci->cr, COMMAND_RING_SIZE);
        xhci_debug("command ring @%p (0x%08x)\n",
                   xhci->cr.ring, virt_to_phys(xhci->cr.ring));
        xhci->opreg->crcr_lo = virt_to_phys(xhci->cr.ring) | CRCR_RCS;
        xhci->opreg->crcr_hi = 0;

        /* Make sure interrupts are disabled */
        xhci->opreg->usbcmd &= ~USBCMD_INTE;

        /* Initialize event ring */
        xhci_reset_event_ring(&xhci->er);
        xhci_debug("event ring @%p (0x%08x)\n",
                   xhci->er.ring, virt_to_phys(xhci->er.ring));
        xhci_debug("ERST Max: 0x%lx -> 0x%lx entries\n",
                   xhci->capreg->ERST_Max, 1 << xhci->capreg->ERST_Max);
        memset((void*)xhci->ev_ring_table, 0x00, sizeof(erst_entry_t));
        xhci->ev_ring_table[0].seg_base_lo = virt_to_phys(xhci->er.ring);
        xhci->ev_ring_table[0].seg_base_hi = 0;
        xhci->ev_ring_table[0].seg_size = EVENT_RING_SIZE;

        /* Initialize primary interrupter */
        xhci->hcrreg->intrrs[0].erstsz = 1;
        xhci_update_event_dq(xhci);
        /* erstba has to be written at last */
        xhci->hcrreg->intrrs[0].erstba_lo = virt_to_phys(xhci->ev_ring_table);
        xhci->hcrreg->intrrs[0].erstba_hi = 0;

        xhci_start(controller);

#ifdef USB_DEBUG
        int i;
        for (i = 0; i < 32; ++i) {
                xhci_debug("NOOP run #%d\n", i);
                trb_t *const cmd = xhci_next_command_trb(xhci);
                TRB_SET(TT, cmd, TRB_CMD_NOOP);

                xhci_post_command(xhci);

                /* Wait for result in event ring */
                xhci_wait_for_command_done(xhci, cmd, 1);
                xhci_debug("Command ring is %srunning\n",
                           (xhci->opreg->crcr_lo & CRCR_CRR) ? "" : "not ");
        }
#endif
}

static void
xhci_shutdown(hci_t *const controller)
{
        int i;

        if (controller == 0)
                return;
        xhci_t *const xhci = XHCI_INST(controller);

        detach_controller(controller);

        /* Detach device hierarchy (starting at root hub) */
        usb_detach_device(controller, 0);

        xhci_stop(controller);

        if (controller->pcidev)
                xhci_switchback_ppt_ports(controller->pcidev);

        if (xhci->sp_ptrs) {
                const size_t max_sp_bufs = xhci->capreg->Max_Scratchpad_Bufs;
                for (i = 0; i < max_sp_bufs; ++i) {
                        if (xhci->sp_ptrs[i])
                                free(phys_to_virt(xhci->sp_ptrs[i]));
                }
        }
        free(xhci->sp_ptrs);
        free(xhci->dcbaa);
        free(xhci->dev);
        free((void *)xhci->ev_ring_table);
        free((void *)xhci->er.ring);
        free((void *)xhci->cr.ring);
        free(xhci);
        free(controller);
}

static void
xhci_start (hci_t *controller)
{
        xhci_t *const xhci = XHCI_INST(controller);

        xhci->opreg->usbcmd |= USBCMD_RS;
        if (!xhci_handshake(&xhci->opreg->usbsts, USBSTS_HCH, 0, 1000000L))
                xhci_debug("Controller didn't start within 1s\n");
}

static void
xhci_stop (hci_t *controller)
{
        xhci_t *const xhci = XHCI_INST(controller);

        xhci->opreg->usbcmd &= ~USBCMD_RS;
        if (!xhci_handshake(&xhci->opreg->usbsts,
                            USBSTS_HCH, USBSTS_HCH, 1000000L))
                xhci_debug("Controller didn't halt within 1s\n");
}

static int
xhci_reset_endpoint(usbdev_t *const dev, endpoint_t *const ep,
                    const int clear_halt)
{
        xhci_t *const xhci = XHCI_INST(dev->controller);
        const int slot_id = dev->address;
        const int ep_id = ep ? xhci_ep_id(ep) : 1;
        epctx_t *const epctx = xhci->dev[slot_id].ctx.ep[ep_id];

        xhci_debug("Resetting ID %d EP %d (ep state: %d)\n",
                   slot_id, ep_id, EC_GET(STATE, epctx));

        /* Run Reset Endpoint Command if the EP is in Halted state */
        if (EC_GET(STATE, epctx) == 2) {
                const int cc = xhci_cmd_reset_endpoint(xhci, slot_id, ep_id);
                if (cc != CC_SUCCESS) {
                        xhci_debug("Reset Endpoint Command failed: %d\n", cc);
                        return 1;
                }
        }

        /* Clear TT buffer for bulk and control endpoints behind a TT */
        const int hub = dev->hub;
        if (hub && dev->speed < HIGH_SPEED &&
            dev->controller->devices[hub]->speed == HIGH_SPEED)
                /* TODO */;

        /* Try clearing the device' halt condition on non-control endpoints */
        if (clear_halt && ep)
                clear_stall(ep);

        /* Reset transfer ring if the endpoint is in the right state */
        const unsigned ep_state = EC_GET(STATE, epctx);
        if (ep_state == 3 || ep_state == 4) {
                transfer_ring_t *const tr =
                                xhci->dev[slot_id].transfer_rings[ep_id];
                const int cc = xhci_cmd_set_tr_dq(xhci, slot_id, ep_id,
                                                  tr->ring, 1);
                if (cc != CC_SUCCESS) {
                        xhci_debug("Set TR Dequeue Command failed: %d\n", cc);
                        return 1;
                }
                xhci_init_cycle_ring(tr, TRANSFER_RING_SIZE);
        }

        xhci_debug("Finished resetting ID %d EP %d (ep state: %d)\n",
                   slot_id, ep_id, EC_GET(STATE, epctx));

        return 0;
}

static void
xhci_enqueue_trb(transfer_ring_t *const tr)
{
        const int chain = TRB_GET(CH, tr->cur);
        TRB_SET(C, tr->cur, tr->pcs);
        ++tr->cur;

        while (TRB_GET(TT, tr->cur) == TRB_LINK) {
                xhci_spew("Handling LINK pointer\n");
                const int tc = TRB_GET(TC, tr->cur);
                TRB_SET(CH, tr->cur, chain);
                TRB_SET(C, tr->cur, tr->pcs);
                tr->cur = phys_to_virt(tr->cur->ptr_low);
                if (tc)
                        tr->pcs ^= 1;
        }
}

static void
xhci_enqueue_td(transfer_ring_t *const tr, const int ep, const size_t mps,
                const int dalen, void *const data, const int dir)
{
        trb_t *trb = NULL;                         /* cur TRB */
        u8 *cur_start = data;                      /* cur data pointer */
        size_t length = dalen;                     /* remaining bytes */
        size_t packets = (length + mps - 1) / mps; /* remaining packets */
        size_t residue = 0;                        /* residue from last TRB */
        size_t trb_count = 0;                      /* TRBs added so far */

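        /* Enqueue one TRB per chunk, splitting the buffer at 64KiB address
           boundaries (a single TRB transfers at most 64KiB). The TD Size
           field (TDS) holds the number of packets still left after each TRB. */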
        while (length || !trb_count /* enqueue at least one */) {
                const size_t cur_end = ((size_t)cur_start + 0x10000) & ~0xffff;
                size_t cur_length = cur_end - (size_t)cur_start;
                if (length < cur_length) {
                        cur_length = length;
                        packets = 0;
                        length = 0;
                } else {
                        packets -= (residue + cur_length) / mps;
                        residue = (residue + cur_length) % mps;
                        length -= cur_length;
                }

                trb = tr->cur;
                xhci_clear_trb(trb, tr->pcs);
                trb->ptr_low = virt_to_phys(cur_start);
                TRB_SET(TL, trb, cur_length);
                TRB_SET(TDS, trb, packets);
                TRB_SET(CH, trb, 1);

                /* Check for first, data stage TRB */
                if (!trb_count && ep == 1) {
                        TRB_SET(DIR, trb, dir);
                        TRB_SET(TT, trb, TRB_DATA_STAGE);
                } else {
                        TRB_SET(TT, trb, TRB_NORMAL);
                }

                xhci_enqueue_trb(tr);

                cur_start += cur_length;
                ++trb_count;
        }

        trb = tr->cur;
        xhci_clear_trb(trb, tr->pcs);
        trb->ptr_low = virt_to_phys(trb); /* for easier debugging only */
        TRB_SET(TT, trb, TRB_EVENT_DATA);
        TRB_SET(IOC, trb, 1);

        xhci_enqueue_trb(tr);
}

static int
xhci_control(usbdev_t *const dev, const direction_t dir,
             const int drlen, void *const devreq,
             const int dalen, unsigned char *const src)
{
        unsigned char *data = src;
        xhci_t *const xhci = XHCI_INST(dev->controller);
        epctx_t *const epctx = xhci->dev[dev->address].ctx.ep0;
        transfer_ring_t *const tr = xhci->dev[dev->address].transfer_rings[1];

        const size_t off = (size_t)data & 0xffff;
        if ((off + dalen) > ((TRANSFER_RING_SIZE - 4) << 16)) {
                xhci_debug("Unsupported transfer size\n");
                return -1;
        }

        /* Reset endpoint if it's halted */
        const unsigned ep_state = EC_GET(STATE, epctx);
        if (ep_state == 2 || ep_state == 4) {
                if (xhci_reset_endpoint(dev, NULL, 0))
                        return -1;
        }

        if (dalen && !dma_coherent(src)) {
                data = xhci->dma_buffer;
                if (dalen > DMA_SIZE) {
                        xhci_debug("Control transfer too large: %d\n", dalen);
                        return -1;
                }
                if (dir == OUT)
                        memcpy(data, src, dalen);
        }

        /* Fill and enqueue setup TRB */
        trb_t *const setup = tr->cur;
        xhci_clear_trb(setup, tr->pcs);
        setup->ptr_low = ((u32 *)devreq)[0];
        setup->ptr_high = ((u32 *)devreq)[1];
        TRB_SET(TL, setup, 8);
        TRB_SET(TRT, setup, (dalen)
                        ? ((dir == OUT) ? TRB_TRT_OUT_DATA : TRB_TRT_IN_DATA)
                        : TRB_TRT_NO_DATA);
        TRB_SET(TT, setup, TRB_SETUP_STAGE);
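        /* IDT: the eight setup bytes are carried immediately in the TRB
           itself instead of being referenced through a data pointer. */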
        TRB_SET(IDT, setup, 1);
        TRB_SET(IOC, setup, 1);
        xhci_enqueue_trb(tr);

        /* Fill and enqueue data TRBs (if any) */
        if (dalen) {
                const unsigned mps = EC_GET(MPS, epctx);
                const unsigned dt_dir = (dir == OUT) ? TRB_DIR_OUT : TRB_DIR_IN;
                xhci_enqueue_td(tr, 1, mps, dalen, data, dt_dir);
        }

        /* Fill status TRB */
        trb_t *const status = tr->cur;
        xhci_clear_trb(status, tr->pcs);
        TRB_SET(DIR, status, (dir == OUT) ? TRB_DIR_IN : TRB_DIR_OUT);
        TRB_SET(TT, status, TRB_STATUS_STAGE);
        TRB_SET(IOC, status, 1);
        xhci_enqueue_trb(tr);

        /* Ring doorbell for EP0 */
        xhci->dbreg[dev->address] = 1;

        /* Wait for transfer events */
        int i, transferred = 0;
        const int n_stages = 2 + !!dalen;
        for (i = 0; i < n_stages; ++i) {
                const int ret = xhci_wait_for_transfer(xhci, dev->address, 1);
                transferred += ret;
                if (ret < 0) {
                        if (ret == TIMEOUT) {
                                xhci_debug("Stopping ID %d EP 1\n",
                                           dev->address);
                                xhci_cmd_stop_endpoint(xhci, dev->address, 1);
                        }
                        xhci_debug("Stage %d/%d failed: %d\n"
                                   "  trb ring:   @%p\n"
                                   "  setup trb:  @%p\n"
                                   "  status trb: @%p\n"
                                   "  ep state:   %d -> %d\n"
                                   "  usbsts:     0x%08"PRIx32"\n",
                                   i, n_stages, ret,
                                   tr->ring, setup, status,
                                   ep_state, EC_GET(STATE, epctx),
                                   xhci->opreg->usbsts);
                        return ret;
                }
        }

        if (dir == IN && data != src)
                memcpy(src, data, transferred);
        return transferred;
}

/* finalize == 1: if data is of packet aligned size, add a zero length packet */
static int
xhci_bulk(endpoint_t *const ep, const int size, u8 *const src,
          const int finalize)
{
        /* finalize: Hopefully the xHCI controller always does this.
                     We have no control over the packets. */

        u8 *data = src;
        xhci_t *const xhci = XHCI_INST(ep->dev->controller);
        const int slot_id = ep->dev->address;
        const int ep_id = xhci_ep_id(ep);
        epctx_t *const epctx = xhci->dev[slot_id].ctx.ep[ep_id];
        transfer_ring_t *const tr = xhci->dev[slot_id].transfer_rings[ep_id];

        const size_t off = (size_t)data & 0xffff;
        if ((off + size) > ((TRANSFER_RING_SIZE - 2) << 16)) {
                xhci_debug("Unsupported transfer size\n");
                return -1;
        }

        if (!dma_coherent(src)) {
                data = xhci->dma_buffer;
                if (size > DMA_SIZE) {
                        xhci_debug("Bulk transfer too large: %d\n", size);
                        return -1;
                }
                if (ep->direction == OUT)
                        memcpy(data, src, size);
        }

        /* Reset endpoint if it's halted */
        const unsigned ep_state = EC_GET(STATE, epctx);
        if (ep_state == 2 || ep_state == 4) {
                if (xhci_reset_endpoint(ep->dev, ep, 0))
                        return -1;
        }

        /* Enqueue transfer and ring doorbell */
        const unsigned mps = EC_GET(MPS, epctx);
        const unsigned dir = (ep->direction == OUT) ? TRB_DIR_OUT : TRB_DIR_IN;
        xhci_enqueue_td(tr, ep_id, mps, size, data, dir);
        xhci->dbreg[ep->dev->address] = ep_id;

        /* Wait for transfer event */
        const int ret = xhci_wait_for_transfer(xhci, ep->dev->address, ep_id);
        if (ret < 0) {
                if (ret == TIMEOUT) {
                        xhci_debug("Stopping ID %d EP %d\n",
                                   ep->dev->address, ep_id);
                        xhci_cmd_stop_endpoint(xhci, ep->dev->address, ep_id);
                } else if (ret == -CC_STALL_ERROR) {
                        xhci_reset_endpoint(ep->dev, ep, 1);
                }
                xhci_debug("Bulk transfer failed: %d\n"
                           "  ep state: %d -> %d\n"
                           "  usbsts:   0x%08"PRIx32"\n",
                           ret, ep_state,
                           EC_GET(STATE, epctx),
                           xhci->opreg->usbsts);
                return ret;
        }

        if (ep->direction == IN && data != src)
                memcpy(src, data, ret);
        return ret;
}

static trb_t *
xhci_next_trb(trb_t *cur, int *const pcs)
{
        ++cur;
        while (TRB_GET(TT, cur) == TRB_LINK) {
                if (pcs && TRB_GET(TC, cur))
                        *pcs ^= 1;
                cur = phys_to_virt(cur->ptr_low);
        }
        return cur;
}

/* create and hook-up an intr queue into device schedule */
static void *
xhci_create_intr_queue(endpoint_t *const ep,
                       const int reqsize, const int reqcount,
                       const int reqtiming)
{
        /* reqtiming: We ignore it and use the interval from the
                      endpoint descriptor configured earlier. */

        xhci_t *const xhci = XHCI_INST(ep->dev->controller);
        const int slot_id = ep->dev->address;
        const int ep_id = xhci_ep_id(ep);
        transfer_ring_t *const tr = xhci->dev[slot_id].transfer_rings[ep_id];

        if (reqcount > (TRANSFER_RING_SIZE - 2)) {
                xhci_debug("reqcount is too high, at most %d supported\n",
                           TRANSFER_RING_SIZE - 2);
                return NULL;
        }
        if (reqsize > 0x10000) {
                xhci_debug("reqsize is too large, at most 64KiB supported\n");
                return NULL;
        }
        if (xhci->dev[slot_id].interrupt_queues[ep_id]) {
                xhci_debug("Only one interrupt queue per endpoint supported\n");
                return NULL;
        }

        /* Allocate intrq structure and reqdata chunks */

        intrq_t *const intrq = malloc(sizeof(*intrq));
        if (!intrq) {
                xhci_debug("Out of memory\n");
                return NULL;
        }

        int i;
        int pcs = tr->pcs;
        trb_t *cur = tr->cur;
        for (i = 0; i < reqcount; ++i) {
                if (TRB_GET(C, cur) == pcs) {
                        xhci_debug("Not enough empty TRBs\n");
                        goto _free_return;
                }
                void *const reqdata = xhci_align(1, reqsize);
                if (!reqdata) {
                        xhci_debug("Out of memory\n");
                        goto _free_return;
                }
                xhci_clear_trb(cur, pcs);
                cur->ptr_low = virt_to_phys(reqdata);
                cur->ptr_high = 0;
                TRB_SET(TL, cur, reqsize);
                TRB_SET(TT, cur, TRB_NORMAL);
                TRB_SET(ISP, cur, 1);
                TRB_SET(IOC, cur, 1);

                cur = xhci_next_trb(cur, &pcs);
        }

        intrq->size = reqsize;
        intrq->count = reqcount;
        intrq->next = tr->cur;
        intrq->ready = NULL;
        intrq->ep = ep;
        xhci->dev[slot_id].interrupt_queues[ep_id] = intrq;

        /* Now enqueue all the prepared TRBs but the last
           and ring the doorbell. */
        for (i = 0; i < (reqcount - 1); ++i)
                xhci_enqueue_trb(tr);
        xhci->dbreg[slot_id] = ep_id;

        return intrq;

_free_return:
        cur = tr->cur;
        for (--i; i >= 0; --i) {
                free(phys_to_virt(cur->ptr_low));
                cur = xhci_next_trb(cur, NULL);
        }
        free(intrq);
        return NULL;
}

/* remove queue from device schedule, dropping all data that came in */
static void
xhci_destroy_intr_queue(endpoint_t *const ep, void *const q)
{
        xhci_t *const xhci = XHCI_INST(ep->dev->controller);
        const int slot_id = ep->dev->address;
        const int ep_id = xhci_ep_id(ep);
        transfer_ring_t *const tr = xhci->dev[slot_id].transfer_rings[ep_id];

        intrq_t *const intrq = (intrq_t *)q;

        /* Make sure the endpoint is stopped */
        if (EC_GET(STATE, xhci->dev[slot_id].ctx.ep[ep_id]) == 1) {
                const int cc = xhci_cmd_stop_endpoint(xhci, slot_id, ep_id);
                if (cc != CC_SUCCESS)
                        xhci_debug("Warning: Failed to stop endpoint\n");
        }

        /* Process all remaining transfer events */
        xhci_handle_events(xhci);

        /* Free all pending transfers and the interrupt queue structure */
        int i;
        for (i = 0; i < intrq->count; ++i) {
                free(phys_to_virt(intrq->next->ptr_low));
                intrq->next = xhci_next_trb(intrq->next, NULL);
        }
        xhci->dev[slot_id].interrupt_queues[ep_id] = NULL;
        free((void *)intrq);

        /* Reset the controller's dequeue pointer and reinitialize the ring */
        xhci_cmd_set_tr_dq(xhci, slot_id, ep_id, tr->ring, 1);
        xhci_init_cycle_ring(tr, TRANSFER_RING_SIZE);
}

/* read one intr-packet from queue, if available. extend the queue for new input.
   return NULL if nothing new available.
   Recommended use: while (data=poll_intr_queue(q)) process(data);
 */
static u8 *
xhci_poll_intr_queue(void *const q)
{
        if (!q)
                return NULL;

        intrq_t *const intrq = (intrq_t *)q;
        endpoint_t *const ep = intrq->ep;
        xhci_t *const xhci = XHCI_INST(ep->dev->controller);

        /* TODO: Reset interrupt queue if it gets halted? */

        xhci_handle_events(xhci);

        u8 *reqdata = NULL;
        while (!reqdata && intrq->ready) {
                const int ep_id = xhci_ep_id(ep);
                transfer_ring_t *const tr =
                        xhci->dev[ep->dev->address].transfer_rings[ep_id];

                /* Fetch the request's buffer */
                reqdata = phys_to_virt(intrq->next->ptr_low);

                /* Enqueue the last (spare) TRB and ring doorbell */
                xhci_enqueue_trb(tr);
                xhci->dbreg[ep->dev->address] = ep_id;

                /* Reuse the current buffer for the next spare TRB */
                xhci_clear_trb(tr->cur, tr->pcs);
                tr->cur->ptr_low = virt_to_phys(reqdata);
                tr->cur->ptr_high = 0;
                TRB_SET(TL, tr->cur, intrq->size);
                TRB_SET(TT, tr->cur, TRB_NORMAL);
                TRB_SET(ISP, tr->cur, 1);
                TRB_SET(IOC, tr->cur, 1);

                /* Check if anything was transferred */
                const size_t read = TRB_GET(TL, intrq->next);
                if (!read)
                        reqdata = NULL;
                else if (read < intrq->size)
                        /* At least zero it, poll interface is rather limited */
                        memset(reqdata + read, 0x00, intrq->size - read);

                /* Advance the interrupt queue */
                if (intrq->ready == intrq->next)
                        /* This was last TRB being ready */
                        intrq->ready = NULL;
                intrq->next = xhci_next_trb(intrq->next, NULL);
        }

        return reqdata;
}