libpayload: OHCI driver correct PCI BAR reading
payloads/libpayload/drivers/usb/ohci.c (blob 8a01cf24fbfb58221ba9dfbba113f7190231e69e)

/*
 * This file is part of the libpayload project.
 *
 * Copyright (C) 2010 Patrick Georgi
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

//#define USB_DEBUG

#include <arch/virtual.h>
#include <usb/usb.h>
#include "ohci_private.h"
#include "ohci.h"

static void ohci_start (hci_t *controller);
static void ohci_stop (hci_t *controller);
static void ohci_reset (hci_t *controller);
static void ohci_shutdown (hci_t *controller);
static int ohci_bulk (endpoint_t *ep, int size, u8 *data, int finalize);
static int ohci_control (usbdev_t *dev, direction_t dir, int drlen, void *devreq,
			 int dalen, u8 *data);
static void* ohci_create_intr_queue (endpoint_t *ep, int reqsize, int reqcount, int reqtiming);
static void ohci_destroy_intr_queue (endpoint_t *ep, void *queue);
static u8* ohci_poll_intr_queue (void *queue);
static void ohci_process_done_queue(ohci_t *ohci, int spew_debug);

static void
ohci_reset (hci_t *controller)
{
}

#ifdef USB_DEBUG
/* Section 4.3.3 */
static const char *completion_codes[] = {
	"No error",
	"CRC",
	"Bit stuffing",
	"Data toggle mismatch",
	"Stall",
	"Device not responding",
	"PID check failure",
	"Unexpected PID",
	"Data overrun",
	"Data underrun",
	"--- (10)",
	"--- (11)",
	"Buffer overrun",
	"Buffer underrun",
	"Not accessed (14)",
	"Not accessed (15)"
};

/* Section 4.3.1.2 */
static const char *direction[] = {
	"SETUP",
	"OUT",
	"IN",
	"reserved / from TD"
};
#endif

hci_t *
ohci_init (pcidev_t addr)
{
	int i;

	hci_t *controller = new_controller ();

	if (!controller)
		fatal("Could not create USB controller instance.\n");

	controller->instance = malloc (sizeof (ohci_t));
	if (!controller->instance)
		fatal("Not enough memory creating USB controller instance.\n");

	controller->start = ohci_start;
	controller->stop = ohci_stop;
	controller->reset = ohci_reset;
	controller->shutdown = ohci_shutdown;
	controller->bulk = ohci_bulk;
	controller->control = ohci_control;
	controller->create_intr_queue = ohci_create_intr_queue;
	controller->destroy_intr_queue = ohci_destroy_intr_queue;
	controller->poll_intr_queue = ohci_poll_intr_queue;
	for (i = 0; i < 128; i++) {
		controller->devices[i] = 0;
	}
	init_device_entry (controller, 0);
	OHCI_INST (controller)->roothub = controller->devices[0];

	controller->bus_address = addr;
	/* According to the OHCI spec, Appendix A, BAR_OHCI register description,
	 * Table A-4: only bits [31:12] hold the BASE ADDRESS; the remaining bits
	 * are usually 0, but not always, so mask them off. */
	controller->reg_base = pci_read_config32 (controller->bus_address, 0x10) & 0xfffff000; // OHCI mandates MMIO, so bit 0 is clear
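	/* Illustration only (made-up value): a raw BAR read of 0xfebfe008 must
	 * become reg_base 0xfebfe000; the low bits are PCI BAR flag bits, not
	 * part of the 4 KiB-aligned MMIO base. */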
	OHCI_INST (controller)->opreg = (opreg_t*)phys_to_virt(controller->reg_base);
	printf("OHCI Version %x.%x\n", (OHCI_INST (controller)->opreg->HcRevision >> 4) & 0xf, OHCI_INST (controller)->opreg->HcRevision & 0xf);

	if ((OHCI_INST (controller)->opreg->HcControl & HostControllerFunctionalStateMask) == USBReset) {
		/* cold boot */
		OHCI_INST (controller)->opreg->HcControl &= ~RemoteWakeupConnected;
		OHCI_INST (controller)->opreg->HcFmInterval = (11999 * FrameInterval) | ((((11999 - 210)*6)/7) * FSLargestDataPacket);
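		/* Note: 11999 bit times is the standard 1 ms frame interval, and
		 * the largest-data-packet value follows OHCI spec 7.3.1:
		 * FSMPS = (FrameInterval - MAXIMUM_OVERHEAD (210)) * 6 / 7. */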
		/* TODO: right value for PowerOnToPowerGoodTime ? */
		OHCI_INST (controller)->opreg->HcRhDescriptorA = NoPowerSwitching | NoOverCurrentProtection | (10 * PowerOnToPowerGoodTime);
		OHCI_INST (controller)->opreg->HcRhDescriptorB = (0 * DeviceRemovable);
		udelay(100); /* TODO: reset asserting according to USB spec */
	} else if ((OHCI_INST (controller)->opreg->HcControl & HostControllerFunctionalStateMask) != USBOperational) {
		OHCI_INST (controller)->opreg->HcControl = (OHCI_INST (controller)->opreg->HcControl & ~HostControllerFunctionalStateMask) | USBResume;
		udelay(100); /* TODO: resume time according to USB spec */
	}
	int interval = OHCI_INST (controller)->opreg->HcFmInterval;

	OHCI_INST (controller)->opreg->HcCommandStatus = HostControllerReset;
	udelay (10); /* at most 10us for reset to complete. State must be set to Operational within 2ms (5.1.1.4) */
	OHCI_INST (controller)->opreg->HcFmInterval = interval;
	OHCI_INST (controller)->hcca = memalign(256, 256);
	memset((void*)OHCI_INST (controller)->hcca, 0, 256);

	/* Initialize interrupt table. */
	u32 *const intr_table = OHCI_INST(controller)->hcca->HccaInterruptTable;
	ed_t *const periodic_ed = memalign(sizeof(ed_t), sizeof(ed_t));
	memset((void *)periodic_ed, 0, sizeof(*periodic_ed));
	for (i = 0; i < 32; ++i)
		intr_table[i] = virt_to_phys(periodic_ed);
	OHCI_INST (controller)->periodic_ed = periodic_ed;
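	/* All 32 HccaInterruptTable slots point at a single, empty dummy ED, so
	 * the periodic schedule is valid but idle until ohci_create_intr_queue()
	 * links a real interrupt ED into some of the slots. */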

	OHCI_INST (controller)->opreg->HcHCCA = virt_to_phys(OHCI_INST (controller)->hcca);
	/* Make sure periodic schedule is enabled. */
	OHCI_INST (controller)->opreg->HcControl |= PeriodicListEnable;
	OHCI_INST (controller)->opreg->HcControl &= ~IsochronousEnable; // unused by this driver
	// disable everything, contrary to what OHCI spec says in 5.1.1.4, as we don't need IRQs
	OHCI_INST (controller)->opreg->HcInterruptEnable = 1<<31;
	OHCI_INST (controller)->opreg->HcInterruptDisable = ~(1<<31);
	OHCI_INST (controller)->opreg->HcInterruptStatus = ~0;
	OHCI_INST (controller)->opreg->HcPeriodicStart = (((OHCI_INST (controller)->opreg->HcFmInterval & FrameIntervalMask) / 10) * 9);
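	/* (FrameInterval / 10) * 9 sets HcPeriodicStart to roughly 90% of the
	 * frame, as recommended by OHCI spec 7.3.4: once HcFmRemaining counts
	 * down to this value, the periodic lists get priority over control and
	 * bulk processing for the rest of the frame. */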
	OHCI_INST (controller)->opreg->HcControl = (OHCI_INST (controller)->opreg->HcControl & ~HostControllerFunctionalStateMask) | USBOperational;

	mdelay(100);

	controller->devices[0]->controller = controller;
	controller->devices[0]->init = ohci_rh_init;
	controller->devices[0]->init (controller->devices[0]);
	ohci_reset (controller);
	return controller;
}

static void
ohci_shutdown (hci_t *controller)
{
	if (controller == 0)
		return;
	detach_controller (controller);
	ohci_stop(controller);
	OHCI_INST (controller)->roothub->destroy (OHCI_INST (controller)->roothub);
	free ((void *)OHCI_INST (controller)->periodic_ed);
	free (OHCI_INST (controller));
	free (controller);
}

static void
ohci_start (hci_t *controller)
{
// TODO: turn on all operation of OHCI, but assume that it's initialized.
}

static void
ohci_stop (hci_t *controller)
{
// TODO: turn off all operation of OHCI
}

static void
dump_td(td_t *cur, int level)
{
#ifdef USB_DEBUG
	static const char *spaces = "          ";
	const char *spc = spaces + (10 - level);
	debug("%std at %x (%s), condition code: %s\n", spc, cur, direction[(cur->config & TD_DIRECTION_MASK) >> TD_DIRECTION_SHIFT],
		completion_codes[(cur->config & TD_CC_MASK) >> TD_CC_SHIFT]);
	debug("%s toggle: %x\n", spc, !!(cur->config & TD_TOGGLE_DATA1));
#endif
}

static int
wait_for_ed(usbdev_t *dev, ed_t *head, int pages)
{
	/* wait for results */
	/* TOTEST: how long to wait?
	 *         give 2s per TD (2 pages) plus another 2s for now
	 */
	int timeout = pages*1000 + 2000;
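	/* Spin until the ED's head pointer catches up with its tail pointer
	 * (all TDs retired), the Halted bit (head pointer bit 0) gets set, or
	 * the TD at the head no longer reports "not accessed", polling once
	 * per millisecond until the timeout expires. */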
	while (((head->head_pointer & ~3) != head->tail_pointer) &&
		!(head->head_pointer & 1) &&
		((((td_t*)phys_to_virt(head->head_pointer & ~3))->config
			& TD_CC_MASK) >= TD_CC_NOACCESS) &&
		timeout--) {
		/* don't log every ms */
		if (!(timeout % 100))
			debug("intst: %x; ctrl: %x; cmdst: %x; head: %x -> %x, tail: %x, condition: %x\n",
				OHCI_INST(dev->controller)->opreg->HcInterruptStatus,
				OHCI_INST(dev->controller)->opreg->HcControl,
				OHCI_INST(dev->controller)->opreg->HcCommandStatus,
				head->head_pointer,
				((td_t*)phys_to_virt(head->head_pointer & ~3))->next_td,
				head->tail_pointer,
				(((td_t*)phys_to_virt(head->head_pointer & ~3))->config & TD_CC_MASK) >> TD_CC_SHIFT);
		mdelay(1);
	}
	if (timeout < 0)
		printf("Error: ohci: endpoint "
			"descriptor processing timed out.\n");
	/* Clear the done queue. */
	ohci_process_done_queue(OHCI_INST(dev->controller), 1);

	if (head->head_pointer & 1) {
		debug("HALTED!\n");
		return 1;
	}
	return 0;
}

static void
ohci_free_ed (ed_t *const head)
{
	/* In case the transfer was canceled, we have to free unprocessed TDs. */
	while ((head->head_pointer & ~0x3) != head->tail_pointer) {
		/* Save current TD pointer. */
		td_t *const cur_td =
			(td_t*)phys_to_virt(head->head_pointer & ~0x3);
		/* Advance head pointer. */
		head->head_pointer = cur_td->next_td;
		/* Free current TD. */
		free((void *)cur_td);
	}

	/* Always free the dummy TD */
	if ((head->head_pointer & ~0x3) == head->tail_pointer)
		free(phys_to_virt(head->head_pointer & ~0x3));
	/* and the ED. */
	free((void *)head);
}

static int
ohci_control (usbdev_t *dev, direction_t dir, int drlen, void *devreq, int dalen,
	      unsigned char *data)
{
	td_t *cur;

	// pages are specified as 4K in OHCI, so don't use getpagesize()
	int first_page = (unsigned long)data / 4096;
	int last_page = (unsigned long)(data+dalen-1)/4096;
	if (last_page < first_page) last_page = first_page;
	int pages = (dalen==0)?0:(last_page - first_page + 1);

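	/* Each OHCI general TD can address a buffer spanning at most two
	 * (possibly discontiguous) 4 KiB pages, so the page count of the data
	 * stage determines how many data TDs get chained below. */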
	/* First TD. */
	td_t *const first_td = (td_t *)memalign(sizeof(td_t), sizeof(td_t));
	memset((void *)first_td, 0, sizeof(*first_td));
	cur = first_td;

	cur->config = TD_DIRECTION_SETUP |
		TD_DELAY_INTERRUPT_NOINTR |
		TD_TOGGLE_FROM_TD |
		TD_TOGGLE_DATA0 |
		TD_CC_NOACCESS;
	cur->current_buffer_pointer = virt_to_phys(devreq);
	cur->buffer_end = virt_to_phys(devreq + drlen - 1);

	while (pages > 0) {
		/* One more TD. */
		td_t *const next = (td_t *)memalign(sizeof(td_t), sizeof(td_t));
		memset((void *)next, 0, sizeof(*next));
		/* Linked to the previous. */
		cur->next_td = virt_to_phys(next);
		/* Advance to the new TD. */
		cur = next;

		cur->config = (dir == IN ? TD_DIRECTION_IN : TD_DIRECTION_OUT) |
			TD_DELAY_INTERRUPT_NOINTR |
			TD_TOGGLE_FROM_ED |
			TD_CC_NOACCESS;
		cur->current_buffer_pointer = virt_to_phys(data);
		pages--;
		int consumed = (4096 - ((unsigned long)data % 4096));
		if (consumed >= dalen) {
			// end of data is within same page
			cur->buffer_end = virt_to_phys(data + dalen - 1);
			dalen = 0;
			/* assert(pages == 0); */
		} else {
			dalen -= consumed;
			data += consumed;
			pages--;
			int second_page_size = dalen;
			if (dalen > 4096) {
				second_page_size = 4096;
			}
			cur->buffer_end = virt_to_phys(data + second_page_size - 1);
			dalen -= second_page_size;
			data += second_page_size;
		}
	}

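	/* Status stage: the handshake of a control transfer is a zero-length
	 * packet in the opposite direction of the data stage and always uses
	 * DATA1, which is why the TD below forces the toggle. */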
	/* One more TD. */
	td_t *const next_td = (td_t *)memalign(sizeof(td_t), sizeof(td_t));
	memset((void *)next_td, 0, sizeof(*next_td));
	/* Linked to the previous. */
	cur->next_td = virt_to_phys(next_td);
	/* Advance to the new TD. */
	cur = next_td;
	cur->config = (dir == IN ? TD_DIRECTION_OUT : TD_DIRECTION_IN) |
		TD_DELAY_INTERRUPT_ZERO | /* Write done head after this TD. */
		TD_TOGGLE_FROM_TD |
		TD_TOGGLE_DATA1 |
		TD_CC_NOACCESS;
	cur->current_buffer_pointer = 0;
	cur->buffer_end = 0;

	/* Final dummy TD. */
	td_t *const final_td = (td_t *)memalign(sizeof(td_t), sizeof(td_t));
	memset((void *)final_td, 0, sizeof(*final_td));
	/* Linked to the previous. */
	cur->next_td = virt_to_phys(final_td);

	/* Data structures */
	ed_t *head = memalign(sizeof(ed_t), sizeof(ed_t));
	memset((void*)head, 0, sizeof(*head));
	head->config = (dev->address << ED_FUNC_SHIFT) |
		(0 << ED_EP_SHIFT) |
		(OHCI_FROM_TD << ED_DIR_SHIFT) |
		(dev->speed?ED_LOWSPEED:0) |
		(dev->endpoints[0].maxpacketsize << ED_MPS_SHIFT);
	head->tail_pointer = virt_to_phys(final_td);
	head->head_pointer = virt_to_phys(first_td);

	debug("doing control transfer with %x. first_td at %x\n",
		head->config & ED_FUNC_MASK, virt_to_phys(first_td));

	/* activate schedule */
	OHCI_INST(dev->controller)->opreg->HcControlHeadED = virt_to_phys(head);
	OHCI_INST(dev->controller)->opreg->HcControl |= ControlListEnable;
	OHCI_INST(dev->controller)->opreg->HcCommandStatus = ControlListFilled;

	int failure = wait_for_ed(dev, head,
				  (dalen==0)?0:(last_page - first_page + 1));
	/* Wait some frames before and one after disabling list access. */
	mdelay(4);
	OHCI_INST(dev->controller)->opreg->HcControl &= ~ControlListEnable;
	mdelay(1);

	/* free memory */
	ohci_free_ed(head);

	return failure;
}

/* finalize == 1: if data is of packet aligned size, add a zero length packet */
static int
ohci_bulk (endpoint_t *ep, int dalen, u8 *data, int finalize)
{
	int i;
	debug("bulk: %x bytes from %x, finalize: %x, maxpacketsize: %x\n", dalen, data, finalize, ep->maxpacketsize);

	td_t *cur, *next;

	// pages are specified as 4K in OHCI, so don't use getpagesize()
	int first_page = (unsigned long)data / 4096;
	int last_page = (unsigned long)(data+dalen-1)/4096;
	if (last_page < first_page) last_page = first_page;
	int pages = (dalen==0)?0:(last_page - first_page + 1);
	int td_count = (pages+1)/2;

	if (finalize && ((dalen % ep->maxpacketsize) == 0)) {
		td_count++;
	}

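	/* A bulk transfer whose length is an exact multiple of maxpacketsize
	 * is terminated by an extra zero-length packet, so one more TD is
	 * reserved for it when the caller asks for finalization. */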
	/* First TD. */
	td_t *const first_td = (td_t *)memalign(sizeof(td_t), sizeof(td_t));
	memset((void *)first_td, 0, sizeof(*first_td));
	cur = next = first_td;

	for (i = 0; i < td_count; ++i) {
		/* Advance to next TD. */
		cur = next;
		cur->config = (ep->direction == IN ? TD_DIRECTION_IN : TD_DIRECTION_OUT) |
			TD_DELAY_INTERRUPT_NOINTR |
			TD_TOGGLE_FROM_ED |
			TD_CC_NOACCESS;
		cur->current_buffer_pointer = virt_to_phys(data);
		pages--;
		if (dalen == 0) {
			/* magic TD for empty packet transfer */
			cur->current_buffer_pointer = 0;
			cur->buffer_end = 0;
			/* assert((pages == 0) && finalize); */
		}
		int consumed = (4096 - ((unsigned long)data % 4096));
		if (consumed >= dalen) {
			// end of data is within same page
			cur->buffer_end = virt_to_phys(data + dalen - 1);
			dalen = 0;
			/* assert(pages == finalize); */
		} else {
			dalen -= consumed;
			data += consumed;
			pages--;
			int second_page_size = dalen;
			if (dalen > 4096) {
				second_page_size = 4096;
			}
			cur->buffer_end = virt_to_phys(data + second_page_size - 1);
			dalen -= second_page_size;
			data += second_page_size;
		}
		/* One more TD. */
		next = (td_t *)memalign(sizeof(td_t), sizeof(td_t));
		memset((void *)next, 0, sizeof(*next));
		/* Linked to the previous. */
		cur->next_td = virt_to_phys(next);
	}

	/* Write done head after last TD. */
	cur->config &= ~TD_DELAY_INTERRUPT_MASK;
	/* Advance to final, dummy TD. */
	cur = next;

	/* Data structures */
	ed_t *head = memalign(sizeof(ed_t), sizeof(ed_t));
	memset((void*)head, 0, sizeof(*head));
	head->config = (ep->dev->address << ED_FUNC_SHIFT) |
		((ep->endpoint & 0xf) << ED_EP_SHIFT) |
		(((ep->direction==IN)?OHCI_IN:OHCI_OUT) << ED_DIR_SHIFT) |
		(ep->dev->speed?ED_LOWSPEED:0) |
		(ep->maxpacketsize << ED_MPS_SHIFT);
	head->tail_pointer = virt_to_phys(cur);
	head->head_pointer = virt_to_phys(first_td) | (ep->toggle?ED_TOGGLE:0);

	debug("doing bulk transfer with %x(%x). first_td at %x, last %x\n",
		head->config & ED_FUNC_MASK,
		(head->config & ED_EP_MASK) >> ED_EP_SHIFT,
		virt_to_phys(first_td), virt_to_phys(cur));

	/* activate schedule */
	OHCI_INST(ep->dev->controller)->opreg->HcBulkHeadED = virt_to_phys(head);
	OHCI_INST(ep->dev->controller)->opreg->HcControl |= BulkListEnable;
	OHCI_INST(ep->dev->controller)->opreg->HcCommandStatus = BulkListFilled;

	int failure = wait_for_ed(ep->dev, head,
				  (dalen==0)?0:(last_page - first_page + 1));
	/* Wait some frames before and one after disabling list access. */
	mdelay(4);
	OHCI_INST(ep->dev->controller)->opreg->HcControl &= ~BulkListEnable;
	mdelay(1);

	ep->toggle = head->head_pointer & ED_TOGGLE;

	/* free memory */
	ohci_free_ed(head);

	if (failure) {
		/* try cleanup */
		clear_stall(ep);
	}

	return failure;
}

struct _intr_queue;

struct _intrq_td {
	volatile td_t		td;
	u8			*data;
	struct _intrq_td	*next;
	struct _intr_queue	*intrq;
};

struct _intr_queue {
	volatile ed_t		ed;
	struct _intrq_td	*head;
	struct _intrq_td	*tail;
	u8			*data;
	int			reqsize;
	endpoint_t		*endp;
	unsigned int		remaining_tds;
};

typedef struct _intrq_td intrq_td_t;
typedef struct _intr_queue intr_queue_t;

#define INTRQ_TD_FROM_TD(x) ((intrq_td_t *)x)
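/* This cast is only valid because the hardware td_t is the first member of
 * struct _intrq_td: a TD address handed back by the controller can be
 * converted directly into a pointer to its containing driver structure. */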

static void
ohci_fill_intrq_td(intrq_td_t *const td, intr_queue_t *const intrq,
		   u8 *const data)
{
	memset(td, 0, sizeof(*td));
	td->td.config = TD_QUEUETYPE_INTR |
		(intrq->endp->direction == IN
			? TD_DIRECTION_IN : TD_DIRECTION_OUT) |
		TD_DELAY_INTERRUPT_ZERO |
		TD_TOGGLE_FROM_ED |
		TD_CC_NOACCESS;
	td->td.current_buffer_pointer = virt_to_phys(data);
	td->td.buffer_end = td->td.current_buffer_pointer + intrq->reqsize - 1;
	td->intrq = intrq;
	td->data = data;
}

/* create and hook-up an intr queue into device schedule */
static void *
ohci_create_intr_queue(endpoint_t *const ep, const int reqsize,
		       const int reqcount, const int reqtiming)
{
	int i;
	intrq_td_t *first_td = NULL, *last_td = NULL;

	if (reqsize > 4096)
		return NULL;

	intr_queue_t *const intrq =
		(intr_queue_t *)memalign(sizeof(intrq->ed), sizeof(*intrq));
	memset(intrq, 0, sizeof(*intrq));
	intrq->data = (u8 *)malloc(reqcount * reqsize);
	intrq->reqsize = reqsize;
	intrq->endp = ep;

	/* Create #reqcount TDs. */
	u8 *cur_data = intrq->data;
	for (i = 0; i < reqcount; ++i) {
		intrq_td_t *const td = memalign(sizeof(td->td), sizeof(*td));
		++intrq->remaining_tds;
		ohci_fill_intrq_td(td, intrq, cur_data);
		cur_data += reqsize;
		if (!first_td)
			first_td = td;
		else
			last_td->td.next_td = virt_to_phys(&td->td);
		last_td = td;
	}

	/* Create last, dummy TD. */
	intrq_td_t *dummy_td = memalign(sizeof(dummy_td->td), sizeof(*dummy_td));
	memset(dummy_td, 0, sizeof(*dummy_td));
	dummy_td->intrq = intrq;
	if (last_td)
		last_td->td.next_td = virt_to_phys(&dummy_td->td);
	last_td = dummy_td;

	/* Initialize ED. */
	intrq->ed.config = (ep->dev->address << ED_FUNC_SHIFT) |
		((ep->endpoint & 0xf) << ED_EP_SHIFT) |
		(((ep->direction == IN) ? OHCI_IN : OHCI_OUT) << ED_DIR_SHIFT) |
		(ep->dev->speed ? ED_LOWSPEED : 0) |
		(ep->maxpacketsize << ED_MPS_SHIFT);
	intrq->ed.tail_pointer = virt_to_phys(last_td);
	intrq->ed.head_pointer = virt_to_phys(first_td) |
		(ep->toggle ? ED_TOGGLE : 0);

	/* Insert ED into periodic table. */
	int nothing_placed = 1;
	ohci_t *const ohci = OHCI_INST(ep->dev->controller);
	u32 *const intr_table = ohci->hcca->HccaInterruptTable;
	const u32 dummy_ptr = virt_to_phys(ohci->periodic_ed);
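	/* Try to place the ED every `reqtiming` frames: slots still pointing
	 * at the dummy periodic ED are free, occupied slots are skipped, so
	 * the effective polling interval may end up slightly longer than
	 * requested. */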
	for (i = 0; i < 32; i += reqtiming) {
		/* Advance to the next free position. */
		while ((i < 32) && (intr_table[i] != dummy_ptr)) ++i;
		if (i < 32) {
			intr_table[i] = virt_to_phys(&intrq->ed);
			nothing_placed = 0;
		}
	}
	if (nothing_placed) {
		printf("Error: Failed to place ohci interrupt endpoint "
			"descriptor into periodic table: no space left\n");
		ohci_destroy_intr_queue(ep, intrq);
		return NULL;
	}

	return intrq;
}

/* remove queue from device schedule, dropping all data that came in */
static void
ohci_destroy_intr_queue(endpoint_t *const ep, void *const q_)
{
	intr_queue_t *const intrq = (intr_queue_t *)q_;

	int i;

	/* Remove interrupt queue from periodic table. */
	ohci_t *const ohci = OHCI_INST(ep->dev->controller);
	u32 *const intr_table = ohci->hcca->HccaInterruptTable;
	for (i = 0; i < 32; ++i) {
		if (intr_table[i] == virt_to_phys(intrq))
			intr_table[i] = virt_to_phys(ohci->periodic_ed);
	}
	/* Wait for frame to finish. */
	mdelay(1);

	/* Save data toggle before the queue is torn down. */
	ep->toggle = intrq->ed.head_pointer & ED_TOGGLE;

	/* Free unprocessed TDs. */
	while ((intrq->ed.head_pointer & ~0x3) != intrq->ed.tail_pointer) {
		td_t *const cur_td =
			(td_t *)phys_to_virt(intrq->ed.head_pointer & ~0x3);
		intrq->ed.head_pointer = cur_td->next_td;
		free(INTRQ_TD_FROM_TD(cur_td));
		--intrq->remaining_tds;
	}
	/* Free final, dummy TD. */
	free(phys_to_virt(intrq->ed.head_pointer & ~0x3));
	/* Free data buffer. */
	free(intrq->data);

	/* Process done queue and free processed TDs. */
	ohci_process_done_queue(ohci, 1);
	while (intrq->head) {
		intrq_td_t *const cur_td = intrq->head;
		intrq->head = intrq->head->next;
		free(cur_td);
		--intrq->remaining_tds;
	}
	if (intrq->remaining_tds) {
		printf("error: ohci_destroy_intr_queue(): "
			"freed all but %d TDs.\n", intrq->remaining_tds);
	}

	/* Free the queue itself last; the toggle was already saved above, so
	 * intrq is not touched after this point. */
	free(intrq);
}

/* read one intr-packet from queue, if available. extend the queue for new input.
   return NULL if nothing new available.
   Recommended use: while (data=poll_intr_queue(q)) process(data);
 */
static u8 *
ohci_poll_intr_queue(void *const q_)
{
	intr_queue_t *const intrq = (intr_queue_t *)q_;

	u8 *data = NULL;

	/* Process done queue first, then check if we have work to do. */
	ohci_process_done_queue(OHCI_INST(intrq->endp->dev->controller), 0);

	if (intrq->head) {
		/* Save pointer to processed TD and advance. */
		intrq_td_t *const cur_td = intrq->head;
		intrq->head = cur_td->next;

		/* Return data buffer of this TD. */
		data = cur_td->data;

		/* Requeue this TD (i.e. copy to dummy and requeue as dummy). */
		intrq_td_t *const dummy_td =
			INTRQ_TD_FROM_TD(phys_to_virt(intrq->ed.tail_pointer));
		ohci_fill_intrq_td(dummy_td, intrq, cur_td->data);
		/* Reset all but intrq pointer (i.e. init as dummy). */
		memset(cur_td, 0, sizeof(*cur_td));
		cur_td->intrq = intrq;
		/* Insert into interrupt queue as dummy. */
		dummy_td->td.next_td = virt_to_phys(&cur_td->td);
		intrq->ed.tail_pointer = virt_to_phys(&cur_td->td);
	}

	return data;
}

static void
ohci_process_done_queue(ohci_t *const ohci, const int spew_debug)
{
	int i, j;

	/* Temporary queue of interrupt queue TDs (to reverse order). */
	intrq_td_t *temp_tdq = NULL;
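	/* The controller prepends each retired TD to the done queue, so the
	 * list arrives in reverse order of completion.  Interrupt-queue TDs
	 * are pushed onto temp_tdq below, which reverses them once more into
	 * chronological order before they are appended to their queues. */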

	/* Check if done head has been written. */
	if (!(ohci->opreg->HcInterruptStatus & WritebackDoneHead))
		return;
	/* Fetch current done head.
	   The LSB is only of interest for hardware interrupts. */
	u32 phys_done_queue = ohci->hcca->HccaDoneHead & ~1;
	/* Tell the host controller that it may overwrite the done head pointer again. */
	ohci->opreg->HcInterruptStatus = WritebackDoneHead;

	i = 0;
	/* Process done queue (it's in reversed order). */
	while (phys_done_queue) {
		td_t *const done_td = (td_t *)phys_to_virt(phys_done_queue);

		/* Advance pointer to next TD. */
		phys_done_queue = done_td->next_td;

		switch (done_td->config & TD_QUEUETYPE_MASK) {
		case TD_QUEUETYPE_ASYNC:
			/* Free processed async TDs. */
			free((void *)done_td);
			break;
		case TD_QUEUETYPE_INTR:
			/* Save done TD if it comes from an interrupt queue. */
			INTRQ_TD_FROM_TD(done_td)->next = temp_tdq;
			temp_tdq = INTRQ_TD_FROM_TD(done_td);
			break;
		default:
			break;
		}
		++i;
	}
	if (spew_debug)
		debug("Processed %d done TDs.\n", i);

	j = 0;
	/* Process interrupt queue TDs in right order. */
	while (temp_tdq) {
		/* Save pointer of current TD and advance. */
		intrq_td_t *const cur_td = temp_tdq;
		temp_tdq = temp_tdq->next;

		/* The interrupt queue for the current TD. */
		intr_queue_t *const intrq = cur_td->intrq;
		/* Append to interrupt queue. */
		if (!intrq->head) {
			/* First element. */
			intrq->head = intrq->tail = cur_td;
		} else {
			/* Insert at tail. */
			intrq->tail->next = cur_td;
			intrq->tail = cur_td;
		}
		/* It's always the last element. */
		cur_td->next = NULL;
		++j;
	}
	if (spew_debug)
		debug("processed %d done tds, %d intr tds thereof.\n", i, j);
}