Minor fixes to comments.
[AROS.git] / rom / usb / pciusb / uhcichip.c
blob65c8bb13f0c75f0a60f61fe314a2212ddb0f47a1
1 /*
2 Copyright © 2010-2011, The AROS Development Team. All rights reserved
3 $Id$
4 */
7 #include <proto/exec.h>
8 #include <proto/oop.h>
9 #include <hidd/pci.h>
11 #include <devices/usb_hub.h>
13 #include "uhwcmd.h"
15 #undef HiddPCIDeviceAttrBase
16 #define HiddPCIDeviceAttrBase (hd->hd_HiddPCIDeviceAB)
17 #undef HiddAttrBase
18 #define HiddAttrBase (hd->hd_HiddAB)
20 static AROS_INTH1(UhciResetHandler, struct PCIController *, hc)
22 AROS_INTFUNC_INIT
24 // stop controller and disable all interrupts
25 WRITEIO16_LE(hc->hc_RegBase, UHCI_USBCMD, 0);
26 WRITEIO16_LE(hc->hc_RegBase, UHCI_USBINTEN, 0);
28 return FALSE;
30 AROS_INTFUNC_EXIT
33 void uhciFreeQContext(struct PCIController *hc, struct UhciQH *uqh) {
35 struct UhciTD *utd = NULL;
36 struct UhciTD *nextutd;
38 KPRINTF(5, ("Unlinking QContext %08lx\n", uqh));
39 // unlink from schedule
40 uqh->uqh_Pred->uxx_Link = uqh->uqh_Succ->uxx_Self;
41 SYNC;
43 uqh->uqh_Succ->uxx_Pred = uqh->uqh_Pred;
44 uqh->uqh_Pred->uxx_Succ = uqh->uqh_Succ;
45 SYNC;
47 nextutd = uqh->uqh_FirstTD;
48 while(nextutd)
50 KPRINTF(1, ("FreeTD %08lx\n", nextutd));
51 utd = nextutd;
52 nextutd = (struct UhciTD *) utd->utd_Succ;
53 uhciFreeTD(hc, utd);
55 uhciFreeQH(hc, uqh);
58 void uhciUpdateIntTree(struct PCIController *hc) {
60 struct UhciXX *uxx;
61 struct UhciXX *preduxx;
62 struct UhciXX *lastuseduxx;
63 UWORD cnt;
65 // optimize linkage between queue heads
66 preduxx = lastuseduxx = (struct UhciXX *) hc->hc_UhciCtrlQH; //hc->hc_UhciIsoTD;
67 for(cnt = 0; cnt < 9; cnt++)
69 uxx = (struct UhciXX *) hc->hc_UhciIntQH[cnt];
70 if(uxx->uxx_Succ != preduxx)
72 lastuseduxx = uxx->uxx_Succ;
74 uxx->uxx_Link = lastuseduxx->uxx_Self;
75 preduxx = uxx;
79 void uhciCheckPortStatusChange(struct PCIController *hc) {
81 struct PCIUnit *unit = hc->hc_Unit;
82 UWORD oldval;
83 UWORD hciport;
85 // check for port status change for UHCI and frame rollovers
87 for(hciport = 0; hciport < 2; hciport++)
89 UWORD portreg;
90 UWORD idx = hc->hc_PortNum20[hciport];
91 // don't pay attention to UHCI port changes when pwned by EHCI
92 if(!unit->hu_EhciOwned[idx])
94 portreg = hciport ? UHCI_PORT2STSCTRL : UHCI_PORT1STSCTRL;
95 oldval = READIO16_LE(hc->hc_RegBase, portreg);
96 if(oldval & UHPF_ENABLECHANGE)
98 KPRINTF(10, ("Port %ld (%ld) Enable changed\n", idx, hciport));
99 hc->hc_PortChangeMap[hciport] |= UPSF_PORT_ENABLE;
101 if(oldval & UHPF_CONNECTCHANGE)
103 KPRINTF(10, ("Port %ld (%ld) Connect changed\n", idx, hciport));
104 hc->hc_PortChangeMap[hciport] |= UPSF_PORT_CONNECTION;
105 if(!(oldval & UHPF_PORTCONNECTED))
107 if(unit->hu_PortMap20[idx])
109 KPRINTF(20, ("Transferring Port %ld back to EHCI\n", idx));
110 unit->hu_EhciOwned[idx] = TRUE;
114 if(oldval & UHPF_RESUMEDTX)
116 KPRINTF(10, ("Port %ld (%ld) Resume changed\n", idx, hciport));
117 hc->hc_PortChangeMap[hciport] |= UPSF_PORT_SUSPEND|UPSF_PORT_ENABLE;
118 oldval &= ~UHPF_RESUMEDTX;
120 if(hc->hc_PortChangeMap[hciport])
122 unit->hu_RootPortChanges |= 1UL<<(idx+1);
123 /*KPRINTF(10, ("Port %ld (%ld) contributes %04lx to portmap %04lx\n",
124 idx, hciport, hc->hc_PortChangeMap[hciport], unit->hu_RootPortChanges));*/
126 WRITEIO16_LE(hc->hc_RegBase, portreg, oldval);
// uhciHandleFinishedTDs:
// Walks the pending transfer queue (hc_TDQueue), detects which queue
// contexts the controller has finished (or failed), accounts transferred
// bytes, maps UHCI status bits to io_Error codes, maintains the per-endpoint
// data toggle, frees the completed queue context, and either replies the
// IORequest or re-queues it for the next fragment.
// NOTE(review): extracted listing is missing brace-only lines; code lines
// kept verbatim, comments only added.
131 void uhciHandleFinishedTDs(struct PCIController *hc) {
133 struct PCIUnit *unit = hc->hc_Unit;
134 struct IOUsbHWReq *ioreq;
135 struct IOUsbHWReq *nextioreq;
136 struct UhciQH *uqh;
137 struct UhciTD *utd;
138 struct UhciTD *nextutd;
139 UWORD devadrep;
140 ULONG len;
141 ULONG linkelem;
142 UWORD inspect;
143 BOOL shortpkt;
144 ULONG ctrlstatus;
145 ULONG nextctrlstatus = 0;
146 ULONG token = 0;
147 ULONG actual;
148 BOOL updatetree = FALSE;
149 BOOL fixsetupterm = FALSE;
151 KPRINTF(1, ("Checking for work done...\n"));
152 ioreq = (struct IOUsbHWReq *) hc->hc_TDQueue.lh_Head;
153 while((nextioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ))
155 uqh = (struct UhciQH *) ioreq->iouh_DriverPrivate1;
156 if(uqh)
158 KPRINTF(1, ("Examining IOReq=%08lx with UQH=%08lx\n", ioreq, uqh));
159 linkelem = READMEM32_LE(&uqh->uqh_Element);
// inspect: 0 = still active/error handled, 1 = done (walk TD list),
// 2 = QH terminated (all TDs consumed), 3 = babble retry (bail out below)
160 inspect = 0;
161 devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint + ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
162 if(linkelem & UHCI_TERMINATE)
164 KPRINTF(1, ("UQH terminated %08lx\n", linkelem));
165 inspect = 2;
166 } else {
167 utd = (struct UhciTD *) ((linkelem & UHCI_PTRMASK) - hc->hc_PCIVirtualAdjust - 16); // struct UhciTD starts 16 bytes before physical TD
168 ctrlstatus = READMEM32_LE(&utd->utd_CtrlStatus);
169 nextutd = (struct UhciTD *)utd->utd_Succ;
170 if(!(ctrlstatus & UTCF_ACTIVE) && nextutd)
172 /* OK, it's not active. Does it look like it's done? Code copied from below.
173 If not done, check the next TD too. */
174 if(ctrlstatus & (UTSF_BABBLE|UTSF_STALLED|UTSF_CRCTIMEOUT|UTSF_DATABUFFERERR|UTSF_BITSTUFFERR))
176 nextutd = 0;
178 else
180 token = READMEM32_LE(&utd->utd_Token);
181 len = (ctrlstatus & UTSM_ACTUALLENGTH) >> UTSS_ACTUALLENGTH;
182 if((len != (token & UTTM_TRANSLENGTH) >> UTTS_TRANSLENGTH))
184 nextutd = 0;
187 if(nextutd)
189 nextctrlstatus = READMEM32_LE(&nextutd->utd_CtrlStatus);
192 /* Now, did the element link pointer change while we fetched the status for the pointed at TD?
193 If so, disregard the gathered information and assume still active. */
194 if(READMEM32_LE(&uqh->uqh_Element) != linkelem)
196 /* Oh well, probably still active */
197 KPRINTF(1, ("Link Element changed, still active.\n"));
199 else if(!(ctrlstatus & UTCF_ACTIVE) && (nextutd == 0 || !(nextctrlstatus & UTCF_ACTIVE)))
201 KPRINTF(1, ("CtrlStatus inactive %08lx\n", ctrlstatus));
202 inspect = 1;
// NAK timeout: the frame counter passed the deadline set when scheduling
204 else if(unit->hu_NakTimeoutFrame[devadrep] && (hc->hc_FrameCounter > unit->hu_NakTimeoutFrame[devadrep]))
206 ioreq->iouh_Req.io_Error = UHIOERR_NAKTIMEOUT;
207 inspect = 1;
210 fixsetupterm = FALSE;
211 if(inspect)
213 APTR data = &((UBYTE *)ioreq->iouh_Data)[ioreq->iouh_Actual];
214 shortpkt = FALSE;
215 if(inspect < 2) // if all went okay, don't traverse list, assume all bytes successfully transferred
// Walk the TD chain, summing up actually transferred bytes and
// translating any error status bits into io_Error.
217 utd = uqh->uqh_FirstTD;
218 actual = 0;
221 ctrlstatus = READMEM32_LE(&utd->utd_CtrlStatus);
222 if(ctrlstatus & UTCF_ACTIVE)
224 KPRINTF(20, ("Internal error! Still active?!\n"));
225 if(ctrlstatus & UTSF_BABBLE)
227 KPRINTF(200, ("HOST CONTROLLER IS DEAD!!!\n"));
// attempt to revive a babble-killed controller by resetting it
228 ioreq->iouh_Req.io_Error = UHIOERR_HOSTERROR;
229 WRITEIO16_LE(hc->hc_RegBase, UHCI_USBCMD, UHCF_HCRESET|UHCF_MAXPACKET64|UHCF_CONFIGURE|UHCF_RUNSTOP);
230 inspect = 0;
231 break;
233 break;
235 token = READMEM32_LE(&utd->utd_Token);
236 KPRINTF(1, ("TD=%08lx CS=%08lx Token=%08lx\n", utd, ctrlstatus, token));
237 if(ctrlstatus & (UTSF_BABBLE|UTSF_STALLED|UTSF_CRCTIMEOUT|UTSF_DATABUFFERERR|UTSF_BITSTUFFERR))
239 if(ctrlstatus & UTSF_BABBLE)
241 KPRINTF(20, ("Babble error %08lx/%08lx\n", ctrlstatus, token));
242 ioreq->iouh_Req.io_Error = UHIOERR_OVERFLOW;
243 #if 0
244 // VIA chipset seems to die on babble!?!
245 KPRINTF(10, ("HW Regs USBCMD=%04lx\n", READIO16_LE(hc->hc_RegBase, UHCI_USBCMD)));
246 WRITEIO16_LE(hc->hc_RegBase, UHCI_USBCMD, UHCF_MAXPACKET64|UHCF_CONFIGURE|UHCF_RUNSTOP);
247 SYNC;
248 #endif
249 //retry
250 //ctrlstatus &= ~(UTSF_BABBLE|UTSF_STALLED|UTSF_CRCTIMEOUT|UTSF_DATABUFFERERR|UTSF_BITSTUFFERR|UTSF_NAK);
// re-arm the TD and retry instead of failing the request
251 ctrlstatus |= UTCF_ACTIVE;
252 WRITEMEM32_LE(&utd->utd_CtrlStatus, ctrlstatus);
253 SYNC;
254 inspect = 3;
255 break;
257 else if(ctrlstatus & UTSF_CRCTIMEOUT)
259 KPRINTF(20, ("CRC/Timeout error IOReq=%08lx DIR=%ld\n", ioreq, ioreq->iouh_Dir));
260 if(ctrlstatus & UTSF_STALLED)
262 ioreq->iouh_Req.io_Error = UHIOERR_TIMEOUT;
263 } else {
264 ioreq->iouh_Req.io_Error = (ioreq->iouh_Dir == UHDIR_IN) ? UHIOERR_CRCERROR : UHIOERR_TIMEOUT;
267 else if(ctrlstatus & UTSF_STALLED)
269 KPRINTF(20, ("STALLED!\n"));
270 ioreq->iouh_Req.io_Error = UHIOERR_STALL;
272 else if(ctrlstatus & UTSF_BITSTUFFERR)
274 KPRINTF(20, ("Bitstuff error\n"));
275 ioreq->iouh_Req.io_Error = UHIOERR_CRCERROR;
277 else if(ctrlstatus & UTSF_DATABUFFERERR)
279 KPRINTF(20, ("Databuffer error\n"));
280 ioreq->iouh_Req.io_Error = UHIOERR_HOSTERROR;
282 inspect = 0;
283 break;
285 if(unit->hu_NakTimeoutFrame[devadrep] && (hc->hc_FrameCounter > unit->hu_NakTimeoutFrame[devadrep]) && (ctrlstatus & UTSF_NAK))
287 ioreq->iouh_Req.io_Error = UHIOERR_NAKTIMEOUT;
288 inspect = 0;
// compare actual length against requested length to detect short packets
291 len = (ctrlstatus & UTSM_ACTUALLENGTH)>>UTSS_ACTUALLENGTH;
292 if((len != (token & UTTM_TRANSLENGTH)>>UTTS_TRANSLENGTH))
294 shortpkt = TRUE;
296 len = (len+1) & 0x7ff; // get real length
297 if((token & UTTM_PID)>>UTTS_PID != PID_SETUP) // don't count setup packet
299 actual += len;
300 // due to the VIA babble bug workaround, actually more bytes can
301 // be received than requested, limit the actual value to the upper limit
302 if(actual > uqh->uqh_Actual)
304 actual = uqh->uqh_Actual;
307 if(shortpkt)
309 break;
311 } while((utd = (struct UhciTD *) utd->utd_Succ));
312 if(inspect == 3)
314 // bail out from babble
315 ioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ;
316 continue;
318 if((actual < uqh->uqh_Actual) && (!ioreq->iouh_Req.io_Error) && (!(ioreq->iouh_Flags & UHFF_ALLOWRUNTPKTS)))
320 KPRINTF(10, ("Short packet: %ld < %ld\n", actual, ioreq->iouh_Length));
321 ioreq->iouh_Req.io_Error = UHIOERR_RUNTPACKET;
323 } else {
324 KPRINTF(10, ("all %ld bytes transferred\n", uqh->uqh_Actual));
325 actual = uqh->uqh_Actual;
327 ioreq->iouh_Actual += actual;
328 // due to the short packet, the terminal of a setup packet has not been sent. Please do so.
329 if(shortpkt && (ioreq->iouh_Req.io_Command == UHCMD_CONTROLXFER))
331 fixsetupterm = TRUE;
333 // this is actually no short packet but result of the VIA babble fix
334 if(shortpkt && (ioreq->iouh_Actual == ioreq->iouh_Length))
336 shortpkt = FALSE;
// transfer context is done with: mark endpoint free and tear it down
338 unit->hu_DevBusyReq[devadrep] = NULL;
339 Remove(&ioreq->iouh_Req.io_Message.mn_Node);
340 if (uqh->uqh_DataBuffer)
341 usbReleaseBuffer(uqh->uqh_DataBuffer, data, actual, ioreq->iouh_Dir);
342 if (uqh->uqh_SetupBuffer)
343 usbReleaseBuffer(uqh->uqh_SetupBuffer, &ioreq->iouh_SetupData, sizeof(ioreq->iouh_SetupData), (ioreq->iouh_SetupData.bmRequestType & URTF_IN) ? UHDIR_IN : UHDIR_OUT);
344 uhciFreeQContext(hc, uqh);
345 if(ioreq->iouh_Req.io_Command == UHCMD_INTXFER)
347 updatetree = TRUE;
349 if(inspect)
351 if(inspect < 2) // otherwise, toggle will be right already
353 // use next data toggle bit based on last successful transaction
354 unit->hu_DevDataToggle[devadrep] = (token & UTTF_DATA1) ? FALSE : TRUE;
356 if((!shortpkt && (ioreq->iouh_Actual < ioreq->iouh_Length)) || fixsetupterm)
358 // fragmented, do some more work
359 switch(ioreq->iouh_Req.io_Command)
361 case UHCMD_CONTROLXFER:
362 KPRINTF(10, ("Rescheduling CtrlTransfer at %ld of %ld\n", ioreq->iouh_Actual, ioreq->iouh_Length));
363 AddHead(&hc->hc_CtrlXFerQueue, (struct Node *) ioreq);
364 break;
366 case UHCMD_INTXFER:
367 KPRINTF(10, ("Rescheduling IntTransfer at %ld of %ld\n", ioreq->iouh_Actual, ioreq->iouh_Length));
368 AddHead(&hc->hc_IntXFerQueue, (struct Node *) ioreq);
369 break;
371 case UHCMD_BULKXFER:
372 KPRINTF(10, ("Rescheduling BulkTransfer at %ld of %ld\n", ioreq->iouh_Actual, ioreq->iouh_Length));
373 AddHead(&hc->hc_BulkXFerQueue, (struct Node *) ioreq);
374 break;
376 default:
377 KPRINTF(10, ("Uhm, internal error, dunno where to queue this req\n"));
378 ReplyMsg(&ioreq->iouh_Req.io_Message);
380 } else {
381 // check for successful clear feature and set address ctrl transfers
382 if(ioreq->iouh_Req.io_Command == UHCMD_CONTROLXFER)
384 uhwCheckSpecialCtrlTransfers(hc, ioreq);
386 ReplyMsg(&ioreq->iouh_Req.io_Message);
388 } else {
389 // be sure to save the data toggle bit where the error occurred
390 unit->hu_DevDataToggle[devadrep] = (token & UTTF_DATA1) ? TRUE : FALSE;
391 ReplyMsg(&ioreq->iouh_Req.io_Message);
394 } else {
395 KPRINTF(20, ("IOReq=%08lx has no UQH!\n", ioreq));
397 ioreq = nextioreq;
// re-optimize the interrupt tree only once, after all INT QHs were freed
399 if(updatetree)
401 KPRINTF(10, ("Updating Tree\n"));
402 uhciUpdateIntTree(hc);
// uhciScheduleCtrlTDs:
// Drains hc_CtrlXFerQueue: for each control request whose endpoint is free,
// builds a queue head with SETUP TD, a chain of data TDs (bounded by
// UHCI_TD_CTRL_LIMIT, so long transfers are fragmented and resumed later)
// and a status/TERM TD, then links the QH into the schedule behind the
// control queue head.
// NOTE(review): extracted listing is missing brace-only lines; code lines
// kept verbatim, comments only added.
406 void uhciScheduleCtrlTDs(struct PCIController *hc) {
408 struct PCIUnit *unit = hc->hc_Unit;
409 struct IOUsbHWReq *ioreq;
410 UWORD devadrep;
411 struct UhciQH *uqh;
412 struct UhciTD *setuputd;
413 struct UhciTD *datautd;
414 struct UhciTD *termutd;
415 struct UhciTD *predutd;
416 ULONG actual;
417 ULONG ctrlstatus;
418 ULONG token;
419 ULONG len;
420 ULONG phyaddr;
421 BOOL cont;
423 /* *** CTRL Transfers *** */
424 KPRINTF(1, ("Scheduling new CTRL transfers...\n"));
425 ioreq = (struct IOUsbHWReq *) hc->hc_CtrlXFerQueue.lh_Head;
426 while(((struct Node *) ioreq)->ln_Succ)
428 devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint;
429 KPRINTF(10, ("New CTRL transfer to %ld.%ld: %ld bytes\n", ioreq->iouh_DevAddr, ioreq->iouh_Endpoint, ioreq->iouh_Length));
430 /* is endpoint already in use or do we have to wait for next transaction */
431 if(unit->hu_DevBusyReq[devadrep])
433 KPRINTF(5, ("Endpoint %02lx in use!\n", devadrep));
434 ioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ;
435 continue;
// allocate QH + SETUP TD + TERM TD up front; on any failure undo the
// earlier allocations and retry on the next scheduler run
438 uqh = uhciAllocQH(hc);
439 if(!uqh)
441 break;
444 setuputd = uhciAllocTD(hc);
445 if(!setuputd)
447 uhciFreeQH(hc, uqh);
448 break;
450 termutd = uhciAllocTD(hc);
451 if(!termutd)
453 uhciFreeTD(hc, setuputd);
454 uhciFreeQH(hc, uqh);
455 break;
457 uqh->uqh_IOReq = ioreq;
459 //termutd->utd_QueueHead = setuputd->utd_QueueHead = uqh;
461 KPRINTF(1, ("SetupTD=%08lx, TermTD=%08lx\n", setuputd, termutd));
463 // fill setup td
464 ctrlstatus = UTCF_ACTIVE|UTCF_3ERRORSLIMIT;
465 if(ioreq->iouh_Flags & UHFF_LOWSPEED)
467 KPRINTF(5, ("*** LOW SPEED ***\n"));
468 ctrlstatus |= UTCF_LOWSPEED;
470 token = (ioreq->iouh_DevAddr<<UTTS_DEVADDR)|(ioreq->iouh_Endpoint<<UTTS_ENDPOINT);
471 //setuputd->utd_Pred = NULL;
472 if(ioreq->iouh_Actual)
474 // this is a continuation of a fragmented ctrl transfer!
475 KPRINTF(1, ("Continuing FRAGMENT at %ld of %ld\n", ioreq->iouh_Actual, ioreq->iouh_Length));
476 cont = TRUE;
477 } else {
478 cont = FALSE;
479 uqh->uqh_FirstTD = setuputd;
480 uqh->uqh_Element = setuputd->utd_Self; // start of queue
481 uqh->uqh_SetupBuffer = usbGetBuffer(&ioreq->iouh_SetupData, sizeof(ioreq->iouh_SetupData), (ioreq->iouh_SetupData.bmRequestType & URTF_IN) ? UHDIR_IN : UHDIR_OUT);
482 WRITEMEM32_LE(&setuputd->utd_CtrlStatus, ctrlstatus);
483 WRITEMEM32_LE(&setuputd->utd_Token, (PID_SETUP<<UTTS_PID)|token|(7<<UTTS_TRANSLENGTH)|UTTF_DATA0);
484 WRITEMEM32_LE(&setuputd->utd_BufferPtr, (ULONG) (IPTR) pciGetPhysical(hc, uqh->uqh_SetupBuffer));
// data phase direction follows the request's bmRequestType IN bit
487 token |= (ioreq->iouh_SetupData.bmRequestType & URTF_IN) ? PID_IN : PID_OUT;
488 predutd = setuputd;
489 actual = ioreq->iouh_Actual;
491 if(ioreq->iouh_Length - actual)
493 ctrlstatus |= UTCF_SHORTPACKET;
494 if(cont)
496 if(!unit->hu_DevDataToggle[devadrep])
498 // continue with data toggle 0
499 token |= UTTF_DATA1;
501 } else {
502 ioreq->iouh_Actual=0;
504 uqh->uqh_DataBuffer = usbGetBuffer(&(((UBYTE *) ioreq->iouh_Data)[ioreq->iouh_Actual]), ioreq->iouh_Length - actual, ioreq->iouh_Dir);
505 phyaddr = (ULONG)(IPTR)pciGetPhysical(hc, uqh->uqh_DataBuffer);
// build the data TD chain, one MaxPktSize packet per TD
508 datautd = uhciAllocTD(hc);
509 if(!datautd)
511 break;
513 token ^= UTTF_DATA1; // toggle bit
514 predutd->utd_Link = datautd->utd_Self;
515 predutd->utd_Succ = (struct UhciXX *) datautd;
516 //datautd->utd_Pred = (struct UhciXX *) predutd;
517 //datautd->utd_QueueHead = uqh;
518 len = ioreq->iouh_Length - actual;
519 if(len > ioreq->iouh_MaxPktSize)
521 len = ioreq->iouh_MaxPktSize;
523 WRITEMEM32_LE(&datautd->utd_CtrlStatus, ctrlstatus);
524 #if 1
525 /* FIXME: This workaround for a VIA babble bug will potentially overwrite innocent memory (very rarely), but will avoid the host controller dropping dead completely. */
526 if((len < ioreq->iouh_MaxPktSize) && (ioreq->iouh_SetupData.bmRequestType & URTF_IN))
528 WRITEMEM32_LE(&datautd->utd_Token, token|((ioreq->iouh_MaxPktSize-1)<<UTTS_TRANSLENGTH)); // no masking need here as len is always >= 1
529 } else {
530 WRITEMEM32_LE(&datautd->utd_Token, token|((len-1)<<UTTS_TRANSLENGTH)); // no masking need here as len is always >= 1
532 #else
533 WRITEMEM32_LE(&datautd->utd_Token, token|((len-1)<<UTTS_TRANSLENGTH)); // no masking need here as len is always >= 1
534 #endif
535 WRITEMEM32_LE(&datautd->utd_BufferPtr, phyaddr);
536 phyaddr += len;
537 actual += len;
538 predutd = datautd;
539 } while((actual < ioreq->iouh_Length) && (actual - ioreq->iouh_Actual < UHCI_TD_CTRL_LIMIT));
540 if(actual == ioreq->iouh_Actual)
542 // not at least one data TD? try again later
543 uhciFreeTD(hc, setuputd);
544 uhciFreeTD(hc, termutd);
545 uhciFreeQH(hc, uqh);
546 break;
548 if(cont)
550 // free Setup packet
551 KPRINTF(1, ("Freeing setup\n"));
552 uqh->uqh_FirstTD = (struct UhciTD *) setuputd->utd_Succ;
553 //uqh->uqh_FirstTD->utd_Pred = NULL;
554 uqh->uqh_Element = setuputd->utd_Succ->uxx_Self; // start of queue after setup packet
555 uhciFreeTD(hc, setuputd);
556 // set toggle for next batch
557 unit->hu_DevDataToggle[devadrep] = (token & UTTF_DATA1) ? FALSE : TRUE;
559 } else {
560 if(cont)
562 // free Setup packet, assign termination as first packet (no data)
563 KPRINTF(1, ("Freeing setup (term only)\n"));
564 uqh->uqh_FirstTD = (struct UhciTD *) termutd;
565 uqh->uqh_Element = termutd->utd_Self; // start of queue after setup packet
566 uhciFreeTD(hc, setuputd);
567 predutd = NULL;
570 uqh->uqh_Actual = actual - ioreq->iouh_Actual;
571 ctrlstatus |= UTCF_READYINTEN;
572 if(actual == ioreq->iouh_Length)
574 // TERM packet
575 KPRINTF(1, ("Activating TERM\n"));
// status stage: always DATA1 and opposite direction of the data stage
576 token |= UTTF_DATA1;
577 token ^= (PID_IN^PID_OUT)<<UTTS_PID;
579 if(predutd)
581 predutd->utd_Link = termutd->utd_Self;
582 predutd->utd_Succ = (struct UhciXX *) termutd;
584 //termutd->utd_Pred = (struct UhciXX *) predutd;
585 WRITEMEM32_LE(&termutd->utd_CtrlStatus, ctrlstatus);
586 WRITEMEM32_LE(&termutd->utd_Token, token|(0x7ff<<UTTS_TRANSLENGTH));
587 CONSTWRITEMEM32_LE(&termutd->utd_Link, UHCI_TERMINATE);
588 termutd->utd_Succ = NULL;
589 //uqh->uqh_LastTD = termutd;
590 } else {
591 KPRINTF(1, ("Setup data phase fragmented\n"));
592 // don't create TERM, we don't know the final data toggle bit
593 // but mark the last data TD for interrupt generation
594 WRITEMEM32_LE(&predutd->utd_CtrlStatus, ctrlstatus);
595 uhciFreeTD(hc, termutd);
596 CONSTWRITEMEM32_LE(&predutd->utd_Link, UHCI_TERMINATE);
597 predutd->utd_Succ = NULL;
598 //uqh->uqh_LastTD = predutd;
601 Remove(&ioreq->iouh_Req.io_Message.mn_Node);
602 ioreq->iouh_DriverPrivate1 = uqh;
604 // manage endpoint going busy
605 unit->hu_DevBusyReq[devadrep] = ioreq;
606 unit->hu_NakTimeoutFrame[devadrep] = (ioreq->iouh_Flags & UHFF_NAKTIMEOUT) ? hc->hc_FrameCounter + ioreq->iouh_NakTimeout : 0;
608 Disable();
609 AddTail(&hc->hc_TDQueue, (struct Node *) ioreq);
611 // looks good to me, now enqueue this entry (just behind the CtrlQH)
612 uqh->uqh_Succ = hc->hc_UhciCtrlQH->uqh_Succ;
613 uqh->uqh_Link = uqh->uqh_Succ->uxx_Self;
614 SYNC;
616 uqh->uqh_Pred = (struct UhciXX *) hc->hc_UhciCtrlQH;
617 uqh->uqh_Succ->uxx_Pred = (struct UhciXX *) uqh;
618 hc->hc_UhciCtrlQH->uqh_Succ = (struct UhciXX *) uqh;
619 hc->hc_UhciCtrlQH->uqh_Link = uqh->uqh_Self;
620 SYNC;
621 Enable();
623 ioreq = (struct IOUsbHWReq *) hc->hc_CtrlXFerQueue.lh_Head;
// uhciScheduleIntTDs:
// Drains hc_IntXFerQueue: builds a TD chain (bounded by UHCI_TD_INT_LIMIT)
// for each interrupt request whose endpoint is free, picks the interrupt
// queue-head level matching the requested polling interval, and links the
// new QH into the schedule behind that level's QH.
// NOTE(review): extracted listing is missing brace-only lines; code lines
// kept verbatim, comments only added.
627 void uhciScheduleIntTDs(struct PCIController *hc) {
629 struct PCIUnit *unit = hc->hc_Unit;
630 struct IOUsbHWReq *ioreq;
631 UWORD cnt;
632 UWORD devadrep;
633 struct UhciQH *uqh;
634 struct UhciQH *intuqh;
635 struct UhciTD *utd;
636 struct UhciTD *predutd;
637 ULONG actual;
638 ULONG ctrlstatus;
639 ULONG token;
640 ULONG len;
641 ULONG phyaddr;
643 /* *** INT Transfers *** */
644 KPRINTF(1, ("Scheduling new INT transfers...\n"));
645 ioreq = (struct IOUsbHWReq *) hc->hc_IntXFerQueue.lh_Head;
646 while(((struct Node *) ioreq)->ln_Succ)
648 devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint + ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
649 KPRINTF(10, ("New INT transfer to %ld.%ld: %ld bytes\n", ioreq->iouh_DevAddr, ioreq->iouh_Endpoint, ioreq->iouh_Length));
650 /* is endpoint already in use or do we have to wait for next transaction */
651 if(unit->hu_DevBusyReq[devadrep])
653 KPRINTF(5, ("Endpoint %02lx in use!\n", devadrep));
654 ioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ;
655 continue;
658 uqh = uhciAllocQH(hc);
659 if(!uqh)
661 break;
664 uqh->uqh_IOReq = ioreq;
666 ctrlstatus = UTCF_ACTIVE|UTCF_1ERRORLIMIT|UTCF_SHORTPACKET;
667 if(ioreq->iouh_Flags & UHFF_LOWSPEED)
669 KPRINTF(5, ("*** LOW SPEED ***\n"));
670 ctrlstatus |= UTCF_LOWSPEED;
672 token = (ioreq->iouh_DevAddr<<UTTS_DEVADDR)|(ioreq->iouh_Endpoint<<UTTS_ENDPOINT);
673 token |= (ioreq->iouh_Dir == UHDIR_IN) ? PID_IN : PID_OUT;
674 predutd = NULL;
675 actual = ioreq->iouh_Actual;
676 uqh->uqh_DataBuffer = usbGetBuffer(&(((UBYTE *) ioreq->iouh_Data)[ioreq->iouh_Actual]), ioreq->iouh_Length - actual, ioreq->iouh_Dir);
677 phyaddr = (ULONG) (IPTR) pciGetPhysical(hc, uqh->uqh_DataBuffer);
// resume the endpoint's current data toggle state
678 if(unit->hu_DevDataToggle[devadrep])
680 // continue with data toggle 1
681 KPRINTF(1, ("Data1\n"));
682 token |= UTTF_DATA1;
683 } else {
684 KPRINTF(1, ("Data0\n"));
// build the TD chain, one MaxPktSize packet per TD
688 utd = uhciAllocTD(hc);
689 if(!utd)
691 break;
693 if(predutd)
695 WRITEMEM32_LE(&predutd->utd_Link, READMEM32_LE(&utd->utd_Self)|UHCI_DFS);
696 predutd->utd_Succ = (struct UhciXX *) utd;
697 //utd->utd_Pred = (struct UhciXX *) predutd;
698 } else {
699 uqh->uqh_FirstTD = utd;
700 uqh->uqh_Element = utd->utd_Self;
701 //utd->utd_Pred = NULL;
703 //utd->utd_QueueHead = uqh;
704 len = ioreq->iouh_Length - actual;
705 if(len > ioreq->iouh_MaxPktSize)
707 len = ioreq->iouh_MaxPktSize;
710 WRITEMEM32_LE(&utd->utd_CtrlStatus, ctrlstatus);
711 WRITEMEM32_LE(&utd->utd_Token, token|(((len-1) & 0x7ff)<<UTTS_TRANSLENGTH));
712 WRITEMEM32_LE(&utd->utd_BufferPtr, phyaddr);
713 phyaddr += len;
714 actual += len;
715 predutd = utd;
716 token ^= UTTF_DATA1; // toggle bit
717 } while((actual < ioreq->iouh_Length) && (actual - ioreq->iouh_Actual < UHCI_TD_INT_LIMIT));
719 if(!utd)
721 // not at least one data TD? try again later
722 uhciFreeQH(hc, uqh);
723 break;
726 uqh->uqh_Actual = actual - ioreq->iouh_Actual;
727 // set toggle for next batch / successful transfer
728 unit->hu_DevDataToggle[devadrep] = (token & UTTF_DATA1) ? TRUE : FALSE;
729 if(unit->hu_DevDataToggle[devadrep])
731 // continue with data toggle 1
732 KPRINTF(1, ("NewData1\n"));
733 } else {
734 KPRINTF(1, ("NewData0\n"));
// terminate the chain and request an interrupt on the last TD
736 ctrlstatus |= UTCF_READYINTEN;
737 WRITEMEM32_LE(&predutd->utd_CtrlStatus, ctrlstatus);
738 CONSTWRITEMEM32_LE(&utd->utd_Link, UHCI_TERMINATE);
739 utd->utd_Succ = NULL;
740 //uqh->uqh_LastTD = utd;
// choose the interrupt tree level whose period matches iouh_Interval
742 if(ioreq->iouh_Interval >= 255)
744 intuqh = hc->hc_UhciIntQH[8]; // 256ms interval
745 } else {
746 cnt = 0;
749 intuqh = hc->hc_UhciIntQH[cnt++];
750 } while(ioreq->iouh_Interval >= (1<<cnt));
751 KPRINTF(1, ("Scheduled at level %ld\n", cnt));
754 Remove(&ioreq->iouh_Req.io_Message.mn_Node);
755 ioreq->iouh_DriverPrivate1 = uqh;
757 // manage endpoint going busy
758 unit->hu_DevBusyReq[devadrep] = ioreq;
759 unit->hu_NakTimeoutFrame[devadrep] = (ioreq->iouh_Flags & UHFF_NAKTIMEOUT) ? hc->hc_FrameCounter + ioreq->iouh_NakTimeout : 0;
761 Disable();
762 AddTail(&hc->hc_TDQueue, (struct Node *) ioreq);
764 // looks good to me, now enqueue this entry (just behind the right IntQH)
765 uqh->uqh_Succ = intuqh->uqh_Succ;
766 uqh->uqh_Link = uqh->uqh_Succ->uxx_Self;
767 SYNC;
769 uqh->uqh_Pred = (struct UhciXX *) intuqh;
770 uqh->uqh_Succ->uxx_Pred = (struct UhciXX *) uqh;
771 intuqh->uqh_Succ = (struct UhciXX *) uqh;
772 intuqh->uqh_Link = uqh->uqh_Self;
773 SYNC;
774 Enable();
776 uhciUpdateIntTree(hc);
778 ioreq = (struct IOUsbHWReq *) hc->hc_IntXFerQueue.lh_Head;
// uhciScheduleBulkTDs:
// Drains hc_BulkXFerQueue: builds a TD chain (bounded by UHCI_TD_BULK_LIMIT,
// long transfers are fragmented), optionally appending a trailing zero-length
// packet for OUT transfers ending exactly on a packet boundary, then links
// the QH into the schedule behind the bulk queue head.
// NOTE(review): extracted listing is missing brace-only lines; code lines
// kept verbatim, comments only added.
782 void uhciScheduleBulkTDs(struct PCIController *hc) {
784 struct PCIUnit *unit = hc->hc_Unit;
785 struct IOUsbHWReq *ioreq;
786 UWORD devadrep;
787 struct UhciQH *uqh;
788 struct UhciTD *utd;
789 struct UhciTD *predutd;
790 ULONG actual;
791 ULONG ctrlstatus;
792 ULONG token;
793 ULONG len;
794 ULONG phyaddr;
795 BOOL forcezero;
797 /* *** BULK Transfers *** */
798 KPRINTF(1, ("Scheduling new BULK transfers...\n"));
799 ioreq = (struct IOUsbHWReq *) hc->hc_BulkXFerQueue.lh_Head;
800 while(((struct Node *) ioreq)->ln_Succ)
802 devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint + ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
803 KPRINTF(10, ("New BULK transfer to %ld.%ld: %ld bytes\n", ioreq->iouh_DevAddr, ioreq->iouh_Endpoint, ioreq->iouh_Length));
804 /* is endpoint already in use or do we have to wait for next transaction */
805 if(unit->hu_DevBusyReq[devadrep])
807 KPRINTF(5, ("Endpoint %02lx in use!\n", devadrep));
808 ioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ;
809 continue;
812 uqh = uhciAllocQH(hc);
813 if(!uqh)
815 break;
818 uqh->uqh_IOReq = ioreq;
820 // fill setup td
821 ctrlstatus = UTCF_ACTIVE|UTCF_1ERRORLIMIT|UTCF_SHORTPACKET;
822 token = (ioreq->iouh_DevAddr<<UTTS_DEVADDR)|(ioreq->iouh_Endpoint<<UTTS_ENDPOINT);
823 token |= (ioreq->iouh_Dir == UHDIR_IN) ? PID_IN : PID_OUT;
824 predutd = NULL;
825 actual = ioreq->iouh_Actual;
827 // Get a MEMF_31BIT bounce buffer
828 uqh->uqh_DataBuffer = usbGetBuffer(&(((UBYTE *) ioreq->iouh_Data)[ioreq->iouh_Actual]), ioreq->iouh_Length - actual, ioreq->iouh_Dir);
829 phyaddr = (IPTR)pciGetPhysical(hc, uqh->uqh_DataBuffer);
// resume the endpoint's current data toggle state
830 if(unit->hu_DevDataToggle[devadrep])
832 // continue with data toggle 1
833 token |= UTTF_DATA1;
// build the TD chain, one MaxPktSize packet per TD
837 utd = uhciAllocTD(hc);
838 if(!utd)
840 break;
842 forcezero = FALSE;
843 if(predutd)
845 WRITEMEM32_LE(&predutd->utd_Link, READMEM32_LE(&utd->utd_Self)|UHCI_DFS);
846 predutd->utd_Succ = (struct UhciXX *) utd;
847 //utd->utd_Pred = (struct UhciXX *) predutd;
848 } else {
849 uqh->uqh_FirstTD = utd;
850 uqh->uqh_Element = utd->utd_Self;
851 //utd->utd_Pred = NULL;
853 //utd->utd_QueueHead = uqh;
854 len = ioreq->iouh_Length - actual;
855 if(len > ioreq->iouh_MaxPktSize)
857 len = ioreq->iouh_MaxPktSize;
859 WRITEMEM32_LE(&utd->utd_CtrlStatus, ctrlstatus);
860 WRITEMEM32_LE(&utd->utd_Token, token|(((len-1) & 0x7ff)<<UTTS_TRANSLENGTH));
861 WRITEMEM32_LE(&utd->utd_BufferPtr, phyaddr);
862 phyaddr += len;
863 actual += len;
864 predutd = utd;
865 token ^= UTTF_DATA1; // toggle bit
// decide whether a trailing zero-length packet is needed on the last TD
866 if((actual == ioreq->iouh_Length) && len)
868 if((ioreq->iouh_Flags & UHFF_NOSHORTPKT) || (ioreq->iouh_Dir == UHDIR_IN) || (actual % ioreq->iouh_MaxPktSize))
870 // no last zero byte packet
871 break;
872 } else {
873 // avoid rare case that the zero byte packet is reached on TD_BULK_LIMIT
874 forcezero = TRUE;
877 } while(forcezero || (len && (actual <= ioreq->iouh_Length) && (actual - ioreq->iouh_Actual < UHCI_TD_BULK_LIMIT)));
879 if(!utd)
881 // not at least one data TD? try again later
882 uhciFreeQH(hc, uqh);
883 break;
885 uqh->uqh_Actual = actual - ioreq->iouh_Actual;
886 // set toggle for next batch / successful transfer
887 unit->hu_DevDataToggle[devadrep] = (token & UTTF_DATA1) ? TRUE : FALSE;
// terminate the chain and request an interrupt on the last TD
889 ctrlstatus |= UTCF_READYINTEN;
890 WRITEMEM32_LE(&predutd->utd_CtrlStatus, ctrlstatus);
891 CONSTWRITEMEM32_LE(&utd->utd_Link, UHCI_TERMINATE);
892 utd->utd_Succ = NULL;
893 //uqh->uqh_LastTD = utd;
895 Remove(&ioreq->iouh_Req.io_Message.mn_Node);
896 ioreq->iouh_DriverPrivate1 = uqh;
898 // manage endpoint going busy
899 unit->hu_DevBusyReq[devadrep] = ioreq;
900 unit->hu_NakTimeoutFrame[devadrep] = (ioreq->iouh_Flags & UHFF_NAKTIMEOUT) ? hc->hc_FrameCounter + ioreq->iouh_NakTimeout : 0;
902 Disable();
903 AddTail(&hc->hc_TDQueue, (struct Node *) ioreq);
905 // looks good to me, now enqueue this entry (just behind the BulkQH)
906 uqh->uqh_Succ = hc->hc_UhciBulkQH->uqh_Succ;
907 uqh->uqh_Link = uqh->uqh_Succ->uxx_Self;
908 SYNC;
910 uqh->uqh_Pred = (struct UhciXX *) hc->hc_UhciBulkQH;
911 uqh->uqh_Succ->uxx_Pred = (struct UhciXX *) uqh;
912 hc->hc_UhciBulkQH->uqh_Succ = (struct UhciXX *) uqh;
913 hc->hc_UhciBulkQH->uqh_Link = uqh->uqh_Self;
914 SYNC;
915 Enable();
917 ioreq = (struct IOUsbHWReq *) hc->hc_BulkXFerQueue.lh_Head;
921 void uhciUpdateFrameCounter(struct PCIController *hc) {
923 UWORD framecnt;
924 Disable();
925 framecnt = READIO16_LE(hc->hc_RegBase, UHCI_FRAMECOUNT) & 0x07ff;
926 if(framecnt < (hc->hc_FrameCounter & 0x07ff))
928 hc->hc_FrameCounter |= 0x07ff;
929 hc->hc_FrameCounter++;
930 hc->hc_FrameCounter |= framecnt;
931 KPRINTF(10, ("Frame Counter Rollover %ld\n", hc->hc_FrameCounter));
932 } else {
933 hc->hc_FrameCounter = (hc->hc_FrameCounter & 0xfffff800)|framecnt;
935 Enable();
938 static AROS_INTH1(uhciCompleteInt, struct PCIController *,hc)
940 AROS_INTFUNC_INIT
942 KPRINTF(1, ("CompleteInt!\n"));
943 uhciUpdateFrameCounter(hc);
945 /* **************** PROCESS DONE TRANSFERS **************** */
947 uhciCheckPortStatusChange(hc);
948 uhwCheckRootHubChanges(hc->hc_Unit);
950 uhciHandleFinishedTDs(hc);
952 if(hc->hc_CtrlXFerQueue.lh_Head->ln_Succ)
954 uhciScheduleCtrlTDs(hc);
957 if(hc->hc_IntXFerQueue.lh_Head->ln_Succ)
959 uhciScheduleIntTDs(hc);
962 if(hc->hc_BulkXFerQueue.lh_Head->ln_Succ)
964 uhciScheduleBulkTDs(hc);
967 KPRINTF(1, ("CompleteDone\n"));
969 return FALSE;
971 AROS_INTFUNC_EXIT
974 static AROS_INTH1(uhciIntCode, struct PCIController *, hc)
976 AROS_INTFUNC_INIT
978 struct PCIDevice *base = hc->hc_Device;
979 UWORD intr;
981 //KPRINTF(10, ("pciUhciInt()\n"));
982 intr = READIO16_LE(hc->hc_RegBase, UHCI_USBSTATUS);
983 if(intr & (UHSF_USBINT|UHSF_USBERRORINT|UHSF_RESUMEDTX|UHSF_HCSYSERROR|UHSF_HCPROCERROR|UHSF_HCHALTED))
985 WRITEIO16_LE(hc->hc_RegBase, UHCI_USBSTATUS, intr);
986 //KPRINTF(1, ("INT=%04lx\n", intr));
987 if(intr & (UHSF_HCSYSERROR|UHSF_HCPROCERROR|UHSF_HCHALTED))
989 KPRINTF(200, ("Host ERROR!\n"));
990 WRITEIO16_LE(hc->hc_RegBase, UHCI_USBCMD, UHCF_HCRESET|UHCF_GLOBALRESET|UHCF_MAXPACKET64|UHCF_CONFIGURE);
991 //WRITEIO16_LE(hc->hc_RegBase, UHCI_USBINTEN, 0);
993 if (!(hc->hc_Flags & HCF_ONLINE))
995 return FALSE;
997 if(intr & (UHSF_USBINT|UHSF_USBERRORINT))
999 SureCause(base, &hc->hc_CompleteInt);
1003 return FALSE;
1005 AROS_INTFUNC_EXIT
/*
 * uhciInit - one-time bring-up of a single UHCI host controller.
 *
 * Allocates one contiguous MEMF_31BIT PCI memory chunk that holds the
 * frame list (plus alignment slack), the QH pool and the TD pool, then
 * builds the static schedule skeleton:
 *
 *   terminating QH <- bulk QH <- ctrl QH <- dummy ISO TD <- 9 interrupt
 *   QH levels (1ms .. 256ms intervals)
 *
 * Every frame list entry is pointed at the interrupt QH matching its
 * polling interval.  Finally the controller hardware is reset and
 * configured, and the reset callback plus the PCI interrupt server are
 * installed.
 *
 * hc - controller being initialized (register base, pools, QH/TD
 *      skeleton pointers are all filled in here)
 * hu - owning unit; used for PCI config space access and for delays
 *
 * Returns TRUE on success, FALSE if the PCI memory allocation failed.
 */
1008 BOOL uhciInit(struct PCIController *hc, struct PCIUnit *hu) {
1010 struct PCIDevice *hd = hu->hu_Device;
1012 struct UhciQH *uqh;
1013 struct UhciQH *preduqh;
1014 struct UhciTD *utd;
1015 ULONG *tabptr;
1016 UBYTE *memptr;
1017 ULONG bitcnt;
1019 ULONG cnt;
// Tag lists used below to toggle the PCI device's IO decode and
// busmaster enable bits at the appropriate points of the reset sequence.
1021 struct TagItem pciActivateIO[] =
1023 { aHidd_PCIDevice_isIO, TRUE },
1024 { TAG_DONE, 0UL },
1027 struct TagItem pciActivateBusmaster[] =
1029 { aHidd_PCIDevice_isMaster, TRUE },
1030 { TAG_DONE, 0UL },
1033 struct TagItem pciDeactivateBusmaster[] =
1035 { aHidd_PCIDevice_isMaster, FALSE },
1036 { TAG_DONE, 0UL },
1039 hc->hc_NumPorts = 2; // UHCI always uses 2 ports per controller
1040 KPRINTF(20, ("Found UHCI Controller %08lx FuncNum=%ld with %ld ports\n", hc->hc_PCIDeviceObject, hc->hc_FunctionNum, hc->hc_NumPorts));
1041 hc->hc_CompleteInt.is_Node.ln_Type = NT_INTERRUPT;
1042 hc->hc_CompleteInt.is_Node.ln_Name = "UHCI CompleteInt";
1043 hc->hc_CompleteInt.is_Node.ln_Pri = 0;
1044 hc->hc_CompleteInt.is_Data = hc;
1045 hc->hc_CompleteInt.is_Code = (VOID_FUNC)uhciCompleteInt;
// Size of the single allocation: frame list plus worst-case alignment
// slack, followed by the QH pool and the TD pool.
1047 hc->hc_PCIMemSize = sizeof(ULONG) * UHCI_FRAMELIST_SIZE + UHCI_FRAMELIST_ALIGNMENT + 1;
1048 hc->hc_PCIMemSize += sizeof(struct UhciQH) * UHCI_QH_POOLSIZE;
1049 hc->hc_PCIMemSize += sizeof(struct UhciTD) * UHCI_TD_POOLSIZE;
1051 memptr = HIDD_PCIDriver_AllocPCIMem(hc->hc_PCIDriverObject, hc->hc_PCIMemSize);
1052 /* memptr will be in the MEMF_31BIT type, therefore
1053 * we know that it's *physical address* will be 32 bits or
1054 * less, which is required for UHCI operation
1056 hc->hc_PCIMem = (APTR) memptr;
1057 if(memptr) {
1059 // PhysicalAddress - VirtualAdjust = VirtualAddress
1060 // VirtualAddress + VirtualAdjust = PhysicalAddress
1061 hc->hc_PCIVirtualAdjust = (IPTR)pciGetPhysical(hc, memptr) - (IPTR)memptr;
1062 KPRINTF(10, ("VirtualAdjust 0x%08lx\n", hc->hc_PCIVirtualAdjust));
1064 // align memory
// NOTE(review): this treats UHCI_FRAMELIST_ALIGNMENT as a (2^n)-1 mask,
// not a byte count — verify the macro's definition matches.
1065 memptr = (UBYTE *) ((((IPTR) hc->hc_PCIMem) + UHCI_FRAMELIST_ALIGNMENT) & (~UHCI_FRAMELIST_ALIGNMENT));
1066 hc->hc_UhciFrameList = (ULONG *) memptr;
1067 KPRINTF(10, ("FrameListBase 0x%08lx\n", hc->hc_UhciFrameList));
1068 memptr += sizeof(APTR) * UHCI_FRAMELIST_SIZE;
1070 // build up QH pool
1071 // Again, all the UQHs are in the MEMF_31BIT hc_PCIMem pool,
1072 // so we can safely treat their physical addresses as 32 bit pointers
1073 uqh = (struct UhciQH *) memptr;
1074 hc->hc_UhciQHPool = uqh;
1075 cnt = UHCI_QH_POOLSIZE - 1;
1076 do {
1077 // minimal initalization
// uxx_Self caches the little-endian *physical* address of the link
// pointer, pre-tagged with UHCI_QHSELECT, so it can be written into
// hardware link fields without further conversion.
1078 uqh->uqh_Succ = (struct UhciXX *) (uqh + 1);
1079 WRITEMEM32_LE(&uqh->uqh_Self, (ULONG) ((IPTR)(&uqh->uqh_Link) + hc->hc_PCIVirtualAdjust + UHCI_QHSELECT));
1080 uqh++;
1081 } while(--cnt);
// terminate the free list with the last pool entry
1082 uqh->uqh_Succ = NULL;
1083 WRITEMEM32_LE(&uqh->uqh_Self, (ULONG) ((IPTR)(&uqh->uqh_Link) + hc->hc_PCIVirtualAdjust + UHCI_QHSELECT));
1084 memptr += sizeof(struct UhciQH) * UHCI_QH_POOLSIZE;
1086 // build up TD pool
1087 // Again, all the UTDs are in the MEMF_31BIT hc_PCIMem pool,
1088 // so we can safely treat their physical addresses as 32 bit pointers
1089 utd = (struct UhciTD *) memptr;
1090 hc->hc_UhciTDPool = utd;
1091 cnt = UHCI_TD_POOLSIZE - 1;
1092 do {
1093 utd->utd_Succ = (struct UhciXX *) (utd + 1);
1094 WRITEMEM32_LE(&utd->utd_Self, (ULONG) ((IPTR)(&utd->utd_Link) + hc->hc_PCIVirtualAdjust + UHCI_TDSELECT));
1095 utd++;
1096 } while(--cnt);
1097 utd->utd_Succ = NULL;
1098 WRITEMEM32_LE(&utd->utd_Self, (ULONG) ((IPTR)(&utd->utd_Link) + hc->hc_PCIVirtualAdjust + UHCI_TDSELECT));
1099 memptr += sizeof(struct UhciTD) * UHCI_TD_POOLSIZE;
// Build the static schedule skeleton, from the end of the chain
// backwards: term QH <- bulk QH <- ctrl QH <- ISO TD <- int QHs.
1101 // terminating QH
1102 hc->hc_UhciTermQH = preduqh = uqh = uhciAllocQH(hc);
1103 uqh->uqh_Succ = NULL;
1104 CONSTWRITEMEM32_LE(&uqh->uqh_Link, UHCI_TERMINATE);
1105 CONSTWRITEMEM32_LE(&uqh->uqh_Element, UHCI_TERMINATE);
1107 // dummy Bulk QH
1108 hc->hc_UhciBulkQH = uqh = uhciAllocQH(hc);
1109 uqh->uqh_Succ = (struct UhciXX *) preduqh;
1110 preduqh->uqh_Pred = (struct UhciXX *) uqh;
1111 uqh->uqh_Link = preduqh->uqh_Self; // link to terminating QH
1112 CONSTWRITEMEM32_LE(&uqh->uqh_Element, UHCI_TERMINATE);
1113 preduqh = uqh;
1115 // dummy Ctrl QH
1116 hc->hc_UhciCtrlQH = uqh = uhciAllocQH(hc);
1117 uqh->uqh_Succ = (struct UhciXX *) preduqh;
1118 preduqh->uqh_Pred = (struct UhciXX *) uqh;
1119 uqh->uqh_Link = preduqh->uqh_Self; // link to Bulk QH
1120 CONSTWRITEMEM32_LE(&uqh->uqh_Element, UHCI_TERMINATE);
1122 // dummy ISO TD
1123 hc->hc_UhciIsoTD = utd = uhciAllocTD(hc);
1124 utd->utd_Succ = (struct UhciXX *) uqh;
1125 //utd->utd_Pred = NULL; // no certain linkage above this level
1126 uqh->uqh_Pred = (struct UhciXX *) utd;
1127 utd->utd_Link = uqh->uqh_Self; // link to Ctrl QH
1129 CONSTWRITEMEM32_LE(&utd->utd_CtrlStatus, 0);
1131 // 1 ms INT QH
1132 hc->hc_UhciIntQH[0] = uqh = uhciAllocQH(hc);
1133 uqh->uqh_Succ = (struct UhciXX *) utd;
1134 uqh->uqh_Pred = NULL; // who knows...
1135 //uqh->uqh_Link = utd->utd_Self; // link to ISO
1136 CONSTWRITEMEM32_LE(&uqh->uqh_Element, UHCI_TERMINATE);
1137 preduqh = uqh;
1139 // make 9 levels of QH interrupts
// hc_UhciIntQH[n] services a 2^n ms polling interval; hardware links
// between the levels are filled in by uhciUpdateIntTree() below.
1140 for(cnt = 1; cnt < 9; cnt++) {
1141 hc->hc_UhciIntQH[cnt] = uqh = uhciAllocQH(hc);
1142 uqh->uqh_Succ = (struct UhciXX *) preduqh;
1143 uqh->uqh_Pred = NULL; // who knows...
1144 //uqh->uqh_Link = preduqh->uqh_Self; // link to previous int level
1145 CONSTWRITEMEM32_LE(&uqh->uqh_Element, UHCI_TERMINATE);
1146 preduqh = uqh;
1149 uhciUpdateIntTree(hc);
1151 // fill in framelist with IntQH entry points based on interval
// The lowest set bit of the frame number selects the interrupt level;
// frame numbers with no low bits set (i.e. 0) fall back to level 8.
1152 tabptr = hc->hc_UhciFrameList;
1153 for(cnt = 0; cnt < UHCI_FRAMELIST_SIZE; cnt++) {
1154 uqh = hc->hc_UhciIntQH[8];
1155 bitcnt = 0;
1156 do {
1157 if(cnt & (1UL<<bitcnt)) {
1158 uqh = hc->hc_UhciIntQH[bitcnt];
1159 break;
1161 } while(++bitcnt < 9);
1162 *tabptr++ = uqh->uqh_Self;
1165 // this will cause more PCI memory access, but faster USB transfers as well
1166 //WRITEMEM32_LE(&hc->hc_UhciTermQH->uqh_Link, AROS_LONG2LE(hc->hc_UhciBulkQH->uqh_Self));
1168 // time to initialize hardware...
// UHCI registers live in IO space; Base4 carries the IO base with flag
// bits in the low nibble that must be masked off.
1169 OOP_GetAttr(hc->hc_PCIDeviceObject, aHidd_PCIDevice_Base4, (IPTR *) &hc->hc_RegBase);
1170 hc->hc_RegBase = (APTR) (((IPTR) hc->hc_RegBase) & (~0xf));
1171 KPRINTF(10, ("RegBase = 0x%08lx\n", hc->hc_RegBase));
1172 OOP_SetAttrs(hc->hc_PCIDeviceObject, (struct TagItem *) pciActivateIO);
1174 // disable BIOS legacy support
1175 KPRINTF(10, ("Turning off BIOS legacy support (old value=%04lx)\n", PCIXReadConfigWord(hc, UHCI_USBLEGSUP)));
1176 PCIXWriteConfigWord(hc, UHCI_USBLEGSUP, 0x8f00);
1178 KPRINTF(10, ("Resetting UHCI HC\n"));
1179 WRITEIO16_LE(hc->hc_RegBase, UHCI_USBCMD, UHCF_GLOBALRESET);
1180 uhwDelayMS(15, hu);
1182 OOP_SetAttrs(hc->hc_PCIDeviceObject, (struct TagItem *) pciDeactivateBusmaster); // no busmaster yet
// Host controller reset; the HCRESET bit self-clears when the reset
// completes, so poll it for up to 100 * 10ms before giving up.
1184 WRITEIO16_LE(hc->hc_RegBase, UHCI_USBCMD, UHCF_HCRESET);
1185 cnt = 100;
1186 do {
1187 uhwDelayMS(10, hu);
1188 if(!(READIO16_LE(hc->hc_RegBase, UHCI_USBCMD) & UHCF_HCRESET)) {
1189 break;
1191 } while(--cnt);
1193 if(cnt == 0) {
1194 KPRINTF(20, ("Reset Timeout!\n"));
1195 WRITEIO16_LE(hc->hc_RegBase, UHCI_USBCMD, UHCF_HCRESET);
1196 uhwDelayMS(15, hu);
1197 } else {
1198 KPRINTF(20, ("Reset finished after %ld ticks\n", 100-cnt));
1201 // stop controller and disable all interrupts first
1202 KPRINTF(10, ("Stopping controller and enabling busmaster\n"));
1203 WRITEIO16_LE(hc->hc_RegBase, UHCI_USBCMD, 0);
1204 WRITEIO16_LE(hc->hc_RegBase, UHCI_USBINTEN, 0);
1206 OOP_SetAttrs(hc->hc_PCIDeviceObject, (struct TagItem *) pciActivateBusmaster); // enable busmaster
1208 // Fix for VIA Babble problem
// NOTE(review): config register 0x40 bit 6 on VIA chipsets — vendor
// specific workaround; confirm against VIA errata documentation.
1209 cnt = PCIXReadConfigByte(hc, 0x40);
1210 if(!(cnt & 0x40)) {
1211 KPRINTF(20, ("Applying VIA Babble workaround\n"));
1212 PCIXWriteConfigByte(hc, 0x40, cnt|0x40);
1215 KPRINTF(10, ("Configuring UHCI HC\n"));
1216 WRITEIO16_LE(hc->hc_RegBase, UHCI_USBCMD, UHCF_MAXPACKET64|UHCF_CONFIGURE);
1218 WRITEIO16_LE(hc->hc_RegBase, UHCI_FRAMECOUNT, 0);
1220 /* hc->hc_UhciFrameList points to a portion of hc->hc_PciMem,
1221 * which we know is 32 bit
1223 WRITEIO32_LE(hc->hc_RegBase, UHCI_FRAMELISTADDR, (ULONG)(IPTR)pciGetPhysical(hc, hc->hc_UhciFrameList));
// acknowledge (write-to-clear) any stale status bits before enabling ints
1225 WRITEIO16_LE(hc->hc_RegBase, UHCI_USBSTATUS, UHIF_TIMEOUTCRC|UHIF_INTONCOMPLETE|UHIF_SHORTPACKET);
1227 // install reset handler
1228 hc->hc_ResetInt.is_Code = (VOID_FUNC)UhciResetHandler;
1229 hc->hc_ResetInt.is_Data = hc;
1230 AddResetCallback(&hc->hc_ResetInt);
1232 // add interrupt
1233 hc->hc_PCIIntHandler.is_Node.ln_Name = "UHCI PCI (pciusb.device)";
1234 hc->hc_PCIIntHandler.is_Node.ln_Pri = 5;
1235 hc->hc_PCIIntHandler.is_Node.ln_Type = NT_INTERRUPT;
1236 hc->hc_PCIIntHandler.is_Code = (VOID_FUNC)uhciIntCode;
1237 hc->hc_PCIIntHandler.is_Data = hc;
1238 AddIntServer(INTB_KERNEL + hc->hc_PCIIntLine, &hc->hc_PCIIntHandler);
1240 WRITEIO16_LE(hc->hc_RegBase, UHCI_USBINTEN, UHIF_TIMEOUTCRC|UHIF_INTONCOMPLETE|UHIF_SHORTPACKET);
1242 // clear all port bits (both ports)
// single 32-bit write covers both 16-bit port status/control registers
1243 WRITEIO32_LE(hc->hc_RegBase, UHCI_PORT1STSCTRL, 0);
1245 // enable PIRQ
1246 KPRINTF(10, ("Enabling PIRQ (old value=%04lx)\n", PCIXReadConfigWord(hc, UHCI_USBLEGSUP)));
1247 PCIXWriteConfigWord(hc, UHCI_USBLEGSUP, 0x2000);
// finally set the RUN/STOP bit — controller starts walking the frame list
1249 WRITEIO16_LE(hc->hc_RegBase, UHCI_USBCMD, UHCF_MAXPACKET64|UHCF_CONFIGURE|UHCF_RUNSTOP);
1250 SYNC;
1252 KPRINTF(20, ("HW Init done\n"));
1254 KPRINTF(10, ("HW Regs USBCMD=%04lx\n", READIO16_LE(hc->hc_RegBase, UHCI_USBCMD)));
1255 KPRINTF(10, ("HW Regs USBSTS=%04lx\n", READIO16_LE(hc->hc_RegBase, UHCI_USBSTATUS)));
1256 KPRINTF(10, ("HW Regs FRAMECOUNT=%04lx\n", READIO16_LE(hc->hc_RegBase, UHCI_FRAMECOUNT)));
1258 KPRINTF(20, ("uhciInit returns TRUE...\n"));
1259 return TRUE;
// failure path: HIDD_PCIDriver_AllocPCIMem() returned NULL above
1263 FIXME: What would the appropriate debug level be?
1265 KPRINTF(1000, ("uhciInit returns FALSE...\n"));
1266 return FALSE;
1269 void uhciFree(struct PCIController *hc, struct PCIUnit *hu) {
1271 hc = (struct PCIController *) hu->hu_Controllers.lh_Head;
1272 while(hc->hc_Node.ln_Succ)
1274 switch(hc->hc_HCIType)
1276 case HCITYPE_UHCI:
1278 KPRINTF(20, ("Shutting down UHCI %08lx\n", hc));
1279 WRITEIO16_LE(hc->hc_RegBase, UHCI_USBINTEN, 0);
1280 // disable PIRQ
1281 PCIXWriteConfigWord(hc, UHCI_USBLEGSUP, 0);
1282 // disable all ports
1283 WRITEIO32_LE(hc->hc_RegBase, UHCI_PORT1STSCTRL, 0);
1284 uhwDelayMS(50, hu);
1285 //WRITEIO16_LE(hc->hc_RegBase, UHCI_USBCMD, UHCF_MAXPACKET64|UHCF_CONFIGURE);
1286 //uhwDelayMS(50, hu);
1287 KPRINTF(20, ("Stopping UHCI %08lx\n", hc));
1288 WRITEIO16_LE(hc->hc_RegBase, UHCI_USBCMD, 0);
1289 SYNC;
1291 //KPRINTF(20, ("Reset done UHCI %08lx\n", hc));
1292 uhwDelayMS(10, hu);
1294 KPRINTF(20, ("Resetting UHCI %08lx\n", hc));
1295 WRITEIO16_LE(hc->hc_RegBase, UHCI_USBCMD, UHCF_HCRESET);
1296 SYNC;
1298 uhwDelayMS(50, hu);
1299 WRITEIO16_LE(hc->hc_RegBase, UHCI_USBCMD, 0);
1300 SYNC;
1302 KPRINTF(20, ("Shutting down UHCI done.\n"));
1303 break;
1307 hc = (struct PCIController *) hc->hc_Node.ln_Succ;