1 /*
2 Copyright © 2010-2013, The AROS Development Team. All rights reserved
3 $Id$
4 */
6 #define DB_LEVEL 100
8 #include <proto/exec.h>
9 #include <proto/oop.h>
10 #include <hidd/pci.h>
12 #include <devices/usb_hub.h>
14 #include "uhwcmd.h"
16 #undef HiddPCIDeviceAttrBase
17 #define HiddPCIDeviceAttrBase (hd->hd_HiddPCIDeviceAB)
18 #undef HiddAttrBase
19 #define HiddAttrBase (hd->hd_HiddAB)
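/*
 * System reset callback: halts the controller by writing HCRESET (plus the
 * interrupt threshold) to USBCMD so the hardware is quiescent when the
 * machine reboots. It is installed via AddResetCallback() in ehciInit().
 */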
21 static AROS_INTH1(EhciResetHandler, struct PCIController *, hc)
23 AROS_INTFUNC_INIT
25 // reset controller
26 CONSTWRITEREG32_LE(hc->hc_RegBase, EHCI_USBCMD, EHUF_HCRESET|(1UL<<EHUS_INTTHRESHOLD));
28 return FALSE;
30 AROS_INTFUNC_EXIT
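/*
 * Common completion path: unlink the queue head from the hardware schedule,
 * mark the device/endpoint as no longer busy and release any bounce buffers
 * that were set up for the data and setup stages.
 */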
33 static void ehciFinishRequest(struct PCIUnit *unit, struct IOUsbHWReq *ioreq)
35 struct EhciQH *eqh = ioreq->iouh_DriverPrivate1;
36 UWORD devadrep;
37 UWORD dir;
39 // unlink from schedule
40 eqh->eqh_Pred->eqh_NextQH = eqh->eqh_Succ->eqh_Self;
41 CacheClearE(&eqh->eqh_Pred->eqh_NextQH, 32, CACRF_ClearD);
42 SYNC;
44 eqh->eqh_Succ->eqh_Pred = eqh->eqh_Pred;
45 eqh->eqh_Pred->eqh_Succ = eqh->eqh_Succ;
46 SYNC;
48 /* Deactivate the endpoint */
49 Remove(&ioreq->iouh_Req.io_Message.mn_Node);
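/* devadrep packs device address (bits 5..11), endpoint (bits 0..3) and
   direction (bit 4 set for IN) into one index for the per-endpoint arrays */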
50 devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint + ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
51 unit->hu_DevBusyReq[devadrep] = NULL;
53 /* Release bounce buffers */
54 if (ioreq->iouh_Req.io_Command == UHCMD_CONTROLXFER)
55 dir = (ioreq->iouh_SetupData.bmRequestType & URTF_IN) ? UHDIR_IN : UHDIR_OUT;
56 else
57 dir = ioreq->iouh_Dir;
59 usbReleaseBuffer(eqh->eqh_Buffer, ioreq->iouh_Data, ioreq->iouh_Actual, dir);
60 usbReleaseBuffer(eqh->eqh_SetupBuf, &ioreq->iouh_SetupData, 8, UHDIR_OUT);
61 eqh->eqh_Buffer = NULL;
62 eqh->eqh_SetupBuf = NULL;
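/*
 * Async transfers: the QH cannot be reused immediately after unlinking, as
 * the controller may still hold a cached copy. It is therefore parked on the
 * hc_EhciAsyncFreeQH list and the async advance doorbell is rung; the QH and
 * its TDs are finally freed in ehciCompleteInt() once the doorbell interrupt
 * has fired.
 */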
65 void ehciFreeAsyncContext(struct PCIController *hc, struct IOUsbHWReq *ioreq)
67 struct EhciQH *eqh = ioreq->iouh_DriverPrivate1;
69 KPRINTF(5, ("Freeing AsyncContext 0x%p\n", eqh));
70 ehciFinishRequest(hc->hc_Unit, ioreq);
72 // need to wait until an async schedule rollover before freeing these
73 Disable();
74 eqh->eqh_Succ = hc->hc_EhciAsyncFreeQH;
75 hc->hc_EhciAsyncFreeQH = eqh;
76 // activate doorbell
77 WRITEREG32_LE(hc->hc_RegBase, EHCI_USBCMD, hc->hc_EhciUsbCmd|EHUF_ASYNCDOORBELL);
78 Enable();
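/* Periodic transfers: after ehciFinishRequest() has unlinked the QH, the TD
   chain and the QH itself are given back to the pools right away, with
   interrupts disabled to avoid racing the completion interrupt. */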
81 void ehciFreePeriodicContext(struct PCIController *hc, struct IOUsbHWReq *ioreq)
83 struct EhciQH *eqh = ioreq->iouh_DriverPrivate1;
84 struct EhciTD *etd;
85 struct EhciTD *nextetd;
87 KPRINTF(5, ("Freeing PeriodicContext 0x%p\n", eqh));
88 ehciFinishRequest(hc->hc_Unit, ioreq);
90 Disable(); // avoid race condition with interrupt
91 nextetd = eqh->eqh_FirstTD;
92 while((etd = nextetd))
94 KPRINTF(1, ("FreeTD 0x%p\n", nextetd));
95 nextetd = etd->etd_Succ;
96 ehciFreeTD(hc, etd);
98 ehciFreeQH(hc, eqh);
99 Enable();
102 void ehciFreeQHandTDs(struct PCIController *hc, struct EhciQH *eqh) {
104 struct EhciTD *etd = NULL;
105 struct EhciTD *nextetd;
107 KPRINTF(5, ("Unlinking QContext 0x%p\n", eqh));
108 nextetd = eqh->eqh_FirstTD;
109 while(nextetd)
111 KPRINTF(1, ("FreeTD 0x%p\n", nextetd));
112 etd = nextetd;
113 nextetd = (struct EhciTD *) etd->etd_Succ;
114 ehciFreeTD(hc, etd);
117 ehciFreeQH(hc, eqh);
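/*
 * Rebuild the hardware linkage of the 11 static interrupt QH levels so that
 * each level's next pointer skips empty levels and targets the first QH
 * actually queued at a lower level, falling back to the terminating QH when
 * nothing is queued at all.
 */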
120 void ehciUpdateIntTree(struct PCIController *hc) {
122 struct EhciQH *eqh;
123 struct EhciQH *predeqh;
124 struct EhciQH *lastusedeqh;
125 UWORD cnt;
127 // optimize linkage between queue heads
128 predeqh = lastusedeqh = hc->hc_EhciTermQH;
129 for(cnt = 0; cnt < 11; cnt++)
131 eqh = hc->hc_EhciIntQH[cnt];
132 if(eqh->eqh_Succ != predeqh)
134 lastusedeqh = eqh->eqh_Succ;
136 eqh->eqh_NextQH = lastusedeqh->eqh_Self;
137 CacheClearE(&eqh->eqh_NextQH, 32, CACRF_ClearD);
138 predeqh = eqh;
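/*
 * Walk the async (hc_TDQueue) and periodic (hc_PeriodicTDQueue) lists of
 * pending IO requests, inspect the TD chains of queue heads that are no
 * longer active, translate the EHCI status bits into io_Error codes and
 * either reload the QH (large bulk transfers) or reply the request.
 */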
142 void ehciHandleFinishedTDs(struct PCIController *hc) {
144 struct PCIUnit *unit = hc->hc_Unit;
145 struct IOUsbHWReq *ioreq;
146 struct IOUsbHWReq *nextioreq;
147 struct EhciQH *eqh;
148 struct EhciTD *etd;
149 struct EhciTD *predetd;
150 UWORD devadrep;
151 ULONG len;
152 UWORD inspect;
153 ULONG nexttd;
154 BOOL shortpkt;
155 ULONG ctrlstatus;
156 ULONG epctrlstatus;
157 ULONG actual;
158 BOOL halted;
159 BOOL updatetree = FALSE;
160 BOOL zeroterm;
161 IPTR phyaddr;
163 KPRINTF(1, ("Checking for Async work done...\n"));
164 ioreq = (struct IOUsbHWReq *) hc->hc_TDQueue.lh_Head;
165 while((nextioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ))
167 eqh = (struct EhciQH *) ioreq->iouh_DriverPrivate1;
168 if(eqh)
170 KPRINTF(1, ("Examining IOReq=0x%p with EQH=0x%p\n", ioreq, eqh));
171 SYNC;
173 CacheClearE(&eqh->eqh_NextQH, 32, CACRF_InvalidateD);
174 epctrlstatus = READMEM32_LE(&eqh->eqh_CtrlStatus);
175 nexttd = READMEM32_LE(&eqh->eqh_NextTD);
176 devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint + ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
177 halted = ((epctrlstatus & (ETCF_ACTIVE|ETSF_HALTED)) == ETSF_HALTED);
178 if(halted || (!(epctrlstatus & ETCF_ACTIVE) && (nexttd & EHCI_TERMINATE)))
180 KPRINTF(1, ("AS: CS=%08lx CP=%08lx NX=%08lx\n", epctrlstatus, READMEM32_LE(&eqh->eqh_CurrTD), nexttd));
181 shortpkt = FALSE;
182 actual = 0;
183 inspect = 1;
184 etd = eqh->eqh_FirstTD;
187 ctrlstatus = READMEM32_LE(&etd->etd_CtrlStatus);
188 KPRINTF(1, ("AS: CS=%08lx SL=%08lx TD=0x%p\n", ctrlstatus, READMEM32_LE(&etd->etd_Self), etd));
189 if(ctrlstatus & ETCF_ACTIVE)
191 if(halted)
193 KPRINTF(20, ("Async: Halted before TD\n"));
194 //ctrlstatus = eqh->eqh_CtrlStatus;
195 inspect = 0;
196 if(unit->hu_NakTimeoutFrame[devadrep] && (hc->hc_FrameCounter > unit->hu_NakTimeoutFrame[devadrep]))
198 KPRINTF(20, ("NAK timeout\n"));
199 ioreq->iouh_Req.io_Error = UHIOERR_NAKTIMEOUT;
201 break;
202 } else {
203 // what happened here? The host controller was just updating the fields and has not finished yet
204 ctrlstatus = epctrlstatus;
206 /*KPRINTF(20, ("AS: CS=%08lx CP=%08lx NX=%08lx\n", epctrlstatus, READMEM32_LE(&eqh->eqh_CurrTD), nexttd));
207 KPRINTF(20, ("AS: CS=%08lx CP=%08lx NX=%08lx\n", READMEM32_LE(&eqh->eqh_CtrlStatus), READMEM32_LE(&eqh->eqh_CurrTD), READMEM32_LE(&eqh->eqh_NextTD)));
208 KPRINTF(20, ("AS: CS=%08lx SL=%08lx TD=%08lx\n", ctrlstatus, READMEM32_LE(&etd->etd_Self), etd));
209 etd = eqh->eqh_FirstTD;
212 KPRINTF(20, ("XX: CS=%08lx SL=%08lx TD=%08lx\n", READMEM32_LE(&etd->etd_CtrlStatus), READMEM32_LE(&etd->etd_Self), etd));
213 } while(etd = etd->etd_Succ);
214 KPRINTF(20, ("Async: Internal error! Still active?!\n"));
215 inspect = 2;
216 break;*/
220 if(ctrlstatus & (ETSF_HALTED|ETSF_TRANSERR|ETSF_BABBLE|ETSF_DATABUFFERERR))
222 if(ctrlstatus & ETSF_BABBLE)
224 KPRINTF(20, ("Babble error %08lx\n", ctrlstatus));
225 ioreq->iouh_Req.io_Error = UHIOERR_OVERFLOW;
227 else if(ctrlstatus & ETSF_DATABUFFERERR)
229 KPRINTF(20, ("Databuffer error\n"));
230 ioreq->iouh_Req.io_Error = UHIOERR_HOSTERROR;
232 else if(ctrlstatus & ETSF_TRANSERR)
234 if((ctrlstatus & ETCM_ERRORLIMIT)>>ETCS_ERRORLIMIT)
236 KPRINTF(20, ("other kind of STALLED!\n"));
237 ioreq->iouh_Req.io_Error = UHIOERR_STALL;
238 } else {
239 KPRINTF(20, ("TIMEOUT!\n"));
240 ioreq->iouh_Req.io_Error = UHIOERR_TIMEOUT;
242 } else {
243 KPRINTF(20, ("STALLED!\n"));
244 ioreq->iouh_Req.io_Error = UHIOERR_STALL;
246 inspect = 0;
247 break;
250 len = etd->etd_Length - ((ctrlstatus & ETSM_TRANSLENGTH)>>ETSS_TRANSLENGTH);
251 if((ctrlstatus & ETCM_PIDCODE) != ETCF_PIDCODE_SETUP) // don't count setup packet
253 actual += len;
255 if(ctrlstatus & ETSM_TRANSLENGTH)
257 KPRINTF(10, ("Short packet: %ld < %ld\n", len, etd->etd_Length));
258 shortpkt = TRUE;
259 break;
261 etd = etd->etd_Succ;
262 } while(etd && (!(ctrlstatus & ETCF_READYINTEN)));
263 /*if(inspect == 2)
265 // phantom halted
266 ioreq = nextioreq;
267 continue;
270 if(((actual + ioreq->iouh_Actual) < eqh->eqh_Actual) && (!ioreq->iouh_Req.io_Error) && (!(ioreq->iouh_Flags & UHFF_ALLOWRUNTPKTS)))
272 ioreq->iouh_Req.io_Error = UHIOERR_RUNTPACKET;
274 ioreq->iouh_Actual += actual;
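/* Bulk transfers larger than the allocated TD chain are split (see
   ehciScheduleBulkTDs); if more data is outstanding and no error or short
   packet occurred, the existing TDs are refilled with the next chunk and the
   QH is restarted instead of replying the request. */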
275 if(inspect && (!shortpkt) && (eqh->eqh_Actual < ioreq->iouh_Length))
277 KPRINTF(10, ("Reloading BULK at %ld/%ld\n", eqh->eqh_Actual, ioreq->iouh_Length));
278 // reload
279 ctrlstatus = (ioreq->iouh_Dir == UHDIR_IN) ? (ETCF_3ERRORSLIMIT|ETCF_ACTIVE|ETCF_PIDCODE_IN) : (ETCF_3ERRORSLIMIT|ETCF_ACTIVE|ETCF_PIDCODE_OUT);
280 phyaddr = (IPTR)pciGetPhysical(hc, eqh->eqh_Buffer + ioreq->iouh_Actual);
281 predetd = etd = eqh->eqh_FirstTD;
283 CONSTWRITEMEM32_LE(&eqh->eqh_CurrTD, EHCI_TERMINATE);
284 CONSTWRITEMEM32_LE(&eqh->eqh_NextTD, EHCI_TERMINATE);
285 CONSTWRITEMEM32_LE(&eqh->eqh_AltNextTD, EHCI_TERMINATE);
288 len = ioreq->iouh_Length - eqh->eqh_Actual;
289 if(len > 4*EHCI_PAGE_SIZE)
291 len = 4*EHCI_PAGE_SIZE;
293 etd->etd_Length = len;
294 KPRINTF(1, ("Reload Bulk TD 0x%p len %ld (%ld/%ld) phy=0x%p\n",
295 etd, len, eqh->eqh_Actual, ioreq->iouh_Length, phyaddr));
296 WRITEMEM32_LE(&etd->etd_CtrlStatus, ctrlstatus|(len<<ETSS_TRANSLENGTH));
297 // FIXME need quark scatter gather mechanism here
298 WRITEMEM32_LE(&etd->etd_BufferPtr[0], phyaddr);
299 WRITEMEM32_LE(&etd->etd_BufferPtr[1], (phyaddr & EHCI_PAGE_MASK) + (1*EHCI_PAGE_SIZE));
300 WRITEMEM32_LE(&etd->etd_BufferPtr[2], (phyaddr & EHCI_PAGE_MASK) + (2*EHCI_PAGE_SIZE));
301 WRITEMEM32_LE(&etd->etd_BufferPtr[3], (phyaddr & EHCI_PAGE_MASK) + (3*EHCI_PAGE_SIZE));
302 WRITEMEM32_LE(&etd->etd_BufferPtr[4], (phyaddr & EHCI_PAGE_MASK) + (4*EHCI_PAGE_SIZE));
304 // FIXME Make use of these on 64-bit-capable hardware
305 etd->etd_ExtBufferPtr[0] = 0;
306 etd->etd_ExtBufferPtr[1] = 0;
307 etd->etd_ExtBufferPtr[2] = 0;
308 etd->etd_ExtBufferPtr[3] = 0;
309 etd->etd_ExtBufferPtr[4] = 0;
311 phyaddr += len;
312 eqh->eqh_Actual += len;
313 zeroterm = (len && (ioreq->iouh_Dir == UHDIR_OUT) && (eqh->eqh_Actual == ioreq->iouh_Length) && (!(ioreq->iouh_Flags & UHFF_NOSHORTPKT)) && ((eqh->eqh_Actual % ioreq->iouh_MaxPktSize) == 0));
314 predetd = etd;
315 etd = etd->etd_Succ;
316 if((!etd) && zeroterm)
318 // rare case where the zero-length packet would otherwise be lost: allocate an etd and append the zero packet.
319 etd = ehciAllocTD(hc);
320 if(!etd)
322 KPRINTF(200, ("INTERNAL ERROR! This should not happen! Could not allocate zero packet TD\n"));
323 break;
325 predetd->etd_Succ = etd;
326 predetd->etd_NextTD = etd->etd_Self;
327 predetd->etd_AltNextTD = hc->hc_ShortPktEndTD->etd_Self;
328 etd->etd_Succ = NULL;
329 CONSTWRITEMEM32_LE(&etd->etd_NextTD, EHCI_TERMINATE);
330 CONSTWRITEMEM32_LE(&etd->etd_AltNextTD, EHCI_TERMINATE);
332 } while(etd && ((eqh->eqh_Actual < ioreq->iouh_Length) || zeroterm));
333 ctrlstatus |= ETCF_READYINTEN|(predetd->etd_Length<<ETSS_TRANSLENGTH);
334 WRITEMEM32_LE(&predetd->etd_CtrlStatus, ctrlstatus);
335 CONSTWRITEMEM32_LE(&predetd->etd_NextTD, EHCI_TERMINATE);
336 CONSTWRITEMEM32_LE(&predetd->etd_AltNextTD, EHCI_TERMINATE);
337 SYNC;
338 etd = eqh->eqh_FirstTD;
339 eqh->eqh_NextTD = etd->etd_Self;
340 SYNC;
341 unit->hu_NakTimeoutFrame[devadrep] = (ioreq->iouh_Flags & UHFF_NAKTIMEOUT) ? hc->hc_FrameCounter + (ioreq->iouh_NakTimeout<<3) : 0;
343 else
345 ehciFreeAsyncContext(hc, ioreq);
346 // use next data toggle bit based on last successful transaction
347 KPRINTF(1, ("Old Toggle %04lx:%ld\n", devadrep, unit->hu_DevDataToggle[devadrep]));
348 unit->hu_DevDataToggle[devadrep] = (ctrlstatus & ETCF_DATA1) ? TRUE : FALSE;
349 KPRINTF(1, ("Toggle now %04lx:%ld\n", devadrep, unit->hu_DevDataToggle[devadrep]));
350 if(inspect)
352 if(ioreq->iouh_Req.io_Command == UHCMD_CONTROLXFER)
354 // check for successful clear feature and set address ctrl transfers
355 uhwCheckSpecialCtrlTransfers(hc, ioreq);
358 ReplyMsg(&ioreq->iouh_Req.io_Message);
361 } else {
362 KPRINTF(20, ("IOReq=0x%p has no UQH!\n", ioreq));
364 ioreq = nextioreq;
367 KPRINTF(1, ("Checking for Periodic work done...\n"));
368 ioreq = (struct IOUsbHWReq *) hc->hc_PeriodicTDQueue.lh_Head;
369 while((nextioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ))
371 eqh = (struct EhciQH *) ioreq->iouh_DriverPrivate1;
372 if(eqh)
374 KPRINTF(1, ("Examining IOReq=0x%p with EQH=0x%p\n", ioreq, eqh));
375 nexttd = READMEM32_LE(&eqh->eqh_NextTD);
376 etd = eqh->eqh_FirstTD;
377 ctrlstatus = READMEM32_LE(&eqh->eqh_CtrlStatus);
378 devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint + ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
379 halted = ((ctrlstatus & (ETCF_ACTIVE|ETSF_HALTED)) == ETSF_HALTED);
380 if(halted || (!(ctrlstatus & ETCF_ACTIVE) && (nexttd & EHCI_TERMINATE)))
382 KPRINTF(1, ("EQH not active %08lx\n", ctrlstatus));
383 shortpkt = FALSE;
384 actual = 0;
385 inspect = 1;
388 ctrlstatus = READMEM32_LE(&etd->etd_CtrlStatus);
389 KPRINTF(1, ("Periodic: TD=0x%p CS=%08lx\n", etd, ctrlstatus));
390 if(ctrlstatus & ETCF_ACTIVE)
392 if(halted)
394 KPRINTF(20, ("Periodic: Halted before TD\n"));
395 //ctrlstatus = eqh->eqh_CtrlStatus;
396 inspect = 0;
397 if(unit->hu_NakTimeoutFrame[devadrep] && (hc->hc_FrameCounter > unit->hu_NakTimeoutFrame[devadrep]))
399 KPRINTF(20, ("NAK timeout\n"));
400 ioreq->iouh_Req.io_Error = UHIOERR_NAKTIMEOUT;
402 break;
403 } else {
404 KPRINTF(20, ("Periodic: Internal error! Still active?!\n"));
405 break;
409 if(ctrlstatus & (ETSF_HALTED|ETSF_TRANSERR|ETSF_BABBLE|ETSF_DATABUFFERERR|ETSF_MISSEDCSPLIT))
411 if(ctrlstatus & ETSF_BABBLE)
413 KPRINTF(20, ("Babble error %08lx\n", ctrlstatus));
414 ioreq->iouh_Req.io_Error = UHIOERR_OVERFLOW;
416 else if(ctrlstatus & ETSF_MISSEDCSPLIT)
418 KPRINTF(20, ("Missed CSplit %08lx\n", ctrlstatus));
419 ioreq->iouh_Req.io_Error = UHIOERR_STALL;
421 else if(ctrlstatus & ETSF_DATABUFFERERR)
423 KPRINTF(20, ("Databuffer error\n"));
424 ioreq->iouh_Req.io_Error = UHIOERR_HOSTERROR;
426 else if(ctrlstatus & ETSF_TRANSERR)
428 if((ctrlstatus & ETCM_ERRORLIMIT)>>ETCS_ERRORLIMIT)
430 KPRINTF(20, ("STALLED!\n"));
431 ioreq->iouh_Req.io_Error = UHIOERR_STALL;
432 } else {
433 KPRINTF(20, ("TIMEOUT!\n"));
434 ioreq->iouh_Req.io_Error = UHIOERR_TIMEOUT;
437 else if(unit->hu_NakTimeoutFrame[devadrep] && (hc->hc_FrameCounter > unit->hu_NakTimeoutFrame[devadrep]))
439 ioreq->iouh_Req.io_Error = UHIOERR_NAKTIMEOUT;
441 inspect = 0;
442 break;
445 len = etd->etd_Length - ((ctrlstatus & ETSM_TRANSLENGTH)>>ETSS_TRANSLENGTH);
446 actual += len;
447 if(ctrlstatus & ETSM_TRANSLENGTH)
449 KPRINTF(10, ("Short packet: %ld < %ld\n", len, etd->etd_Length));
450 shortpkt = TRUE;
451 break;
453 etd = etd->etd_Succ;
454 } while(etd);
455 if((actual < eqh->eqh_Actual) && (!ioreq->iouh_Req.io_Error) && (!(ioreq->iouh_Flags & UHFF_ALLOWRUNTPKTS)))
457 ioreq->iouh_Req.io_Error = UHIOERR_RUNTPACKET;
459 ioreq->iouh_Actual += actual;
460 ehciFreePeriodicContext(hc, ioreq);
461 updatetree = TRUE;
462 // use next data toggle bit based on last successful transaction
463 KPRINTF(1, ("Old Toggle %04lx:%ld\n", devadrep, unit->hu_DevDataToggle[devadrep]));
464 unit->hu_DevDataToggle[devadrep] = (ctrlstatus & ETCF_DATA1) ? TRUE : FALSE;
465 KPRINTF(1, ("Toggle now %04lx:%ld\n", devadrep, unit->hu_DevDataToggle[devadrep]));
466 ReplyMsg(&ioreq->iouh_Req.io_Message);
468 } else {
469 KPRINTF(20, ("IOReq=0x%p has no UQH!\n", ioreq));
471 ioreq = nextioreq;
473 if(updatetree)
475 ehciUpdateIntTree(hc);
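/*
 * Build control transfers from the hc_CtrlXFerQueue: one SETUP TD, optional
 * DATA TDs (up to 4 pages each, with alternating data toggle) and a
 * terminating STATUS TD in the opposite direction with DATA1 set. The
 * finished QH is inserted right behind the async list head.
 */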
479 void ehciScheduleCtrlTDs(struct PCIController *hc) {
481 struct PCIUnit *unit = hc->hc_Unit;
482 struct IOUsbHWReq *ioreq;
483 UWORD devadrep;
484 struct EhciQH *eqh;
485 struct EhciTD *setupetd;
486 struct EhciTD *dataetd;
487 struct EhciTD *termetd;
488 struct EhciTD *predetd;
489 ULONG epcaps;
490 ULONG ctrlstatus;
491 ULONG len;
492 IPTR phyaddr;
494 /* *** CTRL Transfers *** */
495 KPRINTF(1, ("Scheduling new CTRL transfers...\n"));
496 ioreq = (struct IOUsbHWReq *) hc->hc_CtrlXFerQueue.lh_Head;
497 while(((struct Node *) ioreq)->ln_Succ)
499 devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint;
500 KPRINTF(10, ("New CTRL transfer to %ld.%ld: %ld bytes\n", ioreq->iouh_DevAddr, ioreq->iouh_Endpoint, ioreq->iouh_Length));
501 /* is endpoint already in use or do we have to wait for next transaction */
502 if(unit->hu_DevBusyReq[devadrep])
504 KPRINTF(5, ("Endpoint %02lx in use!\n", devadrep));
505 ioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ;
506 continue;
509 eqh = ehciAllocQH(hc);
510 if(!eqh)
512 break;
515 setupetd = ehciAllocTD(hc);
516 if(!setupetd)
518 ehciFreeQH(hc, eqh);
519 break;
521 termetd = ehciAllocTD(hc);
522 if(!termetd)
524 ehciFreeTD(hc, setupetd);
525 ehciFreeQH(hc, eqh);
526 break;
528 eqh->eqh_IOReq = ioreq;
529 eqh->eqh_FirstTD = setupetd;
530 eqh->eqh_Actual = 0;
532 epcaps = ((0<<EQES_RELOAD)|EQEF_TOGGLEFROMTD)|(ioreq->iouh_MaxPktSize<<EQES_MAXPKTLEN)|(ioreq->iouh_DevAddr<<EQES_DEVADDR)|(ioreq->iouh_Endpoint<<EQES_ENDPOINT);
533 if(ioreq->iouh_Flags & UHFF_SPLITTRANS)
535 KPRINTF(10, ("*** SPLIT TRANSACTION to HubPort %ld at Addr %ld\n", ioreq->iouh_SplitHubPort, ioreq->iouh_SplitHubAddr));
536 // full speed and low speed handling
537 WRITEMEM32_LE(&eqh->eqh_SplitCtrl, EQSF_MULTI_1|(ioreq->iouh_SplitHubPort<<EQSS_PORTNUMBER)|(ioreq->iouh_SplitHubAddr<<EQSS_HUBADDRESS));
538 epcaps |= EQEF_SPLITCTRLEP;
539 if(ioreq->iouh_Flags & UHFF_LOWSPEED)
541 KPRINTF(10, ("*** LOW SPEED ***\n"));
542 epcaps |= EQEF_LOWSPEED;
544 } else {
545 CONSTWRITEMEM32_LE(&eqh->eqh_SplitCtrl, EQSF_MULTI_1);
546 epcaps |= EQEF_HIGHSPEED;
548 WRITEMEM32_LE(&eqh->eqh_EPCaps, epcaps);
549 //eqh->eqh_CtrlStatus = eqh->eqh_CurrTD = 0;
550 //eqh->eqh_AltNextTD = eqh->eqh_NextTD = setupetd->etd_Self;
552 //termetd->etd_QueueHead = setupetd->etd_QueueHead = eqh;
554 KPRINTF(1, ("SetupTD=0x%p, TermTD=0x%p\n", setupetd, termetd));
556 // fill setup td
557 setupetd->etd_Length = 8;
559 CONSTWRITEMEM32_LE(&setupetd->etd_CtrlStatus, (8<<ETSS_TRANSLENGTH)|ETCF_3ERRORSLIMIT|ETCF_ACTIVE|ETCF_PIDCODE_SETUP);
561 eqh->eqh_SetupBuf = usbGetBuffer(&ioreq->iouh_SetupData, 8, UHDIR_OUT);
562 phyaddr = (IPTR) pciGetPhysical(hc, eqh->eqh_SetupBuf);
564 WRITEMEM32_LE(&setupetd->etd_BufferPtr[0], phyaddr);
565 WRITEMEM32_LE(&setupetd->etd_BufferPtr[1], (phyaddr + 8) & EHCI_PAGE_MASK); // theoretically, setup data may cross one page
566 setupetd->etd_BufferPtr[2] = 0; // clear for overlay bits
568 // FIXME Make use of these on 64-bit-capable hardware
569 setupetd->etd_ExtBufferPtr[0] = 0;
570 setupetd->etd_ExtBufferPtr[1] = 0;
571 setupetd->etd_ExtBufferPtr[2] = 0;
573 ctrlstatus = (ioreq->iouh_SetupData.bmRequestType & URTF_IN) ? (ETCF_3ERRORSLIMIT|ETCF_ACTIVE|ETCF_PIDCODE_IN) : (ETCF_3ERRORSLIMIT|ETCF_ACTIVE|ETCF_PIDCODE_OUT);
574 predetd = setupetd;
575 if(ioreq->iouh_Length)
577 eqh->eqh_Buffer = usbGetBuffer(ioreq->iouh_Data, ioreq->iouh_Length, (ioreq->iouh_SetupData.bmRequestType & URTF_IN) ? UHDIR_IN : UHDIR_OUT);
578 phyaddr = (IPTR)pciGetPhysical(hc, eqh->eqh_Buffer);
581 dataetd = ehciAllocTD(hc);
582 if(!dataetd)
584 break;
586 ctrlstatus ^= ETCF_DATA1; // toggle bit
587 predetd->etd_Succ = dataetd;
588 predetd->etd_NextTD = dataetd->etd_Self;
589 dataetd->etd_AltNextTD = termetd->etd_Self;
591 len = ioreq->iouh_Length - eqh->eqh_Actual;
592 if(len > 4*EHCI_PAGE_SIZE)
594 len = 4*EHCI_PAGE_SIZE;
596 dataetd->etd_Length = len;
597 WRITEMEM32_LE(&dataetd->etd_CtrlStatus, ctrlstatus|(len<<ETSS_TRANSLENGTH));
598 // FIXME need quark scatter gather mechanism here
599 WRITEMEM32_LE(&dataetd->etd_BufferPtr[0], phyaddr);
600 WRITEMEM32_LE(&dataetd->etd_BufferPtr[1], (phyaddr & EHCI_PAGE_MASK) + (1*EHCI_PAGE_SIZE));
601 WRITEMEM32_LE(&dataetd->etd_BufferPtr[2], (phyaddr & EHCI_PAGE_MASK) + (2*EHCI_PAGE_SIZE));
602 WRITEMEM32_LE(&dataetd->etd_BufferPtr[3], (phyaddr & EHCI_PAGE_MASK) + (3*EHCI_PAGE_SIZE));
603 WRITEMEM32_LE(&dataetd->etd_BufferPtr[4], (phyaddr & EHCI_PAGE_MASK) + (4*EHCI_PAGE_SIZE));
605 // FIXME Make use of these on 64-bit-capable hardware
606 dataetd->etd_ExtBufferPtr[0] = 0;
607 dataetd->etd_ExtBufferPtr[1] = 0;
608 dataetd->etd_ExtBufferPtr[2] = 0;
609 dataetd->etd_ExtBufferPtr[3] = 0;
610 dataetd->etd_ExtBufferPtr[4] = 0;
612 phyaddr += len;
613 eqh->eqh_Actual += len;
614 predetd = dataetd;
615 } while(eqh->eqh_Actual < ioreq->iouh_Length);
616 if(!dataetd)
618 // not enough dataetds? try again later
619 usbReleaseBuffer(eqh->eqh_Buffer, ioreq->iouh_Data, 0, 0);
620 usbReleaseBuffer(eqh->eqh_SetupBuf, &ioreq->iouh_SetupData, 0, 0);
621 ehciFreeQHandTDs(hc, eqh);
622 ehciFreeTD(hc, termetd); // this one's not linked yet
623 break;
626 // TERM packet
627 ctrlstatus |= ETCF_DATA1|ETCF_READYINTEN;
628 ctrlstatus ^= (ETCF_PIDCODE_IN^ETCF_PIDCODE_OUT);
630 predetd->etd_NextTD = termetd->etd_Self;
631 predetd->etd_Succ = termetd;
632 CONSTWRITEMEM32_LE(&termetd->etd_NextTD, EHCI_TERMINATE);
633 CONSTWRITEMEM32_LE(&termetd->etd_AltNextTD, EHCI_TERMINATE);
634 WRITEMEM32_LE(&termetd->etd_CtrlStatus, ctrlstatus);
635 termetd->etd_Length = 0;
636 termetd->etd_BufferPtr[0] = 0; // clear for overlay bits
637 termetd->etd_BufferPtr[1] = 0; // clear for overlay bits
638 termetd->etd_BufferPtr[2] = 0; // clear for overlay bits
639 termetd->etd_ExtBufferPtr[0] = 0; // clear for overlay bits
640 termetd->etd_ExtBufferPtr[1] = 0; // clear for overlay bits
641 termetd->etd_ExtBufferPtr[2] = 0; // clear for overlay bits
642 termetd->etd_Succ = NULL;
644 // due to silicon bugs, we fill in the first overlay ourselves.
645 eqh->eqh_CurrTD = setupetd->etd_Self;
646 eqh->eqh_NextTD = setupetd->etd_NextTD;
647 eqh->eqh_AltNextTD = setupetd->etd_AltNextTD;
648 eqh->eqh_CtrlStatus = setupetd->etd_CtrlStatus;
649 eqh->eqh_BufferPtr[0] = setupetd->etd_BufferPtr[0];
650 eqh->eqh_BufferPtr[1] = setupetd->etd_BufferPtr[1];
651 eqh->eqh_BufferPtr[2] = 0;
652 eqh->eqh_ExtBufferPtr[0] = setupetd->etd_ExtBufferPtr[0];
653 eqh->eqh_ExtBufferPtr[1] = setupetd->etd_ExtBufferPtr[1];
654 eqh->eqh_ExtBufferPtr[2] = 0;
656 Remove(&ioreq->iouh_Req.io_Message.mn_Node);
657 ioreq->iouh_DriverPrivate1 = eqh;
659 // manage endpoint going busy
660 unit->hu_DevBusyReq[devadrep] = ioreq;
661 unit->hu_NakTimeoutFrame[devadrep] = (ioreq->iouh_Flags & UHFF_NAKTIMEOUT) ? hc->hc_FrameCounter + (ioreq->iouh_NakTimeout<<3) : 0;
663 Disable();
664 AddTail(&hc->hc_TDQueue, (struct Node *) ioreq);
666 // looks good to me, now enqueue this entry (just behind the asyncQH)
667 eqh->eqh_Succ = hc->hc_EhciAsyncQH->eqh_Succ;
668 eqh->eqh_NextQH = eqh->eqh_Succ->eqh_Self;
669 SYNC;
671 eqh->eqh_Pred = hc->hc_EhciAsyncQH;
672 eqh->eqh_Succ->eqh_Pred = eqh;
673 hc->hc_EhciAsyncQH->eqh_Succ = eqh;
674 hc->hc_EhciAsyncQH->eqh_NextQH = eqh->eqh_Self;
675 SYNC;
676 Enable();
678 ioreq = (struct IOUsbHWReq *) hc->hc_CtrlXFerQueue.lh_Head;
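/*
 * Build interrupt transfers from the hc_IntXFerQueue. The queue head is
 * linked into the interrupt QH level matching iouh_Interval; for high speed
 * endpoints the microframe start mask is derived from the interval, while
 * split transactions use fixed start/complete-split masks.
 */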
682 void ehciScheduleIntTDs(struct PCIController *hc) {
684 struct PCIUnit *unit = hc->hc_Unit;
685 struct IOUsbHWReq *ioreq;
686 UWORD devadrep;
687 UWORD cnt;
688 struct EhciQH *eqh;
689 struct EhciQH *inteqh;
690 struct EhciTD *etd;
691 struct EhciTD *predetd;
692 ULONG epcaps;
693 ULONG ctrlstatus;
694 ULONG splitctrl;
695 ULONG len;
696 IPTR phyaddr;
698 /* *** INT Transfers *** */
699 KPRINTF(1, ("Scheduling new INT transfers...\n"));
700 ioreq = (struct IOUsbHWReq *) hc->hc_IntXFerQueue.lh_Head;
701 while(((struct Node *) ioreq)->ln_Succ)
703 devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint + ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
704 KPRINTF(10, ("New INT transfer to %ld.%ld: %ld bytes\n", ioreq->iouh_DevAddr, ioreq->iouh_Endpoint, ioreq->iouh_Length));
705 /* is endpoint already in use or do we have to wait for next transaction */
706 if(unit->hu_DevBusyReq[devadrep])
708 KPRINTF(5, ("Endpoint %02lx in use!\n", devadrep));
709 ioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ;
710 continue;
713 eqh = ehciAllocQH(hc);
714 if(!eqh)
716 break;
719 eqh->eqh_IOReq = ioreq;
720 eqh->eqh_Actual = 0;
722 epcaps = (0<<EQES_RELOAD)|(ioreq->iouh_MaxPktSize<<EQES_MAXPKTLEN)|(ioreq->iouh_DevAddr<<EQES_DEVADDR)|(ioreq->iouh_Endpoint<<EQES_ENDPOINT);
723 if(ioreq->iouh_Flags & UHFF_SPLITTRANS)
725 KPRINTF(10, ("*** SPLIT TRANSACTION to HubPort %ld at Addr %ld\n", ioreq->iouh_SplitHubPort, ioreq->iouh_SplitHubAddr));
726 // full speed and low speed handling
727 if(ioreq->iouh_Flags & UHFF_LOWSPEED)
729 KPRINTF(10, ("*** LOW SPEED ***\n"));
730 epcaps |= EQEF_LOWSPEED;
732 WRITEMEM32_LE(&eqh->eqh_SplitCtrl, (EQSF_MULTI_1|(0x01<<EQSS_MUSOFACTIVE)|(0x1c<<EQSS_MUSOFCSPLIT))|(ioreq->iouh_SplitHubPort<<EQSS_PORTNUMBER)|(ioreq->iouh_SplitHubAddr<<EQSS_HUBADDRESS));
733 if(ioreq->iouh_Interval >= 255)
735 inteqh = hc->hc_EhciIntQH[8]; // 256ms interval
736 } else {
737 cnt = 0;
740 inteqh = hc->hc_EhciIntQH[cnt++];
741 } while(ioreq->iouh_Interval >= (1<<cnt));
743 } else {
744 epcaps |= EQEF_HIGHSPEED;
745 if(ioreq->iouh_Flags & UHFF_MULTI_3)
747 splitctrl = EQSF_MULTI_3;
749 else if(ioreq->iouh_Flags & UHFF_MULTI_2)
751 splitctrl = EQSF_MULTI_2;
752 } else {
753 splitctrl = EQSF_MULTI_1;
755 if(ioreq->iouh_Interval < 2) // 0-1 µFrames
757 splitctrl |= (0xff<<EQSS_MUSOFACTIVE);
759 else if(ioreq->iouh_Interval < 4) // 2-3 µFrames
761 splitctrl |= (0x55<<EQSS_MUSOFACTIVE);
763 else if(ioreq->iouh_Interval < 8) // 4-7 µFrames
765 splitctrl |= (0x22<<EQSS_MUSOFACTIVE);
767 else if(ioreq->iouh_Interval > 511) // 64ms and higher
769 splitctrl |= (0x10<<EQSS_MUSOFACTIVE);
771 else //if(ioreq->iouh_Interval >= 8) // 1-64ms
773 splitctrl |= (0x01<<EQSS_MUSOFACTIVE);
775 WRITEMEM32_LE(&eqh->eqh_SplitCtrl, splitctrl);
776 if(ioreq->iouh_Interval >= 1024)
778 inteqh = hc->hc_EhciIntQH[10]; // 1024 µFrames interval
779 } else {
780 cnt = 0;
783 inteqh = hc->hc_EhciIntQH[cnt++];
784 } while(ioreq->iouh_Interval >= (1<<cnt));
787 WRITEMEM32_LE(&eqh->eqh_EPCaps, epcaps);
788 //eqh->eqh_CtrlStatus = eqh->eqh_CurrTD = 0;
789 eqh->eqh_FirstTD = NULL; // clear for ehciFreeQHandTDs()
791 ctrlstatus = (ioreq->iouh_Dir == UHDIR_IN) ? (ETCF_3ERRORSLIMIT|ETCF_ACTIVE|ETCF_PIDCODE_IN) : (ETCF_3ERRORSLIMIT|ETCF_ACTIVE|ETCF_PIDCODE_OUT);
792 if(unit->hu_DevDataToggle[devadrep])
794 // continue with data toggle 0
795 ctrlstatus |= ETCF_DATA1;
797 predetd = NULL;
798 eqh->eqh_Buffer = usbGetBuffer(ioreq->iouh_Data, ioreq->iouh_Length, ioreq->iouh_Dir);
799 phyaddr = (IPTR) pciGetPhysical(hc, eqh->eqh_Buffer);
802 etd = ehciAllocTD(hc);
803 if(!etd)
805 break;
807 if(predetd)
809 predetd->etd_Succ = etd;
810 predetd->etd_NextTD = etd->etd_Self;
811 predetd->etd_AltNextTD = hc->hc_ShortPktEndTD->etd_Self;
812 } else {
813 eqh->eqh_FirstTD = etd;
814 //eqh->eqh_AltNextTD = eqh->eqh_NextTD = etd->etd_Self;
817 len = ioreq->iouh_Length - eqh->eqh_Actual;
818 if(len > 4*EHCI_PAGE_SIZE)
820 len = 4*EHCI_PAGE_SIZE;
822 etd->etd_Length = len;
823 WRITEMEM32_LE(&etd->etd_CtrlStatus, ctrlstatus|(len<<ETSS_TRANSLENGTH));
824 // FIXME need quark scatter gather mechanism here
825 WRITEMEM32_LE(&etd->etd_BufferPtr[0], phyaddr);
826 WRITEMEM32_LE(&etd->etd_BufferPtr[1], (phyaddr & EHCI_PAGE_MASK) + (1*EHCI_PAGE_SIZE));
827 WRITEMEM32_LE(&etd->etd_BufferPtr[2], (phyaddr & EHCI_PAGE_MASK) + (2*EHCI_PAGE_SIZE));
828 WRITEMEM32_LE(&etd->etd_BufferPtr[3], (phyaddr & EHCI_PAGE_MASK) + (3*EHCI_PAGE_SIZE));
829 WRITEMEM32_LE(&etd->etd_BufferPtr[4], (phyaddr & EHCI_PAGE_MASK) + (4*EHCI_PAGE_SIZE));
831 // FIXME Use these on 64-bit-capable hardware
832 etd->etd_ExtBufferPtr[0] = 0;
833 etd->etd_ExtBufferPtr[1] = 0;
834 etd->etd_ExtBufferPtr[2] = 0;
835 etd->etd_ExtBufferPtr[3] = 0;
836 etd->etd_ExtBufferPtr[4] = 0;
838 phyaddr += len;
839 eqh->eqh_Actual += len;
840 predetd = etd;
841 } while(eqh->eqh_Actual < ioreq->iouh_Length);
843 if(!etd)
845 // not enough etds? try again later
846 usbReleaseBuffer(eqh->eqh_Buffer, ioreq->iouh_Data, 0, 0);
847 ehciFreeQHandTDs(hc, eqh);
848 break;
850 ctrlstatus |= ETCF_READYINTEN|(etd->etd_Length<<ETSS_TRANSLENGTH);
851 WRITEMEM32_LE(&predetd->etd_CtrlStatus, ctrlstatus);
853 CONSTWRITEMEM32_LE(&predetd->etd_NextTD, EHCI_TERMINATE);
854 CONSTWRITEMEM32_LE(&predetd->etd_AltNextTD, EHCI_TERMINATE);
855 predetd->etd_Succ = NULL;
857 // due to silicon bugs, we fill in the first overlay ourselves.
858 etd = eqh->eqh_FirstTD;
859 eqh->eqh_CurrTD = etd->etd_Self;
860 eqh->eqh_NextTD = etd->etd_NextTD;
861 eqh->eqh_AltNextTD = etd->etd_AltNextTD;
862 eqh->eqh_CtrlStatus = etd->etd_CtrlStatus;
863 eqh->eqh_BufferPtr[0] = etd->etd_BufferPtr[0];
864 eqh->eqh_BufferPtr[1] = etd->etd_BufferPtr[1];
865 eqh->eqh_BufferPtr[2] = etd->etd_BufferPtr[2];
866 eqh->eqh_BufferPtr[3] = etd->etd_BufferPtr[3];
867 eqh->eqh_BufferPtr[4] = etd->etd_BufferPtr[4];
868 eqh->eqh_ExtBufferPtr[0] = etd->etd_ExtBufferPtr[0];
869 eqh->eqh_ExtBufferPtr[1] = etd->etd_ExtBufferPtr[1];
870 eqh->eqh_ExtBufferPtr[2] = etd->etd_ExtBufferPtr[2];
871 eqh->eqh_ExtBufferPtr[3] = etd->etd_ExtBufferPtr[3];
872 eqh->eqh_ExtBufferPtr[4] = etd->etd_ExtBufferPtr[4];
874 Remove(&ioreq->iouh_Req.io_Message.mn_Node);
875 ioreq->iouh_DriverPrivate1 = eqh;
877 // manage endpoint going busy
878 unit->hu_DevBusyReq[devadrep] = ioreq;
879 unit->hu_NakTimeoutFrame[devadrep] = (ioreq->iouh_Flags & UHFF_NAKTIMEOUT) ? hc->hc_FrameCounter + (ioreq->iouh_NakTimeout<<3) : 0;
881 Disable();
882 AddTail(&hc->hc_PeriodicTDQueue, (struct Node *) ioreq);
884 // looks good to me, now enqueue this entry in the right IntQH
885 eqh->eqh_Succ = inteqh->eqh_Succ;
886 eqh->eqh_NextQH = eqh->eqh_Succ->eqh_Self;
887 SYNC;
889 eqh->eqh_Pred = inteqh;
890 eqh->eqh_Succ->eqh_Pred = eqh;
891 inteqh->eqh_Succ = eqh;
892 inteqh->eqh_NextQH = eqh->eqh_Self;
893 SYNC;
894 Enable();
896 ehciUpdateIntTree(hc);
898 ioreq = (struct IOUsbHWReq *) hc->hc_IntXFerQueue.lh_Head;
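/*
 * Build bulk transfers from the hc_BulkXFerQueue. A TD chain covering up to
 * about EHCI_TD_BULK_LIMIT bytes (4 pages per TD) is prepared; anything
 * beyond that limit is handled later by reloading the chain in
 * ehciHandleFinishedTDs(). The QH goes behind the async list head.
 */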
902 void ehciScheduleBulkTDs(struct PCIController *hc) {
904 struct PCIUnit *unit = hc->hc_Unit;
905 struct IOUsbHWReq *ioreq;
906 UWORD devadrep;
907 struct EhciQH *eqh;
908 struct EhciTD *etd = NULL;
909 struct EhciTD *predetd;
910 ULONG epcaps;
911 ULONG ctrlstatus;
912 ULONG splitctrl;
913 ULONG len;
914 IPTR phyaddr;
916 /* *** BULK Transfers *** */
917 KPRINTF(1, ("Scheduling new BULK transfers...\n"));
918 ioreq = (struct IOUsbHWReq *) hc->hc_BulkXFerQueue.lh_Head;
919 while(((struct Node *) ioreq)->ln_Succ)
921 devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint + ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
922 KPRINTF(10, ("New BULK transfer to %ld.%ld: %ld bytes\n", ioreq->iouh_DevAddr, ioreq->iouh_Endpoint, ioreq->iouh_Length));
923 /* is endpoint already in use or do we have to wait for next transaction */
924 if(unit->hu_DevBusyReq[devadrep])
926 KPRINTF(5, ("Endpoint %02lx in use!\n", devadrep));
927 ioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ;
928 continue;
931 eqh = ehciAllocQH(hc);
932 if(!eqh)
934 break;
937 eqh->eqh_IOReq = ioreq;
938 eqh->eqh_Actual = 0;
940 epcaps = (0<<EQES_RELOAD)|(ioreq->iouh_MaxPktSize<<EQES_MAXPKTLEN)|(ioreq->iouh_DevAddr<<EQES_DEVADDR)|(ioreq->iouh_Endpoint<<EQES_ENDPOINT);
941 if(ioreq->iouh_Flags & UHFF_SPLITTRANS)
943 KPRINTF(10, ("*** SPLIT TRANSACTION to HubPort %ld at Addr %ld\n", ioreq->iouh_SplitHubPort, ioreq->iouh_SplitHubAddr));
944 // full speed and low speed handling
945 if(ioreq->iouh_Flags & UHFF_LOWSPEED)
947 KPRINTF(10, ("*** LOW SPEED ***\n"));
948 epcaps |= EQEF_LOWSPEED;
950 WRITEMEM32_LE(&eqh->eqh_SplitCtrl, EQSF_MULTI_1|(ioreq->iouh_SplitHubPort<<EQSS_PORTNUMBER)|(ioreq->iouh_SplitHubAddr<<EQSS_HUBADDRESS));
951 } else {
952 epcaps |= EQEF_HIGHSPEED;
953 if(ioreq->iouh_Flags & UHFF_MULTI_3)
955 splitctrl = EQSF_MULTI_3;
957 else if(ioreq->iouh_Flags & UHFF_MULTI_2)
959 splitctrl = EQSF_MULTI_2;
960 } else {
961 splitctrl = EQSF_MULTI_1;
963 WRITEMEM32_LE(&eqh->eqh_SplitCtrl, splitctrl);
965 WRITEMEM32_LE(&eqh->eqh_EPCaps, epcaps);
966 //eqh->eqh_CtrlStatus = eqh->eqh_CurrTD = 0;
967 eqh->eqh_FirstTD = NULL; // clear for ehciFreeQHandTDs()
969 ctrlstatus = (ioreq->iouh_Dir == UHDIR_IN) ? (ETCF_3ERRORSLIMIT|ETCF_ACTIVE|ETCF_PIDCODE_IN) : (ETCF_3ERRORSLIMIT|ETCF_ACTIVE|ETCF_PIDCODE_OUT);
970 if(unit->hu_DevDataToggle[devadrep])
972 // continue with data toggle 0
973 ctrlstatus |= ETCF_DATA1;
975 predetd = NULL;
976 eqh->eqh_Buffer = usbGetBuffer(ioreq->iouh_Data, ioreq->iouh_Length, ioreq->iouh_Dir);
977 phyaddr = (IPTR)pciGetPhysical(hc, eqh->eqh_Buffer);
980 if((eqh->eqh_Actual >= EHCI_TD_BULK_LIMIT) && (eqh->eqh_Actual < ioreq->iouh_Length))
982 KPRINTF(10, ("Bulk too large, splitting...\n"));
983 break;
985 etd = ehciAllocTD(hc);
986 if(!etd)
988 break;
990 if(predetd)
992 predetd->etd_Succ = etd;
993 predetd->etd_NextTD = etd->etd_Self;
994 predetd->etd_AltNextTD = hc->hc_ShortPktEndTD->etd_Self;
995 } else {
996 eqh->eqh_FirstTD = etd;
997 //eqh->eqh_AltNextTD = eqh->eqh_NextTD = etd->etd_Self;
1000 len = ioreq->iouh_Length - eqh->eqh_Actual;
1001 if(len > 4*EHCI_PAGE_SIZE)
1003 len = 4*EHCI_PAGE_SIZE;
1005 etd->etd_Length = len;
1006 KPRINTF(1, ("Bulk TD 0x%p len %ld (%ld/%ld) phy=0x%p\n",
1007 etd, len, eqh->eqh_Actual, ioreq->iouh_Length, phyaddr));
1008 WRITEMEM32_LE(&etd->etd_CtrlStatus, ctrlstatus|(len<<ETSS_TRANSLENGTH));
1009 // FIXME need quark scatter gather mechanism here
1010 WRITEMEM32_LE(&etd->etd_BufferPtr[0], phyaddr);
1011 WRITEMEM32_LE(&etd->etd_BufferPtr[1], (phyaddr & EHCI_PAGE_MASK) + (1*EHCI_PAGE_SIZE));
1012 WRITEMEM32_LE(&etd->etd_BufferPtr[2], (phyaddr & EHCI_PAGE_MASK) + (2*EHCI_PAGE_SIZE));
1013 WRITEMEM32_LE(&etd->etd_BufferPtr[3], (phyaddr & EHCI_PAGE_MASK) + (3*EHCI_PAGE_SIZE));
1014 WRITEMEM32_LE(&etd->etd_BufferPtr[4], (phyaddr & EHCI_PAGE_MASK) + (4*EHCI_PAGE_SIZE));
1016 // FIXME Use these on 64-bit-capable hardware
1017 etd->etd_ExtBufferPtr[0] = 0;
1018 etd->etd_ExtBufferPtr[1] = 0;
1019 etd->etd_ExtBufferPtr[2] = 0;
1020 etd->etd_ExtBufferPtr[3] = 0;
1021 etd->etd_ExtBufferPtr[4] = 0;
1023 phyaddr += len;
1024 eqh->eqh_Actual += len;
1026 predetd = etd;
1027 } while((eqh->eqh_Actual < ioreq->iouh_Length) || (len && (ioreq->iouh_Dir == UHDIR_OUT) && (eqh->eqh_Actual == ioreq->iouh_Length) && (!(ioreq->iouh_Flags & UHFF_NOSHORTPKT)) && ((eqh->eqh_Actual % ioreq->iouh_MaxPktSize) == 0)));
1029 if(!etd)
1031 // not enough etds? try again later
1032 usbReleaseBuffer(eqh->eqh_Buffer, ioreq->iouh_Data, 0, 0);
1033 ehciFreeQHandTDs(hc, eqh);
1034 break;
1036 ctrlstatus |= ETCF_READYINTEN|(predetd->etd_Length<<ETSS_TRANSLENGTH);
1037 WRITEMEM32_LE(&predetd->etd_CtrlStatus, ctrlstatus);
1039 predetd->etd_Succ = NULL;
1040 CONSTWRITEMEM32_LE(&predetd->etd_NextTD, EHCI_TERMINATE);
1041 CONSTWRITEMEM32_LE(&predetd->etd_AltNextTD, EHCI_TERMINATE);
1043 // due to silicon bugs, we fill in the first overlay ourselves.
1044 etd = eqh->eqh_FirstTD;
1045 eqh->eqh_CurrTD = etd->etd_Self;
1046 eqh->eqh_NextTD = etd->etd_NextTD;
1047 eqh->eqh_AltNextTD = etd->etd_AltNextTD;
1048 eqh->eqh_CtrlStatus = etd->etd_CtrlStatus;
1049 eqh->eqh_BufferPtr[0] = etd->etd_BufferPtr[0];
1050 eqh->eqh_BufferPtr[1] = etd->etd_BufferPtr[1];
1051 eqh->eqh_BufferPtr[2] = etd->etd_BufferPtr[2];
1052 eqh->eqh_BufferPtr[3] = etd->etd_BufferPtr[3];
1053 eqh->eqh_BufferPtr[4] = etd->etd_BufferPtr[4];
1054 eqh->eqh_ExtBufferPtr[0] = etd->etd_ExtBufferPtr[0];
1055 eqh->eqh_ExtBufferPtr[1] = etd->etd_ExtBufferPtr[1];
1056 eqh->eqh_ExtBufferPtr[2] = etd->etd_ExtBufferPtr[2];
1057 eqh->eqh_ExtBufferPtr[3] = etd->etd_ExtBufferPtr[3];
1058 eqh->eqh_ExtBufferPtr[4] = etd->etd_ExtBufferPtr[4];
1060 Remove(&ioreq->iouh_Req.io_Message.mn_Node);
1061 ioreq->iouh_DriverPrivate1 = eqh;
1063 // manage endpoint going busy
1064 unit->hu_DevBusyReq[devadrep] = ioreq;
1065 unit->hu_NakTimeoutFrame[devadrep] = (ioreq->iouh_Flags & UHFF_NAKTIMEOUT) ? hc->hc_FrameCounter + (ioreq->iouh_NakTimeout<<3) : 0;
1067 Disable();
1068 AddTail(&hc->hc_TDQueue, (struct Node *) ioreq);
1070 // looks good to me, now enqueue this entry (just behind the asyncQH)
1071 eqh->eqh_Succ = hc->hc_EhciAsyncQH->eqh_Succ;
1072 eqh->eqh_NextQH = eqh->eqh_Succ->eqh_Self;
1073 SYNC;
1075 eqh->eqh_Pred = hc->hc_EhciAsyncQH;
1076 eqh->eqh_Succ->eqh_Pred = eqh;
1077 hc->hc_EhciAsyncQH->eqh_Succ = eqh;
1078 hc->hc_EhciAsyncQH->eqh_NextQH = eqh->eqh_Self;
1079 SYNC;
1080 Enable();
1082 ioreq = (struct IOUsbHWReq *) hc->hc_BulkXFerQueue.lh_Head;
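/* Extend the 14 bit hardware frame counter register into the 32 bit software
   frame counter used for NAK timeout bookkeeping. */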
1086 void ehciUpdateFrameCounter(struct PCIController *hc) {
1088 Disable();
1089 hc->hc_FrameCounter = (hc->hc_FrameCounter & 0xffffc000)|(READREG32_LE(hc->hc_RegBase, EHCI_FRAMECOUNT) & 0x3fff);
1090 Enable();
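/*
 * Software interrupt, caused from ehciIntCode(): frees queue heads parked on
 * the async-free list once the controller has signalled an async schedule
 * advance, processes completed TDs and schedules any queued control,
 * interrupt and bulk transfers.
 */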
1093 static AROS_INTH1(ehciCompleteInt, struct PCIController *, hc)
1095 AROS_INTFUNC_INIT
1097 KPRINTF(1, ("CompleteInt!\n"));
1098 ehciUpdateFrameCounter(hc);
1100 /* **************** PROCESS DONE TRANSFERS **************** */
1102 if(hc->hc_AsyncAdvanced)
1104 struct EhciQH *eqh;
1105 struct EhciTD *etd;
1106 struct EhciTD *nextetd;
1108 hc->hc_AsyncAdvanced = FALSE;
1110 KPRINTF(1, ("AsyncAdvance 0x%p\n", hc->hc_EhciAsyncFreeQH));
1112 while((eqh = hc->hc_EhciAsyncFreeQH))
1114 KPRINTF(1, ("FreeQH 0x%p\n", eqh));
1115 nextetd = eqh->eqh_FirstTD;
1116 while((etd = nextetd))
1118 KPRINTF(1, ("FreeTD 0x%p\n", nextetd));
1119 nextetd = etd->etd_Succ;
1120 ehciFreeTD(hc, etd);
1122 hc->hc_EhciAsyncFreeQH = eqh->eqh_Succ;
1123 ehciFreeQH(hc, eqh);
1127 ehciHandleFinishedTDs(hc);
1129 if(hc->hc_CtrlXFerQueue.lh_Head->ln_Succ)
1131 ehciScheduleCtrlTDs(hc);
1134 if(hc->hc_IntXFerQueue.lh_Head->ln_Succ)
1136 ehciScheduleIntTDs(hc);
1139 if(hc->hc_BulkXFerQueue.lh_Head->ln_Succ)
1141 ehciScheduleBulkTDs(hc);
1144 KPRINTF(1, ("CompleteDone\n"));
1146 return FALSE;
1148 AROS_INTFUNC_EXIT
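/*
 * Hardware interrupt handler: acknowledges USBSTS, maintains the frame
 * counter across rollovers, records port change events for the root hub
 * emulation and defers transfer completion work to the CompleteInt software
 * interrupt.
 */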
1151 static AROS_INTH1(ehciIntCode, struct PCIController *, hc)
1153 AROS_INTFUNC_INIT
1155 struct PCIDevice *base = hc->hc_Device;
1156 struct PCIUnit *unit = hc->hc_Unit;
1157 ULONG intr;
1159 //KPRINTF(1, ("pciEhciInt()\n"));
1160 intr = READREG32_LE(hc->hc_RegBase, EHCI_USBSTATUS);
1161 if(intr & hc->hc_PCIIntEnMask)
1163 WRITEREG32_LE(hc->hc_RegBase, EHCI_USBSTATUS, intr);
1164 //KPRINTF(1, ("INT=%04lx\n", intr));
1165 if (!(hc->hc_Flags & HCF_ONLINE))
1167 return FALSE;
1169 if(intr & EHSF_FRAMECOUNTOVER)
1171 hc->hc_FrameCounter |= 0x3fff;
1172 hc->hc_FrameCounter++;
1173 hc->hc_FrameCounter |= READREG32_LE(hc->hc_RegBase, EHCI_FRAMECOUNT) & 0x3fff;
1174 KPRINTF(5, ("Frame Counter Rollover %ld\n", hc->hc_FrameCounter));
1176 if(intr & EHSF_ASYNCADVANCE)
1178 KPRINTF(1, ("AsyncAdvance\n"));
1179 hc->hc_AsyncAdvanced = TRUE;
1181 if(intr & EHSF_HOSTERROR)
1183 KPRINTF(200, ("Host ERROR!\n"));
1185 if(intr & EHSF_PORTCHANGED)
1187 UWORD hciport;
1188 ULONG oldval;
1189 UWORD portreg = EHCI_PORTSC1;
1190 for(hciport = 0; hciport < hc->hc_NumPorts; hciport++, portreg += 4)
1192 oldval = READREG32_LE(hc->hc_RegBase, portreg);
1193 // reflect port ownership (shortcut without hc->hc_PortNum20[hciport], as usb 2.0 maps 1:1)
1194 unit->hu_EhciOwned[hciport] = (oldval & EHPF_NOTPORTOWNER) ? FALSE : TRUE;
1195 if(oldval & EHPF_ENABLECHANGE)
1197 hc->hc_PortChangeMap[hciport] |= UPSF_PORT_ENABLE;
1199 if(oldval & EHPF_CONNECTCHANGE)
1201 hc->hc_PortChangeMap[hciport] |= UPSF_PORT_CONNECTION;
1203 if(oldval & EHPF_RESUMEDTX)
1205 hc->hc_PortChangeMap[hciport] |= UPSF_PORT_SUSPEND|UPSF_PORT_ENABLE;
1207 if(oldval & EHPF_OVERCURRENTCHG)
1209 hc->hc_PortChangeMap[hciport] |= UPSF_PORT_OVER_CURRENT;
1211 WRITEREG32_LE(hc->hc_RegBase, portreg, oldval);
1212 KPRINTF(20, ("PCI Int Port %ld Change %08lx\n", hciport + 1, oldval));
1213 if(hc->hc_PortChangeMap[hciport])
1215 unit->hu_RootPortChanges |= 1UL<<(hciport + 1);
1218 uhwCheckRootHubChanges(unit);
1220 if(intr & (EHSF_TDDONE|EHSF_TDERROR|EHSF_ASYNCADVANCE))
1222 SureCause(base, &hc->hc_CompleteInt);
1226 return FALSE;
1228 AROS_INTFUNC_EXIT
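/*
 * One-time controller setup: allocates DMA-able memory for the periodic
 * frame list and the QH/TD pools, builds the static interrupt QH tree,
 * performs the BIOS-to-OS handoff, resets the controller and programs the
 * schedule base registers and interrupt enables before setting the configure
 * flag and RUN/STOP.
 */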
1231 BOOL ehciInit(struct PCIController *hc, struct PCIUnit *hu) {
1233 struct PCIDevice *hd = hu->hu_Device;
1235 struct EhciQH *eqh;
1236 struct EhciQH *predeqh;
1237 struct EhciTD *etd;
1238 ULONG *tabptr;
1239 UBYTE *memptr;
1240 ULONG bitcnt;
1241 ULONG hcsparams;
1242 ULONG hccparams;
1243 volatile APTR pciregbase;
1244 ULONG extcapoffset;
1245 ULONG legsup;
1246 ULONG timeout;
1247 ULONG tmp;
1249 ULONG cnt;
1251 struct TagItem pciActivateMem[] =
1253 { aHidd_PCIDevice_isMEM, TRUE },
1254 { TAG_DONE, 0UL },
1257 struct TagItem pciActivateBusmaster[] =
1259 { aHidd_PCIDevice_isMaster, TRUE },
1260 { TAG_DONE, 0UL },
1263 struct TagItem pciDeactivateBusmaster[] =
1265 { aHidd_PCIDevice_isMaster, FALSE },
1266 { TAG_DONE, 0UL },
1269 hc->hc_portroute = 0;
1271 hc->hc_CompleteInt.is_Node.ln_Type = NT_INTERRUPT;
1272 hc->hc_CompleteInt.is_Node.ln_Name = "EHCI CompleteInt";
1273 hc->hc_CompleteInt.is_Node.ln_Pri = 0;
1274 hc->hc_CompleteInt.is_Data = hc;
1275 hc->hc_CompleteInt.is_Code = (VOID_FUNC)ehciCompleteInt;
1278 FIXME: Check the real size from USBCMD Frame List Size field (bits3:2)
1279 and set the value accordingly if Frame List Flag in the HCCPARAMS indicates RW for the field
1280 else use default value of EHCI_FRAMELIST_SIZE (1024)
1282 hc->hc_PCIMemSize = sizeof(ULONG) * EHCI_FRAMELIST_SIZE + EHCI_FRAMELIST_ALIGNMENT + 1;
1283 hc->hc_PCIMemSize += sizeof(struct EhciQH) * EHCI_QH_POOLSIZE;
1284 hc->hc_PCIMemSize += sizeof(struct EhciTD) * EHCI_TD_POOLSIZE;
1287 FIXME: We should be able to read some EHCI registers before allocating memory
1289 memptr = HIDD_PCIDriver_AllocPCIMem(hc->hc_PCIDriverObject, hc->hc_PCIMemSize);
1290 hc->hc_PCIMem = (APTR) memptr;
1292 if(memptr) {
1293 // PhysicalAddress - VirtualAdjust = VirtualAddress
1294 // VirtualAddress + VirtualAdjust = PhysicalAddress
1295 hc->hc_PCIVirtualAdjust = pciGetPhysical(hc, memptr) - (APTR)memptr;
1296 KPRINTF(10, ("VirtualAdjust 0x%08lx\n", hc->hc_PCIVirtualAdjust));
1298 // align memory
1299 memptr = (UBYTE *) ((((IPTR) hc->hc_PCIMem) + EHCI_FRAMELIST_ALIGNMENT) & (~EHCI_FRAMELIST_ALIGNMENT));
1300 hc->hc_EhciFrameList = (ULONG *) memptr;
1301 KPRINTF(10, ("FrameListBase 0x%p\n", hc->hc_EhciFrameList));
1302 memptr += sizeof(APTR) * EHCI_FRAMELIST_SIZE;
1304 // build up QH pool
1305 eqh = (struct EhciQH *) memptr;
1306 hc->hc_EhciQHPool = eqh;
1307 cnt = EHCI_QH_POOLSIZE - 1;
1308 do {
1309 // minimal initialization
1310 eqh->eqh_Succ = (eqh + 1);
1311 WRITEMEM32_LE(&eqh->eqh_Self, (IPTR) (&eqh->eqh_NextQH) + hc->hc_PCIVirtualAdjust + EHCI_QUEUEHEAD);
1312 CONSTWRITEMEM32_LE(&eqh->eqh_NextTD, EHCI_TERMINATE);
1313 CONSTWRITEMEM32_LE(&eqh->eqh_AltNextTD, EHCI_TERMINATE);
1314 eqh++;
1315 } while(--cnt);
1316 eqh->eqh_Succ = NULL;
1317 WRITEMEM32_LE(&eqh->eqh_Self, (IPTR) (&eqh->eqh_NextQH) + hc->hc_PCIVirtualAdjust + EHCI_QUEUEHEAD);
1318 CONSTWRITEMEM32_LE(&eqh->eqh_NextTD, EHCI_TERMINATE);
1319 CONSTWRITEMEM32_LE(&eqh->eqh_AltNextTD, EHCI_TERMINATE);
1320 memptr += sizeof(struct EhciQH) * EHCI_QH_POOLSIZE;
1322 // build up TD pool
1323 etd = (struct EhciTD *) memptr;
1324 hc->hc_EhciTDPool = etd;
1325 cnt = EHCI_TD_POOLSIZE - 1;
1328 etd->etd_Succ = (etd + 1);
1329 WRITEMEM32_LE(&etd->etd_Self, (IPTR) (&etd->etd_NextTD) + hc->hc_PCIVirtualAdjust);
1330 etd++;
1331 } while(--cnt);
1332 etd->etd_Succ = NULL;
1333 WRITEMEM32_LE(&etd->etd_Self, (IPTR) (&etd->etd_NextTD) + hc->hc_PCIVirtualAdjust);
1334 memptr += sizeof(struct EhciTD) * EHCI_TD_POOLSIZE;
1336 // empty async queue head
1337 hc->hc_EhciAsyncFreeQH = NULL;
1338 hc->hc_EhciAsyncQH = eqh = ehciAllocQH(hc);
1339 eqh->eqh_Succ = eqh;
1340 eqh->eqh_Pred = eqh;
1341 CONSTWRITEMEM32_LE(&eqh->eqh_EPCaps, EQEF_RECLAMHEAD);
1342 eqh->eqh_NextQH = eqh->eqh_Self;
1344 // empty terminating queue head
1345 hc->hc_EhciTermQH = eqh = ehciAllocQH(hc);
1346 eqh->eqh_Succ = NULL;
1347 CONSTWRITEMEM32_LE(&eqh->eqh_NextQH, EHCI_TERMINATE);
1348 predeqh = eqh;
1350 // 1 ms INT QH
1351 hc->hc_EhciIntQH[0] = eqh = ehciAllocQH(hc);
1352 eqh->eqh_Succ = predeqh;
1353 predeqh->eqh_Pred = eqh;
1354 eqh->eqh_Pred = NULL; // who knows...
1355 //eqh->eqh_NextQH = predeqh->eqh_Self;
1356 predeqh = eqh;
1358 // make 11 levels of QH interrupts
1359 for(cnt = 1; cnt < 11; cnt++)
1361 hc->hc_EhciIntQH[cnt] = eqh = ehciAllocQH(hc);
1362 eqh->eqh_Succ = predeqh;
1363 eqh->eqh_Pred = NULL; // who knows...
1364 //eqh->eqh_NextQH = predeqh->eqh_Self; // link to previous int level
1365 predeqh = eqh;
1368 ehciUpdateIntTree(hc);
1370 // fill in framelist with IntQH entry points based on interval
1371 tabptr = hc->hc_EhciFrameList;
1372 for(cnt = 0; cnt < EHCI_FRAMELIST_SIZE; cnt++)
1374 eqh = hc->hc_EhciIntQH[10];
1375 bitcnt = 0;
1378 if(cnt & (1UL<<bitcnt))
1380 eqh = hc->hc_EhciIntQH[bitcnt];
1381 break;
1383 } while(++bitcnt < 11);
1384 *tabptr++ = eqh->eqh_Self;
1387 etd = hc->hc_ShortPktEndTD = ehciAllocTD(hc);
1388 etd->etd_Succ = NULL;
1389 CONSTWRITEMEM32_LE(&etd->etd_NextTD, EHCI_TERMINATE);
1390 CONSTWRITEMEM32_LE(&etd->etd_AltNextTD, EHCI_TERMINATE);
1391 CONSTWRITEMEM32_LE(&etd->etd_CtrlStatus, 0);
1393 // time to initialize hardware...
1394 OOP_GetAttr(hc->hc_PCIDeviceObject, aHidd_PCIDevice_Base0, (IPTR *) &pciregbase);
1395 pciregbase = (APTR) (((IPTR) pciregbase) & (~0xf));
1396 OOP_SetAttrs(hc->hc_PCIDeviceObject, (struct TagItem *) pciActivateMem); // activate memory
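/* Walk the EHCI extended capability list in PCI config space; capability ID
   0x01 is the legacy support (BIOS handoff) register, where the OS-owned bit
   is set and the BIOS-owned bit is polled until the firmware releases the
   controller, after which all SMIs are disabled. */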
1398 extcapoffset = (READREG32_LE(pciregbase, EHCI_HCCPARAMS) & EHCM_EXTCAPOFFSET)>>EHCS_EXTCAPOFFSET;
1400 while(extcapoffset >= 0x40)
1402 KPRINTF(10, ("EHCI has extended caps at 0x%08lx\n", extcapoffset));
1403 legsup = PCIXReadConfigLong(hc, extcapoffset);
1404 if(((legsup & EHLM_CAP_ID) >> EHLS_CAP_ID) == 0x01)
1406 if(legsup & EHLF_BIOS_OWNER)
1408 KPRINTF(10, ("BIOS still has hands on EHCI, trying to get rid of it\n"));
1409 legsup |= EHLF_OS_OWNER;
1410 PCIXWriteConfigLong(hc, extcapoffset, legsup);
1411 timeout = 100;
1414 legsup = PCIXReadConfigLong(hc, extcapoffset);
1415 if(!(legsup & EHLF_BIOS_OWNER))
1417 KPRINTF(10, ("BIOS gave up on EHCI. Pwned!\n"));
1418 break;
1420 uhwDelayMS(10, hu);
1421 } while(--timeout);
1422 if(!timeout)
1424 KPRINTF(10, ("BIOS didn't release EHCI. Forcing and praying...\n"));
1425 legsup |= EHLF_OS_OWNER;
1426 legsup &= ~EHLF_BIOS_OWNER;
1427 PCIXWriteConfigLong(hc, extcapoffset, legsup);
1430 /* disable all SMIs */
1431 PCIXWriteConfigLong(hc, extcapoffset + 4, 0);
1432 break;
1434 extcapoffset = (legsup & EHCM_EXTCAPOFFSET)>>EHCS_EXTCAPOFFSET;
1437 OOP_SetAttrs(hc->hc_PCIDeviceObject, (struct TagItem *) pciDeactivateBusmaster); // no busmaster yet
1439 // we use the operational registers as RegBase.
1440 hc->hc_RegBase = (APTR) ((IPTR) pciregbase + READREG16_LE(pciregbase, EHCI_CAPLENGTH));
1441 KPRINTF(10, ("RegBase = 0x%p\n", hc->hc_RegBase));
1443 KPRINTF(10, ("Resetting EHCI HC\n"));
1444 KPRINTF(10, ("EHCI CMD: 0x%08x STS: 0x%08x\n", READREG32_LE(hc->hc_RegBase, EHCI_USBCMD), READREG32_LE(hc->hc_RegBase, EHCI_USBSTATUS)));
1445 /* Step 1: Stop the HC */
1446 tmp = READREG32_LE(hc->hc_RegBase, EHCI_USBCMD);
1447 tmp &= ~EHUF_RUNSTOP;
1448 CONSTWRITEREG32_LE(hc->hc_RegBase, EHCI_USBCMD, tmp);
1450 /* Step 2. Wait for the controller to halt */
1451 cnt = 100;
1454 uhwDelayMS(10, hu);
1455 if(READREG32_LE(hc->hc_RegBase, EHCI_USBSTATUS) & EHSF_HCHALTED)
1457 break;
1459 } while (cnt--);
1460 if (cnt == 0)
1462 KPRINTF(200, ("EHCI: Timeout waiting for controller to halt\n"));
1465 /* Step 3. Reset the controller */
1466 WRITEREG32_LE(hc->hc_RegBase, EHCI_USBCMD, tmp | EHUF_HCRESET);
1468 /* Step 4. Wait for the reset bit to clear */
1469 cnt = 100;
1472 uhwDelayMS(10, hu);
1473 if(!(READREG32_LE(hc->hc_RegBase, EHCI_USBCMD) & EHUF_HCRESET))
1475 break;
1477 } while(--cnt);
1479 #ifdef DEBUG
1480 if(cnt == 0)
1482 KPRINTF(20, ("Reset Timeout!\n"));
1483 } else {
1484 KPRINTF(20, ("Reset finished after %ld ticks\n", 100-cnt));
1486 #endif
1488 OOP_SetAttrs(hc->hc_PCIDeviceObject, (struct TagItem *) pciActivateBusmaster); // enable busmaster
1490 // Read HCSPARAMS register to obtain number of downstream ports
1491 hcsparams = READREG32_LE(pciregbase, EHCI_HCSPARAMS);
1492 hccparams = READREG32_LE(pciregbase, EHCI_HCCPARAMS);
1494 hc->hc_NumPorts = (hcsparams & EHSM_NUM_PORTS)>>EHSS_NUM_PORTS;
1496 KPRINTF(20, ("Found EHCI Controller 0x%p with %ld ports (%ld companions with %ld ports each)\n",
1497 hc->hc_PCIDeviceObject, hc->hc_NumPorts,
1498 (hcsparams & EHSM_NUM_COMPANIONS)>>EHSS_NUM_COMPANIONS,
1499 (hcsparams & EHSM_PORTS_PER_COMP)>>EHSS_PORTS_PER_COMP));
1501 if(hcsparams & EHSF_EXTPORTROUTING)
1503 hc->hc_complexrouting = TRUE;
1504 hc->hc_portroute = READREG32_LE(pciregbase, EHCI_HCSPPORTROUTE);
1505 #ifdef DEBUG
1506 for(cnt = 0; cnt < hc->hc_NumPorts; cnt++) {
1507 KPRINTF(100, ("Port %ld maps to controller %ld\n", cnt, ((hc->hc_portroute >> (cnt<<2)) & 0xf)));
1509 #endif
1510 } else {
1511 hc->hc_complexrouting = FALSE;
1514 KPRINTF(20, ("HCCParams: 64 Bit=%s, ProgFrameList=%s, AsyncSchedPark=%s\n",
1515 (hccparams & EHCF_64BITS) ? "Yes" : "No",
1516 (hccparams & EHCF_PROGFRAMELIST) ? "Yes" : "No",
1517 (hccparams & EHCF_ASYNCSCHEDPARK) ? "Yes" : "No"));
1518 hc->hc_EhciUsbCmd = (1UL<<EHUS_INTTHRESHOLD);
1520 /* FIXME HERE: Process EHCF_64BITS flag and implement 64-bit addressing */
1522 if(hccparams & EHCF_ASYNCSCHEDPARK)
1524 KPRINTF(20, ("Enabling AsyncSchedParkMode with MULTI_3\n"));
1525 hc->hc_EhciUsbCmd |= EHUF_ASYNCSCHEDPARK|(3<<EHUS_ASYNCPARKCOUNT);
1528 WRITEREG32_LE(hc->hc_RegBase, EHCI_USBCMD, hc->hc_EhciUsbCmd);
1530 CONSTWRITEREG32_LE(hc->hc_RegBase, EHCI_FRAMECOUNT, 0);
1532 WRITEREG32_LE(hc->hc_RegBase, EHCI_PERIODICLIST, (IPTR)pciGetPhysical(hc, hc->hc_EhciFrameList));
1533 WRITEREG32_LE(hc->hc_RegBase, EHCI_ASYNCADDR, AROS_LONG2LE(hc->hc_EhciAsyncQH->eqh_Self));
1534 CONSTWRITEREG32_LE(hc->hc_RegBase, EHCI_USBSTATUS, EHSF_ALL_INTS);
1536 // install reset handler
1537 hc->hc_ResetInt.is_Code = (VOID_FUNC)EhciResetHandler;
1538 hc->hc_ResetInt.is_Data = hc;
1539 AddResetCallback(&hc->hc_ResetInt);
1541 // add interrupt
1542 hc->hc_PCIIntHandler.is_Node.ln_Name = "EHCI PCI (pciusb.device)";
1543 hc->hc_PCIIntHandler.is_Node.ln_Pri = 5;
1544 hc->hc_PCIIntHandler.is_Node.ln_Type = NT_INTERRUPT;
1545 hc->hc_PCIIntHandler.is_Code = (VOID_FUNC)ehciIntCode;
1546 hc->hc_PCIIntHandler.is_Data = hc;
1547 PCIXAddInterrupt(hc, &hc->hc_PCIIntHandler);
1549 hc->hc_PCIIntEnMask = EHSF_ALL_INTS;
1550 WRITEREG32_LE(hc->hc_RegBase, EHCI_USBINTEN, hc->hc_PCIIntEnMask);
1552 CacheClearE(hc->hc_EhciFrameList, sizeof(ULONG) * EHCI_FRAMELIST_SIZE, CACRF_ClearD);
1553 CacheClearE(hc->hc_EhciQHPool, sizeof(struct EhciQH) * EHCI_QH_POOLSIZE, CACRF_ClearD);
1554 CacheClearE(hc->hc_EhciTDPool, sizeof(struct EhciTD) * EHCI_TD_POOLSIZE, CACRF_ClearD);
1556 CONSTWRITEREG32_LE(hc->hc_RegBase, EHCI_CONFIGFLAG, EHCF_CONFIGURED);
1557 hc->hc_EhciUsbCmd |= EHUF_RUNSTOP|EHUF_PERIODICENABLE|EHUF_ASYNCENABLE;
1558 WRITEREG32_LE(hc->hc_RegBase, EHCI_USBCMD, hc->hc_EhciUsbCmd);
1559 SYNC;
1561 KPRINTF(20, ("HW Init done\n"));
1563 KPRINTF(10, ("HW Regs USBCMD=%04lx\n", READREG32_LE(hc->hc_RegBase, EHCI_USBCMD)));
1564 KPRINTF(10, ("HW Regs USBSTS=%04lx\n", READREG32_LE(hc->hc_RegBase, EHCI_USBSTATUS)));
1565 KPRINTF(10, ("HW Regs FRAMECOUNT=%04lx\n", READREG32_LE(hc->hc_RegBase, EHCI_FRAMECOUNT)));
1567 KPRINTF(1000, ("ehciInit returns TRUE...\n"));
1568 return TRUE;
1572 FIXME: What would the appropriate debug level be?
1574 KPRINTF(1000, ("ehciInit returns FALSE...\n"));
1575 return FALSE;
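/*
 * Shutdown path: for every EHCI controller of this unit the interrupt
 * enables and ports are cleared, the controller is reset and left halted
 * with the configure flag cleared, so port ownership can revert to a
 * companion controller.
 */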
1578 void ehciFree(struct PCIController *hc, struct PCIUnit *hu) {
1580 hc = (struct PCIController *) hu->hu_Controllers.lh_Head;
1581 while(hc->hc_Node.ln_Succ)
1583 switch(hc->hc_HCIType)
1585 case HCITYPE_EHCI:
1587 UWORD portreg;
1588 UWORD hciport;
1589 KPRINTF(20, ("Shutting down EHCI 0x%p\n", hc));
1590 CONSTWRITEREG32_LE(hc->hc_RegBase, EHCI_USBINTEN, 0);
1591 // disable all ports
1592 for(hciport = 0; hciport < hc->hc_NumPorts; hciport++)
1594 portreg = EHCI_PORTSC1 + (hciport<<2);
1595 WRITEREG32_LE(hc->hc_RegBase, portreg, 0);
1597 CONSTWRITEREG32_LE(hc->hc_RegBase, EHCI_USBCMD, 1UL<<EHUS_INTTHRESHOLD);
1598 uhwDelayMS(10, hu);
1599 CONSTWRITEREG32_LE(hc->hc_RegBase, EHCI_CONFIGFLAG, 0);
1600 CONSTWRITEREG32_LE(hc->hc_RegBase, EHCI_USBCMD, EHUF_HCRESET|(1UL<<EHUS_INTTHRESHOLD));
1601 SYNC;
1603 uhwDelayMS(50, hu);
1604 CONSTWRITEREG32_LE(hc->hc_RegBase, EHCI_USBCMD, 1UL<<EHUS_INTTHRESHOLD);
1605 SYNC;
1607 uhwDelayMS(10, hu);
1609 KPRINTF(20, ("Shutting down EHCI done.\n"));
1610 break;
1614 hc = (struct PCIController *) hc->hc_Node.ln_Succ;