/*
 * pciusb.device: ohci: Don't try to move to reset state via OHCI_CONTROL
 * (AROS.git: rom/usb/pciusb/ohcichip.c, blob ee54feed33da3cc41a999580dbdfab58af7fe8c1)
 */
/*
    Copyright © 2010-2011, The AROS Development Team. All rights reserved
    $Id$
*/
6 /* Enable debug level 1000, keeps an eye on TD DoneQueue consistency */
7 #define DEBUG 1
8 #define DB_LEVEL 1000
10 #include <proto/exec.h>
11 #include <proto/oop.h>
12 #include <hidd/pci.h>
13 #include <devices/usb_hub.h>
15 #include <stddef.h>
17 #include "uhwcmd.h"
18 #include "ohciproto.h"
20 #undef HiddPCIDeviceAttrBase
21 #define HiddPCIDeviceAttrBase (hd->hd_HiddPCIDeviceAB)
22 #undef HiddAttrBase
23 #define HiddAttrBase (hd->hd_HiddAB)
#ifdef DEBUG_TD

/*
 * Debug helper: dump a hardware TD chain starting at bus address 'ptd'.
 * Bus addresses are translated back to CPU pointers via hc_PCIVirtualAdjust
 * (otd_Ctrl is the first hardware-visible field of struct OhciTD).
 */
static void PrintTD(const char *txt, ULONG ptd, struct PCIController *hc)
{
    KPrintF("HC 0x%p %s TD list:", hc, txt);

    while (ptd)
    {
        struct OhciTD *otd = (struct OhciTD *)((IPTR)ptd - hc->hc_PCIVirtualAdjust - offsetof(struct OhciTD, otd_Ctrl));

        KPrintF(" 0x%p", otd);
        ptd = READMEM32_LE(&otd->otd_NextTD);
    }
    RawPutChar('\n');
}

#else
#define PrintTD(txt, ptd, hc)
#endif
#ifdef DEBUG_ED

/*
 * Debug helper: dump an ED's hardware fields plus its software-side
 * (otd_Succ-linked) TD list.
 */
static void PrintED(const char *txt, struct OhciED *oed, struct PCIController *hc)
{
    struct OhciTD *otd;

    KPrintF("%s ED 0x%p: EPCaps=%08lx, HeadPtr=%08lx, TailPtr=%08lx, NextED=%08lx\n", txt, oed,
            READMEM32_LE(&oed->oed_EPCaps),
            READMEM32_LE(&oed->oed_HeadPtr),
            READMEM32_LE(&oed->oed_TailPtr),
            READMEM32_LE(&oed->oed_NextED));

    KPrintF("...TD list:", hc, txt, oed);
    for (otd = oed->oed_FirstTD; otd; otd = otd->otd_Succ)
        KPrintF(" 0x%p", otd);
    RawPutChar('\n');
}

#else
#define PrintED(txt, oed, hc)
#endif
67 static AROS_INTH1(OhciResetHandler, struct PCIController *, hc)
69 AROS_INTFUNC_INIT
71 // reset controller
72 CONSTWRITEREG32_LE(hc->hc_RegBase, OHCI_CMDSTATUS, OCSF_HCRESET);
74 return FALSE;
76 AROS_INTFUNC_EXIT
79 static void ohciFreeTDChain(struct PCIController *hc, struct OhciTD *nextotd)
81 struct OhciTD *otd;
83 while (nextotd)
85 KPRINTF(1, ("FreeTD %p\n", nextotd));
86 otd = nextotd;
87 nextotd = (struct OhciTD *) otd->otd_Succ;
88 ohciFreeTD(hc, otd);
92 static void ohciFreeEDContext(struct PCIController *hc, struct IOUsbHWReq *ioreq)
94 struct PCIUnit *unit = hc->hc_Unit;
95 struct OhciED *oed = ioreq->iouh_DriverPrivate1;
96 UWORD devadrep;
97 UWORD dir;
99 KPRINTF(5, ("Freeing EDContext 0x%p IOReq 0x%p\n", oed, ioreq));
101 if (ioreq->iouh_Req.io_Command == UHCMD_CONTROLXFER)
102 dir = (ioreq->iouh_SetupData.bmRequestType & URTF_IN) ? UHDIR_IN : UHDIR_OUT;
103 else
104 dir = ioreq->iouh_Dir;
106 usbReleaseBuffer(oed->oed_Buffer, ioreq->iouh_Data, ioreq->iouh_Actual, dir);
107 usbReleaseBuffer(oed->oed_SetupData, &ioreq->iouh_SetupData, 8, UHDIR_IN);
109 devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint + ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
110 unit->hu_DevBusyReq[devadrep] = NULL;
111 unit->hu_DevDataToggle[devadrep] = (READMEM32_LE(&oed->oed_HeadPtr) & OEHF_DATA1) ? TRUE : FALSE;
113 Disable();
114 ohciFreeTDChain(hc, oed->oed_FirstTD);
115 ohciFreeED(hc, oed);
116 Enable();
119 static void ohciUpdateIntTree(struct PCIController *hc)
121 struct OhciED *oed;
122 struct OhciED *predoed;
123 struct OhciED *lastusedoed;
124 UWORD cnt;
126 // optimize linkage between queue heads
127 predoed = lastusedoed = hc->hc_OhciTermED;
128 for(cnt = 0; cnt < 5; cnt++)
130 oed = hc->hc_OhciIntED[cnt];
131 if(oed->oed_Succ != predoed)
133 lastusedoed = oed->oed_Succ;
135 oed->oed_NextED = lastusedoed->oed_Self;
136 CacheClearE(&oed->oed_EPCaps, 16, CACRF_ClearD);
137 predoed = oed;
141 static void ohciHandleFinishedTDs(struct PCIController *hc)
143 struct IOUsbHWReq *ioreq;
144 struct IOUsbHWReq *nextioreq;
145 struct OhciED *oed = NULL;
146 struct OhciTD *otd;
147 ULONG len;
148 ULONG ctrlstatus;
149 ULONG epcaps;
150 BOOL direction_in;
151 BOOL updatetree = FALSE;
152 ULONG donehead, nexttd;
153 BOOL retire;
155 KPRINTF(1, ("Checking for work done...\n"));
156 Disable();
157 donehead = hc->hc_OhciDoneQueue;
158 hc->hc_OhciDoneQueue = 0UL;
159 Enable();
160 if(!donehead)
162 KPRINTF(1, ("Nothing to do!\n"));
163 return;
165 otd = (struct OhciTD *) ((IPTR)donehead - hc->hc_PCIVirtualAdjust - offsetof(struct OhciTD, otd_Ctrl));
166 KPRINTF(10, ("DoneHead=%08lx, OTD=%p, Frame=%ld\n", donehead, otd, READREG32_LE(hc->hc_RegBase, OHCI_FRAMECOUNT)));
167 PrintTD("Done", donehead, hc); /* CHECKME: This can give inconsistent printout on cache-incoherent hardware */
170 CacheClearE(&otd->otd_Ctrl, 16, CACRF_InvalidateD);
171 oed = otd->otd_ED;
172 if(!oed)
175 * WATCH OUT!!! Rogue TD is a very bad thing!!!
176 * If you see this, there's definitely a bug in DoneQueue processing flow.
177 * See below for the complete description.
179 KPRINTF(1000, ("Came across a rogue TD 0x%p that already has been freed!\n", otd));
180 nexttd = READMEM32_LE(&otd->otd_NextTD) & OHCI_PTRMASK;
181 if(!nexttd)
183 break;
185 otd = (struct OhciTD *) ((IPTR)nexttd - hc->hc_PCIVirtualAdjust - offsetof(struct OhciTD, otd_Ctrl));
186 continue;
188 CacheClearE(&oed->oed_EPCaps, 16, CACRF_InvalidateD);
189 ctrlstatus = READMEM32_LE(&otd->otd_Ctrl);
190 KPRINTF(1, ("TD: %08lx - %08lx\n", READMEM32_LE(&otd->otd_BufferPtr),
191 READMEM32_LE(&otd->otd_BufferEnd)));
192 if(otd->otd_BufferPtr)
194 // FIXME this will blow up if physical memory is ever going to be discontinuous
195 len = READMEM32_LE(&otd->otd_BufferPtr) - (READMEM32_LE(&otd->otd_BufferEnd) + 1 - otd->otd_Length);
196 } else {
197 len = otd->otd_Length;
200 ioreq = oed->oed_IOReq;
202 KPRINTF(1, ("Examining TD %p for ED %p (IOReq=%p), Status %08lx, len=%ld\n", otd, oed, ioreq, ctrlstatus, len));
203 if(!ioreq)
205 /* You should never see this (very weird inconsistency), but who knows... */
206 KPRINTF(1000, ("Came across a rogue ED 0x%p that already has been replied! TD 0x%p,\n", oed, otd));
207 nexttd = READMEM32_LE(&otd->otd_NextTD) & OHCI_PTRMASK;
208 if(!nexttd)
210 break;
212 otd = (struct OhciTD *) ((IPTR)nexttd - hc->hc_PCIVirtualAdjust - offsetof(struct OhciTD, otd_Ctrl));
213 continue;
216 if (len)
218 epcaps = READMEM32_LE(&oed->oed_EPCaps);
219 direction_in = ((epcaps & OECM_DIRECTION) == OECF_DIRECTION_TD)
220 ? (ioreq->iouh_SetupData.bmRequestType & URTF_IN)
221 : (epcaps & OECF_DIRECTION_IN);
222 CachePostDMA((APTR)(IPTR)READMEM32_LE(&otd->otd_BufferEnd) - len + 1, &len, direction_in ? 0 : DMA_ReadFromRAM);
225 ioreq->iouh_Actual += len;
227 * CHECKME: This condition may get triggered on control transfers even if terminating TD is not processed yet.
228 * (got triggered by MacMini's keyboard, when someone sends control ED with no data payload,
229 * and some other ED is being done meanwhile (its final packet generated an interrupt).
230 * In this case the given control ED can be partially done (setup TD is done, term TD is not).
231 * iouh_Length is 0, and the whole ED is retired, while still being processed by the HC. Next time
232 * its terminator TD arrives into done queue.
233 * This can cause weird things like looping TD list on itself. My modification of ohciFreeTD()
234 * explicitly clears NextTD to avoid keeping dangling value there, however the problem still can
235 * appear if this TD is quickly reused by another request.
236 * Final TDs have OTCM_DELAYINT fields set to zero. HC processes TDs in order, so if we receive
237 * the final TD, we assume the whole ED's list has been processed.
238 * This means it should be safe to simply disable this check.
239 * If this doesn't work for some reason, we need a more complex check which makes sure that all TDs
240 * are really done (or ED is halted). This can be done by checking OTCM_COMPLETIONCODE field against
241 * OTCF_CC_INVALID value.
242 * Pavel Fedin <pavel.fedin@mail.ru>
243 retire = (ioreq->iouh_Actual == ioreq->iouh_Length);
244 if (retire)
246 KPRINTF(10, ("TD 0x%p Data transfer done (%lu bytes)\n", otd, ioreq->iouh_Length));
247 } */
248 retire = FALSE;
249 if((ctrlstatus & OTCM_DELAYINT) != OTCF_NOINT)
251 KPRINTF(10, ("TD 0x%p Terminator detected\n", otd));
252 retire = TRUE;
254 switch((ctrlstatus & OTCM_COMPLETIONCODE)>>OTCS_COMPLETIONCODE)
256 case (OTCF_CC_NOERROR>>OTCS_COMPLETIONCODE):
257 break;
259 case (OTCF_CC_CRCERROR>>OTCS_COMPLETIONCODE):
260 KPRINTF(200, ("CRC Error!\n"));
261 ioreq->iouh_Req.io_Error = UHIOERR_CRCERROR;
263 * CHECKME: Do we really need to set retire flag here?
264 * Critical errors are always accompanied by OEHF_HALTED bit.
265 * But what if HC thinks it's recoverable error and continues
266 * working on this ED? In this case early retirement happens,
267 * causing bad things. See long explanation above.
269 retire = TRUE;
270 break;
272 case (OTCF_CC_BABBLE>>OTCS_COMPLETIONCODE):
273 KPRINTF(200, ("Babble/Bitstuffing Error!\n"));
274 ioreq->iouh_Req.io_Error = UHIOERR_CRCERROR;
275 retire = TRUE;
276 break;
278 case (OTCF_CC_WRONGTOGGLE>>OTCS_COMPLETIONCODE):
279 KPRINTF(200, ("Data toggle mismatch length = %ld\n", len));
280 break;
282 case (OTCF_CC_STALL>>OTCS_COMPLETIONCODE):
283 KPRINTF(200, ("STALLED!\n"));
284 ioreq->iouh_Req.io_Error = UHIOERR_STALL;
285 retire = TRUE;
286 break;
288 case (OTCF_CC_TIMEOUT>>OTCS_COMPLETIONCODE):
289 KPRINTF(200, ("TIMEOUT!\n"));
290 ioreq->iouh_Req.io_Error = UHIOERR_TIMEOUT;
291 retire = TRUE;
292 break;
294 case (OTCF_CC_PIDCORRUPT>>OTCS_COMPLETIONCODE):
295 KPRINTF(200, ("PID Error!\n"));
296 ioreq->iouh_Req.io_Error = UHIOERR_CRCERROR;
297 retire = TRUE;
298 break;
300 case (OTCF_CC_WRONGPID>>OTCS_COMPLETIONCODE):
301 KPRINTF(200, ("Illegal PID!\n"));
302 ioreq->iouh_Req.io_Error = UHIOERR_CRCERROR;
303 retire = TRUE;
304 break;
306 case (OTCF_CC_OVERFLOW>>OTCS_COMPLETIONCODE):
307 KPRINTF(200, ("Overflow Error!\n"));
308 ioreq->iouh_Req.io_Error = UHIOERR_OVERFLOW;
309 retire = TRUE;
310 break;
312 case (OTCF_CC_SHORTPKT>>OTCS_COMPLETIONCODE):
313 KPRINTF(10, ("Short packet %ld < %ld\n", len, otd->otd_Length));
314 if((!ioreq->iouh_Req.io_Error) && (!(ioreq->iouh_Flags & UHFF_ALLOWRUNTPKTS)))
316 ioreq->iouh_Req.io_Error = UHIOERR_RUNTPACKET;
318 retire = TRUE;
319 break;
321 case (OTCF_CC_OVERRUN>>OTCS_COMPLETIONCODE):
322 KPRINTF(200, ("Data Overrun Error!\n"));
323 ioreq->iouh_Req.io_Error = UHIOERR_HOSTERROR;
324 retire = TRUE;
325 break;
327 case (OTCF_CC_UNDERRUN>>OTCS_COMPLETIONCODE):
328 KPRINTF(200, ("Data Underrun Error!\n"));
329 ioreq->iouh_Req.io_Error = UHIOERR_HOSTERROR;
330 retire = TRUE;
331 break;
333 case (OTCF_CC_INVALID>>OTCS_COMPLETIONCODE):
334 KPRINTF(200, ("Not touched?!?\n"));
335 break;
337 if(READMEM32_LE(&oed->oed_HeadPtr) & OEHF_HALTED)
339 KPRINTF(100, ("OED halted!\n"));
340 retire = TRUE;
343 if(retire)
345 KPRINTF(50, ("ED 0x%p stopped at TD 0x%p\n", oed, otd));
346 Remove(&ioreq->iouh_Req.io_Message.mn_Node);
347 AddHead(&hc->hc_OhciRetireQueue, &ioreq->iouh_Req.io_Message.mn_Node);
350 nexttd = READMEM32_LE(&otd->otd_NextTD) & OHCI_PTRMASK;
351 KPRINTF(1, ("NextTD=0x%08lx\n", nexttd));
352 if(!nexttd)
354 break;
356 otd = (struct OhciTD *) ((IPTR)nexttd - hc->hc_PCIVirtualAdjust - offsetof(struct OhciTD, otd_Ctrl));
357 KPRINTF(1, ("NextOTD = %p\n", otd));
358 } while(TRUE);
360 ioreq = (struct IOUsbHWReq *) hc->hc_OhciRetireQueue.lh_Head;
361 while((nextioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ))
363 Remove(&ioreq->iouh_Req.io_Message.mn_Node);
364 oed = (struct OhciED *) ioreq->iouh_DriverPrivate1;
365 if(oed)
367 KPRINTF(50, ("HC 0x%p Retiring IOReq=0x%p Command=%ld ED=0x%p, Frame=%ld\n", hc, ioreq, ioreq->iouh_Req.io_Command, oed, READREG32_LE(hc->hc_RegBase, OHCI_FRAMECOUNT)));
369 if(oed->oed_Continue)
371 ULONG actual = ioreq->iouh_Actual;
372 ULONG oldenables;
373 ULONG phyaddr;
374 struct OhciTD *predotd = NULL;
376 KPRINTF(10, ("Reloading Bulk transfer at %ld of %ld\n", ioreq->iouh_Actual, ioreq->iouh_Length));
377 otd = oed->oed_FirstTD;
379 phyaddr = (IPTR)pciGetPhysical(hc, oed->oed_Buffer + actual);
382 len = ioreq->iouh_Length - actual;
383 if(len > OHCI_PAGE_SIZE)
385 len = OHCI_PAGE_SIZE;
387 if((!otd->otd_Succ) && (actual + len == ioreq->iouh_Length) && (!(ioreq->iouh_Flags & UHFF_NOSHORTPKT)) && ((actual % ioreq->iouh_MaxPktSize) == 0))
389 // special case -- zero padding would not fit in this run,
390 // and next time, we would forget about it. So rather abort
391 // reload now, so the zero padding goes with the next reload
392 break;
394 predotd = otd;
395 otd->otd_Length = len;
396 KPRINTF(1, ("TD with %ld bytes: %08x-%08x\n", len, phyaddr, phyaddr+len-1));
397 CONSTWRITEMEM32_LE(&otd->otd_Ctrl, OTCF_CC_INVALID|OTCF_NOINT);
398 if(otd->otd_Succ)
400 otd->otd_NextTD = otd->otd_Succ->otd_Self;
402 if(len)
404 WRITEMEM32_LE(&otd->otd_BufferPtr, (IPTR)CachePreDMA((APTR)(IPTR)phyaddr, &len, (ioreq->iouh_Dir == UHDIR_IN) ? 0 : DMA_ReadFromRAM));
405 phyaddr += len - 1;
406 WRITEMEM32_LE(&otd->otd_BufferEnd, phyaddr);
407 phyaddr++;
408 } else {
409 CONSTWRITEMEM32_LE(&otd->otd_BufferPtr, 0);
410 CONSTWRITEMEM32_LE(&otd->otd_BufferEnd, 0);
412 CacheClearE(&otd->otd_Ctrl, 16, CACRF_ClearD);
413 actual += len;
414 otd = otd->otd_Succ;
415 } while(otd && ((actual < ioreq->iouh_Length) || (len && (ioreq->iouh_Dir == UHDIR_OUT) && (actual == ioreq->iouh_Length) && (!(ioreq->iouh_Flags & UHFF_NOSHORTPKT)) && ((actual % ioreq->iouh_MaxPktSize) == 0))));
416 oed->oed_Continue = (actual < ioreq->iouh_Length);
417 predotd->otd_NextTD = hc->hc_OhciTermTD->otd_Self;
419 CONSTWRITEMEM32_LE(&predotd->otd_Ctrl, OTCF_CC_INVALID);
420 CacheClearE(&predotd->otd_Ctrl, 16, CACRF_ClearD);
422 Disable();
423 AddTail(&hc->hc_TDQueue, &ioreq->iouh_Req.io_Message.mn_Node);
425 // keep toggle bit
426 ctrlstatus = READMEM32_LE(&oed->oed_HeadPtr) & OEHF_DATA1;
427 ctrlstatus |= READMEM32_LE(&oed->oed_FirstTD->otd_Self);
428 WRITEMEM32_LE(&oed->oed_HeadPtr, ctrlstatus);
429 CacheClearE(&oed->oed_EPCaps, 16, CACRF_ClearD);
431 PrintED("Continued bulk", oed, hc);
433 oldenables = READREG32_LE(hc->hc_RegBase, OHCI_CMDSTATUS);
434 oldenables |= OCSF_BULKENABLE;
435 WRITEREG32_LE(hc->hc_RegBase, OHCI_CMDSTATUS, oldenables);
436 SYNC;
437 Enable();
438 } else {
439 // disable ED
440 ohciDisableED(oed);
441 PrintED("Completed", oed, hc);
443 ohciFreeEDContext(hc, ioreq);
444 if(ioreq->iouh_Req.io_Command == UHCMD_INTXFER)
446 updatetree = TRUE;
448 // check for successful clear feature and set address ctrl transfers
449 if((!ioreq->iouh_Req.io_Error) && (ioreq->iouh_Req.io_Command == UHCMD_CONTROLXFER))
451 uhwCheckSpecialCtrlTransfers(hc, ioreq);
453 ReplyMsg(&ioreq->iouh_Req.io_Message);
455 } else {
456 KPRINTF(20, ("IOReq=%p has no OED!\n", ioreq));
458 ioreq = nextioreq;
460 if(updatetree)
462 ohciUpdateIntTree(hc);
466 static ULONG ohciHandleAbortedEDs(struct PCIController *hc)
468 struct IOUsbHWReq *ioreq;
469 ULONG restartmask = 0;
471 KPRINTF(50, ("Processing abort queue...\n"));
473 // We don't need this any more
474 ohciDisableInt(hc, OISF_SOF);
477 * If the aborted IORequest was replied in ohciHandleFinishedTDs(),
478 * it was already Remove()d from this queue. It's safe to do no checks.
479 * io_Error was set earlier.
481 while ((ioreq = (struct IOUsbHWReq *)RemHead(&hc->hc_AbortQueue)))
483 KPRINTF(70, ("HC 0x%p Aborted IOReq 0x%p\n", hc, ioreq));
484 PrintED("Aborted", ioreq->iouh_DriverPrivate1, hc);
486 ohciFreeEDContext(hc, ioreq);
487 ReplyMsg(&ioreq->iouh_Req.io_Message);
490 /* Restart stopped queues */
491 if (hc->hc_Flags & HCF_STOP_CTRL)
493 KPRINTF(50, ("Restarting control transfers\n"));
494 CONSTWRITEREG32_LE(hc->hc_RegBase, OHCI_CTRL_ED, 0);
495 restartmask |= OCSF_CTRLENABLE;
498 if (hc->hc_Flags & HCF_STOP_BULK)
500 KPRINTF(50, ("Restarting bulk transfers\n"));
501 CONSTWRITEREG32_LE(hc->hc_RegBase, OHCI_BULK_ED, 0);
502 restartmask |= OCSF_BULKENABLE;
505 /* Everything is enabled again, aborting done */
506 hc->hc_Flags &= ~(HCF_STOP_CTRL | HCF_STOP_BULK | HCF_ABORT);
508 /* We will accumulate flags and start queues only once, when everything is set up */
509 return restartmask;
512 static ULONG ohciScheduleCtrlTDs(struct PCIController *hc)
514 struct PCIUnit *unit = hc->hc_Unit;
515 struct IOUsbHWReq *ioreq;
516 UWORD devadrep;
517 struct OhciED *oed;
518 struct OhciTD *setupotd;
519 struct OhciTD *dataotd;
520 struct OhciTD *termotd;
521 struct OhciTD *predotd;
522 ULONG actual;
523 ULONG epcaps;
524 ULONG ctrl;
525 ULONG len;
526 ULONG phyaddr;
527 ULONG oldenables;
528 ULONG startmask = 0;
530 /* *** CTRL Transfers *** */
531 KPRINTF(1, ("Scheduling new CTRL transfers...\n"));
532 ioreq = (struct IOUsbHWReq *) hc->hc_CtrlXFerQueue.lh_Head;
533 while(((struct Node *) ioreq)->ln_Succ)
535 devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint;
536 KPRINTF(10, ("New CTRL transfer to %ld.%ld: %ld bytes\n", ioreq->iouh_DevAddr, ioreq->iouh_Endpoint, ioreq->iouh_Length));
537 /* is endpoint already in use or do we have to wait for next transaction */
538 if(unit->hu_DevBusyReq[devadrep])
540 KPRINTF(5, ("Endpoint %02lx in use!\n", devadrep));
541 ioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ;
542 continue;
545 oed = ohciAllocED(hc);
546 if(!oed)
548 break;
551 setupotd = ohciAllocTD(hc);
552 if(!setupotd)
554 ohciFreeED(hc, oed);
555 break;
557 termotd = ohciAllocTD(hc);
558 if(!termotd)
560 ohciFreeTD(hc, setupotd);
561 ohciFreeED(hc, oed);
562 break;
564 oed->oed_IOReq = ioreq;
566 KPRINTF(1, ("SetupTD=%p, TermTD=%p\n", setupotd, termotd));
568 // fill setup td
569 epcaps = (ioreq->iouh_DevAddr<<OECS_DEVADDR)|(ioreq->iouh_Endpoint<<OECS_ENDPOINT)|(ioreq->iouh_MaxPktSize<<OECS_MAXPKTLEN)|OECF_DIRECTION_TD;
571 if(ioreq->iouh_Flags & UHFF_LOWSPEED)
573 KPRINTF(5, ("*** LOW SPEED ***\n"));
574 epcaps |= OECF_LOWSPEED;
577 WRITEMEM32_LE(&oed->oed_EPCaps, epcaps);
579 oed->oed_TailPtr = hc->hc_OhciTermTD->otd_Self;
580 oed->oed_HeadPtr = setupotd->otd_Self;
581 oed->oed_FirstTD = setupotd;
583 setupotd->otd_ED = oed;
584 setupotd->otd_Length = 0; // don't increase io_Actual for that transfer
585 CONSTWRITEMEM32_LE(&setupotd->otd_Ctrl, OTCF_PIDCODE_SETUP|OTCF_CC_INVALID|OTCF_NOINT);
586 len = 8;
588 /* CHECKME: As i can understand, setup packet is always sent TO the device. Is this true? */
589 oed->oed_SetupData = usbGetBuffer(&ioreq->iouh_SetupData, len, UHDIR_OUT);
590 WRITEMEM32_LE(&setupotd->otd_BufferPtr, (IPTR) CachePreDMA(pciGetPhysical(hc, oed->oed_SetupData), &len, DMA_ReadFromRAM));
591 WRITEMEM32_LE(&setupotd->otd_BufferEnd, (IPTR) pciGetPhysical(hc, ((UBYTE *)oed->oed_SetupData) + 7));
593 KPRINTF(1, ("TD send: %08lx - %08lx\n", READMEM32_LE(&setupotd->otd_BufferPtr),
594 READMEM32_LE(&setupotd->otd_BufferEnd)));
596 ctrl = (ioreq->iouh_SetupData.bmRequestType & URTF_IN) ? (OTCF_PIDCODE_IN|OTCF_CC_INVALID|OTCF_NOINT) : (OTCF_PIDCODE_OUT|OTCF_CC_INVALID|OTCF_NOINT);
598 predotd = setupotd;
599 if (ioreq->iouh_Length)
601 oed->oed_Buffer = usbGetBuffer(ioreq->iouh_Data, ioreq->iouh_Length, (ioreq->iouh_SetupData.bmRequestType & URTF_IN) ? UHDIR_IN : UHDIR_OUT);
602 phyaddr = (IPTR)pciGetPhysical(hc, oed->oed_Buffer);
603 actual = 0;
606 dataotd = ohciAllocTD(hc);
607 if(!dataotd)
609 predotd->otd_Succ = NULL;
610 break;
612 dataotd->otd_ED = oed;
613 predotd->otd_Succ = dataotd;
614 predotd->otd_NextTD = dataotd->otd_Self;
615 len = ioreq->iouh_Length - actual;
616 if(len > OHCI_PAGE_SIZE)
618 len = OHCI_PAGE_SIZE;
620 dataotd->otd_Length = len;
621 KPRINTF(1, ("TD with %ld bytes\n", len));
622 WRITEMEM32_LE(&dataotd->otd_Ctrl, ctrl);
624 * CHECKME: Here and there we feed phyaddr to CachePreDMA(), however it expects a logical address.
625 * Perhaps the whole thing works only because HIDD_PCIDriver_CPUtoPCI() actually doesn't do any
626 * translation.
628 WRITEMEM32_LE(&dataotd->otd_BufferPtr, (IPTR)CachePreDMA((APTR)(IPTR)phyaddr, &len, (ioreq->iouh_SetupData.bmRequestType & URTF_IN) ? 0 : DMA_ReadFromRAM));
629 phyaddr += len - 1;
630 WRITEMEM32_LE(&dataotd->otd_BufferEnd, phyaddr);
632 KPRINTF(1, ("TD send: %08lx - %08lx\n", READMEM32_LE(&dataotd->otd_BufferPtr),
633 READMEM32_LE(&dataotd->otd_BufferEnd)));
635 CacheClearE(&dataotd->otd_Ctrl, 16, CACRF_ClearD);
636 phyaddr++;
637 actual += len;
638 predotd = dataotd;
639 } while(actual < ioreq->iouh_Length);
641 if(actual != ioreq->iouh_Length)
643 // out of TDs
644 KPRINTF(200, ("Out of TDs for Ctrl Transfer!\n"));
645 dataotd = setupotd->otd_Succ;
646 ohciFreeTD(hc, setupotd);
647 ohciFreeTDChain(hc, dataotd);
648 ohciFreeTD(hc, termotd);
649 usbReleaseBuffer(oed->oed_Buffer, ioreq->iouh_Data, 0, 0);
650 usbReleaseBuffer(oed->oed_SetupData, &oed->oed_IOReq->iouh_SetupData, 0, 0);
651 ohciFreeED(hc, oed);
652 break;
654 predotd->otd_Succ = termotd;
655 predotd->otd_NextTD = termotd->otd_Self;
656 } else {
657 setupotd->otd_Succ = termotd;
658 setupotd->otd_NextTD = termotd->otd_Self;
660 CacheClearE(&setupotd->otd_Ctrl, 16, CACRF_ClearD);
661 CacheClearE(&predotd->otd_Ctrl, 16, CACRF_ClearD);
663 ctrl ^= (OTCF_PIDCODE_IN^OTCF_PIDCODE_OUT)|OTCF_NOINT|OTCF_DATA1|OTCF_TOGGLEFROMTD;
665 termotd->otd_Length = 0;
666 termotd->otd_ED = oed;
667 termotd->otd_Succ = NULL;
668 termotd->otd_NextTD = hc->hc_OhciTermTD->otd_Self;
669 CONSTWRITEMEM32_LE(&termotd->otd_Ctrl, ctrl);
670 CONSTWRITEMEM32_LE(&termotd->otd_BufferPtr, 0);
671 CONSTWRITEMEM32_LE(&termotd->otd_BufferEnd, 0);
672 CacheClearE(&termotd->otd_Ctrl, 16, CACRF_ClearD);
674 Remove(&ioreq->iouh_Req.io_Message.mn_Node);
675 ioreq->iouh_DriverPrivate1 = oed;
677 // manage endpoint going busy
678 unit->hu_DevBusyReq[devadrep] = ioreq;
679 unit->hu_NakTimeoutFrame[devadrep] = (ioreq->iouh_Flags & UHFF_NAKTIMEOUT) ? hc->hc_FrameCounter + ioreq->iouh_NakTimeout : 0;
681 Disable();
682 AddTail(&hc->hc_TDQueue, (struct Node *) ioreq);
684 // looks good to me, now enqueue this entry
685 oed->oed_Succ = hc->hc_OhciCtrlTailED;
686 oed->oed_NextED = oed->oed_Succ->oed_Self;
687 oed->oed_Pred = hc->hc_OhciCtrlTailED->oed_Pred;
688 CacheClearE(&oed->oed_Pred->oed_EPCaps, 16, CACRF_InvalidateD);
689 oed->oed_Pred->oed_Succ = oed;
690 oed->oed_Pred->oed_NextED = oed->oed_Self;
691 oed->oed_Succ->oed_Pred = oed;
692 CacheClearE(&oed->oed_EPCaps, 16, CACRF_ClearD);
693 CacheClearE(&oed->oed_Pred->oed_EPCaps, 16, CACRF_ClearD);
694 SYNC;
696 PrintED("Control", oed, hc);
698 /* Control request is queued, we will start the queue */
699 startmask = OCSF_CTRLENABLE;
700 Enable();
702 ioreq = (struct IOUsbHWReq *) hc->hc_CtrlXFerQueue.lh_Head;
705 if (startmask)
708 * If we are going to start the queue but it's not running yet,
709 * reset current ED pointer to zero. This will cause the HC to
710 * start over from the head.
712 oldenables = READREG32_LE(hc->hc_RegBase, OHCI_CMDSTATUS);
713 if(!(oldenables & OCSF_BULKENABLE))
715 CONSTWRITEREG32_LE(hc->hc_RegBase, OHCI_BULK_ED, 0);
719 return startmask;
722 static void ohciScheduleIntTDs(struct PCIController *hc)
724 struct PCIUnit *unit = hc->hc_Unit;
725 struct IOUsbHWReq *ioreq;
726 UWORD devadrep;
727 struct OhciED *intoed;
728 struct OhciED *oed;
729 struct OhciTD *otd;
730 struct OhciTD *predotd;
731 ULONG actual;
732 ULONG epcaps;
733 ULONG len;
734 ULONG phyaddr;
736 /* *** INT Transfers *** */
737 KPRINTF(1, ("Scheduling new INT transfers...\n"));
738 ioreq = (struct IOUsbHWReq *) hc->hc_IntXFerQueue.lh_Head;
739 while(((struct Node *) ioreq)->ln_Succ)
741 devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint + ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
742 KPRINTF(10, ("New INT transfer to %ld.%ld: %ld bytes\n", ioreq->iouh_DevAddr, ioreq->iouh_Endpoint, ioreq->iouh_Length));
743 /* is endpoint already in use or do we have to wait for next transaction */
744 if(unit->hu_DevBusyReq[devadrep])
746 KPRINTF(5, ("Endpoint %02lx in use!\n", devadrep));
747 ioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ;
748 continue;
751 oed = ohciAllocED(hc);
752 if(!oed)
754 break;
757 oed->oed_IOReq = ioreq;
759 epcaps = (ioreq->iouh_DevAddr<<OECS_DEVADDR)|(ioreq->iouh_Endpoint<<OECS_ENDPOINT)|(ioreq->iouh_MaxPktSize<<OECS_MAXPKTLEN);
760 epcaps |= (ioreq->iouh_Dir == UHDIR_IN) ? OECF_DIRECTION_IN : OECF_DIRECTION_OUT;
762 if(ioreq->iouh_Flags & UHFF_LOWSPEED)
764 KPRINTF(5, ("*** LOW SPEED ***\n"));
765 epcaps |= OECF_LOWSPEED;
768 WRITEMEM32_LE(&oed->oed_EPCaps, epcaps);
769 oed->oed_TailPtr = hc->hc_OhciTermTD->otd_Self;
771 predotd = NULL;
772 oed->oed_Buffer = usbGetBuffer(ioreq->iouh_Data, ioreq->iouh_Length, ioreq->iouh_Dir);
773 phyaddr = (IPTR)pciGetPhysical(hc, oed->oed_Buffer);
774 actual = 0;
777 otd = ohciAllocTD(hc);
778 if (predotd)
779 predotd->otd_Succ = otd;
780 if (!otd)
782 break;
784 otd->otd_ED = oed;
785 if (predotd)
787 predotd->otd_NextTD = otd->otd_Self;
789 else
791 WRITEMEM32_LE(&oed->oed_HeadPtr, READMEM32_LE(&otd->otd_Self)|(unit->hu_DevDataToggle[devadrep] ? OEHF_DATA1 : 0));
792 oed->oed_FirstTD = otd;
794 len = ioreq->iouh_Length - actual;
795 if(len > OHCI_PAGE_SIZE)
797 len = OHCI_PAGE_SIZE;
799 otd->otd_Length = len;
800 KPRINTF(1, ("Control TD 0x%p with %ld bytes\n", otd, len));
801 CONSTWRITEMEM32_LE(&otd->otd_Ctrl, OTCF_CC_INVALID|OTCF_NOINT);
802 if(len)
804 WRITEMEM32_LE(&otd->otd_BufferPtr, (IPTR)CachePreDMA((APTR)(IPTR)phyaddr, &len, (ioreq->iouh_Dir == UHDIR_IN) ? 0 : DMA_ReadFromRAM));
805 phyaddr += len - 1;
806 WRITEMEM32_LE(&otd->otd_BufferEnd, phyaddr);
807 phyaddr++;
808 } else {
809 CONSTWRITEMEM32_LE(&otd->otd_BufferPtr, 0);
810 CONSTWRITEMEM32_LE(&otd->otd_BufferEnd, 0);
812 actual += len;
813 CacheClearE(&otd->otd_Ctrl, 16, CACRF_ClearD);
814 predotd = otd;
815 } while(actual < ioreq->iouh_Length);
817 if(actual != ioreq->iouh_Length)
819 // out of TDs
820 KPRINTF(200, ("Out of TDs for Int Transfer!\n"));
821 ohciFreeTDChain(hc, oed->oed_FirstTD);
822 usbReleaseBuffer(oed->oed_Buffer, ioreq->iouh_Data, 0, 0);
823 ohciFreeED(hc, oed);
824 break;
826 predotd->otd_Succ = NULL;
827 predotd->otd_NextTD = hc->hc_OhciTermTD->otd_Self;
829 CONSTWRITEMEM32_LE(&predotd->otd_Ctrl, OTCF_CC_INVALID);
830 CacheClearE(&predotd->otd_Ctrl, 16, CACRF_ClearD);
832 if(ioreq->iouh_Interval >= 31)
834 intoed = hc->hc_OhciIntED[4]; // 32ms interval
835 } else {
836 UWORD cnt = 0;
839 intoed = hc->hc_OhciIntED[cnt++];
840 } while(ioreq->iouh_Interval >= (1<<cnt));
843 Remove(&ioreq->iouh_Req.io_Message.mn_Node);
844 ioreq->iouh_DriverPrivate1 = oed;
846 // manage endpoint going busy
847 unit->hu_DevBusyReq[devadrep] = ioreq;
848 unit->hu_NakTimeoutFrame[devadrep] = (ioreq->iouh_Flags & UHFF_NAKTIMEOUT) ? hc->hc_FrameCounter + ioreq->iouh_NakTimeout : 0;
850 Disable();
851 AddTail(&hc->hc_TDQueue, (struct Node *) ioreq);
853 // looks good to me, now enqueue this entry (behind Int head)
854 oed->oed_Succ = intoed->oed_Succ;
855 oed->oed_NextED = intoed->oed_Succ->oed_Self;
856 oed->oed_Pred = intoed;
857 intoed->oed_Succ = oed;
858 intoed->oed_NextED = oed->oed_Self;
859 oed->oed_Succ->oed_Pred = oed;
860 CacheClearE(&oed->oed_EPCaps, 16, CACRF_ClearD);
861 CacheClearE(&intoed->oed_EPCaps, 16, CACRF_ClearD);
862 SYNC;
864 PrintED("Int", oed, hc);
865 Enable();
867 ioreq = (struct IOUsbHWReq *) hc->hc_IntXFerQueue.lh_Head;
871 static ULONG ohciScheduleBulkTDs(struct PCIController *hc)
873 struct PCIUnit *unit = hc->hc_Unit;
874 struct IOUsbHWReq *ioreq;
875 UWORD devadrep;
876 struct OhciED *oed;
877 struct OhciTD *otd;
878 struct OhciTD *predotd;
879 ULONG actual;
880 ULONG epcaps;
881 ULONG len;
882 ULONG phyaddr;
883 ULONG oldenables;
884 ULONG startmask = 0;
886 /* *** BULK Transfers *** */
887 KPRINTF(1, ("Scheduling new BULK transfers...\n"));
888 ioreq = (struct IOUsbHWReq *) hc->hc_BulkXFerQueue.lh_Head;
889 while(((struct Node *) ioreq)->ln_Succ)
891 devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint + ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
892 KPRINTF(10, ("New BULK transfer to %ld.%ld: %ld bytes\n", ioreq->iouh_DevAddr, ioreq->iouh_Endpoint, ioreq->iouh_Length));
893 /* is endpoint already in use or do we have to wait for next transaction */
894 if(unit->hu_DevBusyReq[devadrep])
896 KPRINTF(5, ("Endpoint %02lx in use!\n", devadrep));
897 ioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ;
898 continue;
901 oed = ohciAllocED(hc);
902 if(!oed)
904 break;
907 oed->oed_IOReq = ioreq;
909 epcaps = (ioreq->iouh_DevAddr<<OECS_DEVADDR)|(ioreq->iouh_Endpoint<<OECS_ENDPOINT)|(ioreq->iouh_MaxPktSize<<OECS_MAXPKTLEN);
910 epcaps |= (ioreq->iouh_Dir == UHDIR_IN) ? OECF_DIRECTION_IN : OECF_DIRECTION_OUT;
912 if(ioreq->iouh_Flags & UHFF_LOWSPEED)
914 KPRINTF(5, ("*** LOW SPEED ***\n"));
915 epcaps |= OECF_LOWSPEED;
918 WRITEMEM32_LE(&oed->oed_EPCaps, epcaps);
919 oed->oed_TailPtr = hc->hc_OhciTermTD->otd_Self;
921 predotd = NULL;
922 oed->oed_Buffer = usbGetBuffer(ioreq->iouh_Data, ioreq->iouh_Length, ioreq->iouh_Dir);
923 phyaddr = (IPTR)pciGetPhysical(hc, oed->oed_Buffer);
924 actual = 0;
927 if((actual >= OHCI_TD_BULK_LIMIT) && (actual < ioreq->iouh_Length))
929 KPRINTF(10, ("Bulk too large, splitting...\n"));
930 break;
932 otd = ohciAllocTD(hc);
933 if(!otd)
935 if(predotd != NULL)
937 predotd->otd_Succ = NULL;
939 break;
941 otd->otd_ED = oed;
942 if(predotd)
944 predotd->otd_Succ = otd;
945 predotd->otd_NextTD = otd->otd_Self;
946 } else {
947 WRITEMEM32_LE(&oed->oed_HeadPtr, READMEM32_LE(&otd->otd_Self)|(unit->hu_DevDataToggle[devadrep] ? OEHF_DATA1 : 0));
948 oed->oed_FirstTD = otd;
950 len = ioreq->iouh_Length - actual;
951 if(len > OHCI_PAGE_SIZE)
953 len = OHCI_PAGE_SIZE;
955 otd->otd_Length = len;
956 KPRINTF(1, ("TD with %ld bytes: %08x-%08x\n", len, phyaddr, phyaddr+len-1));
957 CONSTWRITEMEM32_LE(&otd->otd_Ctrl, OTCF_CC_INVALID|OTCF_NOINT);
958 if(len)
960 WRITEMEM32_LE(&otd->otd_BufferPtr, (IPTR)CachePreDMA((APTR)(IPTR)phyaddr, &len, (ioreq->iouh_Dir == UHDIR_IN) ? 0 : DMA_ReadFromRAM));
961 phyaddr += len - 1;
962 WRITEMEM32_LE(&otd->otd_BufferEnd, phyaddr);
963 phyaddr++;
964 } else {
965 CONSTWRITEMEM32_LE(&otd->otd_BufferPtr, 0);
966 CONSTWRITEMEM32_LE(&otd->otd_BufferEnd, 0);
968 actual += len;
969 CacheClearE(&otd->otd_Ctrl, 16, CACRF_ClearD);
971 predotd = otd;
972 } while((actual < ioreq->iouh_Length) || (len && (ioreq->iouh_Dir == UHDIR_OUT) && (actual == ioreq->iouh_Length) && (!(ioreq->iouh_Flags & UHFF_NOSHORTPKT)) && ((actual % ioreq->iouh_MaxPktSize) == 0)));
974 if(!actual)
976 // out of TDs
977 KPRINTF(200, ("Out of TDs for Bulk Transfer!\n"));
978 ohciFreeTDChain(hc, oed->oed_FirstTD);
979 usbReleaseBuffer(oed->oed_Buffer, ioreq->iouh_Data, 0, 0);
980 ohciFreeED(hc, oed);
981 break;
983 oed->oed_Continue = (actual < ioreq->iouh_Length);
984 predotd->otd_Succ = NULL;
985 predotd->otd_NextTD = hc->hc_OhciTermTD->otd_Self;
987 CONSTWRITEMEM32_LE(&predotd->otd_Ctrl, OTCF_CC_INVALID);
988 CacheClearE(&predotd->otd_Ctrl, 16, CACRF_ClearD);
990 Remove(&ioreq->iouh_Req.io_Message.mn_Node);
991 ioreq->iouh_DriverPrivate1 = oed;
993 // manage endpoint going busy
994 unit->hu_DevBusyReq[devadrep] = ioreq;
995 unit->hu_NakTimeoutFrame[devadrep] = (ioreq->iouh_Flags & UHFF_NAKTIMEOUT) ? hc->hc_FrameCounter + ioreq->iouh_NakTimeout : 0;
997 Disable();
998 AddTail(&hc->hc_TDQueue, (struct Node *) ioreq);
1000 // looks good to me, now enqueue this entry
1001 oed->oed_Succ = hc->hc_OhciBulkTailED;
1002 oed->oed_NextED = oed->oed_Succ->oed_Self;
1003 oed->oed_Pred = hc->hc_OhciBulkTailED->oed_Pred;
1004 oed->oed_Pred->oed_Succ = oed;
1005 oed->oed_Pred->oed_NextED = oed->oed_Self;
1006 oed->oed_Succ->oed_Pred = oed;
1007 CacheClearE(&oed->oed_EPCaps, 16, CACRF_ClearD);
1008 CacheClearE(&oed->oed_Pred->oed_EPCaps, 16, CACRF_ClearD);
1009 SYNC;
1011 KPRINTF(10, ("Activating BULK at %ld\n", READREG32_LE(hc->hc_RegBase, OHCI_FRAMECOUNT)));
1012 PrintED("Bulk", oed, hc);
1014 /* Similar to ohciScheduleCtrlTDs(), but use bulk queue */
1015 startmask = OCSF_BULKENABLE;
1016 Enable();
1017 ioreq = (struct IOUsbHWReq *) hc->hc_BulkXFerQueue.lh_Head;
1020 if (startmask)
1022 oldenables = READREG32_LE(hc->hc_RegBase, OHCI_CMDSTATUS);
1023 if(!(oldenables & OCSF_BULKENABLE))
1025 CONSTWRITEREG32_LE(hc->hc_RegBase, OHCI_BULK_ED, 0);
1028 return startmask;
/* Merge the controller's 16-bit frame counter (OHCI_FRAMECOUNT) into the
 * software-maintained 32-bit hc_FrameCounter. Only the low 16 bits come from
 * hardware; the upper bits are advanced by the OISF_FRAMECOUNTOVER path in
 * ohciIntCode(). Disable()/Enable() guard against the interrupt handlers
 * updating hc_FrameCounter concurrently.
 */
void ohciUpdateFrameCounter(struct PCIController *hc)
{
    Disable();
    hc->hc_FrameCounter = (hc->hc_FrameCounter & 0xffff0000)|(READREG32_LE(hc->hc_RegBase, OHCI_FRAMECOUNT) & 0xffff);
    Enable();
}
/* SoftInt: bottom half of the OHCI interrupt.
 * Retires finished TDs, frees aborted EDs, schedules any queued transfer
 * requests, and finally restarts the HC queues in a single register write
 * (see the long comment below for why restarting must happen only once,
 * after all scheduling is done). Runs via SureCause() from ohciIntCode().
 */
static AROS_INTH1(ohciCompleteInt, struct PCIController *,hc)
{
    AROS_INTFUNC_INIT

    /* Accumulates OCSF_* queue-start bits requested by the schedulers */
    ULONG restartmask = 0;

    KPRINTF(1, ("CompleteInt!\n"));

    ohciUpdateFrameCounter(hc);

    /* **************** PROCESS DONE TRANSFERS **************** */

    if (hc->hc_OhciDoneQueue)
        ohciHandleFinishedTDs(hc);

    /* HCF_ABORT is set by the SOF interrupt: aborted EDs are now safe to free */
    if (hc->hc_Flags & HCF_ABORT)
        restartmask = ohciHandleAbortedEDs(hc);

    /* HCF_STOP_CTRL/HCF_STOP_BULK hold back scheduling while an abort on the
     * respective queue is still in flight (see ohciAbortRequest()) */
    if ((!(hc->hc_Flags & HCF_STOP_CTRL)) && hc->hc_CtrlXFerQueue.lh_Head->ln_Succ)
        restartmask |= ohciScheduleCtrlTDs(hc);

    if (hc->hc_IntXFerQueue.lh_Head->ln_Succ)
        ohciScheduleIntTDs(hc);

    if ((!(hc->hc_Flags & HCF_STOP_BULK)) && hc->hc_BulkXFerQueue.lh_Head->ln_Succ)
        restartmask |= ohciScheduleBulkTDs(hc);

    /*
     * Restart queues. In restartmask we have accumulated which queues need to be started.
     *
     * We do it here only once, after everything is set up, because otherwise HC goes nuts
     * in some cases. For example, the following situation caused TD queue loop: we are
     * simultaneously scheduling two control EDs and one of them completes with error. If
     * we attempt to start the queue right after a ED is scheduled (this is how the code
     * originally worked), it looks like the HC manages to deal with the first ED right
     * before the second one is scheduled. At this moment the first TD is HALTed with
     * oed_HeadPtr pointing to the failed TD, which went to the DoneQueue (which will be
     * picked up only on next ISR round, we are still in ohciSchedileCtrlEDs()). The
     * second ED is scheduled (first one is not removed yet!) and we re-trigger control
     * queue to start. It causes errorneous TD to reappear on the DoneQueue, effectively
     * looping it. DoneQueue loop causes ohciHandleFinishedTDs() to never exit.
     * Restarting queues here in this manner actually fixed the problem.
     */
    if (restartmask)
    {
        restartmask |= READREG32_LE(hc->hc_RegBase, OHCI_CMDSTATUS);
        WRITEREG32_LE(hc->hc_RegBase, OHCI_CMDSTATUS, restartmask);
        SYNC;
    }

    KPRINTF(1, ("CompleteDone\n"));

    return 0;

    AROS_INTFUNC_EXIT
}
1096 static AROS_INTH1(ohciIntCode, struct PCIController *, hc)
1098 AROS_INTFUNC_INIT
1100 struct PCIDevice *base = hc->hc_Device;
1101 struct PCIUnit *unit = hc->hc_Unit;
1102 ULONG intr = 0;
1103 ULONG donehead;
1105 CacheClearE(&hc->hc_OhciHCCA->oha_DoneHead, sizeof(hc->hc_OhciHCCA->oha_DoneHead), CACRF_InvalidateD);
1107 donehead = READMEM32_LE(&hc->hc_OhciHCCA->oha_DoneHead);
1109 if(donehead)
1111 if (donehead & ~1)
1112 intr = OISF_DONEHEAD;
1113 if(donehead & 1)
1115 intr |= READREG32_LE(hc->hc_RegBase, OHCI_INTSTATUS);
1117 donehead &= OHCI_PTRMASK;
1119 CONSTWRITEMEM32_LE(&hc->hc_OhciHCCA->oha_DoneHead, 0);
1121 KPRINTF(5, ("New Donehead %08lx for old %08lx\n", donehead, hc->hc_OhciDoneQueue));
1122 } else {
1123 intr = READREG32_LE(hc->hc_RegBase, OHCI_INTSTATUS);
1125 if (intr & OISF_DONEHEAD)
1127 KPRINTF(1, ("!!!!!!!!!!!!!!!!!!!!!!!DoneHead was empty!!!!!!!!!!!!!!!!!!!\n"));
1128 CacheClearE(hc->hc_OhciHCCA, sizeof(struct OhciHCCA), CACRF_InvalidateD);
1129 donehead = READMEM32_LE(&hc->hc_OhciHCCA->oha_DoneHead) & OHCI_PTRMASK;
1130 CONSTWRITEMEM32_LE(&hc->hc_OhciHCCA->oha_DoneHead, 0);
1132 KPRINTF(5, ("New Donehead %08lx for old %08lx\n", donehead, hc->hc_OhciDoneQueue));
1135 CacheClearE(hc->hc_OhciHCCA, sizeof(struct OhciHCCA), CACRF_ClearD);
1137 intr &= ~OISF_MASTERENABLE;
1139 if(intr & hc->hc_PCIIntEnMask)
1141 KPRINTF(1, ("ohciIntCode(0x%p) interrupts 0x%08lx, mask 0x%08lx\n", hc, intr, hc->hc_PCIIntEnMask));
1143 // Acknowledge all interrupts, but process only those we want
1144 WRITEREG32_LE(hc->hc_RegBase, OHCI_INTSTATUS, intr);
1145 //KPRINTF(1, ("INT=%02lx\n", intr));
1146 intr &= hc->hc_PCIIntEnMask;
1148 if(intr & OISF_HOSTERROR)
1150 KPRINTF(200, ("Host ERROR!\n"));
1152 if(intr & OISF_SCHEDOVERRUN)
1154 KPRINTF(200, ("Schedule overrun!\n"));
1156 if (!(hc->hc_Flags & HCF_ONLINE))
1158 if(READREG32_LE(hc->hc_RegBase, OHCI_INTSTATUS) & OISF_HUBCHANGE)
1160 // if the driver is not online and the controller has a broken
1161 // hub change interrupt, make sure we don't run into infinite
1162 // interrupt by disabling the interrupt bit
1163 ohciDisableInt(hc, OISF_HUBCHANGE);
1165 return FALSE;
1167 WRITEREG32_LE(hc->hc_RegBase, OHCI_INTEN, OISF_HUBCHANGE);
1168 if(intr & OISF_FRAMECOUNTOVER)
1170 hc->hc_FrameCounter |= 0x7fff;
1171 hc->hc_FrameCounter++;
1172 hc->hc_FrameCounter |= READREG32_LE(hc->hc_RegBase, OHCI_FRAMECOUNT) & 0xffff;
1173 KPRINTF(10, ("HCI 0x%p: Frame Counter Rollover %ld\n", hc, hc->hc_FrameCounter));
1175 if(intr & OISF_HUBCHANGE)
1177 UWORD hciport;
1178 ULONG oldval;
1179 UWORD portreg = OHCI_PORTSTATUS;
1180 BOOL clearbits = FALSE;
1182 if(READREG32_LE(hc->hc_RegBase, OHCI_INTSTATUS) & OISF_HUBCHANGE)
1184 // some OHCI implementations will keep the interrupt bit stuck until
1185 // all port changes have been cleared, which is wrong according to the
1186 // OHCI spec. As a workaround we will clear all change bits, which should
1187 // be no problem as the port changes are reflected in the PortChangeMap
1188 // array.
1189 clearbits = TRUE;
1191 for(hciport = 0; hciport < hc->hc_NumPorts; hciport++, portreg += 4)
1193 oldval = READREG32_LE(hc->hc_RegBase, portreg);
1194 if(oldval & OHPF_OVERCURRENTCHG)
1196 hc->hc_PortChangeMap[hciport] |= UPSF_PORT_OVER_CURRENT;
1198 if(oldval & OHPF_RESETCHANGE)
1200 hc->hc_PortChangeMap[hciport] |= UPSF_PORT_RESET;
1202 if(oldval & OHPF_ENABLECHANGE)
1204 hc->hc_PortChangeMap[hciport] |= UPSF_PORT_ENABLE;
1206 if(oldval & OHPF_CONNECTCHANGE)
1208 hc->hc_PortChangeMap[hciport] |= UPSF_PORT_CONNECTION;
1210 if(oldval & OHPF_RESUMEDTX)
1212 hc->hc_PortChangeMap[hciport] |= UPSF_PORT_SUSPEND;
1214 if(clearbits)
1216 WRITEREG32_LE(hc->hc_RegBase, portreg, OHPF_CONNECTCHANGE|OHPF_ENABLECHANGE|OHPF_RESUMEDTX|OHPF_OVERCURRENTCHG|OHPF_RESETCHANGE);
1219 KPRINTF(20, ("PCI Int Port %ld (glob %ld) Change %08lx\n", hciport, hc->hc_PortNum20[hciport] + 1, oldval));
1220 if(hc->hc_PortChangeMap[hciport])
1222 unit->hu_RootPortChanges |= 1UL<<(hc->hc_PortNum20[hciport] + 1);
1225 uhwCheckRootHubChanges(unit);
1226 if(clearbits)
1228 // again try to get rid of any bits that may be causing the interrupt
1229 WRITEREG32_LE(hc->hc_RegBase, OHCI_HUBSTATUS, OHSF_OVERCURRENTCHG);
1230 WRITEREG32_LE(hc->hc_RegBase, OHCI_INTSTATUS, OISF_HUBCHANGE);
1233 if(intr & OISF_DONEHEAD)
1235 KPRINTF(10, ("DoneHead Frame=%ld\n", READREG32_LE(hc->hc_RegBase, OHCI_FRAMECOUNT)));
1237 if(hc->hc_OhciDoneQueue)
1239 struct OhciTD *donetd = (struct OhciTD *) ((IPTR)donehead - hc->hc_PCIVirtualAdjust - offsetof(struct OhciTD, otd_Ctrl));
1241 CacheClearE(&donetd->otd_Ctrl, 16, CACRF_InvalidateD);
1242 while(donetd->otd_NextTD)
1244 donetd = (struct OhciTD *) ((IPTR)donetd->otd_NextTD - hc->hc_PCIVirtualAdjust - offsetof(struct OhciTD, otd_Ctrl));
1245 CacheClearE(&donetd->otd_Ctrl, 16, CACRF_InvalidateD);
1247 WRITEMEM32_LE(&donetd->otd_NextTD, hc->hc_OhciDoneQueue);
1248 CacheClearE(&donetd->otd_Ctrl, 16, CACRF_ClearD);
1250 KPRINTF(10, ("Attached old DoneHead 0x%08lx to TD 0x%08lx\n", hc->hc_OhciDoneQueue, donetd->otd_Self));
1252 hc->hc_OhciDoneQueue = donehead;
1254 if (intr & OISF_SOF)
1256 /* Aborted EDs are available for freeing */
1257 hc->hc_Flags |= HCF_ABORT;
1260 if (intr & (OISF_SOF | OISF_DONEHEAD))
1263 * These two are leveraged down to SoftInt.
1264 * This is done in order to keep queues rotation synchronized.
1266 SureCause(base, &hc->hc_CompleteInt);
1269 KPRINTF(1, ("Exiting ohciIntCode(0x%p)\n", unit));
1272 /* Unlock interrupts */
1273 WRITEREG32_LE(&hc->hc_RegBase, OHCI_INTEN, OISF_MASTERENABLE);
1275 return FALSE;
1277 AROS_INTFUNC_EXIT
1281 * CHECKME: This routine is implemented according to the OHCI specification, however poorly tested.
1282 * Additionally, disabling and re-enabling the queue seems to create a significant delay. Perhaps
1283 * this can be optimized. In fact the only thing we really need to is to make sure that the ED to
1284 * be removed is neither on list nor being processed at the moment. Perhaps it's enough to simply
1285 * unlink it, set SKIP flag and wait for the next SOF.
1286 * But be careful, improper TD/ED removal can easily cause DoneQueue loops which are extremely hard
1287 * to isolate and fix (debug output adds delays which hide the problem). One of danger signs are
1288 * "Came accross a rogue TD" messages on the debug log. They mean that one of freed TDs reappeared
1289 * on the DoneQueue. If you run intensive I/O, you can be unlucky enough to reallocate and reuse this
 * TD before it passes the DoneQueue, so it will appear there for the second time and create a loop.
/* Abort a submitted transfer request.
 * Stops the affected HC queue (control/bulk), marks the request's ED as
 * skipped, parks the ioreq on hc_AbortQueue and arms a StartOfFrame
 * interrupt; the ED is only freed (and the ioreq replied) after the next
 * frame, when the HC is guaranteed not to touch its TDs any more.
 */
void ohciAbortRequest(struct PCIController *hc, struct IOUsbHWReq *ioreq)
{
    struct PCIUnit *unit = hc->hc_Unit;
    struct OhciED *oed = ioreq->iouh_DriverPrivate1;
    /* Combined device-address/endpoint/direction key into the unit's per-endpoint tables */
    UWORD devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint + ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
    ULONG disablemask = 0;
    ULONG ctrlstatus;

    KPRINTF(70, ("HC 0x%p Aborting request 0x%p, command %ld, endpoint 0x%04lx, Frame=%ld\n", hc, ioreq, ioreq->iouh_Req.io_Command, devadrep, READREG32_LE(hc->hc_RegBase, OHCI_FRAMECOUNT)));
    PrintED("Aborting", oed, hc);

    /* Removing control and bulk EDs requires to stop the appropriate HC queue first (according to specification) */
    switch (ioreq->iouh_Req.io_Command)
    {
    case UHCMD_CONTROLXFER:
        KPRINTF(50, ("Stopping control queue\n"));
        /* Also blocks ohciCompleteInt() from rescheduling the control queue */
        hc->hc_Flags |= HCF_STOP_CTRL;
        disablemask = OCSF_CTRLENABLE;
        break;

    case UHCMD_BULKXFER:
        KPRINTF(50, ("Stopping bulk queue\n"));
        hc->hc_Flags |= HCF_STOP_BULK;
        disablemask = OCSF_BULKENABLE;
        break;
    }

    /* Stop selected queue(s) */
    /* NOTE(review): this read-modify-writes HcCommandStatus with OCSF_* bits;
     * per the OHCI spec the list-enable bits (CLE/BLE) live in HcControl.
     * Writes to OHCI_CONTROL are avoided deliberately elsewhere in this
     * driver (chipset quirks) - confirm this achieves the intended stop. */
    if (disablemask)
    {
        ctrlstatus = READREG32_LE(hc->hc_RegBase, OHCI_CMDSTATUS);
        ctrlstatus &= ~disablemask;
        WRITEREG32_LE(hc->hc_RegBase, OHCI_CMDSTATUS, ctrlstatus);
        SYNC;
    }

    // disable ED
    ohciDisableED(oed);

    /*
     * ...and move to abort queue.
     * We can't reply the request right now because some of its TDs
     * can be used by the HC right now. This means it does something
     * to the data buffer referred to by the request.
     * We reply the request only when the HC stops doing this. Otherwise
     * we may end up in trashed memory.
     */
    Remove(&ioreq->iouh_Req.io_Message.mn_Node);
    AddTail(&hc->hc_AbortQueue, &ioreq->iouh_Req.io_Message.mn_Node);

    if (ioreq->iouh_Req.io_Command == UHCMD_INTXFER)
        ohciUpdateIntTree(hc);

    /* Preserve the endpoint's data toggle so a follow-up transfer resumes correctly */
    unit->hu_DevDataToggle[devadrep] = (READMEM32_LE(&oed->oed_HeadPtr) & OEHF_DATA1) ? TRUE : FALSE;

    /*
     * Request StartOfFrame interrupt. Upon next frame this ED
     * is guaranteed to be out of use and can be freed.
     */
    ohciEnableInt(hc, OISF_SOF);
}
/* One-time initialization of an OHCI host controller.
 * Allocates the shared PCI memory block (HCCA + ED/TD pools), builds the
 * static schedule skeleton (terminator ED/TD, control/bulk dummy head+tail
 * EDs, 5-level interrupt ED tree), takes the controller over from the BIOS,
 * resets the chip, programs the operational registers, installs the reset
 * callback and interrupt server, and powers up the root hub ports.
 * Returns TRUE on success, FALSE if the PCI memory allocation failed.
 */
BOOL ohciInit(struct PCIController *hc, struct PCIUnit *hu) {

    struct PCIDevice *hd = hu->hu_Device;

    struct OhciED *oed;
    struct OhciED *predoed;
    struct OhciTD *otd;
    ULONG *tabptr;
    UBYTE *memptr;
    ULONG bitcnt;
    ULONG hubdesca;
    ULONG revision;
    ULONG cmdstatus;
    ULONG control;
    ULONG timeout;
    ULONG frameival;

    ULONG cnt;

    struct TagItem pciActivateMem[] =
    {
            { aHidd_PCIDevice_isMEM, TRUE },
            { TAG_DONE, 0UL },
    };

    struct TagItem pciActivateBusmaster[] =
    {
            { aHidd_PCIDevice_isMaster, TRUE },
            { TAG_DONE, 0UL },
    };

    struct TagItem pciDeactivateBusmaster[] =
    {
            { aHidd_PCIDevice_isMaster, FALSE },
            { TAG_DONE, 0UL },
    };

    /* SoftInt doing the actual completion work (see ohciCompleteInt) */
    hc->hc_CompleteInt.is_Node.ln_Type = NT_INTERRUPT;
    hc->hc_CompleteInt.is_Node.ln_Name = "OHCI CompleteInt";
    hc->hc_CompleteInt.is_Node.ln_Pri  = 0;
    hc->hc_CompleteInt.is_Data = hc;
    hc->hc_CompleteInt.is_Code = (VOID_FUNC)ohciCompleteInt;

    /* One contiguous DMA-able block: HCCA (aligned) + ED pool + TD pool */
    hc->hc_PCIMemSize = OHCI_HCCA_SIZE + OHCI_HCCA_ALIGNMENT + 1;
    hc->hc_PCIMemSize += sizeof(struct OhciED) * OHCI_ED_POOLSIZE;
    hc->hc_PCIMemSize += sizeof(struct OhciTD) * OHCI_TD_POOLSIZE;

    memptr = HIDD_PCIDriver_AllocPCIMem(hc->hc_PCIDriverObject, hc->hc_PCIMemSize);
    hc->hc_PCIMem = (APTR) memptr;
    if (memptr)
    {
        // PhysicalAddress - VirtualAdjust = VirtualAddress
        // VirtualAddress + VirtualAdjust = PhysicalAddress
        hc->hc_PCIVirtualAdjust = pciGetPhysical(hc, memptr) - (APTR)memptr;
        KPRINTF(10, ("VirtualAdjust 0x%08lx\n", hc->hc_PCIVirtualAdjust));

        // align memory (OHCI_HCCA_ALIGNMENT is a mask, i.e. alignment-1)
        memptr = (UBYTE *) (((IPTR)hc->hc_PCIMem + OHCI_HCCA_ALIGNMENT) & (~OHCI_HCCA_ALIGNMENT));
        hc->hc_OhciHCCA = (struct OhciHCCA *) memptr;
        KPRINTF(10, ("HCCA 0x%p\n", hc->hc_OhciHCCA));
        memptr += OHCI_HCCA_SIZE;

        // build up ED pool (oed_Self caches each ED's physical address,
        // pointing at the hardware-visible part starting at oed_EPCaps)
        oed = (struct OhciED *) memptr;
        hc->hc_OhciEDPool = oed;
        cnt = OHCI_ED_POOLSIZE - 1;
        do {
            // minimal initalization
            oed->oed_Succ = (oed + 1);
            WRITEMEM32_LE(&oed->oed_Self, (IPTR)(&oed->oed_EPCaps) + hc->hc_PCIVirtualAdjust);
            oed++;
        } while(--cnt);
        oed->oed_Succ = NULL;
        WRITEMEM32_LE(&oed->oed_Self, (IPTR)(&oed->oed_EPCaps) + hc->hc_PCIVirtualAdjust);
        memptr += sizeof(struct OhciED) * OHCI_ED_POOLSIZE;

        // build up TD pool (otd_Self likewise holds the physical address)
        otd = (struct OhciTD *) memptr;
        hc->hc_OhciTDPool = otd;
        cnt = OHCI_TD_POOLSIZE - 1;
        do {
            otd->otd_Succ = (otd + 1);
            WRITEMEM32_LE(&otd->otd_Self, (IPTR)(&otd->otd_Ctrl) + hc->hc_PCIVirtualAdjust);
            otd++;
        } while(--cnt);
        otd->otd_Succ = NULL;
        WRITEMEM32_LE(&otd->otd_Self, (IPTR)(&otd->otd_Ctrl) + hc->hc_PCIVirtualAdjust);
        memptr += sizeof(struct OhciTD) * OHCI_TD_POOLSIZE;

        // terminating ED (always skipped, list terminator)
        hc->hc_OhciTermED = oed = ohciAllocED(hc);
        oed->oed_Succ = NULL;
        oed->oed_Pred = NULL;
        CONSTWRITEMEM32_LE(&oed->oed_EPCaps, OECF_SKIP);
        oed->oed_NextED = 0;

        // terminating TD
        hc->hc_OhciTermTD = otd = ohciAllocTD(hc);
        otd->otd_Succ = NULL;
        otd->otd_NextTD = 0;

        // dummy head & tail Ctrl ED
        hc->hc_OhciCtrlHeadED = predoed = ohciAllocED(hc);
        hc->hc_OhciCtrlTailED = oed = ohciAllocED(hc);
        CONSTWRITEMEM32_LE(&predoed->oed_EPCaps, OECF_SKIP);
        CONSTWRITEMEM32_LE(&oed->oed_EPCaps, OECF_SKIP);
        predoed->oed_Succ = oed;
        predoed->oed_Pred = NULL;
        predoed->oed_NextED = oed->oed_Self;
        oed->oed_Succ = NULL;
        oed->oed_Pred = predoed;
        oed->oed_NextED = 0;

        // dummy head & tail Bulk ED
        hc->hc_OhciBulkHeadED = predoed = ohciAllocED(hc);
        hc->hc_OhciBulkTailED = oed = ohciAllocED(hc);
        CONSTWRITEMEM32_LE(&predoed->oed_EPCaps, OECF_SKIP);
        CONSTWRITEMEM32_LE(&oed->oed_EPCaps, OECF_SKIP);
        predoed->oed_Succ = oed;
        predoed->oed_Pred = NULL;
        predoed->oed_NextED = oed->oed_Self;
        oed->oed_Succ = NULL;
        oed->oed_Pred = predoed;
        oed->oed_NextED = 0;

        // 1 ms INT QH
        hc->hc_OhciIntED[0] = oed = ohciAllocED(hc);
        oed->oed_Succ = hc->hc_OhciTermED;
        oed->oed_Pred = NULL; // who knows...
        CONSTWRITEMEM32_LE(&oed->oed_EPCaps, OECF_SKIP);
        oed->oed_NextED = hc->hc_OhciTermED->oed_Self;
        predoed = oed;
        // make 5 levels of QH interrupts (1, 2, 4, 8, 16 ms intervals;
        // each level chains into the next faster one)
        for(cnt = 1; cnt < 5; cnt++) {
            hc->hc_OhciIntED[cnt] = oed = ohciAllocED(hc);
            oed->oed_Succ = predoed;
            oed->oed_Pred = NULL; // who knows...
            CONSTWRITEMEM32_LE(&oed->oed_EPCaps, OECF_SKIP);
            oed->oed_NextED = hc->hc_OhciTermED->oed_Self;
            predoed = oed;
        }

        ohciUpdateIntTree(hc);

        // fill in framelist with IntED entry points based on interval
        // (lowest set bit of the frame number selects the tree level)
        tabptr = hc->hc_OhciHCCA->oha_IntEDs;
        for(cnt = 0; cnt < 32; cnt++) {
            oed = hc->hc_OhciIntED[4];
            bitcnt = 0;
            do {
                if(cnt & (1UL<<bitcnt)) {
                    oed = hc->hc_OhciIntED[bitcnt];
                    break;
                }
            } while(++bitcnt < 5);
            *tabptr++ = oed->oed_Self;
        }

        // time to initialize hardware...
        OOP_GetAttr(hc->hc_PCIDeviceObject, aHidd_PCIDevice_Base0, (IPTR *) &hc->hc_RegBase);
        hc->hc_RegBase = (APTR) (((IPTR) hc->hc_RegBase) & (~0xf)); // mask off BAR flag bits
        KPRINTF(10, ("RegBase = 0x%p\n", hc->hc_RegBase));
        OOP_SetAttrs(hc->hc_PCIDeviceObject, (struct TagItem *) pciActivateMem); // enable memory

        hubdesca = READREG32_LE(hc->hc_RegBase, OHCI_HUBDESCA);
        revision = READREG32_LE(hc->hc_RegBase, OHCI_REVISION);
        hc->hc_NumPorts = (hubdesca & OHAM_NUMPORTS)>>OHAS_NUMPORTS;
        KPRINTF(20, ("Found OHCI Controller %p FuncNum = %ld, Rev %02lx, with %ld ports\n",
                     hc->hc_PCIDeviceObject, hc->hc_FunctionNum,
                     revision & 0xFF,
                     hc->hc_NumPorts));

        KPRINTF(20, ("Powerswitching: %s %s\n",
                     hubdesca & OHAF_NOPOWERSWITCH ? "Always on" : "Available",
                     hubdesca & OHAF_INDIVIDUALPS ? "per port" : "global"));

        control = READREG32_LE(hc->hc_RegBase, OHCI_CONTROL);
        KPRINTF(10, ("OHCI control state: 0x%08lx\n", control));

        // disable BIOS legacy support (ownership change handshake via SMI)
        if (control & OCLF_SMIINT)
        {
            KPRINTF(10, ("BIOS still has hands on OHCI, trying to get rid of it\n"));

            cmdstatus = READREG32_LE(hc->hc_RegBase, OHCI_CMDSTATUS);
            cmdstatus |= OCSF_OWNERCHANGEREQ;
            WRITEREG32_LE(hc->hc_RegBase, OHCI_CMDSTATUS, cmdstatus);
            timeout = 100;
            do {
                control = READREG32_LE(hc->hc_RegBase, OHCI_CONTROL);
                if(!(control & OCLF_SMIINT)) {
                    KPRINTF(10, ("BIOS gave up on OHCI. Pwned!\n"));
                    break;
                }
                uhwDelayMS(10, hu);
            } while(--timeout);
            if(!timeout) {
                KPRINTF(10, ("BIOS didn't release OHCI. Forcing and praying...\n"));
                control &= ~OCLF_SMIINT;
                WRITEREG32_LE(hc->hc_RegBase, OHCI_CONTROL, control);
            }
        }

        OOP_SetAttrs(hc->hc_PCIDeviceObject, (struct TagItem *) pciDeactivateBusmaster); // no busmaster yet

        KPRINTF(10, ("Resetting OHCI HC\n"));
        CONSTWRITEREG32_LE(hc->hc_RegBase, OHCI_CMDSTATUS, OCSF_HCRESET);
        cnt = 100;
        do {
            if(!(READREG32_LE(hc->hc_RegBase, OHCI_CMDSTATUS) & OCSF_HCRESET)) {
                break;
            }
            uhwDelayMS(1, hu);
        } while(--cnt);

#ifdef DEBUG
        if(cnt == 0) {
            KPRINTF(20, ("Reset Timeout!\n"));
        } else {
            KPRINTF(20, ("Reset finished after %ld ticks\n", 100-cnt));
        }
#endif

        OOP_SetAttrs(hc->hc_PCIDeviceObject, (struct TagItem *) pciActivateBusmaster); // enable busmaster

        CONSTWRITEREG32_LE(hc->hc_RegBase, OHCI_FRAMECOUNT, 0);
        CONSTWRITEREG32_LE(hc->hc_RegBase, OHCI_PERIODICSTART, 10800); // 10% of 12000
        frameival = READREG32_LE(hc->hc_RegBase, OHCI_FRAMEINTERVAL);
        KPRINTF(10, ("FrameInterval=%08lx\n", frameival));
        frameival &= ~OIVM_BITSPERFRAME;
        frameival |= OHCI_DEF_BITSPERFRAME<<OIVS_BITSPERFRAME;
        // FrameIntervalToggle must flip on every FrameInterval update
        frameival ^= OIVF_TOGGLE;
        WRITEREG32_LE(hc->hc_RegBase, OHCI_FRAMEINTERVAL, frameival);

        // make sure nothing is running
        CONSTWRITEREG32_LE(hc->hc_RegBase, OHCI_PERIODIC_ED, 0);
        CONSTWRITEREG32_LE(hc->hc_RegBase, OHCI_CTRL_HEAD_ED, AROS_LONG2LE(hc->hc_OhciCtrlHeadED->oed_Self));
        CONSTWRITEREG32_LE(hc->hc_RegBase, OHCI_CTRL_ED, 0);
        CONSTWRITEREG32_LE(hc->hc_RegBase, OHCI_BULK_HEAD_ED, AROS_LONG2LE(hc->hc_OhciBulkHeadED->oed_Self));
        CONSTWRITEREG32_LE(hc->hc_RegBase, OHCI_BULK_ED, 0);
        CONSTWRITEREG32_LE(hc->hc_RegBase, OHCI_DONEHEAD, 0);

        WRITEREG32_LE(hc->hc_RegBase, OHCI_HCCA, (IPTR)pciGetPhysical(hc, hc->hc_OhciHCCA));

        // clear and disable all interrupt causes before hooking up the server
        CONSTWRITEREG32_LE(hc->hc_RegBase, OHCI_INTSTATUS, OISF_ALL_INTS);
        CONSTWRITEREG32_LE(hc->hc_RegBase, OHCI_INTDIS, OISF_ALL_INTS);
        SYNC;

        // install reset handler
        hc->hc_ResetInt.is_Code = (VOID_FUNC)OhciResetHandler;
        hc->hc_ResetInt.is_Data = hc;
        AddResetCallback(&hc->hc_ResetInt);

        // add interrupt
        hc->hc_PCIIntHandler.is_Node.ln_Name = "OHCI PCI (pciusb.device)";
        hc->hc_PCIIntHandler.is_Node.ln_Pri = 5;
        hc->hc_PCIIntHandler.is_Node.ln_Type = NT_INTERRUPT;
        hc->hc_PCIIntHandler.is_Code = (VOID_FUNC)ohciIntCode;
        hc->hc_PCIIntHandler.is_Data = hc;
        AddIntServer(INTB_KERNEL + hc->hc_PCIIntLine, &hc->hc_PCIIntHandler);

        hc->hc_PCIIntEnMask = OISF_DONEHEAD|OISF_RESUMEDTX|OISF_HOSTERROR|OISF_FRAMECOUNTOVER|OISF_HUBCHANGE;

        WRITEREG32_LE(hc->hc_RegBase, OHCI_INTEN, hc->hc_PCIIntEnMask|OISF_MASTERENABLE);

        /* Don't try to enter RESET state via OHCI_CONTROL - this flakes out
         * some chips, particularly the Sam460ex
         */
        CONSTWRITEREG32_LE(hc->hc_RegBase, OHCI_CONTROL, OCLF_PERIODICENABLE|OCLF_CTRLENABLE|OCLF_BULKENABLE|OCLF_ISOENABLE|OCLF_USBOPER);
        SYNC;

        // make sure the ports are on with chipset quirk workaround
        hubdesca = READREG32_LE(hc->hc_RegBase, OHCI_HUBDESCA);
        if (hd->hd_Flags & HDF_FORCEPOWER)
            hubdesca |= OHAF_NOPOWERSWITCH;     /* Required for some IntelMacs */
        else
            hubdesca |= OHAF_NOOVERCURRENT;
        WRITEREG32_LE(hc->hc_RegBase, OHCI_HUBDESCA, hubdesca);

        WRITEREG32_LE(hc->hc_RegBase, OHCI_HUBSTATUS, OHSF_POWERHUB);
        if((hubdesca & OHAF_NOPOWERSWITCH) || (!(hubdesca & OHAF_INDIVIDUALPS))) {
            KPRINTF(20, ("Individual power switching not available, turning on all ports!\n"));
            WRITEREG32_LE(hc->hc_RegBase, OHCI_HUBDESCB, 0);
        } else {
            KPRINTF(20, ("Enabling individual power switching\n"));
            WRITEREG32_LE(hc->hc_RegBase, OHCI_HUBDESCB, ((2<<hc->hc_NumPorts)-2)<<OHBS_PORTPOWERCTRL);
        }

        // wait for power to stabilize before re-writing the hub descriptor
        uhwDelayMS(50, hu);
        WRITEREG32_LE(hc->hc_RegBase, OHCI_HUBDESCA, hubdesca);

        // flush all DMA-visible structures to memory before the HC runs
        CacheClearE(hc->hc_OhciHCCA, sizeof(struct OhciHCCA), CACRF_ClearD);
        CacheClearE(hc->hc_OhciEDPool, sizeof(struct OhciED) * OHCI_ED_POOLSIZE, CACRF_ClearD);
        CacheClearE(hc->hc_OhciTDPool, sizeof(struct OhciTD) * OHCI_TD_POOLSIZE, CACRF_ClearD);

        CONSTWRITEREG32_LE(hc->hc_RegBase, OHCI_CONTROL, OCLF_PERIODICENABLE|OCLF_CTRLENABLE|OCLF_BULKENABLE|OCLF_ISOENABLE|OCLF_USBOPER);
        SYNC;

        KPRINTF(20, ("ohciInit returns TRUE...\n"));
        return TRUE;
    }

    /*
        FIXME: What would the appropriate debug level be?
    */
    KPRINTF(1000, ("ohciInit returns FALSE...\n"));
    return FALSE;
}
1662 void ohciFree(struct PCIController *hc, struct PCIUnit *hu) {
1664 hc = (struct PCIController *) hu->hu_Controllers.lh_Head;
1665 while(hc->hc_Node.ln_Succ)
1667 switch(hc->hc_HCIType)
1669 case HCITYPE_OHCI:
1671 KPRINTF(20, ("Shutting down OHCI %p\n", hc));
1672 CONSTWRITEREG32_LE(hc->hc_RegBase, OHCI_INTDIS, OISF_ALL_INTS);
1673 CONSTWRITEREG32_LE(hc->hc_RegBase, OHCI_INTSTATUS, OISF_ALL_INTS);
1675 // disable all ports
1676 WRITEREG32_LE(hc->hc_RegBase, OHCI_HUBDESCB, 0);
1677 WRITEREG32_LE(hc->hc_RegBase, OHCI_HUBSTATUS, OHSF_UNPOWERHUB);
1679 uhwDelayMS(50, hu);
1680 KPRINTF(20, ("Stopping OHCI %p\n", hc));
1681 CONSTWRITEREG32_LE(hc->hc_RegBase, OHCI_CONTROL, 0);
1682 CONSTWRITEREG32_LE(hc->hc_RegBase, OHCI_CMDSTATUS, 0);
1683 SYNC;
1685 //KPRINTF(20, ("Reset done UHCI %08lx\n", hc));
1686 uhwDelayMS(10, hu);
1687 KPRINTF(20, ("Resetting OHCI %p\n", hc));
1688 CONSTWRITEREG32_LE(hc->hc_RegBase, OHCI_CMDSTATUS, OCSF_HCRESET);
1689 SYNC;
1690 uhwDelayMS(50, hu);
1692 KPRINTF(20, ("Shutting down OHCI done.\n"));
1693 break;
1697 hc = (struct PCIController *) hc->hc_Node.ln_Succ;