/*
 * uhwcmd.c - pciusb.device by Chris Hodges
 *
 * (Imported from the AROS "cake" repository: rom/usb/pciusb/uhwcmd.c.
 *  Original import note: "Sync'ed pciusb.device with bugfixes and the
 *  most important changes".)
 */
4 #include "uhwcmd.h"
5 #include <devices/usb_hub.h>
6 #include <strings.h>
8 #include <proto/utility.h>
9 #include <proto/exec.h>
11 #define NewList NEWLIST
13 /* Root hub data */
14 const struct UsbStdDevDesc RHDevDesc = { sizeof(struct UsbStdDevDesc), UDT_DEVICE, AROS_WORD2LE(0x0110), HUB_CLASSCODE, 0, 0, 8, AROS_WORD2LE(0x0000), AROS_WORD2LE(0x0000), AROS_WORD2LE(0x0100), 1, 2, 0, 1 };
16 const struct UsbStdCfgDesc RHCfgDesc = { 9, UDT_CONFIGURATION, AROS_WORD2LE(9+9+7), 1, 1, 3, USCAF_ONE|USCAF_SELF_POWERED, 0 };
17 const struct UsbStdIfDesc RHIfDesc = { 9, UDT_INTERFACE, 0, 0, 1, HUB_CLASSCODE, 0, 0, 4 };
18 const struct UsbStdEPDesc RHEPDesc = { 7, UDT_ENDPOINT, URTF_IN|1, USEAF_INTERRUPT, AROS_WORD2LE(1), 255 };
19 const struct UsbHubDesc RHHubDesc = { 9, UDT_HUB, 0, AROS_WORD2LE(UHCF_INDIVID_POWER|UHCF_INDIVID_OVP), 0, 1, 1, 0 };
21 const CONST_STRPTR RHStrings[] = { "Chris Hodges", "PCI Root Hub Unit x", "Standard Config", "Hub interface" };
23 /* /// "SureCause()" */
24 void SureCause(struct PCIDevice *base, struct Interrupt *interrupt)
26 /* this is a workaround for the original Cause() function missing tailed calls */
27 Disable();
28 if((interrupt->is_Node.ln_Type == NT_SOFTINT) || (interrupt->is_Node.ln_Type == NT_USER))
30 // signal tailed call
31 interrupt->is_Node.ln_Type = NT_USER;
32 } else {
35 interrupt->is_Node.ln_Type = NT_SOFTINT;
36 Enable();
37 (*((void (*)(struct Interrupt *)) (interrupt->is_Code)))(interrupt->is_Data);
38 Disable();
39 } while(interrupt->is_Node.ln_Type != NT_SOFTINT);
40 interrupt->is_Node.ln_Type = NT_INTERRUPT;
42 Enable();
44 /* \\\ */
46 /* /// "uhwOpenTimer()" */
47 BOOL uhwOpenTimer(struct PCIUnit *unit, struct PCIDevice *base)
49 if((unit->hu_MsgPort = CreateMsgPort()))
51 if((unit->hu_TimerReq = (struct timerequest *) CreateIORequest(unit->hu_MsgPort, sizeof(struct timerequest))))
53 if(!OpenDevice("timer.device", UNIT_MICROHZ, (struct IORequest *) unit->hu_TimerReq, 0))
55 unit->hu_TimerReq->tr_node.io_Message.mn_Node.ln_Name = "PCI hardware";
56 unit->hu_TimerReq->tr_node.io_Command = TR_ADDREQUEST;
57 KPRINTF(1, ("opened timer device\n"));
58 return(TRUE);
60 DeleteIORequest((struct IORequest *) unit->hu_TimerReq);
61 unit->hu_TimerReq = NULL;
63 DeleteMsgPort(unit->hu_MsgPort);
64 unit->hu_MsgPort = NULL;
66 KPRINTF(5, ("failed to open timer.device\n"));
67 return(FALSE);
69 /* \\\ */
71 /* /// "uhwDelayMS()" */
72 void uhwDelayMS(ULONG milli, struct PCIUnit *unit, struct PCIDevice *base)
74 unit->hu_TimerReq->tr_time.tv_secs = 0;
75 unit->hu_TimerReq->tr_time.tv_micro = milli * 1000;
76 DoIO((struct IORequest *) unit->hu_TimerReq);
78 /* \\\ */
80 /* /// "uhwDelayMicro()" */
81 void uhwDelayMicro(ULONG micro, struct PCIUnit *unit, struct PCIDevice *base)
83 unit->hu_TimerReq->tr_time.tv_secs = 0;
84 unit->hu_TimerReq->tr_time.tv_micro = micro;
85 DoIO((struct IORequest *) unit->hu_TimerReq);
87 /* \\\ */
89 /* /// "uhwCloseTimer()" */
90 void uhwCloseTimer(struct PCIUnit *unit, struct PCIDevice *base)
92 if(unit->hu_MsgPort)
94 if(unit->hu_TimerReq)
96 KPRINTF(1, ("closing timer.device\n"));
97 CloseDevice((APTR) unit->hu_TimerReq);
98 DeleteIORequest((struct IORequest *) unit->hu_TimerReq);
99 unit->hu_TimerReq = NULL;
101 DeleteMsgPort(unit->hu_MsgPort);
102 unit->hu_MsgPort = NULL;
105 /* \\\ */
/* /// "uhwHWInit()" */
/*
 * Hardware (re)initialisation hook for a controller.
 * Currently only logs; the per-unit resets are commented out (the actual
 * controller bring-up lives in the HCI-specific init code).
 */
void uhwHWInit(struct PCIController *hc)
{
    KPRINTF(1, ("Reset\n"));
    //unit->hu_FrameCounter = 1;
    //unit->hu_RootHubAddr = 0;
}
/* \\\ */
116 /* /// "Open_Unit()" */
117 struct Unit * Open_Unit(struct IOUsbHWReq *ioreq,
118 LONG unitnr,
119 struct PCIDevice *base)
121 struct PCIUnit *unit = NULL;
123 if(!base->hd_ScanDone)
125 base->hd_ScanDone = TRUE;
126 if(!pciInit(base))
128 return NULL;
131 unit = (struct PCIUnit *) base->hd_Units.lh_Head;
132 while(((struct Node *) unit)->ln_Succ)
134 if(unit->hu_UnitNo == unitnr)
136 break;
138 unit = (struct PCIUnit *) ((struct Node *) unit)->ln_Succ;
140 if(!((struct Node *) unit)->ln_Succ)
142 KPRINTF(20, ("Unit %ld does not exist!\n", unitnr));
143 return NULL;
145 if(unit->hu_UnitAllocated)
147 ioreq->iouh_Req.io_Error = IOERR_UNITBUSY;
148 KPRINTF(5, ("Unit %ld already open!\n", unitnr));
149 return NULL;
152 if(uhwOpenTimer(unit, base))
155 if(pciAllocUnit(unit)) // hardware self test
158 unit->hu_NakTimeoutInt.is_Node.ln_Type = NT_INTERRUPT;
159 unit->hu_NakTimeoutInt.is_Node.ln_Name = "PCI NakTimeout";
160 unit->hu_NakTimeoutInt.is_Node.ln_Pri = -16;
161 unit->hu_NakTimeoutInt.is_Data = unit;
162 unit->hu_NakTimeoutInt.is_Code = (void (*)(void)) &uhwNakTimeoutInt;
164 CopyMem(unit->hu_TimerReq, &unit->hu_NakTimeoutReq, sizeof(struct timerequest));
165 unit->hu_NakTimeoutReq.tr_node.io_Message.mn_ReplyPort = &unit->hu_NakTimeoutMsgPort;
166 unit->hu_NakTimeoutMsgPort.mp_Node.ln_Type = NT_MSGPORT;
167 unit->hu_NakTimeoutMsgPort.mp_Flags = PA_SOFTINT;
168 unit->hu_NakTimeoutMsgPort.mp_SigTask = &unit->hu_NakTimeoutInt;
169 NewList(&unit->hu_NakTimeoutMsgPort.mp_MsgList);
170 Cause(&unit->hu_NakTimeoutInt);
171 return(&unit->hu_Unit);
172 } else {
173 ioreq->iouh_Req.io_Error = IOERR_SELFTEST;
174 KPRINTF(20, ("Hardware allocation failure!\n"));
176 uhwCloseTimer(unit, base);
178 return(NULL);
180 /* \\\ */
182 /* /// "Close_Unit()" */
183 void Close_Unit(struct PCIDevice *base,
184 struct PCIUnit *unit,
185 struct IOUsbHWReq *ioreq)
187 /* Disable all interrupts */
188 unit->hu_NakTimeoutMsgPort.mp_Flags = PA_IGNORE;
189 unit->hu_NakTimeoutInt.is_Node.ln_Type = NT_SOFTINT;
190 AbortIO((APTR) &unit->hu_NakTimeoutReq);
192 pciFreeUnit(unit);
194 uhwCloseTimer(unit, base);
195 unit->hu_UnitAllocated = FALSE;
197 /* \\\ */
199 /* /// "uhwGetUsbState()" */
200 UWORD uhwGetUsbState(struct IOUsbHWReq *ioreq,
201 struct PCIUnit *unit,
202 struct PCIDevice *base)
204 return(ioreq->iouh_State = UHSF_OPERATIONAL);
206 /* \\\ */
208 /* /// "cmdReset()" */
210 *======================================================================
211 * cmdReset(ioreq, unit, base)
212 *======================================================================
214 * This is the device CMD_RESET routine.
216 * Resets the whole USB hardware. Goes into USBOperational mode right
217 * after. Must NOT be called from an interrupt.
221 WORD cmdReset(struct IOUsbHWReq *ioreq,
222 struct PCIUnit *unit,
223 struct PCIDevice *base)
225 KPRINTF(10, ("CMD_RESET ioreq: 0x%08lx\n", ioreq));
226 //uhwHWInit(unit);
228 uhwDelayMS(1, unit, base);
229 uhwGetUsbState(ioreq, unit, base);
231 if(ioreq->iouh_State & UHSF_OPERATIONAL)
233 return RC_OK;
235 return UHIOERR_USBOFFLINE;
237 /* \\\ */
239 /* /// "cmdUsbReset()" */
241 *======================================================================
242 * cmdUsbReset(ioreq, unit, base)
243 *======================================================================
245 * This is the device UHCMD_USBRESET routine.
247 * Resets the USB bus. Goes into USBOperational mode right after. Must
248 * NOT be called from an interrupt.
252 WORD cmdUsbReset(struct IOUsbHWReq *ioreq,
253 struct PCIUnit *unit,
254 struct PCIDevice *base)
256 KPRINTF(10, ("UHCMD_USBRESET ioreq: 0x%08lx\n", ioreq));
258 /* FIXME */
259 uhwGetUsbState(ioreq, unit, base);
261 unit->hu_FrameCounter = 1;
262 unit->hu_RootHubAddr = 0;
264 if(ioreq->iouh_State & UHSF_OPERATIONAL)
266 return RC_OK;
268 return UHIOERR_USBOFFLINE;
270 /* \\\ */
272 /* /// "cmdUsbResume()" */
274 *======================================================================
275 * cmdUsbResume(ioreq, unit, base)
276 *======================================================================
278 * This is the device UHCMD_USBRESUME routine.
280 * Tries to resume from USBSuspend mode into USBOperational.
281 * Must NOT be called from an interrupt.
285 WORD cmdUsbResume(struct IOUsbHWReq *ioreq,
286 struct PCIUnit *unit,
287 struct PCIDevice *base)
289 KPRINTF(10, ("UHCMD_USBRESUME ioreq: 0x%08lx\n", ioreq));
291 /* FIXME */
292 uhwGetUsbState(ioreq, unit, base);
293 if(ioreq->iouh_State & UHSF_OPERATIONAL)
295 return RC_OK;
297 return UHIOERR_USBOFFLINE;
299 /* \\\ */
301 /* /// "cmdUsbSuspend()" */
303 *======================================================================
304 * cmdUsbSuspend(ioreq, unit, base)
305 *======================================================================
307 * This is the device UHCMD_USBSUSPEND routine.
309 * Sets the USB into USBSuspend mode.
310 * Must NOT be called from an interrupt.
314 WORD cmdUsbSuspend(struct IOUsbHWReq *ioreq,
315 struct PCIUnit *unit,
316 struct PCIDevice *base)
318 KPRINTF(10, ("UHCMD_USBSUSPEND ioreq: 0x%08lx\n", ioreq));
320 /* FIXME */
321 uhwGetUsbState(ioreq, unit, base);
322 if(ioreq->iouh_State & UHSF_SUSPENDED)
324 return RC_OK;
326 return UHIOERR_USBOFFLINE;
328 /* \\\ */
330 /* /// "cmdUsbOper()" */
332 *======================================================================
333 * cmdUsbOper(ioreq, unit, base)
334 *======================================================================
336 * This is the device UHCMD_USBOPER routine.
338 * Sets the USB into USBOperational mode.
339 * Must NOT be called from an interrupt.
343 WORD cmdUsbOper(struct IOUsbHWReq *ioreq,
344 struct PCIUnit *unit,
345 struct PCIDevice *base)
347 KPRINTF(10, ("UHCMD_USBOPER ioreq: 0x%08lx\n", ioreq));
349 /* FIXME */
350 uhwGetUsbState(ioreq, unit, base);
351 if(ioreq->iouh_State & UHSF_OPERATIONAL)
353 return RC_OK;
355 return UHIOERR_USBOFFLINE;
357 /* \\\ */
359 /* /// "cmdQueryDevice()" */
361 *======================================================================
362 * cmdQueryDevice(ioreq, unit, base)
363 *======================================================================
365 * This is the device UHCMD_QUERYDEVICE routine.
367 * Returns information about the hardware.
371 WORD cmdQueryDevice(struct IOUsbHWReq *ioreq,
372 struct PCIUnit *unit,
373 struct PCIDevice *base)
375 struct TagItem *taglist = (struct TagItem *) ioreq->iouh_Data;
376 struct TagItem *tag;
377 ULONG count = 0;
379 KPRINTF(10, ("UHCMD_QUERYDEVICE ioreq: 0x%08lx, taglist: 0x%08lx\n", ioreq, taglist));
381 if((tag = FindTagItem(UHA_State, taglist)))
383 *((ULONG *) tag->ti_Data) = (ULONG) uhwGetUsbState(ioreq, unit, base);
384 count++;
386 if((tag = FindTagItem(UHA_Manufacturer, taglist)))
388 *((STRPTR *) tag->ti_Data) = "Chris Hodges";
389 count++;
391 if((tag = FindTagItem(UHA_ProductName, taglist)))
393 *((STRPTR *) tag->ti_Data) = unit->hu_ProductName;
394 count++;
396 if((tag = FindTagItem(UHA_Description, taglist)))
398 *((STRPTR *) tag->ti_Data) = "Generic adaptive host controller driver for PCI cards";
399 count++;
401 if((tag = FindTagItem(UHA_Copyright, taglist)))
403 *((STRPTR *) tag->ti_Data) = "©2007-2009 Chris Hodges";
404 count++;
406 if((tag = FindTagItem(UHA_Version, taglist)))
408 *((ULONG *) tag->ti_Data) = VERSION_NUMBER;
409 count++;
411 if((tag = FindTagItem(UHA_Revision, taglist)))
413 *((ULONG *) tag->ti_Data) = REVISION_NUMBER;
414 count++;
416 if((tag = FindTagItem(UHA_DriverVersion, taglist)))
418 *((ULONG *) tag->ti_Data) = 0x220;
419 count++;
421 if((tag = FindTagItem(UHA_Capabilities, taglist)))
423 *((ULONG *) tag->ti_Data) = UHCF_USB20;
424 count++;
426 ioreq->iouh_Actual = count;
427 return RC_OK;
429 /* \\\ */
431 /* /// "cmdControlXFerRootHub()" */
432 WORD cmdControlXFerRootHub(struct IOUsbHWReq *ioreq,
433 struct PCIUnit *unit,
434 struct PCIDevice *base)
436 struct PCIController *hc;
437 struct PCIController *chc;
438 UWORD rt = ioreq->iouh_SetupData.bmRequestType;
439 UWORD req = ioreq->iouh_SetupData.bRequest;
440 UWORD idx = AROS_WORD2LE(ioreq->iouh_SetupData.wIndex);
441 UWORD val = AROS_WORD2LE(ioreq->iouh_SetupData.wValue);
442 UWORD len = AROS_WORD2LE(ioreq->iouh_SetupData.wLength);
443 UWORD hciport;
444 ULONG numports = unit->hu_RootHubPorts;
445 BOOL cmdgood;
446 ULONG cnt;
448 if(ioreq->iouh_Endpoint)
450 return(UHIOERR_STALL);
453 if(len != ioreq->iouh_Length)
455 KPRINTF(20, ("RH: Len (%ld != %ld) mismatch!\n", len != ioreq->iouh_Length));
456 return(UHIOERR_STALL);
458 switch(rt)
460 case (URTF_STANDARD|URTF_DEVICE):
461 switch(req)
463 case USR_SET_ADDRESS:
464 KPRINTF(1, ("RH: SetAddress = %ld\n", val));
465 unit->hu_RootHubAddr = val;
466 ioreq->iouh_Actual = len;
467 return(0);
469 case USR_SET_CONFIGURATION:
470 KPRINTF(1, ("RH: SetConfiguration=%ld\n", val));
471 ioreq->iouh_Actual = len;
472 return(0);
474 break;
476 case (URTF_IN|URTF_STANDARD|URTF_DEVICE):
477 switch(req)
479 case USR_GET_DESCRIPTOR:
480 switch(val>>8)
482 case UDT_DEVICE:
483 KPRINTF(1, ("RH: GetDeviceDescriptor (%ld)\n", len));
484 ioreq->iouh_Actual = (len > sizeof(struct UsbStdDevDesc)) ? sizeof(struct UsbStdDevDesc) : len;
485 CopyMem((APTR) &RHDevDesc, ioreq->iouh_Data, ioreq->iouh_Actual);
486 if(ioreq->iouh_Length >= sizeof(struct UsbStdDevDesc))
488 if(unit->hu_RootHub20Ports)
490 struct UsbStdDevDesc *usdd = (struct UsbStdDevDesc *) ioreq->iouh_Data;
491 usdd->bcdUSB = AROS_WORD2LE(0x0200); // signal a highspeed root hub
492 usdd->bDeviceProtocol = 1; // single TT
495 return(0);
497 case UDT_CONFIGURATION:
499 UBYTE tmpbuf[9+9+7];
500 KPRINTF(1, ("RH: GetConfigDescriptor (%ld)\n", len));
501 CopyMem((APTR) &RHCfgDesc, tmpbuf, 9);
502 CopyMem((APTR) &RHIfDesc, &tmpbuf[9], 9);
503 CopyMem((APTR) &RHEPDesc, &tmpbuf[9+9], 7);
504 if(unit->hu_RootHub20Ports)
506 struct UsbStdEPDesc *usepd = (struct UsbStdEPDesc *) &tmpbuf[9+9];
507 usepd->bInterval = 12; // 2048 µFrames
509 ioreq->iouh_Actual = (len > 9+9+7) ? 9+9+7 : len;
510 CopyMem(tmpbuf, ioreq->iouh_Data, ioreq->iouh_Actual);
511 return(0);
514 case UDT_STRING:
515 if(val & 0xff) /* get lang array */
517 CONST_STRPTR source = NULL;
518 UWORD *mptr = ioreq->iouh_Data;
519 UWORD slen = 1;
520 KPRINTF(1, ("RH: GetString %04lx (%ld)\n", val, len));
521 if((val & 0xff) > 4) /* index too high? */
523 return(UHIOERR_STALL);
525 source = RHStrings[(val & 0xff)-1];
526 if(len > 1)
528 ioreq->iouh_Actual = 2;
529 while(*source++)
531 slen++;
533 source = RHStrings[(val & 0xff)-1];
534 *mptr++ = AROS_WORD2BE((slen<<9)|UDT_STRING);
535 while(ioreq->iouh_Actual+1 < len)
537 // special hack for unit number in root hub string
538 if(((val & 0xff) == 2) && (source[1] == 0))
540 *mptr++ = AROS_WORD2LE('0' + unit->hu_UnitNo);
541 } else {
542 *mptr++ = AROS_WORD2LE(*source);
544 source++;
545 ioreq->iouh_Actual += 2;
546 if(!(*source))
548 break;
552 } else {
553 UWORD *mptr = ioreq->iouh_Data;
554 KPRINTF(1, ("RH: GetLangArray %04lx (%ld)\n", val, len));
555 if(len > 1)
557 ioreq->iouh_Actual = 2;
558 mptr[0] = AROS_WORD2BE((4<<8)|UDT_STRING);
559 if(len > 3)
561 ioreq->iouh_Actual += 2;
562 mptr[1] = AROS_WORD2LE(0x0409);
566 return(0);
568 default:
569 KPRINTF(1, ("RH: Unsupported Descriptor %04lx\n", idx));
571 break;
573 case USR_GET_CONFIGURATION:
574 if(len == 1)
576 KPRINTF(1, ("RH: GetConfiguration\n"));
577 ((UBYTE *) ioreq->iouh_Data)[0] = 1;
578 ioreq->iouh_Actual = len;
579 return(0);
581 break;
583 break;
585 case (URTF_CLASS|URTF_OTHER):
586 switch(req)
588 case USR_SET_FEATURE:
589 if((!idx) && (idx > numports))
591 KPRINTF(20, ("Port %ld out of range\n", idx));
592 return(UHIOERR_STALL);
594 chc = unit->hu_PortMap11[idx - 1];
595 if(unit->hu_EhciOwned[idx - 1])
597 hc = unit->hu_PortMap20[idx - 1];
598 hciport = idx - 1;
599 } else {
600 hc = chc;
601 hciport = unit->hu_PortNum11[idx - 1];
603 cmdgood = FALSE;
604 switch(hc->hc_HCIType)
606 case HCITYPE_UHCI:
608 UWORD portreg = hciport ? UHCI_PORT2STSCTRL : UHCI_PORT1STSCTRL;
609 ULONG oldval = READREG16_LE(hc->hc_RegBase, portreg) & ~(UHPF_ENABLECHANGE|UHPF_CONNECTCHANGE); // these are clear-on-write!
610 ULONG newval = oldval;
611 switch(val)
613 /* case UFS_PORT_CONNECTION: not possible */
614 case UFS_PORT_ENABLE:
615 KPRINTF(10, ("Enabling Port (%s)\n", newval & UHPF_PORTENABLE ? "already" : "ok"));
616 newval |= UHPF_PORTENABLE;
617 cmdgood = TRUE;
618 break;
620 case UFS_PORT_SUSPEND:
621 newval |= UHPF_PORTSUSPEND;
622 hc->hc_PortChangeMap[hciport] |= UPSF_PORT_SUSPEND; // manually fake suspend change
623 cmdgood = TRUE;
624 break;
626 /* case UFS_PORT_OVER_CURRENT: not possible */
627 case UFS_PORT_RESET:
628 KPRINTF(10, ("Resetting Port (%s)\n", newval & UHPF_PORTRESET ? "already" : "ok"));
630 // this is an ugly blocking workaround to the inability of UHCI to clear reset automatically
631 newval &= ~(UHPF_PORTSUSPEND|UHPF_PORTENABLE);
632 newval |= UHPF_PORTRESET;
633 WRITEREG16_LE(hc->hc_RegBase, portreg, newval);
634 uhwDelayMS(75, unit, base);
635 newval = READREG16_LE(hc->hc_RegBase, portreg) & ~(UHPF_ENABLECHANGE|UHPF_CONNECTCHANGE|UHPF_PORTSUSPEND|UHPF_PORTENABLE);
636 KPRINTF(10, ("Reset=%s\n", newval & UHPF_PORTRESET ? "GOOD" : "BAD!"));
637 // like windows does it
638 newval &= ~UHPF_PORTRESET;
639 WRITEREG16_LE(hc->hc_RegBase, portreg, newval);
640 uhwDelayMicro(50, unit, base);
641 newval = READREG16_LE(hc->hc_RegBase, portreg) & ~(UHPF_ENABLECHANGE|UHPF_CONNECTCHANGE|UHPF_PORTSUSPEND);
642 KPRINTF(10, ("Reset=%s\n", newval & UHPF_PORTRESET ? "BAD!" : "GOOD"));
643 newval &= ~(UHPF_PORTSUSPEND|UHPF_PORTRESET);
644 newval |= UHPF_PORTENABLE;
645 WRITEREG16_LE(hc->hc_RegBase, portreg, newval);
646 hc->hc_PortChangeMap[hciport] |= UPSF_PORT_RESET|UPSF_PORT_ENABLE; // manually fake reset change
648 cnt = 100;
651 uhwDelayMS(1, unit, base);
652 newval = READREG16_LE(hc->hc_RegBase, portreg) & ~(UHPF_ENABLECHANGE|UHPF_CONNECTCHANGE);
653 } while(--cnt && (!(newval & UHPF_PORTENABLE)));
654 if(cnt)
656 KPRINTF(10, ("Enabled after %ld ticks\n", 100-cnt));
657 } else {
658 KPRINTF(20, ("Port refuses to be enabled!\n"));
659 return(UHIOERR_HOSTERROR);
661 // make enumeration possible
662 unit->hu_DevControllers[0] = hc;
663 cmdgood = TRUE;
664 break;
666 case UFS_PORT_POWER:
667 KPRINTF(10, ("Powering Port\n"));
668 // ignore for UHCI, is always powered
669 cmdgood = TRUE;
670 break;
672 /* case UFS_PORT_LOW_SPEED: not possible */
673 /* case UFS_C_PORT_CONNECTION:
674 case UFS_C_PORT_ENABLE:
675 case UFS_C_PORT_SUSPEND:
676 case UFS_C_PORT_OVER_CURRENT:
677 case UFS_C_PORT_RESET: */
679 if(cmdgood)
681 KPRINTF(5, ("Port %ld SET_FEATURE %04lx->%04lx\n", idx, oldval, newval));
682 WRITEREG16_LE(hc->hc_RegBase, portreg, newval);
683 return(0);
685 break;
688 case HCITYPE_OHCI:
690 UWORD portreg = OHCI_PORTSTATUS + (hciport<<2);
691 ULONG oldval = READREG32_LE(hc->hc_RegBase, portreg);
693 switch(val)
695 /* case UFS_PORT_CONNECTION: not possible */
696 case UFS_PORT_ENABLE:
697 KPRINTF(10, ("Enabling Port (%s)\n", oldval & OHPF_PORTENABLE ? "already" : "ok"));
698 WRITEREG32_LE(hc->hc_RegBase, portreg, OHPF_PORTENABLE);
699 cmdgood = TRUE;
700 break;
702 case UFS_PORT_SUSPEND:
703 KPRINTF(10, ("Suspending Port (%s)\n", oldval & OHPF_PORTSUSPEND ? "already" : "ok"));
704 //hc->hc_PortChangeMap[hciport] |= UPSF_PORT_SUSPEND; // manually fake suspend change
705 WRITEREG32_LE(hc->hc_RegBase, portreg, OHPF_PORTSUSPEND);
706 cmdgood = TRUE;
707 break;
709 /* case UFS_PORT_OVER_CURRENT: not possible */
710 case UFS_PORT_RESET:
711 KPRINTF(10, ("Resetting Port (%s)\n", oldval & OHPF_PORTRESET ? "already" : "ok"));
712 // make sure we have at least 50ms of reset time here, as required for a root hub port
713 WRITEREG32_LE(hc->hc_RegBase, portreg, OHPF_PORTRESET);
714 uhwDelayMS(10, unit, base);
715 WRITEREG32_LE(hc->hc_RegBase, portreg, OHPF_PORTRESET);
716 uhwDelayMS(10, unit, base);
717 WRITEREG32_LE(hc->hc_RegBase, portreg, OHPF_PORTRESET);
718 uhwDelayMS(10, unit, base);
719 WRITEREG32_LE(hc->hc_RegBase, portreg, OHPF_PORTRESET);
720 uhwDelayMS(10, unit, base);
721 WRITEREG32_LE(hc->hc_RegBase, portreg, OHPF_PORTRESET);
722 // make enumeration possible
723 unit->hu_DevControllers[0] = hc;
724 cmdgood = TRUE;
725 break;
727 case UFS_PORT_POWER:
728 KPRINTF(10, ("Powering Port (%s)\n", oldval & OHPF_PORTPOWER ? "already" : "ok"));
729 WRITEREG32_LE(hc->hc_RegBase, portreg, OHPF_PORTPOWER);
730 cmdgood = TRUE;
731 break;
733 /* case UFS_PORT_LOW_SPEED: not possible */
734 /* case UFS_C_PORT_CONNECTION:
735 case UFS_C_PORT_ENABLE:
736 case UFS_C_PORT_SUSPEND:
737 case UFS_C_PORT_OVER_CURRENT:
738 case UFS_C_PORT_RESET: */
740 if(cmdgood)
742 return(0);
744 break;
747 case HCITYPE_EHCI:
749 UWORD portreg = EHCI_PORTSC1 + (hciport<<2);
750 ULONG oldval = READREG32_LE(hc->hc_RegBase, portreg) & ~(EHPF_OVERCURRENTCHG|EHPF_ENABLECHANGE|EHPF_CONNECTCHANGE); // these are clear-on-write!
751 ULONG newval = oldval;
752 switch(val)
754 /* case UFS_PORT_CONNECTION: not possible */
755 case UFS_PORT_ENABLE:
756 KPRINTF(10, ("Enabling Port (%s)\n", newval & EHPF_PORTENABLE ? "already" : "ok"));
757 newval |= EHPF_PORTENABLE;
758 cmdgood = TRUE;
759 break;
761 case UFS_PORT_SUSPEND:
762 newval |= EHPF_PORTSUSPEND;
763 hc->hc_PortChangeMap[hciport] |= UPSF_PORT_SUSPEND; // manually fake suspend change
764 cmdgood = TRUE;
765 break;
767 /* case UFS_PORT_OVER_CURRENT: not possible */
768 case UFS_PORT_RESET:
769 KPRINTF(10, ("Resetting Port (%s)\n", newval & EHPF_PORTRESET ? "already" : "ok"));
771 // this is an ugly blocking workaround to the inability of UHCI to clear reset automatically
772 newval &= ~(EHPF_PORTSUSPEND|EHPF_PORTENABLE);
773 newval |= EHPF_PORTRESET;
774 WRITEREG32_LE(hc->hc_RegBase, portreg, newval);
775 uhwDelayMS(50, unit, base);
776 newval = READREG32_LE(hc->hc_RegBase, portreg) & ~(EHPF_OVERCURRENTCHG|EHPF_ENABLECHANGE|EHPF_CONNECTCHANGE|EHPF_PORTSUSPEND|EHPF_PORTENABLE);
777 KPRINTF(10, ("Reset=%s\n", newval & EHPF_PORTRESET ? "GOOD" : "BAD!"));
778 newval &= ~EHPF_PORTRESET;
779 WRITEREG32_LE(hc->hc_RegBase, portreg, newval);
780 uhwDelayMS(10, unit, base);
781 newval = READREG32_LE(hc->hc_RegBase, portreg) & ~(EHPF_OVERCURRENTCHG|EHPF_ENABLECHANGE|EHPF_CONNECTCHANGE|EHPF_PORTSUSPEND);
782 KPRINTF(10, ("Reset=%s\n", newval & EHPF_PORTRESET ? "BAD!" : "GOOD"));
783 KPRINTF(10, ("Highspeed=%s\n", newval & EHPF_PORTENABLE ? "YES!" : "NO"));
784 if(!(newval & EHPF_PORTENABLE))
786 // if not highspeed, release ownership
787 KPRINTF(20, ("Transferring ownership to UHCI/OHCI port %ld\n", unit->hu_PortNum11[idx - 1]));
788 KPRINTF(10, ("Device is %s\n", newval & EHPF_LINESTATUS_DM ? "LOWSPEED" : "FULLSPEED"));
789 unit->hu_EhciOwned[idx - 1] = FALSE;
790 newval |= EHPF_NOTPORTOWNER;
791 WRITEREG32_LE(hc->hc_RegBase, portreg, newval);
792 // enable companion controller port
793 switch(chc->hc_HCIType)
795 case HCITYPE_UHCI:
797 UWORD uhcihciport = unit->hu_PortNum11[idx - 1];
798 UWORD uhciportreg = uhcihciport ? UHCI_PORT2STSCTRL : UHCI_PORT1STSCTRL;
799 ULONG uhcinewval;
801 uhcinewval = READREG16_LE(chc->hc_RegBase, uhciportreg) & ~(UHPF_ENABLECHANGE|UHPF_CONNECTCHANGE|UHPF_PORTSUSPEND);
802 KPRINTF(10, ("UHCI Reset=%s\n", uhcinewval & UHPF_PORTRESET ? "BAD!" : "GOOD"));
803 if((uhcinewval & UHPF_PORTRESET))//|| (newval & EHPF_LINESTATUS_DM))
805 // this is an ugly blocking workaround to the inability of UHCI to clear reset automatically
806 KPRINTF(20, ("Uhm, UHCI reset was bad!\n"));
807 uhcinewval &= ~(UHPF_PORTSUSPEND|UHPF_PORTENABLE);
808 uhcinewval |= UHPF_PORTRESET;
809 WRITEREG16_LE(chc->hc_RegBase, uhciportreg, uhcinewval);
810 uhwDelayMS(75, unit, base);
811 uhcinewval = READREG16_LE(chc->hc_RegBase, uhciportreg) & ~(UHPF_ENABLECHANGE|UHPF_CONNECTCHANGE|UHPF_PORTSUSPEND|UHPF_PORTENABLE);
812 KPRINTF(10, ("ReReset=%s\n", uhcinewval & UHPF_PORTRESET ? "GOOD" : "BAD!"));
813 uhcinewval &= ~UHPF_PORTRESET;
814 WRITEREG16_LE(chc->hc_RegBase, uhciportreg, uhcinewval);
815 uhwDelayMS(5, unit, base);
816 uhcinewval = READREG16_LE(chc->hc_RegBase, uhciportreg) & ~(UHPF_ENABLECHANGE|UHPF_CONNECTCHANGE|UHPF_PORTSUSPEND);
817 KPRINTF(10, ("ReReset=%s\n", uhcinewval & UHPF_PORTRESET ? "STILL BAD!" : "GOOD"));
819 uhcinewval &= ~UHPF_PORTRESET;
820 uhcinewval |= UHPF_PORTENABLE;
821 WRITEREG16_LE(chc->hc_RegBase, uhciportreg, uhcinewval);
822 chc->hc_PortChangeMap[uhcihciport] |= UPSF_PORT_RESET|UPSF_PORT_ENABLE; // manually fake reset change
823 uhwDelayMS(5, unit, base);
824 cnt = 100;
827 uhwDelayMS(1, unit, base);
828 uhcinewval = READREG16_LE(chc->hc_RegBase, uhciportreg) & ~(UHPF_ENABLECHANGE|UHPF_CONNECTCHANGE);
829 } while(--cnt && (!(uhcinewval & UHPF_PORTENABLE)));
830 if(cnt)
832 KPRINTF(10, ("Enabled after %ld ticks\n", 100-cnt));
833 } else {
834 KPRINTF(20, ("Port refuses to be enabled!\n"));
835 return(UHIOERR_HOSTERROR);
837 break;
840 case HCITYPE_OHCI:
842 UWORD ohcihciport = unit->hu_PortNum11[idx - 1];
843 UWORD ohciportreg = OHCI_PORTSTATUS + (ohcihciport<<2);
844 ULONG ohcioldval = READREG32_LE(hc->hc_RegBase, portreg);
845 KPRINTF(10, ("OHCI Resetting Port (%s)\n", ohcioldval & OHPF_PORTRESET ? "already" : "ok"));
846 WRITEREG32_LE(chc->hc_RegBase, ohciportreg, OHPF_PORTPOWER|OHPF_PORTRESET);
847 break;
851 // make enumeration possible
852 unit->hu_DevControllers[0] = chc;
853 } else {
854 newval &= ~EHPF_PORTRESET;
855 newval |= EHPF_PORTENABLE;
856 WRITEREG16_LE(hc->hc_RegBase, portreg, newval);
857 hc->hc_PortChangeMap[hciport] |= UPSF_PORT_RESET; // manually fake reset change
858 uhwDelayMS(10, unit, base);
859 cnt = 100;
862 uhwDelayMS(1, unit, base);
863 newval = READREG32_LE(hc->hc_RegBase, portreg) & ~(EHPF_OVERCURRENTCHG|EHPF_ENABLECHANGE|EHPF_CONNECTCHANGE);
864 } while(--cnt && (!(newval & EHPF_PORTENABLE)));
865 if(cnt)
867 KPRINTF(10, ("Enabled after %ld ticks\n", 100-cnt));
868 } else {
869 KPRINTF(20, ("Port refuses to be enabled!\n"));
870 return(UHIOERR_HOSTERROR);
872 // make enumeration possible
873 unit->hu_DevControllers[0] = hc;
875 cmdgood = TRUE;
876 break;
878 case UFS_PORT_POWER:
879 KPRINTF(10, ("Powering Port\n"));
880 newval |= EHPF_PORTPOWER;
881 cmdgood = TRUE;
882 break;
884 /* case UFS_PORT_LOW_SPEED: not possible */
885 /* case UFS_C_PORT_CONNECTION:
886 case UFS_C_PORT_ENABLE:
887 case UFS_C_PORT_SUSPEND:
888 case UFS_C_PORT_OVER_CURRENT:
889 case UFS_C_PORT_RESET: */
891 if(cmdgood)
893 KPRINTF(5, ("Port %ld SET_FEATURE %04lx->%04lx\n", idx, oldval, newval));
894 WRITEREG32_LE(hc->hc_RegBase, portreg, newval);
895 return(0);
897 break;
900 break;
902 case USR_CLEAR_FEATURE:
903 if((!idx) && (idx > numports))
905 KPRINTF(20, ("Port %ld out of range\n", idx));
906 return(UHIOERR_STALL);
908 if(unit->hu_EhciOwned[idx - 1])
910 hc = unit->hu_PortMap20[idx - 1];
911 hciport = idx - 1;
912 } else {
913 hc = unit->hu_PortMap11[idx - 1];
914 hciport = unit->hu_PortNum11[idx - 1];
916 KPRINTF(10, ("Clear Feature %ld maps from glob. Port %ld to local Port %ld\n", val, idx, hciport));
917 cmdgood = FALSE;
918 switch(hc->hc_HCIType)
920 case HCITYPE_UHCI:
922 UWORD portreg = hciport ? UHCI_PORT2STSCTRL : UHCI_PORT1STSCTRL;
923 ULONG oldval = READREG16_LE(hc->hc_RegBase, portreg) & ~(UHPF_ENABLECHANGE|UHPF_CONNECTCHANGE); // these are clear-on-write!
924 ULONG newval = oldval;
925 switch(val)
927 case UFS_PORT_ENABLE:
928 KPRINTF(10, ("Disabling Port (%s)\n", newval & UHPF_PORTENABLE ? "ok" : "already"));
929 newval &= ~UHPF_PORTENABLE;
930 cmdgood = TRUE;
931 // disable enumeration
932 unit->hu_DevControllers[0] = NULL;
933 break;
935 case UFS_PORT_SUSPEND:
936 newval &= ~UHPF_PORTSUSPEND;
937 cmdgood = TRUE;
938 break;
940 case UFS_PORT_POWER: // ignore for UHCI, there's no power control here
941 KPRINTF(10, ("Disabling Power\n"));
942 KPRINTF(10, ("Disabling Port (%s)\n", newval & UHPF_PORTENABLE ? "ok" : "already"));
943 newval &= ~UHPF_PORTENABLE;
944 cmdgood = TRUE;
945 break;
947 case UFS_C_PORT_CONNECTION:
948 newval |= UHPF_CONNECTCHANGE; // clear-on-write!
949 hc->hc_PortChangeMap[hciport] &= ~UPSF_PORT_CONNECTION;
950 cmdgood = TRUE;
951 break;
953 case UFS_C_PORT_ENABLE:
954 newval |= UHPF_ENABLECHANGE; // clear-on-write!
955 hc->hc_PortChangeMap[hciport] &= ~UPSF_PORT_ENABLE;
956 cmdgood = TRUE;
957 break;
959 case UFS_C_PORT_SUSPEND: // ignore for UHCI, there's no bit indicating this
960 hc->hc_PortChangeMap[hciport] &= ~UPSF_PORT_SUSPEND; // manually fake suspend change clearing
961 cmdgood = TRUE;
962 break;
964 case UFS_C_PORT_OVER_CURRENT: // ignore for UHCI, there's no bit indicating this
965 hc->hc_PortChangeMap[hciport] &= ~UPSF_PORT_OVER_CURRENT; // manually fake over current clearing
966 cmdgood = TRUE;
967 break;
969 case UFS_C_PORT_RESET: // ignore for UHCI, there's no bit indicating this
970 hc->hc_PortChangeMap[hciport] &= ~UPSF_PORT_RESET; // manually fake reset change clearing
971 cmdgood = TRUE;
972 break;
974 if(cmdgood)
976 KPRINTF(5, ("Port %ld CLEAR_FEATURE %04lx->%04lx\n", idx, oldval, newval));
977 WRITEREG16_LE(hc->hc_RegBase, portreg, newval);
978 if(hc->hc_PortChangeMap[hciport])
980 unit->hu_RootPortChanges |= 1UL<<idx;
981 } else {
982 unit->hu_RootPortChanges &= ~(1UL<<idx);
984 return(0);
986 break;
989 case HCITYPE_OHCI:
991 UWORD portreg = OHCI_PORTSTATUS + (hciport<<2);
992 ULONG oldval = READREG32_LE(hc->hc_RegBase, portreg);
994 switch(val)
996 case UFS_PORT_ENABLE:
997 KPRINTF(10, ("Disabling Port (%s)\n", oldval & OHPF_PORTENABLE ? "ok" : "already"));
998 WRITEREG32_LE(hc->hc_RegBase, portreg, OHPF_PORTDISABLE);
999 cmdgood = TRUE;
1000 break;
1002 case UFS_PORT_SUSPEND:
1003 KPRINTF(10, ("Resuming Port (%s)\n", oldval & OHPF_PORTSUSPEND ? "ok" : "already"));
1004 //hc->hc_PortChangeMap[hciport] &= ~UPSF_PORT_SUSPEND; // manually fake suspend change
1005 WRITEREG32_LE(hc->hc_RegBase, portreg, OHPF_RESUME);
1006 cmdgood = TRUE;
1007 break;
1009 case UFS_PORT_POWER:
1010 KPRINTF(10, ("Unpowering Port (%s)\n", oldval & OHPF_PORTPOWER ? "ok" : "already"));
1011 WRITEREG32_LE(hc->hc_RegBase, portreg, OHPF_PORTUNPOWER);
1012 cmdgood = TRUE;
1013 break;
1015 case UFS_C_PORT_CONNECTION:
1016 WRITEREG32_LE(hc->hc_RegBase, portreg, OHPF_CONNECTCHANGE);
1017 hc->hc_PortChangeMap[hciport] &= ~UPSF_PORT_CONNECTION;
1018 cmdgood = TRUE;
1019 break;
1021 case UFS_C_PORT_ENABLE:
1022 WRITEREG32_LE(hc->hc_RegBase, portreg, OHPF_ENABLECHANGE);
1023 hc->hc_PortChangeMap[hciport] &= ~UPSF_PORT_ENABLE;
1024 cmdgood = TRUE;
1025 break;
1027 case UFS_C_PORT_SUSPEND:
1028 WRITEREG32_LE(hc->hc_RegBase, portreg, OHPF_RESUMEDTX);
1029 hc->hc_PortChangeMap[hciport] &= ~UPSF_PORT_SUSPEND;
1030 cmdgood = TRUE;
1031 break;
1033 case UFS_C_PORT_OVER_CURRENT:
1034 WRITEREG32_LE(hc->hc_RegBase, portreg, OHPF_OVERCURRENTCHG);
1035 hc->hc_PortChangeMap[hciport] &= ~UPSF_PORT_OVER_CURRENT;
1036 cmdgood = TRUE;
1037 break;
1039 case UFS_C_PORT_RESET:
1040 WRITEREG32_LE(hc->hc_RegBase, portreg, OHPF_RESETCHANGE);
1041 hc->hc_PortChangeMap[hciport] &= ~UPSF_PORT_RESET;
1042 cmdgood = TRUE;
1043 break;
1045 if(cmdgood)
1047 return(0);
1049 break;
1052 case HCITYPE_EHCI:
1054 UWORD portreg = EHCI_PORTSC1 + (hciport<<2);
1055 ULONG oldval = READREG32_LE(hc->hc_RegBase, portreg) & ~(EHPF_OVERCURRENTCHG|EHPF_ENABLECHANGE|EHPF_CONNECTCHANGE); // these are clear-on-write!
1056 ULONG newval = oldval;
1057 switch(val)
1059 case UFS_PORT_ENABLE:
1060 KPRINTF(10, ("Disabling Port (%s)\n", newval & EHPF_PORTENABLE ? "ok" : "already"));
1061 newval &= ~EHPF_PORTENABLE;
1062 cmdgood = TRUE;
1063 // disable enumeration
1064 unit->hu_DevControllers[0] = NULL;
1065 break;
1067 case UFS_PORT_SUSPEND:
1068 newval &= ~EHPF_PORTSUSPEND;
1069 cmdgood = TRUE;
1070 break;
1072 case UFS_PORT_POWER: // ignore for UHCI, there's no power control here
1073 KPRINTF(10, ("Disabling Power (%s)\n", newval & EHPF_PORTPOWER ? "ok" : "already"));
1074 KPRINTF(10, ("Disabling Port (%s)\n", newval & EHPF_PORTENABLE ? "ok" : "already"));
1075 newval &= ~(EHPF_PORTENABLE|EHPF_PORTPOWER);
1076 cmdgood = TRUE;
1077 break;
1079 case UFS_C_PORT_CONNECTION:
1080 newval |= EHPF_CONNECTCHANGE; // clear-on-write!
1081 hc->hc_PortChangeMap[hciport] &= ~UPSF_PORT_CONNECTION;
1082 cmdgood = TRUE;
1083 break;
1085 case UFS_C_PORT_ENABLE:
1086 newval |= EHPF_ENABLECHANGE; // clear-on-write!
1087 hc->hc_PortChangeMap[hciport] &= ~UPSF_PORT_ENABLE;
1088 cmdgood = TRUE;
1089 break;
1091 case UFS_C_PORT_SUSPEND: // ignore for EHCI, there's no bit indicating this
1092 hc->hc_PortChangeMap[hciport] &= ~UPSF_PORT_SUSPEND; // manually fake suspend change clearing
1093 cmdgood = TRUE;
1094 break;
1096 case UFS_C_PORT_OVER_CURRENT:
1097 newval |= EHPF_OVERCURRENTCHG; // clear-on-write!
1098 hc->hc_PortChangeMap[hciport] &= ~UPSF_PORT_OVER_CURRENT; // manually fake over current clearing
1099 cmdgood = TRUE;
1100 break;
1102 case UFS_C_PORT_RESET: // ignore for EHCI, there's no bit indicating this
1103 hc->hc_PortChangeMap[hciport] &= ~UPSF_PORT_RESET; // manually fake reset change clearing
1104 cmdgood = TRUE;
1105 break;
1107 if(cmdgood)
1109 KPRINTF(5, ("Port %ld CLEAR_FEATURE %08lx->%08lx\n", idx, oldval, newval));
1110 WRITEREG32_LE(hc->hc_RegBase, portreg, newval);
1111 if(hc->hc_PortChangeMap[hciport])
1113 unit->hu_RootPortChanges |= 1UL<<idx;
1114 } else {
1115 unit->hu_RootPortChanges &= ~(1UL<<idx);
1117 return(0);
1119 break;
1122 break;
1124 break;
1126 case (URTF_IN|URTF_CLASS|URTF_OTHER):
1127 switch(req)
1129 case USR_GET_STATUS:
1131 UWORD *mptr = ioreq->iouh_Data;
1132 if(len != sizeof(struct UsbPortStatus))
1134 return(UHIOERR_STALL);
1136 if((!idx) && (idx > numports))
1138 KPRINTF(20, ("Port %ld out of range\n", idx));
1139 return(UHIOERR_STALL);
1141 if(unit->hu_EhciOwned[idx - 1])
1143 hc = unit->hu_PortMap20[idx - 1];
1144 hciport = idx - 1;
1145 } else {
1146 hc = unit->hu_PortMap11[idx - 1];
1147 hciport = unit->hu_PortNum11[idx - 1];
1149 switch(hc->hc_HCIType)
1151 case HCITYPE_UHCI:
1153 UWORD portreg = hciport ? UHCI_PORT2STSCTRL : UHCI_PORT1STSCTRL;
1154 UWORD oldval = READREG16_LE(hc->hc_RegBase, portreg);
1155 *mptr = UPSF_PORT_POWER;
1156 if(oldval & UHPF_PORTCONNECTED) *mptr |= UPSF_PORT_CONNECTION;
1157 if(oldval & UHPF_PORTENABLE) *mptr |= UPSF_PORT_ENABLE;
1158 if(oldval & UHPF_LOWSPEED) *mptr |= UPSF_PORT_LOW_SPEED;
1159 if(oldval & UHPF_PORTRESET) *mptr |= UPSF_PORT_RESET;
1160 if(oldval & UHPF_PORTSUSPEND) *mptr |= UPSF_PORT_SUSPEND;
1162 KPRINTF(5, ("UHCI Port %ld is %s\n", idx, oldval & UHPF_LOWSPEED ? "LOWSPEED" : "FULLSPEED"));
1163 KPRINTF(5, ("UHCI Port %ld Status %08lx\n", idx, *mptr));
1165 mptr++;
1166 if(oldval & UHPF_ENABLECHANGE)
1168 hc->hc_PortChangeMap[hciport] |= UPSF_PORT_ENABLE;
1170 if(oldval & UHPF_CONNECTCHANGE)
1172 hc->hc_PortChangeMap[hciport] |= UPSF_PORT_CONNECTION;
1174 if(oldval & UHPF_RESUMEDTX)
1176 hc->hc_PortChangeMap[hciport] |= UPSF_PORT_SUSPEND|UPSF_PORT_ENABLE;
1178 *mptr = hc->hc_PortChangeMap[hciport];
1179 WRITEREG16_LE(hc->hc_RegBase, portreg, oldval);
1180 KPRINTF(5, ("UHCI Port %ld Change %08lx\n", idx, *mptr));
1181 return(0);
1184 case HCITYPE_OHCI:
1186 UWORD portreg = OHCI_PORTSTATUS + (hciport<<2);
1187 ULONG oldval = READREG32_LE(hc->hc_RegBase, portreg);
1189 *mptr = 0;
1190 if(oldval & OHPF_PORTPOWER) *mptr |= UPSF_PORT_POWER;
1191 if(oldval & OHPF_OVERCURRENT) *mptr |= UPSF_PORT_OVER_CURRENT;
1192 if(oldval & OHPF_PORTCONNECTED) *mptr |= UPSF_PORT_CONNECTION;
1193 if(oldval & OHPF_PORTENABLE) *mptr |= UPSF_PORT_ENABLE;
1194 if(oldval & OHPF_LOWSPEED) *mptr |= UPSF_PORT_LOW_SPEED;
1195 if(oldval & OHPF_PORTRESET) *mptr |= UPSF_PORT_RESET;
1196 if(oldval & OHPF_PORTSUSPEND) *mptr |= UPSF_PORT_SUSPEND;
1198 KPRINTF(5, ("OHCI Port %ld (glob. %ld) is %s\n", hciport, idx, oldval & OHPF_LOWSPEED ? "LOWSPEED" : "FULLSPEED"));
1199 KPRINTF(5, ("OHCI Port %ld Status %08lx (%08lx)\n", idx, *mptr, oldval));
1201 mptr++;
1202 if(oldval & OHPF_OVERCURRENTCHG)
1204 hc->hc_PortChangeMap[hciport] |= UPSF_PORT_OVER_CURRENT;
1206 if(oldval & OHPF_RESETCHANGE)
1208 hc->hc_PortChangeMap[hciport] |= UPSF_PORT_RESET;
1210 if(oldval & OHPF_ENABLECHANGE)
1212 hc->hc_PortChangeMap[hciport] |= UPSF_PORT_ENABLE;
1214 if(oldval & OHPF_CONNECTCHANGE)
1216 hc->hc_PortChangeMap[hciport] |= UPSF_PORT_CONNECTION;
1218 if(oldval & OHPF_RESUMEDTX)
1220 hc->hc_PortChangeMap[hciport] |= UPSF_PORT_SUSPEND;
1222 *mptr = hc->hc_PortChangeMap[hciport];
1223 KPRINTF(5, ("OHCI Port %ld Change %08lx\n", idx, *mptr));
1224 return(0);
1227 case HCITYPE_EHCI:
1229 UWORD portreg = EHCI_PORTSC1 + (hciport<<2);
1230 ULONG oldval = READREG32_LE(hc->hc_RegBase, portreg);
1232 *mptr = 0;
1233 if(oldval & EHPF_PORTCONNECTED) *mptr |= UPSF_PORT_CONNECTION;
1234 if(oldval & EHPF_PORTENABLE) *mptr |= UPSF_PORT_ENABLE|UPSF_PORT_HIGH_SPEED;
1235 if((oldval & (EHPF_LINESTATUS_DM|EHPF_PORTCONNECTED|EHPF_PORTENABLE)) ==
1236 (EHPF_LINESTATUS_DM|EHPF_PORTCONNECTED))
1238 KPRINTF(10, ("EHCI Port %ld is LOWSPEED\n", idx));
1239 // we need to detect low speed devices prior to reset
1240 *mptr |= UPSF_PORT_LOW_SPEED;
1243 if(oldval & EHPF_PORTRESET) *mptr |= UPSF_PORT_RESET;
1244 if(oldval & EHPF_PORTSUSPEND) *mptr |= UPSF_PORT_SUSPEND;
1245 if(oldval & EHPF_PORTPOWER) *mptr |= UPSF_PORT_POWER;
1246 if(oldval & EHPM_PORTINDICATOR) *mptr |= UPSF_PORT_INDICATOR;
1248 KPRINTF(5, ("EHCI Port %ld Status %08lx\n", idx, *mptr));
1250 mptr++;
1251 if(oldval & EHPF_ENABLECHANGE)
1253 hc->hc_PortChangeMap[hciport] |= UPSF_PORT_ENABLE;
1255 if(oldval & EHPF_CONNECTCHANGE)
1257 hc->hc_PortChangeMap[hciport] |= UPSF_PORT_CONNECTION;
1259 if(oldval & EHPF_RESUMEDTX)
1261 hc->hc_PortChangeMap[hciport] |= UPSF_PORT_SUSPEND|UPSF_PORT_ENABLE;
1263 if(oldval & EHPF_OVERCURRENTCHG)
1265 hc->hc_PortChangeMap[hciport] |= UPSF_PORT_OVER_CURRENT;
1267 *mptr = hc->hc_PortChangeMap[hciport];
1268 WRITEREG32_LE(hc->hc_RegBase, portreg, oldval);
1269 KPRINTF(5, ("EHCI Port %ld Change %08lx\n", idx, *mptr));
1270 return(0);
1273 return(0);
1277 break;
1279 case (URTF_IN|URTF_CLASS|URTF_DEVICE):
1280 switch(req)
1282 case USR_GET_STATUS:
1284 UWORD *mptr = ioreq->iouh_Data;
1285 if(len < sizeof(struct UsbHubStatus))
1287 return(UHIOERR_STALL);
1289 *mptr++ = 0;
1290 *mptr++ = 0;
1291 ioreq->iouh_Actual = 4;
1292 return(0);
1295 case USR_GET_DESCRIPTOR:
1296 switch(val>>8)
1298 case UDT_HUB:
1300 ULONG hubdesclen = 9;
1301 ULONG powergood = 1;
1302 struct UsbHubDesc *uhd = (struct UsbHubDesc *) ioreq->iouh_Data;
1303 KPRINTF(1, ("RH: GetHubDescriptor (%ld)\n", len));
1304 if(unit->hu_RootHubPorts > 7) // needs two bytes for port masks
1306 hubdesclen += 2;
1308 ioreq->iouh_Actual = (len > hubdesclen) ? hubdesclen : len;
1309 CopyMem((APTR) &RHHubDesc, ioreq->iouh_Data, ioreq->iouh_Actual);
1310 if(ioreq->iouh_Length)
1312 uhd->bLength = hubdesclen;
1314 if(ioreq->iouh_Length >= 6)
1316 hc = (struct PCIController *) unit->hu_Controllers.lh_Head;
1317 while(hc->hc_Node.ln_Succ)
1319 if(hc->hc_HCIType == HCITYPE_OHCI)
1321 ULONG localpwgood = READREG32_LE(hc->hc_RegBase, OHCI_HUBDESCA & OHAM_POWERGOOD) >> OHAS_POWERGOOD;
1322 if(localpwgood > powergood)
1324 powergood = localpwgood;
1325 KPRINTF(10, ("Increasing power good time to %ld\n", powergood));
1328 hc = (struct PCIController *) hc->hc_Node.ln_Succ;
1331 uhd->bPwrOn2PwrGood = powergood;
1333 if(ioreq->iouh_Length >= hubdesclen)
1335 uhd->bNbrPorts = unit->hu_RootHubPorts;
1336 if(hubdesclen == 9)
1338 uhd->DeviceRemovable = 0;
1339 uhd->PortPwrCtrlMask = (1<<(unit->hu_RootHubPorts+2))-2;
1340 } else {
1341 // each field is now 16 bits wide
1342 uhd->DeviceRemovable = 0;
1343 uhd->PortPwrCtrlMask = 0;
1344 ((UBYTE *) ioreq->iouh_Data)[9] = (1<<(unit->hu_RootHubPorts+2))-2;
1345 ((UBYTE *) ioreq->iouh_Data)[10] = ((1<<(unit->hu_RootHubPorts+2))-2)>>8;
1348 return(0);
1351 default:
1352 KPRINTF(20, ("RH: Unsupported Descriptor %04lx\n", idx));
1354 break;
1358 KPRINTF(20, ("RH: Unsupported command %02lx %02lx %04lx %04lx %04lx!\n", rt, req, idx, val, len));
1359 return(UHIOERR_STALL);
1361 /* \\\ */
1363 /* /// "cmdIntXFerRootHub()" */
1364 WORD cmdIntXFerRootHub(struct IOUsbHWReq *ioreq,
1365 struct PCIUnit *unit,
1366 struct PCIDevice *base)
1368 if((ioreq->iouh_Endpoint != 1) || (!ioreq->iouh_Length))
1370 return(UHIOERR_STALL);
1373 if(unit->hu_RootPortChanges)
1375 KPRINTF(1, ("Immediate Portchange map %04lx\n", unit->hu_RootPortChanges));
1377 if((unit->hu_RootHubPorts < 8) || (ioreq->iouh_Length == 1))
1379 *((UBYTE *) ioreq->iouh_Data) = unit->hu_RootPortChanges;
1380 ioreq->iouh_Actual = 1;
1381 } else {
1382 ((UBYTE *) ioreq->iouh_Data)[0] = unit->hu_RootPortChanges;
1383 ((UBYTE *) ioreq->iouh_Data)[1] = unit->hu_RootPortChanges>>8;
1384 ioreq->iouh_Actual = 2;
1386 unit->hu_RootPortChanges = 0;
1387 return(0);
1389 ioreq->iouh_Req.io_Flags &= ~IOF_QUICK;
1390 Disable();
1391 AddTail(&unit->hu_RHIOQueue, (struct Node *) ioreq);
1392 Enable();
1393 return(RC_DONTREPLY);
1395 /* \\\ */
1397 /* /// "cmdControlXFer()" */
1399 *======================================================================
1400 * cmdControlXFer(ioreq, unit, base)
1401 *======================================================================
1403 * This is the device UHCMD_CONTROLXFER routine.
1405 * First it check if the usb is in proper state and if user passed arguments
1406 * are valid. If everything is ok, the request is linked to queue of
1407 * pending transfer requests.
1411 WORD cmdControlXFer(struct IOUsbHWReq *ioreq,
1412 struct PCIUnit *unit,
1413 struct PCIDevice *base)
1415 struct PCIController *hc;
1417 KPRINTF(10, ("UHCMD_CONTROLXFER ioreq: 0x%08lx\n", ioreq));
1418 uhwGetUsbState(ioreq, unit, base);
1419 if(!(ioreq->iouh_State & UHSF_OPERATIONAL))
1421 return(UHIOERR_USBOFFLINE);
1423 /* Root hub emulation */
1424 if(ioreq->iouh_DevAddr == unit->hu_RootHubAddr)
1426 return(cmdControlXFerRootHub(ioreq, unit, base));
1429 hc = unit->hu_DevControllers[ioreq->iouh_DevAddr];
1430 if(!hc)
1432 KPRINTF(20, ("No Host controller assigned to device address %ld\n", ioreq->iouh_DevAddr));
1433 return(UHIOERR_HOSTERROR);
1436 ioreq->iouh_Req.io_Flags &= ~IOF_QUICK;
1437 ioreq->iouh_Actual = 0;
1439 Disable();
1440 AddTail(&hc->hc_CtrlXFerQueue, (struct Node *) ioreq);
1441 Enable();
1442 SureCause(base, &hc->hc_CompleteInt);
1444 KPRINTF(10, ("UHCMD_CONTROLXFER processed ioreq: 0x%08lx\n", ioreq));
1445 return(RC_DONTREPLY);
1447 /* \\\ */
1449 /* /// "cmdBulkXFer()" */
1451 *======================================================================
1452 * cmdBulkXFer(ioreq, unit, base)
1453 *======================================================================
1455 * This is the device UHCMD_BULKXFER routine.
1457 * First it check if the usb is in proper state and if user passed arguments
1458 * are valid. If everything is ok, the request is linked to queue of
1459 * pending transfer requests.
1463 WORD cmdBulkXFer(struct IOUsbHWReq *ioreq,
1464 struct PCIUnit *unit,
1465 struct PCIDevice *base)
1467 struct PCIController *hc;
1469 KPRINTF(10, ("UHCMD_BULKXFER ioreq: 0x%08lx\n", ioreq));
1470 uhwGetUsbState(ioreq, unit, base);
1471 if(!(ioreq->iouh_State & UHSF_OPERATIONAL))
1473 return(UHIOERR_USBOFFLINE);
1476 if(ioreq->iouh_Flags & UHFF_LOWSPEED)
1478 return(UHIOERR_BADPARAMS);
1481 hc = unit->hu_DevControllers[ioreq->iouh_DevAddr];
1482 if(!hc)
1484 return(UHIOERR_HOSTERROR);
1487 ioreq->iouh_Req.io_Flags &= ~IOF_QUICK;
1488 ioreq->iouh_Actual = 0;
1490 Disable();
1491 AddTail(&hc->hc_BulkXFerQueue, (struct Node *) ioreq);
1492 Enable();
1493 SureCause(base, &hc->hc_CompleteInt);
1495 KPRINTF(10, ("UHCMD_BULKXFER processed ioreq: 0x%08lx\n", ioreq));
1496 return(RC_DONTREPLY);
1498 /* \\\ */
1500 /* /// "cmdIsoXFer()" */
1502 *======================================================================
1503 * cmdIsoXFer(ioreq, unit, base)
1504 *======================================================================
1506 * This is the device UHCMD_ISOXFER routine.
1508 * First it check if the usb is in proper state and if user passed arguments
1509 * are valid. If everything is ok, the request is linked to queue of
1510 * pending transfer requests.
1514 WORD cmdIsoXFer(struct IOUsbHWReq *ioreq,
1515 struct PCIUnit *unit,
1516 struct PCIDevice *base)
1518 struct PCIController *hc;
1520 KPRINTF(10, ("UHCMD_ISOXFER ioreq: 0x%08lx\n", ioreq));
1521 uhwGetUsbState(ioreq, unit, base);
1522 if(!(ioreq->iouh_State & UHSF_OPERATIONAL))
1524 return(UHIOERR_USBOFFLINE);
1527 if(ioreq->iouh_Flags & UHFF_LOWSPEED)
1529 return(UHIOERR_BADPARAMS);
1532 hc = unit->hu_DevControllers[ioreq->iouh_DevAddr];
1533 if(!hc)
1535 return(UHIOERR_HOSTERROR);
1538 ioreq->iouh_Req.io_Flags &= ~IOF_QUICK;
1539 ioreq->iouh_Actual = 0;
1541 Disable();
1542 AddTail(&hc->hc_IsoXFerQueue, (struct Node *) ioreq);
1543 Enable();
1544 SureCause(base, &hc->hc_CompleteInt);
1546 KPRINTF(10, ("UHCMD_ISOXFER processed ioreq: 0x%08lx\n", ioreq));
1547 return(RC_DONTREPLY);
1549 /* \\\ */
1551 /* /// "cmdIntXFer()" */
1553 *======================================================================
1554 * cmdIntXFer(ioreq, unit, base)
1555 *======================================================================
1557 * This is the device UHCMD_INTXFER routine.
1559 * First it check if the usb is in proper state and if user passed arguments
1560 * are valid. If everything is ok, the request is linked to queue of
1561 * pending transfer requests.
1565 WORD cmdIntXFer(struct IOUsbHWReq *ioreq,
1566 struct PCIUnit *unit,
1567 struct PCIDevice *base)
1569 struct PCIController *hc;
1571 KPRINTF(10, ("UHCMD_INTXFER ioreq: 0x%08lx\n", ioreq));
1572 //uhwDelayMS(1000, unit, base); /* Wait 200 ms */
1573 uhwGetUsbState(ioreq, unit, base);
1574 if(!(ioreq->iouh_State & UHSF_OPERATIONAL))
1576 return(UHIOERR_USBOFFLINE);
1579 /* Root Hub Emulation */
1580 if(ioreq->iouh_DevAddr == unit->hu_RootHubAddr)
1582 return(cmdIntXFerRootHub(ioreq, unit, base));
1585 hc = unit->hu_DevControllers[ioreq->iouh_DevAddr];
1586 if(!hc)
1588 return(UHIOERR_HOSTERROR);
1591 ioreq->iouh_Req.io_Flags &= ~IOF_QUICK;
1592 ioreq->iouh_Actual = 0;
1594 Disable();
1595 AddTail(&hc->hc_IntXFerQueue, (struct Node *) ioreq);
1596 Enable();
1597 SureCause(base, &hc->hc_CompleteInt);
1599 KPRINTF(10, ("UHCMD_INTXFER processed ioreq: 0x%08lx\n", ioreq));
1600 return(RC_DONTREPLY);
1602 /* \\\ */
1604 /* /// "cmdFlush()" */
1606 *======================================================================
1607 * cmdFlush(ioreq, base)
1608 *======================================================================
1610 * This is the device CMD_FLUSH routine.
1612 * This routine abort all pending transfer requests.
1616 WORD cmdFlush(struct IOUsbHWReq *ioreq,
1617 struct PCIUnit *unit,
1618 struct PCIDevice *base)
1620 struct IOUsbHWReq *cmpioreq;
1621 struct PCIController *hc;
1622 UWORD devadrep;
1624 KPRINTF(10, ("CMD_FLUSH ioreq: 0x%08lx\n", ioreq));
1626 Disable();
1627 cmpioreq = (struct IOUsbHWReq *) unit->hu_RHIOQueue.lh_Head;
1628 while(((struct Node *) cmpioreq)->ln_Succ)
1630 Remove(&cmpioreq->iouh_Req.io_Message.mn_Node);
1631 cmpioreq->iouh_Req.io_Error = IOERR_ABORTED;
1632 ReplyMsg(&cmpioreq->iouh_Req.io_Message);
1633 cmpioreq = (struct IOUsbHWReq *) unit->hu_RHIOQueue.lh_Head;
1635 hc = (struct PCIController *) unit->hu_Controllers.lh_Head;
1636 while(hc->hc_Node.ln_Succ)
1638 cmpioreq = (struct IOUsbHWReq *) hc->hc_CtrlXFerQueue.lh_Head;
1639 while(((struct Node *) cmpioreq)->ln_Succ)
1641 Remove(&cmpioreq->iouh_Req.io_Message.mn_Node);
1642 cmpioreq->iouh_Req.io_Error = IOERR_ABORTED;
1643 ReplyMsg(&cmpioreq->iouh_Req.io_Message);
1644 cmpioreq = (struct IOUsbHWReq *) hc->hc_CtrlXFerQueue.lh_Head;
1646 cmpioreq = (struct IOUsbHWReq *) hc->hc_IntXFerQueue.lh_Head;
1647 while(((struct Node *) cmpioreq)->ln_Succ)
1649 Remove(&cmpioreq->iouh_Req.io_Message.mn_Node);
1650 cmpioreq->iouh_Req.io_Error = IOERR_ABORTED;
1651 ReplyMsg(&cmpioreq->iouh_Req.io_Message);
1652 cmpioreq = (struct IOUsbHWReq *) hc->hc_IntXFerQueue.lh_Head;
1654 cmpioreq = (struct IOUsbHWReq *) hc->hc_IsoXFerQueue.lh_Head;
1655 while(((struct Node *) cmpioreq)->ln_Succ)
1657 Remove(&cmpioreq->iouh_Req.io_Message.mn_Node);
1658 cmpioreq->iouh_Req.io_Error = IOERR_ABORTED;
1659 ReplyMsg(&cmpioreq->iouh_Req.io_Message);
1660 cmpioreq = (struct IOUsbHWReq *) hc->hc_IsoXFerQueue.lh_Head;
1662 cmpioreq = (struct IOUsbHWReq *) hc->hc_BulkXFerQueue.lh_Head;
1663 while(((struct Node *) cmpioreq)->ln_Succ)
1665 Remove(&cmpioreq->iouh_Req.io_Message.mn_Node);
1666 cmpioreq->iouh_Req.io_Error = IOERR_ABORTED;
1667 ReplyMsg(&cmpioreq->iouh_Req.io_Message);
1668 cmpioreq = (struct IOUsbHWReq *) hc->hc_BulkXFerQueue.lh_Head;
1670 switch(hc->hc_HCIType)
1672 case HCITYPE_UHCI:
1673 cmpioreq = (struct IOUsbHWReq *) hc->hc_TDQueue.lh_Head;
1674 while(((struct Node *) cmpioreq)->ln_Succ)
1676 Remove(&cmpioreq->iouh_Req.io_Message.mn_Node);
1677 devadrep = (cmpioreq->iouh_DevAddr<<5) + cmpioreq->iouh_Endpoint + ((cmpioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
1678 unit->hu_DevBusyReq[devadrep] = NULL;
1679 uhciFreeQContext(hc, (struct UhciQH *) cmpioreq->iouh_DriverPrivate1);
1680 cmpioreq->iouh_Req.io_Error = IOERR_ABORTED;
1681 ReplyMsg(&cmpioreq->iouh_Req.io_Message);
1682 cmpioreq = (struct IOUsbHWReq *) hc->hc_TDQueue.lh_Head;
1684 break;
1686 case HCITYPE_EHCI:
1687 cmpioreq = (struct IOUsbHWReq *) hc->hc_TDQueue.lh_Head;
1688 while(((struct Node *) cmpioreq)->ln_Succ)
1690 Remove(&cmpioreq->iouh_Req.io_Message.mn_Node);
1691 devadrep = (cmpioreq->iouh_DevAddr<<5) + cmpioreq->iouh_Endpoint + ((cmpioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
1692 unit->hu_DevBusyReq[devadrep] = NULL;
1693 ehciFreeAsyncContext(hc, (struct EhciQH *) cmpioreq->iouh_DriverPrivate1);
1694 cmpioreq->iouh_Req.io_Error = IOERR_ABORTED;
1695 ReplyMsg(&cmpioreq->iouh_Req.io_Message);
1696 cmpioreq = (struct IOUsbHWReq *) hc->hc_TDQueue.lh_Head;
1698 cmpioreq = (struct IOUsbHWReq *) hc->hc_PeriodicTDQueue.lh_Head;
1699 while(((struct Node *) cmpioreq)->ln_Succ)
1701 Remove(&cmpioreq->iouh_Req.io_Message.mn_Node);
1702 devadrep = (cmpioreq->iouh_DevAddr<<5) + cmpioreq->iouh_Endpoint + ((cmpioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
1703 unit->hu_DevBusyReq[devadrep] = NULL;
1704 ehciFreePeriodicContext(hc, (struct EhciQH *) cmpioreq->iouh_DriverPrivate1);
1705 cmpioreq->iouh_Req.io_Error = IOERR_ABORTED;
1706 ReplyMsg(&cmpioreq->iouh_Req.io_Message);
1707 cmpioreq = (struct IOUsbHWReq *) hc->hc_PeriodicTDQueue.lh_Head;
1709 break;
1711 hc = (struct PCIController *) hc->hc_Node.ln_Succ;
1713 Enable();
1714 /* Return success
1716 return RC_OK;
1718 /* \\\ */
/* /// "NSD stuff" */

/* Zero-terminated command list returned to NSCMD_DEVICEQUERY clients;
 * presumably mirrors the commands dispatched by devBeginIO — confirm. */
static
const UWORD NSDSupported[] =
{
    CMD_FLUSH, CMD_RESET,
    UHCMD_QUERYDEVICE, UHCMD_USBRESET,
    UHCMD_USBRESUME, UHCMD_USBSUSPEND,
    UHCMD_USBOPER, UHCMD_CONTROLXFER ,
    UHCMD_ISOXFER, UHCMD_INTXFER,
    UHCMD_BULKXFER,
    NSCMD_DEVICEQUERY, 0
};
1734 WORD cmdNSDeviceQuery(struct IOStdReq *ioreq,
1735 struct PCIUnit *unit,
1736 struct PCIDevice *base)
1738 struct my_NSDeviceQueryResult *query;
1740 query = (struct my_NSDeviceQueryResult *) ioreq->io_Data;
1742 KPRINTF(10, ("NSCMD_DEVICEQUERY ioreq: 0x%08lx query: 0x%08lx\n", ioreq, query));
1744 /* NULL ptr?
1745 Enough data?
1746 Valid request?
1748 if((!query) ||
1749 (ioreq->io_Length < sizeof(struct my_NSDeviceQueryResult)) ||
1750 (query->DevQueryFormat != 0) ||
1751 (query->SizeAvailable != 0))
1753 /* Return error. This is special handling, since iorequest is only
1754 guaranteed to be sizeof(struct IOStdReq). If we'd let our
1755 devBeginIO dispatcher return the error, it would trash some
1756 memory past end of the iorequest (ios2_WireError field).
1758 ioreq->io_Error = IOERR_NOCMD;
1759 TermIO((struct IOUsbHWReq *) ioreq, base);
1761 /* Don't reply, we already did.
1763 return RC_DONTREPLY;
1766 ioreq->io_Actual = query->SizeAvailable
1767 = sizeof(struct my_NSDeviceQueryResult);
1768 query->DeviceType = NSDEVTYPE_USBHARDWARE;
1769 query->DeviceSubType = 0;
1770 query->SupportedCommands = NSDSupported;
1772 /* Return success (note that this will NOT poke ios2_WireError).
1774 return RC_OK;
1776 /* \\\ */
1778 /* /// "TermIO()" */
1780 *===========================================================
1781 * TermIO(ioreq, base)
1782 *===========================================================
1784 * Return completed ioreq to sender.
1788 void TermIO(struct IOUsbHWReq *ioreq,
1789 struct PCIDevice *base)
1791 ioreq->iouh_Req.io_Message.mn_Node.ln_Type = NT_FREEMSG;
1793 /* If not quick I/O, reply the message
1795 if(!(ioreq->iouh_Req.io_Flags & IOF_QUICK))
1797 ReplyMsg(&ioreq->iouh_Req.io_Message);
1800 /* \\\ */
/* /// "cmdAbortIO()" */
/*
 * Try to abort a pending IOUsbHWReq. Under Disable(), every place the
 * request could live is searched in order: the root hub interrupt queue,
 * then per controller the ctrl/int/iso/bulk software queues, and finally
 * the hardware TD queues (where the transfer context must also be freed
 * and the device-busy slot cleared). Returns TRUE if the request was
 * found and removed, FALSE otherwise.
 * NOTE(review): the request is only unlinked here; setting io_Error and
 * replying appears to be left to the devAbortIO caller — confirm.
 */
BOOL cmdAbortIO(struct IOUsbHWReq *ioreq, struct PCIDevice *base)
{
    struct PCIUnit *unit = (struct PCIUnit *) ioreq->iouh_Req.io_Unit;
    struct IOUsbHWReq *cmpioreq;
    struct PCIController *hc;
    UWORD devadrep;
    BOOL foundit = FALSE;

    KPRINTF(10, ("cmdAbort(%08lx)\n", ioreq));

    Disable();
    /* check the root hub interrupt queue first */
    cmpioreq = (struct IOUsbHWReq *) unit->hu_RHIOQueue.lh_Head;
    while(((struct Node *) cmpioreq)->ln_Succ)
    {
        if(ioreq == cmpioreq)
        {
            Remove(&ioreq->iouh_Req.io_Message.mn_Node);
            Enable();
            return TRUE;
        }
        cmpioreq = (struct IOUsbHWReq *) cmpioreq->iouh_Req.io_Message.mn_Node.ln_Succ;
    }

    /* walk all controllers */
    hc = (struct PCIController *) unit->hu_Controllers.lh_Head;
    while(hc->hc_Node.ln_Succ)
    {
        /* not-yet-submitted requests: ctrl, then int, iso, bulk queues */
        cmpioreq = (struct IOUsbHWReq *) hc->hc_CtrlXFerQueue.lh_Head;
        while(((struct Node *) cmpioreq)->ln_Succ)
        {
            if(ioreq == cmpioreq)
            {
                foundit = TRUE;
                break;
            }
            cmpioreq = (struct IOUsbHWReq *) cmpioreq->iouh_Req.io_Message.mn_Node.ln_Succ;
        }
        if(!foundit)
        {
            cmpioreq = (struct IOUsbHWReq *) hc->hc_IntXFerQueue.lh_Head;
            while(((struct Node *) cmpioreq)->ln_Succ)
            {
                if(ioreq == cmpioreq)
                {
                    foundit = TRUE;
                    break;
                }
                cmpioreq = (struct IOUsbHWReq *) cmpioreq->iouh_Req.io_Message.mn_Node.ln_Succ;
            }
        }
        if(!foundit)
        {
            cmpioreq = (struct IOUsbHWReq *) hc->hc_IsoXFerQueue.lh_Head;
            while(((struct Node *) cmpioreq)->ln_Succ)
            {
                if(ioreq == cmpioreq)
                {
                    foundit = TRUE;
                    break;
                }
                cmpioreq = (struct IOUsbHWReq *) cmpioreq->iouh_Req.io_Message.mn_Node.ln_Succ;
            }
        }
        if(!foundit)
        {
            cmpioreq = (struct IOUsbHWReq *) hc->hc_BulkXFerQueue.lh_Head;
            while(((struct Node *) cmpioreq)->ln_Succ)
            {
                if(ioreq == cmpioreq)
                {
                    foundit = TRUE;
                    break;
                }
                cmpioreq = (struct IOUsbHWReq *) cmpioreq->iouh_Req.io_Message.mn_Node.ln_Succ;
            }
        }
        if(foundit)
        {
            Remove(&ioreq->iouh_Req.io_Message.mn_Node);
            break;
        } else {
            // IOReq is probably pending in some transfer structure
            /* slot index: (address<<5) | endpoint | direction bit (0x10 for IN) */
            devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint + ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
            switch(hc->hc_HCIType)
            {
                case HCITYPE_UHCI:
                    cmpioreq = (struct IOUsbHWReq *) hc->hc_TDQueue.lh_Head;
                    while(((struct Node *) cmpioreq)->ln_Succ)
                    {
                        if(ioreq == cmpioreq)
                        {
                            foundit = TRUE;
                            unit->hu_DevBusyReq[devadrep] = NULL;
                            uhciFreeQContext(hc, (struct UhciQH *) ioreq->iouh_DriverPrivate1);
                            break;
                        }
                        cmpioreq = (struct IOUsbHWReq *) cmpioreq->iouh_Req.io_Message.mn_Node.ln_Succ;
                    }
                    break;

                case HCITYPE_OHCI:
                    cmpioreq = (struct IOUsbHWReq *) hc->hc_TDQueue.lh_Head;
                    while(((struct Node *) cmpioreq)->ln_Succ)
                    {
                        if(ioreq == cmpioreq)
                        {
                            foundit = TRUE;
                            unit->hu_DevBusyReq[devadrep] = NULL;
                            ohciFreeEDContext(hc, (struct OhciED *) ioreq->iouh_DriverPrivate1);
                            break;
                        }
                        cmpioreq = (struct IOUsbHWReq *) cmpioreq->iouh_Req.io_Message.mn_Node.ln_Succ;
                    }
                    break;

                case HCITYPE_EHCI:
                    /* async (ctrl/bulk) first, then periodic (int/iso) */
                    cmpioreq = (struct IOUsbHWReq *) hc->hc_TDQueue.lh_Head;
                    while(((struct Node *) cmpioreq)->ln_Succ)
                    {
                        if(ioreq == cmpioreq)
                        {
                            foundit = TRUE;
                            unit->hu_DevBusyReq[devadrep] = NULL;
                            ehciFreeAsyncContext(hc, (struct EhciQH *) ioreq->iouh_DriverPrivate1);
                            break;
                        }
                        cmpioreq = (struct IOUsbHWReq *) cmpioreq->iouh_Req.io_Message.mn_Node.ln_Succ;
                    }
                    cmpioreq = (struct IOUsbHWReq *) hc->hc_PeriodicTDQueue.lh_Head;
                    while(((struct Node *) cmpioreq)->ln_Succ)
                    {
                        if(ioreq == cmpioreq)
                        {
                            foundit = TRUE;
                            unit->hu_DevBusyReq[devadrep] = NULL;
                            ehciFreePeriodicContext(hc, (struct EhciQH *) ioreq->iouh_DriverPrivate1);
                            break;
                        }
                        cmpioreq = (struct IOUsbHWReq *) cmpioreq->iouh_Req.io_Message.mn_Node.ln_Succ;
                    }
                    break;
            }
            if(foundit)
            {
                Remove(&ioreq->iouh_Req.io_Message.mn_Node);
                break;
            }
        }
        hc = (struct PCIController *) hc->hc_Node.ln_Succ;
    }
    Enable();
    if(!foundit)
    {
        KPRINTF(20, ("WARNING, could not abort unknown IOReq %08lx\n", ioreq));
    }
    return(foundit);
}
/* \\\ */
1961 /* /// "uhwCheckRootHubChanges()" */
1962 void uhwCheckRootHubChanges(struct PCIUnit *unit)
1964 struct IOUsbHWReq *ioreq;
1966 if(unit->hu_RootPortChanges && unit->hu_RHIOQueue.lh_Head->ln_Succ)
1968 KPRINTF(1, ("Portchange map %04lx\n", unit->hu_RootPortChanges));
1969 Disable();
1970 ioreq = (struct IOUsbHWReq *) unit->hu_RHIOQueue.lh_Head;
1971 while(((struct Node *) ioreq)->ln_Succ)
1973 Remove(&ioreq->iouh_Req.io_Message.mn_Node);
1974 if((ioreq->iouh_Length > 0) || (unit->hu_RootHubPorts < 8))
1976 *((UBYTE *) ioreq->iouh_Data) = unit->hu_RootPortChanges;
1977 ioreq->iouh_Actual = 1;
1979 else if(ioreq->iouh_Length > 1)
1981 ((UBYTE *) ioreq->iouh_Data)[0] = unit->hu_RootPortChanges;
1982 ((UBYTE *) ioreq->iouh_Data)[1] = unit->hu_RootPortChanges>>8;
1983 ioreq->iouh_Actual = 2;
1986 ReplyMsg(&ioreq->iouh_Req.io_Message);
1987 ioreq = (struct IOUsbHWReq *) unit->hu_RHIOQueue.lh_Head;
1989 unit->hu_RootPortChanges = 0;
1990 Enable();
1993 /* \\\ */
1995 /* /// "uhwCheckSpecialCtrlTransfers()" */
1996 void uhwCheckSpecialCtrlTransfers(struct PCIController *hc, struct IOUsbHWReq *ioreq)
1998 struct PCIUnit *unit = hc->hc_Unit;
2000 /* Clear Feature(Endpoint halt) */
2001 if((ioreq->iouh_SetupData.bmRequestType == (URTF_STANDARD|URTF_ENDPOINT)) &&
2002 (ioreq->iouh_SetupData.bRequest == USR_CLEAR_FEATURE) &&
2003 (ioreq->iouh_SetupData.wValue == AROS_WORD2LE(UFS_ENDPOINT_HALT)))
2005 KPRINTF(10, ("Resetting toggle bit for endpoint %ld\n", AROS_WORD2LE(ioreq->iouh_SetupData.wIndex) & 0xf));
2006 unit->hu_DevDataToggle[(ioreq->iouh_DevAddr<<5)|(AROS_WORD2LE(ioreq->iouh_SetupData.wIndex) & 0xf)|((AROS_WORD2LE(ioreq->iouh_SetupData.wIndex) & 0x80)>>3)] = 0;
2008 else if((ioreq->iouh_SetupData.bmRequestType == (URTF_STANDARD|URTF_DEVICE)) &&
2009 (ioreq->iouh_SetupData.bRequest == USR_SET_ADDRESS))
2011 /* Set Address -> clear all endpoints */
2012 ULONG epnum;
2013 ULONG adr = AROS_WORD2BE(ioreq->iouh_SetupData.wValue)>>3;
2014 KPRINTF(10, ("Resetting toggle bits for device address %ld\n", adr>>5));
2015 for(epnum = 0; epnum < 31; epnum++)
2017 unit->hu_DevDataToggle[adr+epnum] = 0;
2019 // transfer host controller ownership
2020 unit->hu_DevControllers[ioreq->iouh_DevAddr] = NULL;
2021 unit->hu_DevControllers[adr>>5] = hc;
2023 else if((ioreq->iouh_SetupData.bmRequestType == (URTF_CLASS|URTF_OTHER)) &&
2024 (ioreq->iouh_SetupData.bRequest == USR_SET_FEATURE) &&
2025 (ioreq->iouh_SetupData.wValue == AROS_WORD2LE(UFS_PORT_RESET)))
2027 // a hub will be enumerating a device on this host controller soon!
2028 KPRINTF(10, ("Hub RESET caught, assigning Dev0 to %08lx!\n", hc));
2029 unit->hu_DevControllers[0] = hc;
2032 /* \\\ */
2034 /* ---------------------------------------------------------------------- *
2035 * UHCI Specific Stuff *
2036 * ---------------------------------------------------------------------- */
/* /// "uhciFreeQContext()" */
/*
 * Detach a UHCI queue head from the hardware schedule and recycle it
 * together with all of its transfer descriptors. The hardware link is
 * rewritten first, then the software list, with SYNC/EIEIO barriers in
 * between — presumably so the controller never follows a pointer into
 * a half-unlinked structure (PowerPC barrier macros; confirm they are
 * no-ops on other CPUs).
 */
void uhciFreeQContext(struct PCIController *hc, struct UhciQH *uqh)
{
    struct UhciTD *utd = NULL;
    struct UhciTD *nextutd;

    KPRINTF(5, ("Unlinking QContext %08lx\n", uqh));
    // unlink from schedule
    uqh->uqh_Pred->uxx_Link = uqh->uqh_Succ->uxx_Self;
    SYNC;
    EIEIO;
    uqh->uqh_Succ->uxx_Pred = uqh->uqh_Pred;
    uqh->uqh_Pred->uxx_Succ = uqh->uqh_Succ;
    SYNC;
    EIEIO;

    // return all TDs of this QH to the free pool
    nextutd = uqh->uqh_FirstTD;
    while(nextutd)
    {
        KPRINTF(1, ("FreeTD %08lx\n", nextutd));
        utd = nextutd;
        nextutd = (struct UhciTD *) utd->utd_Succ;
        uhciFreeTD(hc, utd);
    }
    uhciFreeQH(hc, uqh);
}
/* \\\ */
2066 /* /// "uhciAllocQH()" */
2067 inline struct UhciQH * uhciAllocQH(struct PCIController *hc)
2069 struct UhciQH *uqh = hc->hc_UhciQHPool;
2071 if(!uqh)
2073 // out of QHs!
2074 KPRINTF(20, ("Out of QHs!\n"));
2075 return NULL;
2078 hc->hc_UhciQHPool = (struct UhciQH *) uqh->uqh_Succ;
2079 return(uqh);
2081 /* \\\ */
2083 /* /// "uhciFreeQH()" */
2084 inline void uhciFreeQH(struct PCIController *hc, struct UhciQH *uqh)
2086 uqh->uqh_Succ = (struct UhciXX *) hc->hc_UhciQHPool;
2087 hc->hc_UhciQHPool = uqh;
2089 /* \\\ */
2091 /* /// "uhciAllocTD()" */
2092 inline struct UhciTD * uhciAllocTD(struct PCIController *hc)
2094 struct UhciTD *utd = hc->hc_UhciTDPool;
2096 if(!utd)
2098 // out of TDs!
2099 KPRINTF(20, ("Out of TDs!\n"));
2100 return NULL;
2103 hc->hc_UhciTDPool = (struct UhciTD *) utd->utd_Succ;
2104 return(utd);
2106 /* \\\ */
2108 /* /// "uhciFreeTD()" */
2109 inline void uhciFreeTD(struct PCIController *hc, struct UhciTD *utd)
2111 utd->utd_Succ = (struct UhciXX *) hc->hc_UhciTDPool;
2112 hc->hc_UhciTDPool = utd;
2114 /* \\\ */
/* /// "uhciUpdateIntTree()" */
/*
 * Rebuild the hardware links of the 9 interrupt queue head levels.
 * Walks the levels and points each QH's hardware link at the last
 * level that actually carries work — presumably so the controller
 * skips empty interrupt queues instead of traversing all of them;
 * confirm against the schedule layout set up at init.
 */
void uhciUpdateIntTree(struct PCIController *hc)
{
    struct UhciXX *uxx;
    struct UhciXX *preduxx;
    struct UhciXX *lastuseduxx;
    UWORD cnt;

    // optimize linkage between queue heads
    preduxx = lastuseduxx = (struct UhciXX *) hc->hc_UhciCtrlQH; //hc->hc_UhciIsoTD;
    for(cnt = 0; cnt < 9; cnt++)
    {
        uxx = (struct UhciXX *) hc->hc_UhciIntQH[cnt];
        /* a successor different from the previous level's QH means this
           level has entries queued — remember it as the new link target */
        if(uxx->uxx_Succ != preduxx)
        {
            lastuseduxx = uxx->uxx_Succ;
        }
        uxx->uxx_Link = lastuseduxx->uxx_Self;
        preduxx = uxx;
    }
}
/* \\\ */
/* /// "uhciCheckPortStatusChange()" */
/*
 * Poll the two UHCI root ports for status changes and accumulate them in
 * hc_PortChangeMap / the unit's hu_RootPortChanges bitmap (bit 0 is the
 * hub itself, ports start at bit 1, hence idx+1). Ports currently owned
 * by a companion EHCI controller are skipped. The status register is
 * written back at the end — NOTE(review): UHCI change bits are
 * write-to-clear, so writing oldval back presumably acknowledges the
 * changes just captured; RESUMEDTX is masked out of the write-back.
 */
void uhciCheckPortStatusChange(struct PCIController *hc)
{
    struct PCIUnit *unit = hc->hc_Unit;
    UWORD oldval;
    UWORD hciport;

    // check for port status change for UHCI and frame rollovers

    for(hciport = 0; hciport < 2; hciport++)
    {
        UWORD portreg;
        UWORD idx = hc->hc_PortNum20[hciport];
        // don't pay attention to UHCI port changes when pwned by EHCI
        if(!unit->hu_EhciOwned[idx])
        {
            portreg = hciport ? UHCI_PORT2STSCTRL : UHCI_PORT1STSCTRL;
            oldval = READREG16_LE(hc->hc_RegBase, portreg);
            if(oldval & UHPF_ENABLECHANGE)
            {
                KPRINTF(10, ("Port %ld (%ld) Enable changed\n", idx, hciport));
                hc->hc_PortChangeMap[hciport] |= UPSF_PORT_ENABLE;
            }
            if(oldval & UHPF_CONNECTCHANGE)
            {
                KPRINTF(10, ("Port %ld (%ld) Connect changed\n", idx, hciport));
                hc->hc_PortChangeMap[hciport] |= UPSF_PORT_CONNECTION;
                if(!(oldval & UHPF_PORTCONNECTED))
                {
                    // device gone: give the port back to the EHCI companion
                    if(unit->hu_PortMap20[idx])
                    {
                        KPRINTF(20, ("Transferring Port %ld back to EHCI\n", idx));
                        unit->hu_EhciOwned[idx] = TRUE;
                    }
                }
            }
            if(oldval & UHPF_RESUMEDTX)
            {
                KPRINTF(10, ("Port %ld (%ld) Resume changed\n", idx, hciport));
                hc->hc_PortChangeMap[hciport] |= UPSF_PORT_SUSPEND|UPSF_PORT_ENABLE;
                oldval &= ~UHPF_RESUMEDTX;
            }
            if(hc->hc_PortChangeMap[hciport])
            {
                unit->hu_RootPortChanges |= 1UL<<(idx+1);
                /*KPRINTF(10, ("Port %ld (%ld) contributes %04lx to portmap %04lx\n",
                             idx, hciport, hc->hc_PortChangeMap[hciport], unit->hu_RootPortChanges));*/
            }
            WRITEREG16_LE(hc->hc_RegBase, portreg, oldval);
        }
    }
}
/* \\\ */
2193 /* /// "uhciHandleFinishedTDs()" */
2194 void uhciHandleFinishedTDs(struct PCIController *hc)
2196 struct PCIDevice *base = hc->hc_Device;
2197 struct PCIUnit *unit = hc->hc_Unit;
2198 struct IOUsbHWReq *ioreq;
2199 struct IOUsbHWReq *nextioreq;
2200 struct UhciQH *uqh;
2201 struct UhciTD *utd;
2202 struct UhciTD *nextutd;
2203 UWORD devadrep;
2204 ULONG len;
2205 ULONG linkelem;
2206 UWORD inspect;
2207 BOOL shortpkt;
2208 ULONG ctrlstatus;
2209 ULONG nextctrlstatus;
2210 ULONG token = 0;
2211 ULONG actual;
2212 BOOL updatetree = FALSE;
2213 BOOL fixsetupterm = FALSE;
2215 KPRINTF(1, ("Checking for work done...\n"));
2216 ioreq = (struct IOUsbHWReq *) hc->hc_TDQueue.lh_Head;
2217 while((nextioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ))
2219 uqh = (struct UhciQH *) ioreq->iouh_DriverPrivate1;
2220 if(uqh)
2222 KPRINTF(1, ("Examining IOReq=%08lx with UQH=%08lx\n", ioreq, uqh));
2223 linkelem = READMEM32_LE(&uqh->uqh_Element);
2224 inspect = 0;
2225 devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint + ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
2226 if(linkelem & UHCI_TERMINATE)
2228 KPRINTF(1, ("UQH terminated %08lx\n", linkelem));
2229 inspect = 2;
2230 } else {
2231 utd = (struct UhciTD *) ((linkelem & UHCI_PTRMASK) - hc->hc_PCIVirtualAdjust - 16); // struct UhciTD starts 16 bytes before physical TD
2232 ctrlstatus = READMEM32_LE(&utd->utd_CtrlStatus);
2233 nextutd = (struct UhciTD *)utd->utd_Succ;
2234 if(!(ctrlstatus & UTCF_ACTIVE) && nextutd)
2236 /* OK, it's not active. Does it look like it's done? Code copied from below.
2237 If not done, check the next TD too. */
2238 if(ctrlstatus & (UTSF_BABBLE|UTSF_STALLED|UTSF_CRCTIMEOUT|UTSF_DATABUFFERERR|UTSF_BITSTUFFERR))
2240 nextutd = 0;
2242 else
2244 token = READMEM32_LE(&utd->utd_Token);
2245 len = (ctrlstatus & UTSM_ACTUALLENGTH) >> UTSS_ACTUALLENGTH;
2246 if((len != (token & UTTM_TRANSLENGTH) >> UTTS_TRANSLENGTH))
2248 nextutd = 0;
2251 if(nextutd)
2253 nextctrlstatus = READMEM32_LE(&nextutd->utd_CtrlStatus);
2256 /* Now, did the element link pointer change while we fetched the status for the pointed at TD?
2257 If so, disregard the gathered information and assume still active. */
2258 if(READMEM32_LE(&uqh->uqh_Element) != linkelem)
2260 /* Oh well, probably still active */
2261 KPRINTF(1, ("Link Element changed, still active.\n"));
2263 else if(!(ctrlstatus & UTCF_ACTIVE) && (nextutd == 0 || !(nextctrlstatus & UTCF_ACTIVE)))
2265 KPRINTF(1, ("CtrlStatus inactive %08lx\n", ctrlstatus));
2266 inspect = 1;
2268 else if(unit->hu_NakTimeoutFrame[devadrep] && (hc->hc_FrameCounter > unit->hu_NakTimeoutFrame[devadrep]))
2270 ioreq->iouh_Req.io_Error = UHIOERR_NAKTIMEOUT;
2271 inspect = 1;
2274 fixsetupterm = FALSE;
2275 if(inspect)
2277 shortpkt = FALSE;
2278 if(inspect < 2) // if all went okay, don't traverse list, assume all bytes successfully transferred
2280 utd = uqh->uqh_FirstTD;
2281 actual = 0;
2284 ctrlstatus = READMEM32_LE(&utd->utd_CtrlStatus);
2285 if(ctrlstatus & UTCF_ACTIVE)
2287 KPRINTF(20, ("Internal error! Still active?!\n"));
2288 if(ctrlstatus & UTSF_BABBLE)
2290 KPRINTF(200, ("HOST CONTROLLER IS DEAD!!!\n"));
2291 ioreq->iouh_Req.io_Error = UHIOERR_HOSTERROR;
2292 CONSTWRITEREG16_LE(hc->hc_RegBase, UHCI_USBCMD, UHCF_HCRESET|UHCF_MAXPACKET64|UHCF_CONFIGURE|UHCF_RUNSTOP);
2293 inspect = 0;
2294 break;
2296 break;
2298 token = READMEM32_LE(&utd->utd_Token);
2299 KPRINTF(1, ("TD=%08lx CS=%08lx Token=%08lx\n", utd, ctrlstatus, token));
2300 if(ctrlstatus & (UTSF_BABBLE|UTSF_STALLED|UTSF_CRCTIMEOUT|UTSF_DATABUFFERERR|UTSF_BITSTUFFERR))
2302 if(ctrlstatus & UTSF_BABBLE)
2304 KPRINTF(20, ("Babble error %08lx/%08lx\n", ctrlstatus, token));
2305 ioreq->iouh_Req.io_Error = UHIOERR_OVERFLOW;
2306 #if 0
2307 // VIA chipset seems to die on babble!?!
2308 KPRINTF(10, ("HW Regs USBCMD=%04lx\n", READREG16_LE(hc->hc_RegBase, UHCI_USBCMD)));
2309 CONSTWRITEREG16_LE(hc->hc_RegBase, UHCI_USBCMD, UHCF_MAXPACKET64|UHCF_CONFIGURE|UHCF_RUNSTOP);
2310 SYNC;
2311 EIEIO;
2312 #endif
2313 //retry
2314 //ctrlstatus &= ~(UTSF_BABBLE|UTSF_STALLED|UTSF_CRCTIMEOUT|UTSF_DATABUFFERERR|UTSF_BITSTUFFERR|UTSF_NAK);
2315 ctrlstatus |= UTCF_ACTIVE;
2316 WRITEMEM32_LE(&utd->utd_CtrlStatus, ctrlstatus);
2317 SYNC;
2318 EIEIO;
2319 inspect = 3;
2320 break;
2322 else if(ctrlstatus & UTSF_CRCTIMEOUT)
2324 KPRINTF(20, ("CRC/Timeout error IOReq=%08lx DIR=%ld\n", ioreq, ioreq->iouh_Dir));
2325 if(ctrlstatus & UTSF_STALLED)
2327 ioreq->iouh_Req.io_Error = UHIOERR_TIMEOUT;
2328 } else {
2329 ioreq->iouh_Req.io_Error = (ioreq->iouh_Dir == UHDIR_IN) ? UHIOERR_CRCERROR : UHIOERR_TIMEOUT;
2332 else if(ctrlstatus & UTSF_STALLED)
2334 KPRINTF(20, ("STALLED!\n"));
2335 ioreq->iouh_Req.io_Error = UHIOERR_STALL;
2337 else if(ctrlstatus & UTSF_BITSTUFFERR)
2339 KPRINTF(20, ("Bitstuff error\n"));
2340 ioreq->iouh_Req.io_Error = UHIOERR_CRCERROR;
2342 else if(ctrlstatus & UTSF_DATABUFFERERR)
2344 KPRINTF(20, ("Databuffer error\n"));
2345 ioreq->iouh_Req.io_Error = UHIOERR_HOSTERROR;
2347 inspect = 0;
2348 break;
2350 if(unit->hu_NakTimeoutFrame[devadrep] && (hc->hc_FrameCounter > unit->hu_NakTimeoutFrame[devadrep]) && (ctrlstatus & UTSF_NAK))
2352 ioreq->iouh_Req.io_Error = UHIOERR_NAKTIMEOUT;
2353 inspect = 0;
2356 len = (ctrlstatus & UTSM_ACTUALLENGTH)>>UTSS_ACTUALLENGTH;
2357 if((len != (token & UTTM_TRANSLENGTH)>>UTTS_TRANSLENGTH))
2359 shortpkt = TRUE;
2361 len = (len+1) & 0x7ff; // get real length
2362 if((token & UTTM_PID)>>UTTS_PID != PID_SETUP) // don't count setup packet
2364 actual += len;
2365 // due to the VIA babble bug workaround, actually more bytes can
2366 // be received than requested, limit the actual value to the upper limit
2367 if(actual > uqh->uqh_Actual)
2369 actual = uqh->uqh_Actual;
2372 if(shortpkt)
2374 break;
2376 } while((utd = (struct UhciTD *) utd->utd_Succ));
2377 if(inspect == 3)
2379 // bail out from babble
2380 ioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ;
2381 continue;
2383 if((actual < uqh->uqh_Actual) && (!ioreq->iouh_Req.io_Error) && (!(ioreq->iouh_Flags & UHFF_ALLOWRUNTPKTS)))
2385 KPRINTF(10, ("Short packet: %ld < %ld\n", actual, ioreq->iouh_Length));
2386 ioreq->iouh_Req.io_Error = UHIOERR_RUNTPACKET;
2388 ioreq->iouh_Actual += actual;
2389 } else {
2390 KPRINTF(10, ("all %ld bytes transferred\n", uqh->uqh_Actual));
2391 ioreq->iouh_Actual += uqh->uqh_Actual;
2393 // due to the short packet, the terminal of a setup packet has not been sent. Please do so.
2394 if(shortpkt && (ioreq->iouh_Req.io_Command == UHCMD_CONTROLXFER))
2396 fixsetupterm = TRUE;
2398 // this is actually no short packet but result of the VIA babble fix
2399 if(shortpkt && (ioreq->iouh_Actual == ioreq->iouh_Length))
2401 shortpkt = FALSE;
2403 unit->hu_DevBusyReq[devadrep] = NULL;
2404 Remove(&ioreq->iouh_Req.io_Message.mn_Node);
2405 uhciFreeQContext(hc, uqh);
2406 if(ioreq->iouh_Req.io_Command == UHCMD_INTXFER)
2408 updatetree = TRUE;
2410 if(inspect)
2412 if(inspect < 2) // otherwise, toggle will be right already
2414 // use next data toggle bit based on last successful transaction
2415 unit->hu_DevDataToggle[devadrep] = (token & UTTF_DATA1) ? FALSE : TRUE;
2417 if((!shortpkt && (ioreq->iouh_Actual < ioreq->iouh_Length)) || fixsetupterm)
2419 // fragmented, do some more work
2420 switch(ioreq->iouh_Req.io_Command)
2422 case UHCMD_CONTROLXFER:
2423 KPRINTF(10, ("Rescheduling CtrlTransfer at %ld of %ld\n", ioreq->iouh_Actual, ioreq->iouh_Length));
2424 AddHead(&hc->hc_CtrlXFerQueue, (struct Node *) ioreq);
2425 break;
2427 case UHCMD_INTXFER:
2428 KPRINTF(10, ("Rescheduling IntTransfer at %ld of %ld\n", ioreq->iouh_Actual, ioreq->iouh_Length));
2429 AddHead(&hc->hc_IntXFerQueue, (struct Node *) ioreq);
2430 break;
2432 case UHCMD_BULKXFER:
2433 KPRINTF(10, ("Rescheduling BulkTransfer at %ld of %ld\n", ioreq->iouh_Actual, ioreq->iouh_Length));
2434 AddHead(&hc->hc_BulkXFerQueue, (struct Node *) ioreq);
2435 break;
2437 default:
2438 KPRINTF(10, ("Uhm, internal error, dunno where to queue this req\n"));
2439 ReplyMsg(&ioreq->iouh_Req.io_Message);
2441 } else {
2442 // check for sucessful clear feature and set address ctrl transfers
2443 if(ioreq->iouh_Req.io_Command == UHCMD_CONTROLXFER)
2445 uhwCheckSpecialCtrlTransfers(hc, ioreq);
2447 ReplyMsg(&ioreq->iouh_Req.io_Message);
2449 } else {
2450 // be sure to save the data toggle bit where the error occurred
2451 unit->hu_DevDataToggle[devadrep] = (token & UTTF_DATA1) ? TRUE : FALSE;
2452 ReplyMsg(&ioreq->iouh_Req.io_Message);
2455 } else {
2456 KPRINTF(20, ("IOReq=%08lx has no UQH!\n", ioreq));
2458 ioreq = nextioreq;
2460 if(updatetree)
2462 KPRINTF(10, ("Updating Tree\n"));
2463 uhciUpdateIntTree(hc);
2466 /* \\\ */
2468 /* /// "uhciScheduleCtrlTDs()" */
2469 void uhciScheduleCtrlTDs(struct PCIController *hc)
2471 struct PCIUnit *unit = hc->hc_Unit;
2472 struct IOUsbHWReq *ioreq;
2473 UWORD devadrep;
2474 struct UhciQH *uqh;
2475 struct UhciTD *setuputd;
2476 struct UhciTD *datautd;
2477 struct UhciTD *termutd;
2478 struct UhciTD *predutd;
2479 ULONG actual;
2480 ULONG ctrlstatus;
2481 ULONG token;
2482 ULONG len;
2483 ULONG phyaddr;
2484 BOOL cont;
2486 /* *** CTRL Transfers *** */
2487 KPRINTF(1, ("Scheduling new CTRL transfers...\n"));
2488 ioreq = (struct IOUsbHWReq *) hc->hc_CtrlXFerQueue.lh_Head;
2489 while(((struct Node *) ioreq)->ln_Succ)
2491 devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint;
2492 KPRINTF(10, ("New CTRL transfer to %ld.%ld: %ld bytes\n", ioreq->iouh_DevAddr, ioreq->iouh_Endpoint, ioreq->iouh_Length));
2493 /* is endpoint already in use or do we have to wait for next transaction */
2494 if(unit->hu_DevBusyReq[devadrep])
2496 KPRINTF(5, ("Endpoint %02lx in use!\n", devadrep));
2497 ioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ;
2498 continue;
2501 uqh = uhciAllocQH(hc);
2502 if(!uqh)
2504 break;
2507 setuputd = uhciAllocTD(hc);
2508 if(!setuputd)
2510 uhciFreeQH(hc, uqh);
2511 break;
2513 termutd = uhciAllocTD(hc);
2514 if(!termutd)
2516 uhciFreeTD(hc, setuputd);
2517 uhciFreeQH(hc, uqh);
2518 break;
2520 uqh->uqh_IOReq = ioreq;
2522 //termutd->utd_QueueHead = setuputd->utd_QueueHead = uqh;
2524 KPRINTF(1, ("SetupTD=%08lx, TermTD=%08lx\n", setuputd, termutd));
2526 // fill setup td
2527 ctrlstatus = UTCF_ACTIVE|UTCF_3ERRORSLIMIT;
2528 if(ioreq->iouh_Flags & UHFF_LOWSPEED)
2530 KPRINTF(5, ("*** LOW SPEED ***\n"));
2531 ctrlstatus |= UTCF_LOWSPEED;
2533 token = (ioreq->iouh_DevAddr<<UTTS_DEVADDR)|(ioreq->iouh_Endpoint<<UTTS_ENDPOINT);
2534 //setuputd->utd_Pred = NULL;
2535 if(ioreq->iouh_Actual)
2537 // this is a continuation of a fragmented ctrl transfer!
2538 KPRINTF(1, ("Continuing FRAGMENT at %ld of %ld\n", ioreq->iouh_Actual, ioreq->iouh_Length));
2539 cont = TRUE;
2540 } else {
2541 cont = FALSE;
2542 uqh->uqh_FirstTD = setuputd;
2543 uqh->uqh_Element = setuputd->utd_Self; // start of queue
2544 WRITEMEM32_LE(&setuputd->utd_CtrlStatus, ctrlstatus);
2545 WRITEMEM32_LE(&setuputd->utd_Token, (PID_SETUP<<UTTS_PID)|token|(7<<UTTS_TRANSLENGTH)|UTTF_DATA0);
2546 WRITEMEM32_LE(&setuputd->utd_BufferPtr, (ULONG) pciGetPhysical(hc, &ioreq->iouh_SetupData));
2549 token |= (ioreq->iouh_SetupData.bmRequestType & URTF_IN) ? PID_IN : PID_OUT;
2550 predutd = setuputd;
2551 actual = ioreq->iouh_Actual;
2552 if(ioreq->iouh_Length - actual)
2554 ctrlstatus |= UTCF_SHORTPACKET;
2555 if(cont)
2557 phyaddr = (ULONG) pciGetPhysical(hc, &(((UBYTE *) ioreq->iouh_Data)[ioreq->iouh_Actual]));
2558 if(!unit->hu_DevDataToggle[devadrep])
2560 // continue with data toggle 0
2561 token |= UTTF_DATA1;
2563 } else {
2564 phyaddr = (ULONG) pciGetPhysical(hc, ioreq->iouh_Data);
2568 datautd = uhciAllocTD(hc);
2569 if(!datautd)
2571 break;
2573 token ^= UTTF_DATA1; // toggle bit
2574 predutd->utd_Link = datautd->utd_Self;
2575 predutd->utd_Succ = (struct UhciXX *) datautd;
2576 //datautd->utd_Pred = (struct UhciXX *) predutd;
2577 //datautd->utd_QueueHead = uqh;
2578 len = ioreq->iouh_Length - actual;
2579 if(len > ioreq->iouh_MaxPktSize)
2581 len = ioreq->iouh_MaxPktSize;
2583 WRITEMEM32_LE(&datautd->utd_CtrlStatus, ctrlstatus);
2584 #if 1
2585 #warning "this workaround for a VIA babble bug will potentially overwrite innocent memory (very rarely), but will avoid the host controller dropping dead completely."
2586 if((len < ioreq->iouh_MaxPktSize) && (ioreq->iouh_SetupData.bmRequestType & URTF_IN))
2588 WRITEMEM32_LE(&datautd->utd_Token, token|((ioreq->iouh_MaxPktSize-1)<<UTTS_TRANSLENGTH)); // no masking need here as len is always >= 1
2589 } else {
2590 WRITEMEM32_LE(&datautd->utd_Token, token|((len-1)<<UTTS_TRANSLENGTH)); // no masking need here as len is always >= 1
2592 #else
2593 WRITEMEM32_LE(&datautd->utd_Token, token|((len-1)<<UTTS_TRANSLENGTH)); // no masking need here as len is always >= 1
2594 #endif
2595 WRITEMEM32_LE(&datautd->utd_BufferPtr, phyaddr);
2596 phyaddr += len;
2597 actual += len;
2598 predutd = datautd;
2599 } while((actual < ioreq->iouh_Length) && (actual - ioreq->iouh_Actual < UHCI_TD_CTRL_LIMIT));
2600 if(actual == ioreq->iouh_Actual)
2602 // not at least one data TD? try again later
2603 uhciFreeTD(hc, setuputd);
2604 uhciFreeTD(hc, termutd);
2605 uhciFreeQH(hc, uqh);
2606 break;
2608 if(cont)
2610 // free Setup packet
2611 KPRINTF(1, ("Freeing setup\n"));
2612 uqh->uqh_FirstTD = (struct UhciTD *) setuputd->utd_Succ;
2613 //uqh->uqh_FirstTD->utd_Pred = NULL;
2614 uqh->uqh_Element = setuputd->utd_Succ->uxx_Self; // start of queue after setup packet
2615 uhciFreeTD(hc, setuputd);
2616 // set toggle for next batch
2617 unit->hu_DevDataToggle[devadrep] = (token & UTTF_DATA1) ? FALSE : TRUE;
2619 } else {
2620 if(cont)
2622 // free Setup packet, assign termination as first packet (no data)
2623 KPRINTF(1, ("Freeing setup (term only)\n"));
2624 uqh->uqh_FirstTD = (struct UhciTD *) termutd;
2625 uqh->uqh_Element = termutd->utd_Self; // start of queue after setup packet
2626 uhciFreeTD(hc, setuputd);
2627 predutd = NULL;
2630 uqh->uqh_Actual = actual - ioreq->iouh_Actual;
2631 ctrlstatus |= UTCF_READYINTEN;
2632 if(actual == ioreq->iouh_Length)
2634 // TERM packet
2635 KPRINTF(1, ("Activating TERM\n"));
2636 token |= UTTF_DATA1;
2637 token ^= (PID_IN^PID_OUT)<<UTTS_PID;
2639 if(predutd)
2641 predutd->utd_Link = termutd->utd_Self;
2642 predutd->utd_Succ = (struct UhciXX *) termutd;
2644 //termutd->utd_Pred = (struct UhciXX *) predutd;
2645 WRITEMEM32_LE(&termutd->utd_CtrlStatus, ctrlstatus);
2646 WRITEMEM32_LE(&termutd->utd_Token, token|(0x7ff<<UTTS_TRANSLENGTH));
2647 CONSTWRITEMEM32_LE(&termutd->utd_Link, UHCI_TERMINATE);
2648 termutd->utd_Succ = NULL;
2649 //uqh->uqh_LastTD = termutd;
2650 } else {
2651 KPRINTF(1, ("Setup data phase fragmented\n"));
2652 // don't create TERM, we don't know the final data toggle bit
2653 // but mark the last data TD for interrupt generation
2654 WRITEMEM32_LE(&predutd->utd_CtrlStatus, ctrlstatus);
2655 uhciFreeTD(hc, termutd);
2656 CONSTWRITEMEM32_LE(&predutd->utd_Link, UHCI_TERMINATE);
2657 predutd->utd_Succ = NULL;
2658 //uqh->uqh_LastTD = predutd;
2661 Remove(&ioreq->iouh_Req.io_Message.mn_Node);
2662 ioreq->iouh_DriverPrivate1 = uqh;
2664 // manage endpoint going busy
2665 unit->hu_DevBusyReq[devadrep] = ioreq;
2666 unit->hu_NakTimeoutFrame[devadrep] = (ioreq->iouh_Flags & UHFF_NAKTIMEOUT) ? hc->hc_FrameCounter + ioreq->iouh_NakTimeout : 0;
2668 Disable();
2669 AddTail(&hc->hc_TDQueue, (struct Node *) ioreq);
2671 // looks good to me, now enqueue this entry (just behind the CtrlQH)
2672 uqh->uqh_Succ = hc->hc_UhciCtrlQH->uqh_Succ;
2673 uqh->uqh_Link = uqh->uqh_Succ->uxx_Self;
2674 SYNC;
2675 EIEIO;
2676 uqh->uqh_Pred = (struct UhciXX *) hc->hc_UhciCtrlQH;
2677 uqh->uqh_Succ->uxx_Pred = (struct UhciXX *) uqh;
2678 hc->hc_UhciCtrlQH->uqh_Succ = (struct UhciXX *) uqh;
2679 hc->hc_UhciCtrlQH->uqh_Link = uqh->uqh_Self;
2680 SYNC;
2681 EIEIO;
2682 Enable();
2684 ioreq = (struct IOUsbHWReq *) hc->hc_CtrlXFerQueue.lh_Head;
2687 /* \\\ */
2689 /* /// "uhciScheduleIntTDs()" */
2690 void uhciScheduleIntTDs(struct PCIController *hc)
2692 struct PCIUnit *unit = hc->hc_Unit;
2693 struct IOUsbHWReq *ioreq;
2694 UWORD cnt;
2695 UWORD devadrep;
2696 struct UhciQH *uqh;
2697 struct UhciQH *intuqh;
2698 struct UhciTD *utd;
2699 struct UhciTD *predutd;
2700 ULONG actual;
2701 ULONG ctrlstatus;
2702 ULONG token;
2703 ULONG len;
2704 ULONG phyaddr;
2706 /* *** INT Transfers *** */
2707 KPRINTF(1, ("Scheduling new INT transfers...\n"));
2708 ioreq = (struct IOUsbHWReq *) hc->hc_IntXFerQueue.lh_Head;
2709 while(((struct Node *) ioreq)->ln_Succ)
2711 devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint + ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
2712 KPRINTF(10, ("New INT transfer to %ld.%ld: %ld bytes\n", ioreq->iouh_DevAddr, ioreq->iouh_Endpoint, ioreq->iouh_Length));
2713 /* is endpoint already in use or do we have to wait for next transaction */
2714 if(unit->hu_DevBusyReq[devadrep])
2716 KPRINTF(5, ("Endpoint %02lx in use!\n", devadrep));
2717 ioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ;
2718 continue;
2721 uqh = uhciAllocQH(hc);
2722 if(!uqh)
2724 break;
2727 uqh->uqh_IOReq = ioreq;
2729 ctrlstatus = UTCF_ACTIVE|UTCF_1ERRORLIMIT;
2730 if(ioreq->iouh_Flags & UHFF_LOWSPEED)
2732 KPRINTF(5, ("*** LOW SPEED ***\n"));
2733 ctrlstatus |= UTCF_LOWSPEED;
2735 token = (ioreq->iouh_DevAddr<<UTTS_DEVADDR)|(ioreq->iouh_Endpoint<<UTTS_ENDPOINT);
2736 token |= (ioreq->iouh_Dir == UHDIR_IN) ? PID_IN : PID_OUT;
2737 predutd = NULL;
2738 actual = ioreq->iouh_Actual;
2739 ctrlstatus |= UTCF_SHORTPACKET;
2740 phyaddr = (ULONG) pciGetPhysical(hc, &(((UBYTE *) ioreq->iouh_Data)[ioreq->iouh_Actual]));
2741 if(unit->hu_DevDataToggle[devadrep])
2743 // continue with data toggle 1
2744 KPRINTF(1, ("Data1\n"));
2745 token |= UTTF_DATA1;
2746 } else {
2747 KPRINTF(1, ("Data0\n"));
2751 utd = uhciAllocTD(hc);
2752 if(!utd)
2754 break;
2756 if(predutd)
2758 WRITEMEM32_LE(&predutd->utd_Link, READMEM32_LE(utd->utd_Self)|UHCI_DFS);
2759 predutd->utd_Succ = (struct UhciXX *) utd;
2760 //utd->utd_Pred = (struct UhciXX *) predutd;
2761 } else {
2762 uqh->uqh_FirstTD = utd;
2763 uqh->uqh_Element = utd->utd_Self;
2764 //utd->utd_Pred = NULL;
2766 //utd->utd_QueueHead = uqh;
2767 len = ioreq->iouh_Length - actual;
2768 if(len > ioreq->iouh_MaxPktSize)
2770 len = ioreq->iouh_MaxPktSize;
2773 WRITEMEM32_LE(&utd->utd_CtrlStatus, ctrlstatus);
2774 WRITEMEM32_LE(&utd->utd_Token, token|(((len-1) & 0x7ff)<<UTTS_TRANSLENGTH));
2775 WRITEMEM32_LE(&utd->utd_BufferPtr, phyaddr);
2776 phyaddr += len;
2777 actual += len;
2778 predutd = utd;
2779 token ^= UTTF_DATA1; // toggle bit
2780 } while((actual < ioreq->iouh_Length) && (actual - ioreq->iouh_Actual < UHCI_TD_INT_LIMIT));
2782 if(!utd)
2784 // not at least one data TD? try again later
2785 uhciFreeQH(hc, uqh);
2786 break;
2789 uqh->uqh_Actual = actual - ioreq->iouh_Actual;
2790 // set toggle for next batch / succesful transfer
2791 unit->hu_DevDataToggle[devadrep] = (token & UTTF_DATA1) ? TRUE : FALSE;
2792 if(unit->hu_DevDataToggle[devadrep])
2794 // continue with data toggle 1
2795 KPRINTF(1, ("NewData1\n"));
2796 } else {
2797 KPRINTF(1, ("NewData0\n"));
2799 ctrlstatus |= UTCF_READYINTEN;
2800 WRITEMEM32_LE(&predutd->utd_CtrlStatus, ctrlstatus);
2801 CONSTWRITEMEM32_LE(&utd->utd_Link, UHCI_TERMINATE);
2802 utd->utd_Succ = NULL;
2803 //uqh->uqh_LastTD = utd;
2805 if(ioreq->iouh_Interval >= 255)
2807 intuqh = hc->hc_UhciIntQH[8]; // 256ms interval
2808 } else {
2809 cnt = 0;
2812 intuqh = hc->hc_UhciIntQH[cnt++];
2813 } while(ioreq->iouh_Interval >= (1<<cnt));
2814 KPRINTF(1, ("Scheduled at level %ld\n", cnt));
2817 Remove(&ioreq->iouh_Req.io_Message.mn_Node);
2818 ioreq->iouh_DriverPrivate1 = uqh;
2820 // manage endpoint going busy
2821 unit->hu_DevBusyReq[devadrep] = ioreq;
2822 unit->hu_NakTimeoutFrame[devadrep] = (ioreq->iouh_Flags & UHFF_NAKTIMEOUT) ? hc->hc_FrameCounter + ioreq->iouh_NakTimeout : 0;
2824 Disable();
2825 AddTail(&hc->hc_TDQueue, (struct Node *) ioreq);
2827 // looks good to me, now enqueue this entry (just behind the right IntQH)
2828 uqh->uqh_Succ = intuqh->uqh_Succ;
2829 uqh->uqh_Link = intuqh->uqh_Self;
2830 SYNC;
2831 EIEIO;
2832 uqh->uqh_Pred = (struct UhciXX *) intuqh;
2833 uqh->uqh_Succ->uxx_Pred = (struct UhciXX *) uqh;
2834 intuqh->uqh_Succ = (struct UhciXX *) uqh;
2835 intuqh->uqh_Link = uqh->uqh_Self;
2836 SYNC;
2837 EIEIO;
2838 Enable();
2840 uhciUpdateIntTree(hc);
2842 ioreq = (struct IOUsbHWReq *) hc->hc_IntXFerQueue.lh_Head;
2845 /* \\\ */
2847 /* /// "uhciScheduleBulkTDs()" */
2848 void uhciScheduleBulkTDs(struct PCIController *hc)
2850 struct PCIUnit *unit = hc->hc_Unit;
2851 struct IOUsbHWReq *ioreq;
2852 UWORD devadrep;
2853 struct UhciQH *uqh;
2854 struct UhciTD *utd;
2855 struct UhciTD *predutd;
2856 ULONG actual;
2857 ULONG ctrlstatus;
2858 ULONG token;
2859 ULONG len;
2860 ULONG phyaddr;
2861 BOOL forcezero;
2863 /* *** BULK Transfers *** */
2864 KPRINTF(1, ("Scheduling new BULK transfers...\n"));
2865 ioreq = (struct IOUsbHWReq *) hc->hc_BulkXFerQueue.lh_Head;
2866 while(((struct Node *) ioreq)->ln_Succ)
2868 devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint + ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
2869 KPRINTF(10, ("New BULK transfer to %ld.%ld: %ld bytes\n", ioreq->iouh_DevAddr, ioreq->iouh_Endpoint, ioreq->iouh_Length));
2870 /* is endpoint already in use or do we have to wait for next transaction */
2871 if(unit->hu_DevBusyReq[devadrep])
2873 KPRINTF(5, ("Endpoint %02lx in use!\n", devadrep));
2874 ioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ;
2875 continue;
2878 uqh = uhciAllocQH(hc);
2879 if(!uqh)
2881 break;
2884 uqh->uqh_IOReq = ioreq;
2886 // fill setup td
2887 ctrlstatus = UTCF_ACTIVE|UTCF_1ERRORLIMIT;
2888 token = (ioreq->iouh_DevAddr<<UTTS_DEVADDR)|(ioreq->iouh_Endpoint<<UTTS_ENDPOINT);
2889 token |= (ioreq->iouh_Dir == UHDIR_IN) ? PID_IN : PID_OUT;
2890 predutd = NULL;
2891 actual = ioreq->iouh_Actual;
2892 ctrlstatus |= UTCF_SHORTPACKET;
2893 phyaddr = (ULONG) pciGetPhysical(hc, &(((UBYTE *) ioreq->iouh_Data)[ioreq->iouh_Actual]));
2894 if(unit->hu_DevDataToggle[devadrep])
2896 // continue with data toggle 1
2897 token |= UTTF_DATA1;
2901 utd = uhciAllocTD(hc);
2902 if(!utd)
2904 break;
2906 forcezero = FALSE;
2907 if(predutd)
2909 WRITEMEM32_LE(&predutd->utd_Link, READMEM32_LE(utd->utd_Self)|UHCI_DFS);
2910 predutd->utd_Succ = (struct UhciXX *) utd;
2911 //utd->utd_Pred = (struct UhciXX *) predutd;
2912 } else {
2913 uqh->uqh_FirstTD = utd;
2914 uqh->uqh_Element = utd->utd_Self;
2915 //utd->utd_Pred = NULL;
2917 //utd->utd_QueueHead = uqh;
2918 len = ioreq->iouh_Length - actual;
2919 if(len > ioreq->iouh_MaxPktSize)
2921 len = ioreq->iouh_MaxPktSize;
2924 WRITEMEM32_LE(&utd->utd_CtrlStatus, ctrlstatus);
2925 WRITEMEM32_LE(&utd->utd_Token, token|(((len-1) & 0x7ff)<<UTTS_TRANSLENGTH));
2926 WRITEMEM32_LE(&utd->utd_BufferPtr, phyaddr);
2927 phyaddr += len;
2928 actual += len;
2929 predutd = utd;
2930 token ^= UTTF_DATA1; // toggle bit
2931 if((actual == ioreq->iouh_Length) && len)
2933 if((ioreq->iouh_Flags & UHFF_NOSHORTPKT) || (ioreq->iouh_Dir == UHDIR_IN) || (actual % ioreq->iouh_MaxPktSize))
2935 // no last zero byte packet
2936 break;
2937 } else {
2938 // avoid rare case that the zero byte packet is reached on TD_BULK_LIMIT
2939 forcezero = TRUE;
2942 } while(forcezero || (len && (actual <= ioreq->iouh_Length) && (actual - ioreq->iouh_Actual < UHCI_TD_BULK_LIMIT)));
2944 if(!utd)
2946 // not at least one data TD? try again later
2947 uhciFreeQH(hc, uqh);
2948 break;
2950 uqh->uqh_Actual = actual - ioreq->iouh_Actual;
2951 // set toggle for next batch / succesful transfer
2952 unit->hu_DevDataToggle[devadrep] = (token & UTTF_DATA1) ? TRUE : FALSE;
2954 ctrlstatus |= UTCF_READYINTEN;
2955 WRITEMEM32_LE(&predutd->utd_CtrlStatus, ctrlstatus);
2956 CONSTWRITEMEM32_LE(&utd->utd_Link, UHCI_TERMINATE);
2957 utd->utd_Succ = NULL;
2958 //uqh->uqh_LastTD = utd;
2960 Remove(&ioreq->iouh_Req.io_Message.mn_Node);
2961 ioreq->iouh_DriverPrivate1 = uqh;
2963 // manage endpoint going busy
2964 unit->hu_DevBusyReq[devadrep] = ioreq;
2965 unit->hu_NakTimeoutFrame[devadrep] = (ioreq->iouh_Flags & UHFF_NAKTIMEOUT) ? hc->hc_FrameCounter + ioreq->iouh_NakTimeout : 0;
2967 Disable();
2968 AddTail(&hc->hc_TDQueue, (struct Node *) ioreq);
2970 // looks good to me, now enqueue this entry (just behind the BulkQH)
2971 uqh->uqh_Succ = hc->hc_UhciBulkQH->uqh_Succ;
2972 uqh->uqh_Link = uqh->uqh_Succ->uxx_Self;
2973 SYNC;
2974 EIEIO;
2975 uqh->uqh_Pred = (struct UhciXX *) hc->hc_UhciBulkQH;
2976 uqh->uqh_Succ->uxx_Pred = (struct UhciXX *) uqh;
2977 hc->hc_UhciBulkQH->uqh_Succ = (struct UhciXX *) uqh;
2978 hc->hc_UhciBulkQH->uqh_Link = uqh->uqh_Self;
2979 SYNC;
2980 EIEIO;
2981 Enable();
2983 ioreq = (struct IOUsbHWReq *) hc->hc_BulkXFerQueue.lh_Head;
2986 /* \\\ */
2988 /* /// "uhciCompleteInt()" */
2989 void uhciCompleteInt(struct PCIController *hc)
2991 ULONG framecnt = READREG16_LE(hc->hc_RegBase, UHCI_FRAMECOUNT);
2993 KPRINTF(1, ("CompleteInt!\n"));
2994 if(framecnt < (hc->hc_FrameCounter & 0xffff))
2996 hc->hc_FrameCounter |= 0xffff;
2997 hc->hc_FrameCounter++;
2998 hc->hc_FrameCounter += framecnt;
2999 KPRINTF(10, ("Frame Counter Rollover %ld\n", hc->hc_FrameCounter));
3002 /* **************** PROCESS DONE TRANSFERS **************** */
3004 uhciCheckPortStatusChange(hc);
3005 uhwCheckRootHubChanges(hc->hc_Unit);
3007 uhciHandleFinishedTDs(hc);
3009 if(hc->hc_CtrlXFerQueue.lh_Head->ln_Succ)
3011 uhciScheduleCtrlTDs(hc);
3014 if(hc->hc_IntXFerQueue.lh_Head->ln_Succ)
3016 uhciScheduleIntTDs(hc);
3019 if(hc->hc_BulkXFerQueue.lh_Head->ln_Succ)
3021 uhciScheduleBulkTDs(hc);
3024 KPRINTF(1, ("CompleteDone\n"));
3026 /* \\\ */
3028 /* /// "uhciIntCode()" */
3029 void uhciIntCode(HIDDT_IRQ_Handler *irq, HIDDT_IRQ_HwInfo *hw)
3031 struct PCIController *hc = (struct PCIController *) irq->h_Data;
3032 struct PCIDevice *base = hc->hc_Device;
3033 UWORD intr;
3035 //KPRINTF(10, ("pciUhciInt()\n"));
3036 intr = READREG16_LE(hc->hc_RegBase, UHCI_USBSTATUS);
3037 if(intr & (UHSF_USBINT|UHSF_USBERRORINT|UHSF_RESUMEDTX|UHSF_HCSYSERROR|UHSF_HCPROCERROR|UHSF_HCHALTED))
3039 WRITEREG16_LE(hc->hc_RegBase, UHCI_USBSTATUS, intr);
3040 KPRINTF(1, ("INT=%04lx\n", intr));
3041 if(intr & (UHSF_HCSYSERROR|UHSF_HCPROCERROR|UHSF_HCHALTED))
3043 KPRINTF(200, ("Host ERROR!\n"));
3044 CONSTWRITEREG16_LE(hc->hc_RegBase, UHCI_USBCMD, UHCF_HCRESET|UHCF_GLOBALRESET|UHCF_MAXPACKET64|UHCF_CONFIGURE);
3045 //CONSTWRITEREG16_LE(hc->hc_RegBase, UHCI_USBINTEN, 0);
3047 if(!hc->hc_Online)
3049 return;
3051 if(intr & (UHSF_USBINT|UHSF_USBERRORINT))
3053 SureCause(base, &hc->hc_CompleteInt);
3057 /* \\\ */
3059 /* ---------------------------------------------------------------------- *
3060 * OHCI Specific Stuff *
3061 * ---------------------------------------------------------------------- */
3063 /* /// "ohciDebugSchedule()" */
3064 void ohciDebugSchedule(struct PCIController *hc)
3066 ULONG ctrlhead;
3067 ULONG hced;
3068 ULONG epcaps;
3069 ULONG headptr;
3070 ULONG headptrbits;
3071 ULONG tailptr;
3072 ULONG nexted;
3073 ULONG ctrl;
3074 ULONG currptr;
3075 ULONG nexttd;
3076 ULONG buffend;
3077 KPRINTF(10, ("*** Schedule debug!!! ***\n"));
3078 ctrlhead = READREG32_LE(hc->hc_RegBase, OHCI_CTRL_HEAD_ED) - hc->hc_PCIVirtualAdjust;
3079 KPRINTF(10, ("CtrlHead = %08lx, should be %08lx\n", ctrlhead, &hc->hc_OhciCtrlHeadED->oed_EPCaps));
3080 hced = ctrlhead;
3083 epcaps = READMEM32_LE(hced);
3084 tailptr = READMEM32_LE(hced+4);
3085 headptr = headptrbits = READMEM32_LE(hced+8);
3086 headptr &= OHCI_PTRMASK;
3087 nexted = READMEM32_LE(hced+12);
3088 KPRINTF(10, ("ED %08lx: EPCaps=%08lx, HeadP=%08lx, TailP=%08lx, NextED=%08lx\n",
3089 hced, epcaps, headptrbits, tailptr, nexted));
3090 if((!(epcaps & OECF_SKIP)) && (tailptr != headptr) && (!(headptrbits & OEHF_HALTED)))
3092 while(tailptr != headptr)
3094 headptr -= hc->hc_PCIVirtualAdjust;
3095 ctrl = READMEM32_LE(headptr);
3096 currptr = READMEM32_LE(headptr+4);
3097 nexttd = READMEM32_LE(headptr+8);
3098 buffend = READMEM32_LE(headptr+12);
3100 KPRINTF(5, (" TD %08lx: Ctrl=%08lx, CurrPtr=%08lx, NextTD=%08lx, BuffEnd=%08lx\n",
3101 headptr, ctrl, currptr, nexttd, buffend));
3102 headptr = nexttd;
3105 if(!nexted)
3107 break;
3109 hced = nexted - hc->hc_PCIVirtualAdjust;
3110 } while(TRUE);
3112 /* \\\ */
3114 /* /// "ohciFreeEDContext()" */
3115 void ohciFreeEDContext(struct PCIController *hc, struct OhciED *oed)
3117 struct OhciTD *otd;
3118 struct OhciTD *nextotd;
3120 KPRINTF(5, ("Unlinking EDContext %08lx\n", oed));
3122 // unlink from schedule
3123 oed->oed_Succ->oed_Pred = oed->oed_Pred;
3124 oed->oed_Pred->oed_Succ = oed->oed_Succ;
3125 oed->oed_Pred->oed_NextED = oed->oed_Succ->oed_Self;
3126 SYNC
3127 EIEIO;
3129 #if 0
3130 // need to make sure that the endpoint is no longer
3131 Disable();
3132 oed->oed_Succ = hc->hc_OhciAsyncFreeED;
3133 hc->hc_OhciAsyncFreeED = oed;
3134 Enable();
3135 #else
3136 Disable();
3137 nextotd = oed->oed_FirstTD;
3138 while(nextotd)
3140 KPRINTF(1, ("FreeTD %08lx\n", nextotd));
3141 otd = nextotd;
3142 nextotd = (struct OhciTD *) otd->otd_Succ;
3143 ohciFreeTD(hc, otd);
3146 ohciFreeED(hc, oed);
3147 Enable();
3148 #endif
3150 /* \\\ */
3152 /* /// "ohciAllocED()" */
3153 inline struct OhciED * ohciAllocED(struct PCIController *hc)
3155 struct OhciED *oed = hc->hc_OhciEDPool;
3157 if(!oed)
3159 // out of QHs!
3160 KPRINTF(20, ("Out of EDs!\n"));
3161 return NULL;
3164 hc->hc_OhciEDPool = oed->oed_Succ;
3165 return(oed);
3167 /* \\\ */
3169 /* /// "ohciFreeED()" */
3170 inline void ohciFreeED(struct PCIController *hc, struct OhciED *oed)
3172 oed->oed_Succ = hc->hc_OhciEDPool;
3173 hc->hc_OhciEDPool = oed;
3175 /* \\\ */
3177 /* /// "ohciAllocTD()" */
3178 inline struct OhciTD * ohciAllocTD(struct PCIController *hc)
3180 struct OhciTD *otd = hc->hc_OhciTDPool;
3182 if(!otd)
3184 // out of TDs!
3185 KPRINTF(20, ("Out of TDs!\n"));
3186 return NULL;
3189 hc->hc_OhciTDPool = otd->otd_Succ;
3190 return(otd);
3192 /* \\\ */
3194 /* /// "ohciFreeTD()" */
3195 inline void ohciFreeTD(struct PCIController *hc, struct OhciTD *otd)
3197 otd->otd_Succ = hc->hc_OhciTDPool;
3198 hc->hc_OhciTDPool = otd;
3200 /* \\\ */
3202 /* /// "ohciUpdateIntTree()" */
3203 void ohciUpdateIntTree(struct PCIController *hc)
3205 struct OhciED *oed;
3206 struct OhciED *predoed;
3207 struct OhciED *lastusedoed;
3208 UWORD cnt;
3210 // optimize linkage between queue heads
3211 predoed = lastusedoed = hc->hc_OhciTermED;
3212 for(cnt = 0; cnt < 5; cnt++)
3214 oed = hc->hc_OhciIntED[cnt];
3215 if(oed->oed_Succ != predoed)
3217 lastusedoed = oed->oed_Succ;
3219 oed->oed_NextED = lastusedoed->oed_Self;
3220 predoed = oed;
3223 /* \\\ */
3225 /* /// "ohciHandleFinishedTDs()" */
3226 void ohciHandleFinishedTDs(struct PCIController *hc)
3228 struct PCIUnit *unit = hc->hc_Unit;
3229 struct IOUsbHWReq *ioreq;
3230 struct IOUsbHWReq *nextioreq;
3231 struct OhciED *oed;
3232 struct OhciTD *otd;
3233 UWORD devadrep;
3234 ULONG len;
3235 ULONG ctrlstatus;
3236 BOOL updatetree = FALSE;
3237 ULONG donehead;
3238 BOOL retire;
3240 KPRINTF(1, ("Checking for work done...\n"));
3241 Disable();
3242 donehead = hc->hc_OhciDoneQueue;
3243 hc->hc_OhciDoneQueue = 0UL;
3244 Enable();
3245 if(!donehead)
3247 KPRINTF(1, ("Nothing to do!\n"));
3248 return;
3250 otd = (struct OhciTD *) (donehead - hc->hc_PCIVirtualAdjust - 16);
3251 KPRINTF(10, ("DoneHead=%08lx, OTD=%08lx, Frame=%ld\n", donehead, otd, READREG32_LE(hc->hc_RegBase, OHCI_FRAMECOUNT)));
3254 oed = otd->otd_ED;
3255 ctrlstatus = READMEM32_LE(&otd->otd_Ctrl);
3256 if(otd->otd_BufferPtr)
3258 // FIXME this will blow up if physical memory is ever going to be discontinuous
3259 len = READMEM32_LE(&otd->otd_BufferPtr) - (READMEM32_LE(&otd->otd_BufferEnd) + 1 - otd->otd_Length);
3260 } else {
3261 len = otd->otd_Length;
3263 ioreq = oed->oed_IOReq;
3264 KPRINTF(1, ("Examining TD %08lx for ED %08lx (IOReq=%08lx), Status %08lx, len=%ld\n", otd, oed, ioreq, ctrlstatus, len));
3265 ioreq->iouh_Actual += len;
3266 retire = (ioreq->iouh_Actual == ioreq->iouh_Length);
3267 if((ctrlstatus & OTCM_DELAYINT) != OTCF_NOINT)
3269 retire = TRUE;
3271 switch((ctrlstatus & OTCM_COMPLETIONCODE)>>OTCS_COMPLETIONCODE)
3273 case (OTCF_CC_NOERROR>>OTCS_COMPLETIONCODE):
3274 break;
3276 case (OTCF_CC_CRCERROR>>OTCS_COMPLETIONCODE):
3277 KPRINTF(200, ("CRC Error!\n"));
3278 ioreq->iouh_Req.io_Error = UHIOERR_CRCERROR;
3279 retire = TRUE;
3280 break;
3282 case (OTCF_CC_BABBLE>>OTCS_COMPLETIONCODE):
3283 KPRINTF(200, ("Babble/Bitstuffing Error!\n"));
3284 ioreq->iouh_Req.io_Error = UHIOERR_CRCERROR;
3285 retire = TRUE;
3286 break;
3288 case (OTCF_CC_WRONGTOGGLE>>OTCS_COMPLETIONCODE):
3289 KPRINTF(200, ("Data toggle mismatch length = %ld\n", len));
3290 break;
3292 case (OTCF_CC_STALL>>OTCS_COMPLETIONCODE):
3293 KPRINTF(200, ("STALLED!\n"));
3294 ioreq->iouh_Req.io_Error = UHIOERR_STALL;
3295 retire = TRUE;
3296 break;
3298 case (OTCF_CC_TIMEOUT>>OTCS_COMPLETIONCODE):
3299 KPRINTF(200, ("TIMEOUT!\n"));
3300 ioreq->iouh_Req.io_Error = UHIOERR_TIMEOUT;
3301 retire = TRUE;
3302 break;
3304 case (OTCF_CC_PIDCORRUPT>>OTCS_COMPLETIONCODE):
3305 KPRINTF(200, ("PID Error!\n"));
3306 ioreq->iouh_Req.io_Error = UHIOERR_CRCERROR;
3307 retire = TRUE;
3308 break;
3310 case (OTCF_CC_WRONGPID>>OTCS_COMPLETIONCODE):
3311 KPRINTF(200, ("Illegal PID!\n"));
3312 ioreq->iouh_Req.io_Error = UHIOERR_CRCERROR;
3313 retire = TRUE;
3314 break;
3316 case (OTCF_CC_OVERFLOW>>OTCS_COMPLETIONCODE):
3317 KPRINTF(200, ("Overflow Error!\n"));
3318 ioreq->iouh_Req.io_Error = UHIOERR_OVERFLOW;
3319 retire = TRUE;
3320 break;
3322 case (OTCF_CC_SHORTPKT>>OTCS_COMPLETIONCODE):
3323 KPRINTF(10, ("Short packet %ld < %ld\n", len, otd->otd_Length));
3324 if((!ioreq->iouh_Req.io_Error) && (!(ioreq->iouh_Flags & UHFF_ALLOWRUNTPKTS)))
3326 ioreq->iouh_Req.io_Error = UHIOERR_RUNTPACKET;
3328 retire = TRUE;
3329 break;
3331 case (OTCF_CC_OVERRUN>>OTCS_COMPLETIONCODE):
3332 KPRINTF(200, ("Data Overrun Error!\n"));
3333 ioreq->iouh_Req.io_Error = UHIOERR_HOSTERROR;
3334 retire = TRUE;
3335 break;
3337 case (OTCF_CC_UNDERRUN>>OTCS_COMPLETIONCODE):
3338 KPRINTF(200, ("Data Underrun Error!\n"));
3339 ioreq->iouh_Req.io_Error = UHIOERR_HOSTERROR;
3340 retire = TRUE;
3341 break;
3343 case (OTCF_CC_INVALID>>OTCS_COMPLETIONCODE):
3344 KPRINTF(200, ("Not touched?!?\n"));
3345 break;
3347 if(READMEM32_LE(&oed->oed_HeadPtr) & OEHF_HALTED)
3349 KPRINTF(100, ("OED halted!\n"));
3350 retire = TRUE;
3353 if(retire)
3355 Remove(&ioreq->iouh_Req.io_Message.mn_Node);
3356 AddHead(&hc->hc_OhciRetireQueue, &ioreq->iouh_Req.io_Message.mn_Node);
3359 if(!otd->otd_NextTD)
3361 break;
3363 KPRINTF(1, ("NextTD=%08lx\n", otd->otd_NextTD));
3364 otd = (struct OhciTD *) ((READMEM32_LE(&otd->otd_NextTD) & OHCI_PTRMASK) - hc->hc_PCIVirtualAdjust - 16);
3365 KPRINTF(1, ("NextOTD = %08lx\n", otd));
3366 } while(TRUE);
3368 ioreq = (struct IOUsbHWReq *) hc->hc_OhciRetireQueue.lh_Head;
3369 while((nextioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ))
3371 Remove(&ioreq->iouh_Req.io_Message.mn_Node);
3372 oed = (struct OhciED *) ioreq->iouh_DriverPrivate1;
3373 if(oed)
3375 KPRINTF(10, ("Retiring IOReq=%08lx ED=%08lx, Frame=%ld\n", ioreq, oed, READREG32_LE(hc->hc_RegBase, OHCI_FRAMECOUNT)));
3377 if(oed->oed_Continue)
3379 ULONG actual = ioreq->iouh_Actual;
3380 ULONG oldenables;
3381 ULONG phyaddr;
3382 struct OhciTD *predotd = NULL;
3384 KPRINTF(10, ("Reloading Bulk transfer at %ld of %ld\n", ioreq->iouh_Actual, ioreq->iouh_Length));
3385 otd = oed->oed_FirstTD;
3386 phyaddr = (ULONG) pciGetPhysical(hc, &(((UBYTE *) ioreq->iouh_Data)[actual]));
3389 len = ioreq->iouh_Length - actual;
3390 if(len > OHCI_PAGE_SIZE)
3392 len = OHCI_PAGE_SIZE;
3394 if((!otd->otd_Succ) && (actual + len == ioreq->iouh_Length) && (!ioreq->iouh_Flags & UHFF_NOSHORTPKT) && ((actual % ioreq->iouh_MaxPktSize) == 0))
3396 // special case -- zero padding would not fit in this run,
3397 // and next time, we would forget about it. So rather abort
3398 // reload now, so the zero padding goes with the next reload
3399 break;
3401 predotd = otd;
3402 otd->otd_Length = len;
3403 KPRINTF(1, ("TD with %ld bytes\n", len));
3404 CONSTWRITEMEM32_LE(&otd->otd_Ctrl, OTCF_CC_INVALID|OTCF_NOINT);
3405 if(otd->otd_Succ)
3407 otd->otd_NextTD = otd->otd_Succ->otd_Self;
3409 if(len)
3411 WRITEMEM32_LE(&otd->otd_BufferPtr, phyaddr);
3412 phyaddr += len - 1;
3413 WRITEMEM32_LE(&otd->otd_BufferEnd, phyaddr);
3414 phyaddr++;
3415 } else {
3416 CONSTWRITEMEM32_LE(&otd->otd_BufferPtr, 0);
3417 CONSTWRITEMEM32_LE(&otd->otd_BufferEnd, 0);
3419 actual += len;
3420 otd = otd->otd_Succ;
3421 } while(otd && ((actual < ioreq->iouh_Length) || (len && (ioreq->iouh_Dir == UHDIR_OUT) && (actual == ioreq->iouh_Length) && (!ioreq->iouh_Flags & UHFF_NOSHORTPKT) && ((actual % ioreq->iouh_MaxPktSize) == 0))));
3422 oed->oed_Continue = (actual < ioreq->iouh_Length);
3423 predotd->otd_NextTD = hc->hc_OhciTermTD->otd_Self;
3425 CONSTWRITEMEM32_LE(&predotd->otd_Ctrl, OTCF_CC_INVALID);
3427 Disable();
3428 AddTail(&hc->hc_TDQueue, &ioreq->iouh_Req.io_Message.mn_Node);
3430 // keep toggle bit
3431 ctrlstatus = READMEM32_LE(oed->oed_HeadPtr) & OEHF_DATA1;
3432 WRITEMEM32_LE(&oed->oed_HeadPtr, READMEM32_LE(oed->oed_FirstTD->otd_Self)|ctrlstatus);
3434 oldenables = READREG32_LE(hc->hc_RegBase, OHCI_CMDSTATUS);
3435 oldenables |= OCSF_BULKENABLE;
3436 WRITEREG32_LE(hc->hc_RegBase, OHCI_CMDSTATUS, oldenables);
3437 SYNC;
3438 EIEIO;
3439 Enable();
3440 } else {
3441 // disable ED
3442 ctrlstatus = READMEM32_LE(&oed->oed_HeadPtr);
3443 ctrlstatus |= OEHF_HALTED;
3444 WRITEMEM32_LE(&oed->oed_HeadPtr, ctrlstatus);
3446 devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint + ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
3447 unit->hu_DevBusyReq[devadrep] = NULL;
3448 unit->hu_DevDataToggle[devadrep] = (ctrlstatus & OEHF_DATA1) ? TRUE : FALSE;
3450 ohciFreeEDContext(hc, oed);
3451 if(ioreq->iouh_Req.io_Command == UHCMD_INTXFER)
3453 updatetree = TRUE;
3455 // check for sucessful clear feature and set address ctrl transfers
3456 if((!ioreq->iouh_Req.io_Error) && (ioreq->iouh_Req.io_Command == UHCMD_CONTROLXFER))
3458 uhwCheckSpecialCtrlTransfers(hc, ioreq);
3460 ReplyMsg(&ioreq->iouh_Req.io_Message);
3462 } else {
3463 KPRINTF(20, ("IOReq=%08lx has no OED!\n", ioreq));
3465 ioreq = nextioreq;
3467 if(updatetree)
3469 ohciUpdateIntTree(hc);
3472 /* \\\ */
/* /// "ohciScheduleCtrlTDs()" */
/*
** Moves as many queued control transfer requests as possible from the
** controller's CtrlXFerQueue onto the OHCI control ED list.
** For each request an ED plus a SETUP stage TD, optional data stage TDs
** and a terminating status TD are built; the request is then moved to
** hc_TDQueue and control list processing is (re)enabled in the HC.
** Requests whose endpoint is still busy stay queued for a later pass.
*/
void ohciScheduleCtrlTDs(struct PCIController *hc)
{
    struct PCIUnit *unit = hc->hc_Unit;
    struct IOUsbHWReq *ioreq;
    UWORD devadrep;              /* device address / endpoint busy-table key */
    struct OhciED *oed;
    struct OhciTD *setupotd;
    struct OhciTD *dataotd;
    struct OhciTD *termotd;
    struct OhciTD *predotd;
    ULONG actual;
    ULONG epcaps;
    ULONG ctrl;
    ULONG len;
    ULONG phyaddr;
    ULONG oldenables;

    /* *** CTRL Transfers *** */
    KPRINTF(1, ("Scheduling new CTRL transfers...\n"));
    ioreq = (struct IOUsbHWReq *) hc->hc_CtrlXFerQueue.lh_Head;
    while(((struct Node *) ioreq)->ln_Succ)
    {
        devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint;
        KPRINTF(10, ("New CTRL transfer to %ld.%ld: %ld bytes\n", ioreq->iouh_DevAddr, ioreq->iouh_Endpoint, ioreq->iouh_Length));
        /* is endpoint already in use or do we have to wait for next transaction */
        if(unit->hu_DevBusyReq[devadrep])
        {
            KPRINTF(5, ("Endpoint %02lx in use!\n", devadrep));
            ioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ;
            continue;
        }

        oed = ohciAllocED(hc);
        if(!oed)
        {
            /* out of EDs -- retry on a later completion interrupt */
            break;
        }

        setupotd = ohciAllocTD(hc);
        if(!setupotd)
        {
            ohciFreeED(hc, oed);
            break;
        }
        termotd = ohciAllocTD(hc);
        if(!termotd)
        {
            ohciFreeTD(hc, setupotd);
            ohciFreeED(hc, oed);
            break;
        }
        oed->oed_IOReq = ioreq;

        KPRINTF(1, ("SetupTD=%08lx, TermTD=%08lx\n", setupotd, termotd));

        // fill setup td
        /* OECF_DIRECTION_TD: direction is taken from each TD's PID code */
        epcaps = (ioreq->iouh_DevAddr<<OECS_DEVADDR)|(ioreq->iouh_Endpoint<<OECS_ENDPOINT)|(ioreq->iouh_MaxPktSize<<OECS_MAXPKTLEN)|OECF_DIRECTION_TD;

        if(ioreq->iouh_Flags & UHFF_LOWSPEED)
        {
            KPRINTF(5, ("*** LOW SPEED ***\n"));
            epcaps |= OECF_LOWSPEED;
        }

        WRITEMEM32_LE(&oed->oed_EPCaps, epcaps);

        oed->oed_TailPtr = hc->hc_OhciTermTD->otd_Self;
        oed->oed_HeadPtr = setupotd->otd_Self;
        oed->oed_FirstTD = setupotd;

        setupotd->otd_ED = oed;
        setupotd->otd_Length = 0; // don't increase io_Actual for that transfer
        CONSTWRITEMEM32_LE(&setupotd->otd_Ctrl, OTCF_PIDCODE_SETUP|OTCF_CC_INVALID|OTCF_NOINT);
        WRITEMEM32_LE(&setupotd->otd_BufferPtr, (ULONG) pciGetPhysical(hc, &ioreq->iouh_SetupData));
        /* +7 = last byte of the 8-byte setup packet */
        WRITEMEM32_LE(&setupotd->otd_BufferEnd, (ULONG) pciGetPhysical(hc, ((UBYTE *) (&ioreq->iouh_SetupData)) + 7));

        /* data stage direction comes from the setup packet's request type */
        ctrl = (ioreq->iouh_SetupData.bmRequestType & URTF_IN) ? (OTCF_PIDCODE_IN|OTCF_CC_INVALID|OTCF_NOINT) : (OTCF_PIDCODE_OUT|OTCF_CC_INVALID|OTCF_NOINT);

        predotd = setupotd;
        if(ioreq->iouh_Length)
        {
            /* build one data TD per OHCI page */
            phyaddr = (ULONG) pciGetPhysical(hc, ioreq->iouh_Data);
            actual = 0;
            do
            {
                dataotd = ohciAllocTD(hc);
                if(!dataotd)
                {
                    predotd->otd_Succ = NULL;
                    break;
                }
                dataotd->otd_ED = oed;
                predotd->otd_Succ = dataotd;
                predotd->otd_NextTD = dataotd->otd_Self;
                len = ioreq->iouh_Length - actual;
                if(len > OHCI_PAGE_SIZE)
                {
                    len = OHCI_PAGE_SIZE;
                }
                dataotd->otd_Length = len;
                KPRINTF(1, ("TD with %ld bytes\n", len));
                WRITEMEM32_LE(&dataotd->otd_Ctrl, ctrl);
                // NOTE(review): assumes the data buffer is physically contiguous -- confirm
                WRITEMEM32_LE(&dataotd->otd_BufferPtr, phyaddr);
                phyaddr += len - 1;
                WRITEMEM32_LE(&dataotd->otd_BufferEnd, phyaddr);
                phyaddr++;
                actual += len;
                predotd = dataotd;
            } while(actual < ioreq->iouh_Length);

            if(actual != ioreq->iouh_Length)
            {
                // out of TDs
                KPRINTF(200, ("Out of TDs for Ctrl Transfer!\n"));
                dataotd = setupotd->otd_Succ;
                ohciFreeTD(hc, setupotd);
                while(dataotd)
                {
                    predotd = dataotd;
                    dataotd = dataotd->otd_Succ;
                    ohciFreeTD(hc, predotd);
                }
                ohciFreeTD(hc, termotd);
                ohciFreeED(hc, oed);
                break;
            }
            predotd->otd_Succ = termotd;
            predotd->otd_NextTD = termotd->otd_Self;
        } else {
            setupotd->otd_Succ = termotd;
            setupotd->otd_NextTD = termotd->otd_Self;
        }

        /* status stage: opposite PID of the data stage, forced DATA1 toggle;
           clearing OTCF_NOINT makes this TD raise the completion interrupt */
        ctrl ^= (OTCF_PIDCODE_IN^OTCF_PIDCODE_OUT)|OTCF_NOINT|OTCF_DATA1|OTCF_TOGGLEFROMTD;

        termotd->otd_Length = 0;
        termotd->otd_ED = oed;
        termotd->otd_Succ = NULL;
        termotd->otd_NextTD = hc->hc_OhciTermTD->otd_Self;
        CONSTWRITEMEM32_LE(&termotd->otd_Ctrl, ctrl);
        CONSTWRITEMEM32_LE(&termotd->otd_BufferPtr, 0);
        CONSTWRITEMEM32_LE(&termotd->otd_BufferEnd, 0);

        Remove(&ioreq->iouh_Req.io_Message.mn_Node);
        ioreq->iouh_DriverPrivate1 = oed;

        // manage endpoint going busy
        unit->hu_DevBusyReq[devadrep] = ioreq;
        unit->hu_NakTimeoutFrame[devadrep] = (ioreq->iouh_Flags & UHFF_NAKTIMEOUT) ? hc->hc_FrameCounter + ioreq->iouh_NakTimeout : 0;

        Disable();
        AddTail(&hc->hc_TDQueue, (struct Node *) ioreq);

        // looks good to me, now enqueue this entry
        oed->oed_Succ = hc->hc_OhciCtrlTailED;
        oed->oed_NextED = oed->oed_Succ->oed_Self;
        oed->oed_Pred = hc->hc_OhciCtrlTailED->oed_Pred;
        oed->oed_Pred->oed_Succ = oed;
        oed->oed_Pred->oed_NextED = oed->oed_Self;
        oed->oed_Succ->oed_Pred = oed;
        SYNC;
        EIEIO;

        KPRINTF(5, ("ED: EPCaps=%08lx, HeadPtr=%08lx, TailPtr=%08lx, NextED=%08lx\n",
                    READMEM32_LE(&oed->oed_EPCaps),
                    READMEM32_LE(&oed->oed_HeadPtr),
                    READMEM32_LE(&oed->oed_TailPtr),
                    READMEM32_LE(&oed->oed_NextED)));

        /* (re)start control list processing in the host controller */
        oldenables = READREG32_LE(hc->hc_RegBase, OHCI_CMDSTATUS);
        if(!(oldenables & OCSF_CTRLENABLE))
        {
            CONSTWRITEREG32_LE(hc->hc_RegBase, OHCI_CTRL_ED, 0);
        }
        oldenables |= OCSF_CTRLENABLE;
        WRITEREG32_LE(hc->hc_RegBase, OHCI_CMDSTATUS, oldenables);
        SYNC;
        EIEIO;
        Enable();

        /* the scheduled request left the queue -- rescan from the head */
        ioreq = (struct IOUsbHWReq *) hc->hc_CtrlXFerQueue.lh_Head;
    }
}
/* \\\ */
/* /// "ohciScheduleIntTDs()" */
/*
** Moves queued interrupt transfer requests from IntXFerQueue onto the
** OHCI interrupt ED tree. Each scheduled ED is hooked in directly
** behind the static interrupt ED matching the requested poll interval.
*/
void ohciScheduleIntTDs(struct PCIController *hc)
{
    struct PCIUnit *unit = hc->hc_Unit;
    struct IOUsbHWReq *ioreq;
    UWORD devadrep;              /* device address / endpoint busy-table key */
    struct OhciED *intoed;       /* static interrupt tree ED to attach behind */
    struct OhciED *oed;
    struct OhciTD *otd;
    struct OhciTD *predotd;
    ULONG actual;
    ULONG epcaps;
    ULONG len;
    ULONG phyaddr;

    /* *** INT Transfers *** */
    KPRINTF(1, ("Scheduling new INT transfers...\n"));
    ioreq = (struct IOUsbHWReq *) hc->hc_IntXFerQueue.lh_Head;
    while(((struct Node *) ioreq)->ln_Succ)
    {
        /* IN endpoints use an extra 0x10 offset in the busy table */
        devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint + ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
        KPRINTF(10, ("New INT transfer to %ld.%ld: %ld bytes\n", ioreq->iouh_DevAddr, ioreq->iouh_Endpoint, ioreq->iouh_Length));
        /* is endpoint already in use or do we have to wait for next transaction */
        if(unit->hu_DevBusyReq[devadrep])
        {
            KPRINTF(5, ("Endpoint %02lx in use!\n", devadrep));
            ioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ;
            continue;
        }

        oed = ohciAllocED(hc);
        if(!oed)
        {
            break;
        }

        oed->oed_IOReq = ioreq;

        epcaps = (ioreq->iouh_DevAddr<<OECS_DEVADDR)|(ioreq->iouh_Endpoint<<OECS_ENDPOINT)|(ioreq->iouh_MaxPktSize<<OECS_MAXPKTLEN);
        epcaps |= (ioreq->iouh_Dir == UHDIR_IN) ? OECF_DIRECTION_IN : OECF_DIRECTION_OUT;

        if(ioreq->iouh_Flags & UHFF_LOWSPEED)
        {
            KPRINTF(5, ("*** LOW SPEED ***\n"));
            epcaps |= OECF_LOWSPEED;
        }

        WRITEMEM32_LE(&oed->oed_EPCaps, epcaps);
        oed->oed_TailPtr = hc->hc_OhciTermTD->otd_Self;

        /* build the TD chain, one TD per OHCI page */
        predotd = NULL;
        phyaddr = (ULONG) pciGetPhysical(hc, ioreq->iouh_Data);
        actual = 0;
        do
        {
            otd = ohciAllocTD(hc);
            if(!otd)
            {
                predotd->otd_Succ = NULL; // NOTE(review): predotd is NULL if the FIRST alloc fails -- verify
                break;
            }
            otd->otd_ED = oed;
            if(predotd)
            {
                predotd->otd_Succ = otd;
                predotd->otd_NextTD = otd->otd_Self;
            } else {
                /* first TD: hook into ED head, restoring the saved data toggle */
                WRITEMEM32_LE(&oed->oed_HeadPtr, READMEM32_LE(otd->otd_Self)|(unit->hu_DevDataToggle[devadrep] ? OEHF_DATA1 : 0));
                oed->oed_FirstTD = otd;
            }
            len = ioreq->iouh_Length - actual;
            if(len > OHCI_PAGE_SIZE)
            {
                len = OHCI_PAGE_SIZE;
            }
            otd->otd_Length = len;
            KPRINTF(1, ("TD with %ld bytes\n", len));
            CONSTWRITEMEM32_LE(&otd->otd_Ctrl, OTCF_CC_INVALID|OTCF_NOINT);
            if(len)
            {
                WRITEMEM32_LE(&otd->otd_BufferPtr, phyaddr);
                phyaddr += len - 1;
                WRITEMEM32_LE(&otd->otd_BufferEnd, phyaddr);
                phyaddr++;
            } else {
                /* zero-length TD has no buffer */
                CONSTWRITEMEM32_LE(&otd->otd_BufferPtr, 0);
                CONSTWRITEMEM32_LE(&otd->otd_BufferEnd, 0);
            }
            actual += len;
            predotd = otd;
        } while(actual < ioreq->iouh_Length);

        if(actual != ioreq->iouh_Length)
        {
            // out of TDs
            KPRINTF(200, ("Out of TDs for Int Transfer!\n"));
            otd = oed->oed_FirstTD;
            while(otd)
            {
                predotd = otd;
                otd = otd->otd_Succ;
                ohciFreeTD(hc, predotd);
            }
            ohciFreeED(hc, oed);
            break;
        }
        predotd->otd_Succ = NULL;
        predotd->otd_NextTD = hc->hc_OhciTermTD->otd_Self;

        /* last TD raises the completion interrupt (OTCF_NOINT cleared) */
        CONSTWRITEMEM32_LE(&predotd->otd_Ctrl, OTCF_CC_INVALID);

        /* pick the static interrupt ED matching the poll interval */
        if(ioreq->iouh_Interval >= 31)
        {
            intoed = hc->hc_OhciIntED[4]; // 32ms interval
        } else {
            UWORD cnt = 0;
            do
            {
                intoed = hc->hc_OhciIntED[cnt++];
            } while(ioreq->iouh_Interval >= (1<<cnt));
        }

        Remove(&ioreq->iouh_Req.io_Message.mn_Node);
        ioreq->iouh_DriverPrivate1 = oed;

        // manage endpoint going busy
        unit->hu_DevBusyReq[devadrep] = ioreq;
        unit->hu_NakTimeoutFrame[devadrep] = (ioreq->iouh_Flags & UHFF_NAKTIMEOUT) ? hc->hc_FrameCounter + ioreq->iouh_NakTimeout : 0;

        Disable();
        AddTail(&hc->hc_TDQueue, (struct Node *) ioreq);

        // looks good to me, now enqueue this entry (behind Int head)
        oed->oed_Succ = intoed->oed_Succ;
        oed->oed_NextED = intoed->oed_Succ->oed_Self;
        oed->oed_Pred = intoed;
        intoed->oed_Succ = oed;
        intoed->oed_NextED = oed->oed_Self;
        oed->oed_Succ->oed_Pred = oed;
        SYNC;
        EIEIO;

        KPRINTF(5, ("ED: EPCaps=%08lx, HeadPtr=%08lx, TailPtr=%08lx, NextED=%08lx\n",
                    READMEM32_LE(&oed->oed_EPCaps),
                    READMEM32_LE(&oed->oed_HeadPtr),
                    READMEM32_LE(&oed->oed_TailPtr),
                    READMEM32_LE(&oed->oed_NextED)));
        Enable();

        /* the scheduled request left the queue -- rescan from the head */
        ioreq = (struct IOUsbHWReq *) hc->hc_IntXFerQueue.lh_Head;
    }
}
/* \\\ */
3814 /* /// "ohciScheduleBulkTDs()" */
3815 void ohciScheduleBulkTDs(struct PCIController *hc)
3817 struct PCIUnit *unit = hc->hc_Unit;
3818 struct IOUsbHWReq *ioreq;
3819 UWORD devadrep;
3820 struct OhciED *oed;
3821 struct OhciTD *otd;
3822 struct OhciTD *predotd;
3823 ULONG actual;
3824 ULONG epcaps;
3825 ULONG len;
3826 ULONG phyaddr;
3827 ULONG oldenables;
3829 /* *** BULK Transfers *** */
3830 KPRINTF(1, ("Scheduling new BULK transfers...\n"));
3831 ioreq = (struct IOUsbHWReq *) hc->hc_BulkXFerQueue.lh_Head;
3832 while(((struct Node *) ioreq)->ln_Succ)
3834 devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint + ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
3835 KPRINTF(10, ("New BULK transfer to %ld.%ld: %ld bytes\n", ioreq->iouh_DevAddr, ioreq->iouh_Endpoint, ioreq->iouh_Length));
3836 /* is endpoint already in use or do we have to wait for next transaction */
3837 if(unit->hu_DevBusyReq[devadrep])
3839 KPRINTF(5, ("Endpoint %02lx in use!\n", devadrep));
3840 ioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ;
3841 continue;
3844 oed = ohciAllocED(hc);
3845 if(!oed)
3847 break;
3850 oed->oed_IOReq = ioreq;
3852 epcaps = (ioreq->iouh_DevAddr<<OECS_DEVADDR)|(ioreq->iouh_Endpoint<<OECS_ENDPOINT)|(ioreq->iouh_MaxPktSize<<OECS_MAXPKTLEN);
3853 epcaps |= (ioreq->iouh_Dir == UHDIR_IN) ? OECF_DIRECTION_IN : OECF_DIRECTION_OUT;
3855 if(ioreq->iouh_Flags & UHFF_LOWSPEED)
3857 KPRINTF(5, ("*** LOW SPEED ***\n"));
3858 epcaps |= OECF_LOWSPEED;
3861 WRITEMEM32_LE(&oed->oed_EPCaps, epcaps);
3862 oed->oed_TailPtr = hc->hc_OhciTermTD->otd_Self;
3864 predotd = NULL;
3865 phyaddr = (ULONG) pciGetPhysical(hc, ioreq->iouh_Data);
3866 actual = 0;
3869 if((actual >= OHCI_TD_BULK_LIMIT) && (actual < ioreq->iouh_Length))
3871 KPRINTF(10, ("Bulk too large, splitting...\n"));
3872 break;
3874 otd = ohciAllocTD(hc);
3875 if(!otd)
3877 predotd->otd_Succ = NULL;
3878 break;
3880 otd->otd_ED = oed;
3881 if(predotd)
3883 predotd->otd_Succ = otd;
3884 predotd->otd_NextTD = otd->otd_Self;
3885 } else {
3886 WRITEMEM32_LE(&oed->oed_HeadPtr, READMEM32_LE(otd->otd_Self)|(unit->hu_DevDataToggle[devadrep] ? OEHF_DATA1 : 0));
3887 oed->oed_FirstTD = otd;
3889 len = ioreq->iouh_Length - actual;
3890 if(len > OHCI_PAGE_SIZE)
3892 len = OHCI_PAGE_SIZE;
3894 otd->otd_Length = len;
3895 KPRINTF(1, ("TD with %ld bytes\n", len));
3896 CONSTWRITEMEM32_LE(&otd->otd_Ctrl, OTCF_CC_INVALID|OTCF_NOINT);
3897 if(len)
3899 WRITEMEM32_LE(&otd->otd_BufferPtr, phyaddr);
3900 phyaddr += len - 1;
3901 WRITEMEM32_LE(&otd->otd_BufferEnd, phyaddr);
3902 phyaddr++;
3903 } else {
3904 CONSTWRITEMEM32_LE(&otd->otd_BufferPtr, 0);
3905 CONSTWRITEMEM32_LE(&otd->otd_BufferEnd, 0);
3907 actual += len;
3909 predotd = otd;
3910 } while((actual < ioreq->iouh_Length) || (len && (ioreq->iouh_Dir == UHDIR_OUT) && (actual == ioreq->iouh_Length) && (!ioreq->iouh_Flags & UHFF_NOSHORTPKT) && ((actual % ioreq->iouh_MaxPktSize) == 0)));
3912 if(!actual)
3914 // out of TDs
3915 KPRINTF(200, ("Out of TDs for Bulk Transfer!\n"));
3916 otd = oed->oed_FirstTD;
3917 while(otd)
3919 predotd = otd;
3920 otd = otd->otd_Succ;
3921 ohciFreeTD(hc, predotd);
3923 ohciFreeED(hc, oed);
3924 break;
3926 oed->oed_Continue = (actual < ioreq->iouh_Length);
3927 predotd->otd_Succ = NULL;
3928 predotd->otd_NextTD = hc->hc_OhciTermTD->otd_Self;
3930 CONSTWRITEMEM32_LE(&predotd->otd_Ctrl, OTCF_CC_INVALID);
3932 Remove(&ioreq->iouh_Req.io_Message.mn_Node);
3933 ioreq->iouh_DriverPrivate1 = oed;
3935 // manage endpoint going busy
3936 unit->hu_DevBusyReq[devadrep] = ioreq;
3937 unit->hu_NakTimeoutFrame[devadrep] = (ioreq->iouh_Flags & UHFF_NAKTIMEOUT) ? hc->hc_FrameCounter + ioreq->iouh_NakTimeout : 0;
3939 Disable();
3940 AddTail(&hc->hc_TDQueue, (struct Node *) ioreq);
3942 // looks good to me, now enqueue this entry
3943 oed->oed_Succ = hc->hc_OhciBulkTailED;
3944 oed->oed_NextED = oed->oed_Succ->oed_Self;
3945 oed->oed_Pred = hc->hc_OhciBulkTailED->oed_Pred;
3946 oed->oed_Pred->oed_Succ = oed;
3947 oed->oed_Pred->oed_NextED = oed->oed_Self;
3948 oed->oed_Succ->oed_Pred = oed;
3949 SYNC;
3950 EIEIO;
3952 KPRINTF(10, ("Activating BULK at %ld\n", READREG32_LE(hc->hc_RegBase, OHCI_FRAMECOUNT)));
3954 KPRINTF(5, ("ED: EPCaps=%08lx, HeadPtr=%08lx, TailPtr=%08lx, NextED=%08lx\n",
3955 READMEM32_LE(&oed->oed_EPCaps),
3956 READMEM32_LE(&oed->oed_HeadPtr),
3957 READMEM32_LE(&oed->oed_TailPtr),
3958 READMEM32_LE(&oed->oed_NextED)));
3960 oldenables = READREG32_LE(hc->hc_RegBase, OHCI_CMDSTATUS);
3961 if(!(oldenables & OCSF_BULKENABLE))
3963 CONSTWRITEREG32_LE(hc->hc_RegBase, OHCI_BULK_ED, 0);
3965 oldenables |= OCSF_BULKENABLE;
3966 WRITEREG32_LE(hc->hc_RegBase, OHCI_CMDSTATUS, oldenables);
3967 SYNC;
3968 EIEIO;
3969 Enable();
3970 ioreq = (struct IOUsbHWReq *) hc->hc_BulkXFerQueue.lh_Head;
3973 /* \\\ */
3975 /* /// "ohciCompleteInt()" */
3976 void ohciCompleteInt(struct PCIController *hc)
3978 ULONG framecnt = READREG32_LE(hc->hc_RegBase, OHCI_FRAMECOUNT);
3980 KPRINTF(1, ("CompleteInt!\n"));
3981 if(framecnt < (hc->hc_FrameCounter & 0xffff))
3983 hc->hc_FrameCounter |= 0xffff;
3984 hc->hc_FrameCounter++;
3985 hc->hc_FrameCounter += framecnt;
3986 KPRINTF(10, ("Frame Counter Rollover %ld\n", hc->hc_FrameCounter));
3987 } else {
3988 hc->hc_FrameCounter = (hc->hc_FrameCounter & 0xffff0000)|framecnt;
3991 /* **************** PROCESS DONE TRANSFERS **************** */
3993 if(hc->hc_OhciDoneQueue)
3995 ohciHandleFinishedTDs(hc);
3998 if(hc->hc_CtrlXFerQueue.lh_Head->ln_Succ)
4000 ohciScheduleCtrlTDs(hc);
4003 if(hc->hc_IntXFerQueue.lh_Head->ln_Succ)
4005 ohciScheduleIntTDs(hc);
4008 if(hc->hc_BulkXFerQueue.lh_Head->ln_Succ)
4010 ohciScheduleBulkTDs(hc);
4013 KPRINTF(1, ("CompleteDone\n"));
4015 /* \\\ */
/* /// "ohciIntCode()" */
/*
** Hardware interrupt handler for an OHCI controller.
** Collects the HCCA done head into hc_OhciDoneQueue, acknowledges the
** pending interrupt causes, records root hub port changes and posts
** hc_CompleteInt for deferred TD retirement.
*/
void ohciIntCode(HIDDT_IRQ_Handler *irq, HIDDT_IRQ_HwInfo *hw)
{
    struct PCIController *hc = (struct PCIController *) irq->h_Data;
    struct PCIDevice *base = hc->hc_Device;
    struct PCIUnit *unit = hc->hc_Unit;
    ULONG intr = 0;
    ULONG donehead = READMEM32_LE(&hc->hc_OhciHCCA->oha_DoneHead);

    if(donehead)
    {
        intr = OISF_DONEHEAD;
        if(donehead & 1)
        {
            /* LSb set in the done head: further causes pending in INTSTATUS */
            intr |= READREG32_LE(hc->hc_RegBase, OHCI_INTSTATUS);
        }
        donehead &= OHCI_PTRMASK;
        KPRINTF(5, ("New Donehead %08lx for old %08lx\n", donehead, hc->hc_OhciDoneQueue));
        if(hc->hc_OhciDoneQueue)
        {
            /* append the previously collected done queue to the new chain */
            struct OhciTD *donetd = (struct OhciTD *) (donehead - hc->hc_PCIVirtualAdjust - 16);
            while(donetd->otd_NextTD)
            {
                donetd = (struct OhciTD *) (donetd->otd_NextTD - hc->hc_PCIVirtualAdjust - 16);
            }
            WRITEMEM32_LE(&donetd->otd_NextTD, hc->hc_OhciDoneQueue);
        }
        hc->hc_OhciDoneQueue = donehead;
        /* hand the HCCA done head back to the controller */
        CONSTWRITEMEM32_LE(&hc->hc_OhciHCCA->oha_DoneHead, 0);
    } else {
        intr = READREG32_LE(hc->hc_RegBase, OHCI_INTSTATUS);
    }
    if(intr & hc->hc_PCIIntEnMask)
    {
        /* acknowledge all causes handled in this pass */
        WRITEREG32_LE(hc->hc_RegBase, OHCI_INTSTATUS, intr);
        KPRINTF(1, ("INT=%02lx\n", intr));
        if(intr & OISF_HOSTERROR)
        {
            KPRINTF(200, ("Host ERROR!\n"));
        }
        if(intr & OISF_SCHEDOVERRUN)
        {
            KPRINTF(200, ("Schedule overrun!\n"));
        }
        if(!hc->hc_Online)
        {
            return;
        }
        if(intr & OISF_FRAMECOUNTOVER)
        {
            /* NOTE(review): uses 0x7fff and "|=" here, while ohciCompleteInt
               uses 0xffff and "+=" -- presumably because this interrupt fires
               on frame number bit 15 toggling; verify against the OHCI spec */
            ULONG framecnt = READREG32_LE(hc->hc_RegBase, OHCI_FRAMECOUNT);
            hc->hc_FrameCounter |= 0x7fff;
            hc->hc_FrameCounter++;
            hc->hc_FrameCounter |= framecnt;
            KPRINTF(10, ("Frame Counter Rollover %ld\n", hc->hc_FrameCounter));
        }
        if(intr & OISF_HUBCHANGE)
        {
            /* accumulate root hub port status changes per port */
            UWORD hciport;
            ULONG oldval;
            UWORD portreg = OHCI_PORTSTATUS;
            for(hciport = 0; hciport < hc->hc_NumPorts; hciport++, portreg += 4)
            {
                oldval = READREG32_LE(hc->hc_RegBase, portreg);
                if(oldval & OHPF_OVERCURRENTCHG)
                {
                    hc->hc_PortChangeMap[hciport] |= UPSF_PORT_OVER_CURRENT;
                }
                if(oldval & OHPF_RESETCHANGE)
                {
                    hc->hc_PortChangeMap[hciport] |= UPSF_PORT_RESET;
                }
                if(oldval & OHPF_ENABLECHANGE)
                {
                    hc->hc_PortChangeMap[hciport] |= UPSF_PORT_ENABLE;
                }
                if(oldval & OHPF_CONNECTCHANGE)
                {
                    hc->hc_PortChangeMap[hciport] |= UPSF_PORT_CONNECTION;
                }
                if(oldval & OHPF_RESUMEDTX)
                {
                    hc->hc_PortChangeMap[hciport] |= UPSF_PORT_SUSPEND;
                }
                KPRINTF(20, ("PCI Int Port %ld (glob %ld) Change %08lx\n", hciport, hc->hc_PortNum20[hciport] + 1, oldval));
                if(hc->hc_PortChangeMap[hciport])
                {
                    unit->hu_RootPortChanges |= 1UL<<(hc->hc_PortNum20[hciport] + 1);
                }
            }
            uhwCheckRootHubChanges(unit);
        }
        if(intr & OISF_DONEHEAD)
        {
            KPRINTF(10, ("DoneHead %ld\n", READREG32_LE(hc->hc_RegBase, OHCI_FRAMECOUNT)));
            /* defer TD retirement to the completion soft interrupt */
            SureCause(base, &hc->hc_CompleteInt);
        }
    }
}
/* \\\ */
4118 /* ---------------------------------------------------------------------- *
4119 * EHCI Specific Stuff *
4120 * ---------------------------------------------------------------------- */
/* /// "ehciFreeAsyncContext()" */
/*
** Unlinks a QH from the EHCI async schedule and queues it for deferred
** freeing. The QH cannot be recycled immediately because the controller
** may still hold a cached pointer to it, so it is pushed onto
** hc_EhciAsyncFreeQH and the async advance doorbell is rung
** (presumably the doorbell interrupt handler then releases the QH --
** that code is outside this file section).
*/
void ehciFreeAsyncContext(struct PCIController *hc, struct EhciQH *eqh)
{
    KPRINTF(5, ("Unlinking AsyncContext %08lx\n", eqh));
    // unlink from schedule
    eqh->eqh_Pred->eqh_NextQH = eqh->eqh_Succ->eqh_Self; /* hardware link first */
    SYNC;
    EIEIO;
    eqh->eqh_Succ->eqh_Pred = eqh->eqh_Pred;             /* then software links */
    eqh->eqh_Pred->eqh_Succ = eqh->eqh_Succ;
    SYNC;
    EIEIO;

    // need to wait until an async schedule rollover before freeing these
    Disable();
    eqh->eqh_Succ = hc->hc_EhciAsyncFreeQH;
    hc->hc_EhciAsyncFreeQH = eqh;
    // activate doorbell
    WRITEREG32_LE(hc->hc_RegBase, EHCI_USBCMD, hc->hc_EhciUsbCmd|EHUF_ASYNCDOORBELL);
    Enable();
}
/* \\\ */
4145 /* /// "ehciFreePeriodicContext()" */
4146 void ehciFreePeriodicContext(struct PCIController *hc, struct EhciQH *eqh)
4148 struct EhciTD *etd;
4149 struct EhciTD *nextetd;
4151 KPRINTF(5, ("Unlinking PeriodicContext %08lx\n", eqh));
4152 // unlink from schedule
4153 eqh->eqh_Pred->eqh_NextQH = eqh->eqh_Succ->eqh_Self;
4154 SYNC;
4155 EIEIO;
4156 eqh->eqh_Succ->eqh_Pred = eqh->eqh_Pred;
4157 eqh->eqh_Pred->eqh_Succ = eqh->eqh_Succ;
4158 SYNC;
4159 EIEIO;
4161 Disable(); // avoid race condition with interrupt
4162 nextetd = eqh->eqh_FirstTD;
4163 while((etd = nextetd))
4165 KPRINTF(1, ("FreeTD %08lx\n", nextetd));
4166 nextetd = etd->etd_Succ;
4167 ehciFreeTD(hc, etd);
4169 ehciFreeQH(hc, eqh);
4170 Enable();
4172 /* \\\ */
4174 /* /// "ehciFreeQHandTDs()" */
4175 void ehciFreeQHandTDs(struct PCIController *hc, struct EhciQH *eqh)
4177 struct EhciTD *etd = NULL;
4178 struct EhciTD *nextetd;
4180 KPRINTF(5, ("Unlinking QContext %08lx\n", eqh));
4181 nextetd = eqh->eqh_FirstTD;
4182 while(nextetd)
4184 KPRINTF(1, ("FreeTD %08lx\n", nextetd));
4185 etd = nextetd;
4186 nextetd = (struct EhciTD *) etd->etd_Succ;
4187 ehciFreeTD(hc, etd);
4190 ehciFreeQH(hc, eqh);
4192 /* \\\ */
4194 /* /// "ehciAllocQH()" */
4195 inline struct EhciQH * ehciAllocQH(struct PCIController *hc)
4197 struct EhciQH *eqh = hc->hc_EhciQHPool;
4199 if(!eqh)
4201 // out of QHs!
4202 KPRINTF(20, ("Out of QHs!\n"));
4203 return NULL;
4206 hc->hc_EhciQHPool = (struct EhciQH *) eqh->eqh_Succ;
4207 return(eqh);
4209 /* \\\ */
/* /// "ehciFreeQH()" */
/* Returns a QH to the controller's free pool (LIFO, linked via eqh_Succ). */
inline void ehciFreeQH(struct PCIController *hc, struct EhciQH *eqh)
{
    eqh->eqh_Succ = hc->hc_EhciQHPool;
    hc->hc_EhciQHPool = eqh;
}
/* \\\ */
4219 /* /// "ehciAllocTD()" */
4220 inline struct EhciTD * ehciAllocTD(struct PCIController *hc)
4222 struct EhciTD *etd = hc->hc_EhciTDPool;
4224 if(!etd)
4226 // out of TDs!
4227 KPRINTF(20, ("Out of TDs!\n"));
4228 return NULL;
4231 hc->hc_EhciTDPool = (struct EhciTD *) etd->etd_Succ;
4232 return(etd);
4234 /* \\\ */
/* /// "ehciFreeTD()" */
/* Returns a TD to the controller's free pool (LIFO, linked via etd_Succ). */
inline void ehciFreeTD(struct PCIController *hc, struct EhciTD *etd)
{
    etd->etd_Succ = hc->hc_EhciTDPool;
    hc->hc_EhciTDPool = etd;
}
/* \\\ */
4244 /* /// "ehciUpdateIntTree()" */
4245 void ehciUpdateIntTree(struct PCIController *hc)
4247 struct EhciQH *eqh;
4248 struct EhciQH *predeqh;
4249 struct EhciQH *lastusedeqh;
4250 UWORD cnt;
4252 // optimize linkage between queue heads
4253 predeqh = lastusedeqh = hc->hc_EhciTermQH;
4254 for(cnt = 0; cnt < 11; cnt++)
4256 eqh = hc->hc_EhciIntQH[cnt];
4257 if(eqh->eqh_Succ != predeqh)
4259 lastusedeqh = eqh->eqh_Succ;
4261 eqh->eqh_NextQH = lastusedeqh->eqh_Self;
4262 predeqh = eqh;
4265 /* \\\ */
4267 /* /// "ehciHandleFinishedTDs()" */
4268 void ehciHandleFinishedTDs(struct PCIController *hc)
4270 struct PCIUnit *unit = hc->hc_Unit;
4271 struct IOUsbHWReq *ioreq;
4272 struct IOUsbHWReq *nextioreq;
4273 struct EhciQH *eqh;
4274 struct EhciTD *etd;
4275 struct EhciTD *predetd;
4276 UWORD devadrep;
4277 ULONG len;
4278 UWORD inspect;
4279 ULONG nexttd;
4280 BOOL shortpkt;
4281 ULONG ctrlstatus;
4282 ULONG epctrlstatus;
4283 ULONG actual;
4284 BOOL halted;
4285 BOOL updatetree = FALSE;
4286 BOOL zeroterm;
4287 ULONG phyaddr;
4289 KPRINTF(1, ("Checking for Async work done...\n"));
4290 ioreq = (struct IOUsbHWReq *) hc->hc_TDQueue.lh_Head;
4291 while((nextioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ))
4293 eqh = (struct EhciQH *) ioreq->iouh_DriverPrivate1;
4294 if(eqh)
4296 KPRINTF(1, ("Examining IOReq=%08lx with EQH=%08lx\n", ioreq, eqh));
4297 SYNC;
4298 EIEIO;
4299 epctrlstatus = READMEM32_LE(&eqh->eqh_CtrlStatus);
4300 nexttd = READMEM32_LE(&eqh->eqh_NextTD);
4301 devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint + ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
4302 halted = ((epctrlstatus & (ETCF_ACTIVE|ETSF_HALTED)) == ETSF_HALTED);
4303 if(halted || (!(epctrlstatus & ETCF_ACTIVE) && (nexttd & EHCI_TERMINATE)))
4305 KPRINTF(1, ("AS: CS=%08lx CP=%08lx NX=%08lx\n", epctrlstatus, READMEM32_LE(&eqh->eqh_CurrTD), nexttd));
4306 shortpkt = FALSE;
4307 actual = 0;
4308 inspect = 1;
4309 etd = eqh->eqh_FirstTD;
4312 ctrlstatus = READMEM32_LE(&etd->etd_CtrlStatus);
4313 KPRINTF(1, ("AS: CS=%08lx SL=%08lx TD=%08lx\n", ctrlstatus, READMEM32_LE(&etd->etd_Self), etd));
4314 if(ctrlstatus & ETCF_ACTIVE)
4316 if(halted)
4318 KPRINTF(20, ("Async: Halted before TD\n"));
4319 //ctrlstatus = eqh->eqh_CtrlStatus;
4320 inspect = 0;
4321 if(unit->hu_NakTimeoutFrame[devadrep] && (hc->hc_FrameCounter > unit->hu_NakTimeoutFrame[devadrep]))
4323 KPRINTF(20, ("NAK timeout\n"));
4324 ioreq->iouh_Req.io_Error = UHIOERR_NAKTIMEOUT;
4326 break;
4327 } else {
4328 // what happened here? The host controller was just updating the fields and has not finished yet
4329 ctrlstatus = epctrlstatus;
4331 /*KPRINTF(20, ("AS: CS=%08lx CP=%08lx NX=%08lx\n", epctrlstatus, READMEM32_LE(&eqh->eqh_CurrTD), nexttd));
4332 KPRINTF(20, ("AS: CS=%08lx CP=%08lx NX=%08lx\n", READMEM32_LE(&eqh->eqh_CtrlStatus), READMEM32_LE(&eqh->eqh_CurrTD), READMEM32_LE(&eqh->eqh_NextTD)));
4333 KPRINTF(20, ("AS: CS=%08lx SL=%08lx TD=%08lx\n", ctrlstatus, READMEM32_LE(&etd->etd_Self), etd));
4334 etd = eqh->eqh_FirstTD;
4337 KPRINTF(20, ("XX: CS=%08lx SL=%08lx TD=%08lx\n", READMEM32_LE(&etd->etd_CtrlStatus), READMEM32_LE(&etd->etd_Self), etd));
4338 } while(etd = etd->etd_Succ);
4339 KPRINTF(20, ("Async: Internal error! Still active?!\n"));
4340 inspect = 2;
4341 break;*/
4345 if(ctrlstatus & (ETSF_HALTED|ETSF_TRANSERR|ETSF_BABBLE|ETSF_DATABUFFERERR))
4347 if(ctrlstatus & ETSF_BABBLE)
4349 KPRINTF(20, ("Babble error %08lx\n", ctrlstatus));
4350 ioreq->iouh_Req.io_Error = UHIOERR_OVERFLOW;
4352 else if(ctrlstatus & ETSF_DATABUFFERERR)
4354 KPRINTF(20, ("Databuffer error\n"));
4355 ioreq->iouh_Req.io_Error = UHIOERR_HOSTERROR;
4357 else if(ctrlstatus & ETSF_TRANSERR)
4359 if((ctrlstatus & ETCM_ERRORLIMIT)>>ETCS_ERRORLIMIT)
4361 KPRINTF(20, ("other kind of STALLED!\n"));
4362 ioreq->iouh_Req.io_Error = UHIOERR_STALL;
4363 } else {
4364 KPRINTF(20, ("TIMEOUT!\n"));
4365 ioreq->iouh_Req.io_Error = UHIOERR_TIMEOUT;
4367 } else {
4368 KPRINTF(20, ("STALLED!\n"));
4369 ioreq->iouh_Req.io_Error = UHIOERR_STALL;
4371 inspect = 0;
4372 break;
4375 len = etd->etd_Length - ((ctrlstatus & ETSM_TRANSLENGTH)>>ETSS_TRANSLENGTH);
4376 if((ctrlstatus & ETCM_PIDCODE) != ETCF_PIDCODE_SETUP) // don't count setup packet
4378 actual += len;
4380 if(ctrlstatus & ETSM_TRANSLENGTH)
4382 KPRINTF(10, ("Short packet: %ld < %ld\n", len, etd->etd_Length));
4383 shortpkt = TRUE;
4384 break;
4386 etd = etd->etd_Succ;
4387 } while(etd && (!(ctrlstatus & ETCF_READYINTEN)));
4388 /*if(inspect == 2)
4390 // phantom halted
4391 ioreq = nextioreq;
4392 continue;
4395 if(((actual + ioreq->iouh_Actual) < eqh->eqh_Actual) && (!ioreq->iouh_Req.io_Error) && (!(ioreq->iouh_Flags & UHFF_ALLOWRUNTPKTS)))
4397 ioreq->iouh_Req.io_Error = UHIOERR_RUNTPACKET;
4399 ioreq->iouh_Actual += actual;
4400 if(inspect && (!shortpkt) && (eqh->eqh_Actual < ioreq->iouh_Length))
4402 KPRINTF(10, ("Reloading BULK at %ld/%ld\n", eqh->eqh_Actual, ioreq->iouh_Length));
4403 // reload
4404 ctrlstatus = (ioreq->iouh_Dir == UHDIR_IN) ? (ETCF_3ERRORSLIMIT|ETCF_ACTIVE|ETCF_PIDCODE_IN) : (ETCF_3ERRORSLIMIT|ETCF_ACTIVE|ETCF_PIDCODE_OUT);
4405 phyaddr = (ULONG) pciGetPhysical(hc, &(((UBYTE *) ioreq->iouh_Data)[ioreq->iouh_Actual]));
4406 predetd = etd = eqh->eqh_FirstTD;
4408 CONSTWRITEMEM32_LE(&eqh->eqh_CurrTD, EHCI_TERMINATE);
4409 CONSTWRITEMEM32_LE(&eqh->eqh_NextTD, EHCI_TERMINATE);
4410 CONSTWRITEMEM32_LE(&eqh->eqh_AltNextTD, EHCI_TERMINATE);
4413 len = ioreq->iouh_Length - eqh->eqh_Actual;
4414 if(len > 4*EHCI_PAGE_SIZE)
4416 len = 4*EHCI_PAGE_SIZE;
4418 etd->etd_Length = len;
4419 KPRINTF(1, ("Reload Bulk TD %08lx len %ld (%ld/%ld) phy=%08lx\n",
4420 etd, len, eqh->eqh_Actual, ioreq->iouh_Length, phyaddr));
4421 WRITEMEM32_LE(&etd->etd_CtrlStatus, ctrlstatus|(len<<ETSS_TRANSLENGTH));
4422 // FIXME need quark scatter gather mechanism here
4423 WRITEMEM32_LE(&etd->etd_BufferPtr[0], phyaddr);
4424 WRITEMEM32_LE(&etd->etd_BufferPtr[1], (phyaddr & EHCI_PAGE_MASK) + (1*EHCI_PAGE_SIZE));
4425 WRITEMEM32_LE(&etd->etd_BufferPtr[2], (phyaddr & EHCI_PAGE_MASK) + (2*EHCI_PAGE_SIZE));
4426 WRITEMEM32_LE(&etd->etd_BufferPtr[3], (phyaddr & EHCI_PAGE_MASK) + (3*EHCI_PAGE_SIZE));
4427 WRITEMEM32_LE(&etd->etd_BufferPtr[4], (phyaddr & EHCI_PAGE_MASK) + (4*EHCI_PAGE_SIZE));
4428 phyaddr += len;
4429 eqh->eqh_Actual += len;
4430 zeroterm = (len && (ioreq->iouh_Dir == UHDIR_OUT) && (eqh->eqh_Actual == ioreq->iouh_Length) && (!ioreq->iouh_Flags & UHFF_NOSHORTPKT) && ((eqh->eqh_Actual % ioreq->iouh_MaxPktSize) == 0));
4431 predetd = etd;
4432 etd = etd->etd_Succ;
4433 if((!etd) && zeroterm)
4435 // rare case where the zero packet would be lost, allocate etd and append zero packet.
4436 etd = ehciAllocTD(hc);
4437 if(!etd)
4439 KPRINTF(200, ("INTERNAL ERROR! This should not happen! Could not allocate zero packet TD\n"));
4440 break;
4442 predetd->etd_Succ = etd;
4443 predetd->etd_NextTD = etd->etd_Self;
4444 predetd->etd_AltNextTD = hc->hc_ShortPktEndTD->etd_Self;
4445 etd->etd_Succ = NULL;
4446 CONSTWRITEMEM32_LE(&etd->etd_NextTD, EHCI_TERMINATE);
4447 CONSTWRITEMEM32_LE(&etd->etd_AltNextTD, EHCI_TERMINATE);
4449 } while(etd && ((eqh->eqh_Actual < ioreq->iouh_Length) || zeroterm));
4450 ctrlstatus |= ETCF_READYINTEN|(predetd->etd_Length<<ETSS_TRANSLENGTH);
4451 WRITEMEM32_LE(&predetd->etd_CtrlStatus, ctrlstatus);
4452 CONSTWRITEMEM32_LE(&predetd->etd_NextTD, EHCI_TERMINATE);
4453 CONSTWRITEMEM32_LE(&predetd->etd_AltNextTD, EHCI_TERMINATE);
4454 SYNC;
4455 EIEIO;
4456 etd = eqh->eqh_FirstTD;
4457 eqh->eqh_NextTD = etd->etd_Self;
4458 SYNC;
4459 EIEIO;
4460 unit->hu_NakTimeoutFrame[devadrep] = (ioreq->iouh_Flags & UHFF_NAKTIMEOUT) ? hc->hc_FrameCounter + (ioreq->iouh_NakTimeout<<3) : 0;
4461 } else {
4462 unit->hu_DevBusyReq[devadrep] = NULL;
4463 Remove(&ioreq->iouh_Req.io_Message.mn_Node);
4464 ehciFreeAsyncContext(hc, eqh);
4465 // use next data toggle bit based on last successful transaction
4466 KPRINTF(1, ("Old Toggle %04lx:%ld\n", devadrep, unit->hu_DevDataToggle[devadrep]));
4467 unit->hu_DevDataToggle[devadrep] = (ctrlstatus & ETCF_DATA1) ? TRUE : FALSE;
4468 KPRINTF(1, ("Toggle now %04lx:%ld\n", devadrep, unit->hu_DevDataToggle[devadrep]));
4469 if(inspect)
4471 if(ioreq->iouh_Req.io_Command == UHCMD_CONTROLXFER)
4473 // check for sucessful clear feature and set address ctrl transfers
4474 uhwCheckSpecialCtrlTransfers(hc, ioreq);
4477 ReplyMsg(&ioreq->iouh_Req.io_Message);
4480 } else {
4481 KPRINTF(20, ("IOReq=%08lx has no UQH!\n", ioreq));
4483 ioreq = nextioreq;
4486 KPRINTF(1, ("Checking for Periodic work done...\n"));
4487 ioreq = (struct IOUsbHWReq *) hc->hc_PeriodicTDQueue.lh_Head;
4488 while((nextioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ))
4490 eqh = (struct EhciQH *) ioreq->iouh_DriverPrivate1;
4491 if(eqh)
4493 KPRINTF(1, ("Examining IOReq=%08lx with EQH=%08lx\n", ioreq, eqh));
4494 nexttd = READMEM32_LE(&eqh->eqh_NextTD);
4495 etd = eqh->eqh_FirstTD;
4496 ctrlstatus = READMEM32_LE(&eqh->eqh_CtrlStatus);
4497 devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint + ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
4498 halted = ((ctrlstatus & (ETCF_ACTIVE|ETSF_HALTED)) == ETSF_HALTED);
4499 if(halted || (!(ctrlstatus & ETCF_ACTIVE) && (nexttd & EHCI_TERMINATE)))
4501 KPRINTF(1, ("EQH not active %08lx\n", ctrlstatus));
4502 shortpkt = FALSE;
4503 actual = 0;
4504 inspect = 1;
4507 ctrlstatus = READMEM32_LE(&etd->etd_CtrlStatus);
4508 KPRINTF(1, ("Periodic: TD=%08lx CS=%08lx\n", etd, ctrlstatus));
4509 if(ctrlstatus & ETCF_ACTIVE)
4511 if(halted)
4513 KPRINTF(20, ("Periodic: Halted before TD\n"));
4514 //ctrlstatus = eqh->eqh_CtrlStatus;
4515 inspect = 0;
4516 if(unit->hu_NakTimeoutFrame[devadrep] && (hc->hc_FrameCounter > unit->hu_NakTimeoutFrame[devadrep]))
4518 KPRINTF(20, ("NAK timeout\n"));
4519 ioreq->iouh_Req.io_Error = UHIOERR_NAKTIMEOUT;
4521 break;
4522 } else {
4523 KPRINTF(20, ("Periodic: Internal error! Still active?!\n"));
4524 break;
4528 if(ctrlstatus & (ETSF_HALTED|ETSF_TRANSERR|ETSF_BABBLE|ETSF_DATABUFFERERR|ETSF_MISSEDCSPLIT))
4530 if(ctrlstatus & ETSF_BABBLE)
4532 KPRINTF(20, ("Babble error %08lx\n", ctrlstatus));
4533 ioreq->iouh_Req.io_Error = UHIOERR_OVERFLOW;
4535 else if(ctrlstatus & ETSF_MISSEDCSPLIT)
4537 KPRINTF(20, ("Missed CSplit %08lx\n", ctrlstatus));
4538 ioreq->iouh_Req.io_Error = UHIOERR_STALL;
4540 else if(ctrlstatus & ETSF_DATABUFFERERR)
4542 KPRINTF(20, ("Databuffer error\n"));
4543 ioreq->iouh_Req.io_Error = UHIOERR_HOSTERROR;
4545 else if(ctrlstatus & ETSF_TRANSERR)
4547 if((ctrlstatus & ETCM_ERRORLIMIT)>>ETCS_ERRORLIMIT)
4549 KPRINTF(20, ("STALLED!\n"));
4550 ioreq->iouh_Req.io_Error = UHIOERR_STALL;
4551 } else {
4552 KPRINTF(20, ("TIMEOUT!\n"));
4553 ioreq->iouh_Req.io_Error = UHIOERR_TIMEOUT;
4556 else if(unit->hu_NakTimeoutFrame[devadrep] && (hc->hc_FrameCounter > unit->hu_NakTimeoutFrame[devadrep]))
4558 ioreq->iouh_Req.io_Error = UHIOERR_NAKTIMEOUT;
4560 inspect = 0;
4561 break;
4564 len = etd->etd_Length - ((ctrlstatus & ETSM_TRANSLENGTH)>>ETSS_TRANSLENGTH);
4565 actual += len;
4566 if(ctrlstatus & ETSM_TRANSLENGTH)
4568 KPRINTF(10, ("Short packet: %ld < %ld\n", len, etd->etd_Length));
4569 shortpkt = TRUE;
4570 break;
4572 etd = etd->etd_Succ;
4573 } while(etd);
4574 if((actual < eqh->eqh_Actual) && (!ioreq->iouh_Req.io_Error) && (!(ioreq->iouh_Flags & UHFF_ALLOWRUNTPKTS)))
4576 ioreq->iouh_Req.io_Error = UHIOERR_RUNTPACKET;
4578 ioreq->iouh_Actual += actual;
4579 unit->hu_DevBusyReq[devadrep] = NULL;
4580 Remove(&ioreq->iouh_Req.io_Message.mn_Node);
4581 ehciFreePeriodicContext(hc, eqh);
4582 updatetree = TRUE;
4583 // use next data toggle bit based on last successful transaction
4584 KPRINTF(1, ("Old Toggle %04lx:%ld\n", devadrep, unit->hu_DevDataToggle[devadrep]));
4585 unit->hu_DevDataToggle[devadrep] = (ctrlstatus & ETCF_DATA1) ? TRUE : FALSE;
4586 KPRINTF(1, ("Toggle now %04lx:%ld\n", devadrep, unit->hu_DevDataToggle[devadrep]));
4587 ReplyMsg(&ioreq->iouh_Req.io_Message);
4589 } else {
4590 KPRINTF(20, ("IOReq=%08lx has no UQH!\n", ioreq));
4592 ioreq = nextioreq;
4594 if(updatetree)
4596 ehciUpdateIntTree(hc);
4599 /* \\\ */
4601 /* /// "ehciScheduleCtrlTDs()" */
// Walks the pending control transfer queue (hc_CtrlXFerQueue). For every
// request whose device/endpoint is idle it builds an EHCI queue head with a
// SETUP TD, optional DATA TDs (4 pages max each) and a terminating STATUS TD,
// then links the QH into the async schedule just behind hc_EhciAsyncQH.
// Requests that cannot be scheduled (endpoint busy, QH/TD pool exhausted)
// stay queued and are retried from the next completion interrupt.
// NOTE(review): this listing is an extraction — the numeric prefixes are the
// original file's line numbers, and brace-only lines were dropped.
4602 void ehciScheduleCtrlTDs(struct PCIController *hc)
4604 struct PCIUnit *unit = hc->hc_Unit;
4605 struct IOUsbHWReq *ioreq;
4606 UWORD devadrep;
4607 struct EhciQH *eqh;
4608 struct EhciTD *setupetd;
4609 struct EhciTD *dataetd;
4610 struct EhciTD *termetd;
4611 struct EhciTD *predetd;
4612 ULONG epcaps;
4613 ULONG ctrlstatus;
4614 ULONG len;
4615 ULONG phyaddr;
4617 /* *** CTRL Transfers *** */
4618 KPRINTF(1, ("Scheduling new CTRL transfers...\n"));
4619 ioreq = (struct IOUsbHWReq *) hc->hc_CtrlXFerQueue.lh_Head;
4620 while(((struct Node *) ioreq)->ln_Succ)
// busy-endpoint key: devaddr*32 + endpoint (control EPs have no IN offset)
4622 devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint;
4623 KPRINTF(10, ("New CTRL transfer to %ld.%ld: %ld bytes\n", ioreq->iouh_DevAddr, ioreq->iouh_Endpoint, ioreq->iouh_Length));
4624 /* is endpoint already in use or do we have to wait for next transaction */
4625 if(unit->hu_DevBusyReq[devadrep])
4627 KPRINTF(5, ("Endpoint %02lx in use!\n", devadrep));
4628 ioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ;
4629 continue;
// allocate QH + the two fixed TDs (setup/status); back out cleanly on
// pool exhaustion and retry later
4632 eqh = ehciAllocQH(hc);
4633 if(!eqh)
4635 break;
4638 setupetd = ehciAllocTD(hc);
4639 if(!setupetd)
4641 ehciFreeQH(hc, eqh);
4642 break;
4644 termetd = ehciAllocTD(hc);
4645 if(!termetd)
4647 ehciFreeTD(hc, setupetd);
4648 ehciFreeQH(hc, eqh);
4649 break;
4651 eqh->eqh_IOReq = ioreq;
4652 eqh->eqh_FirstTD = setupetd;
4653 eqh->eqh_Actual = 0;
// endpoint characteristics: toggles come from the TDs, not the QH
4655 epcaps = ((0<<EQES_RELOAD)|EQEF_TOGGLEFROMTD)|(ioreq->iouh_MaxPktSize<<EQES_MAXPKTLEN)|(ioreq->iouh_DevAddr<<EQES_DEVADDR)|(ioreq->iouh_Endpoint<<EQES_ENDPOINT);
4656 if(ioreq->iouh_Flags & UHFF_SPLITTRANS)
4658 KPRINTF(10, ("*** SPLIT TRANSACTION to HubPort %ld at Addr %ld\n", ioreq->iouh_SplitHubPort, ioreq->iouh_SplitHubAddr));
4659 // full speed and low speed handling
4660 WRITEMEM32_LE(&eqh->eqh_SplitCtrl, EQSF_MULTI_1|(ioreq->iouh_SplitHubPort<<EQSS_PORTNUMBER)|(ioreq->iouh_SplitHubAddr<<EQSS_HUBADDRESS));
4661 epcaps |= EQEF_SPLITCTRLEP;
4662 if(ioreq->iouh_Flags & UHFF_LOWSPEED)
4664 KPRINTF(10, ("*** LOW SPEED ***\n"));
4665 epcaps |= EQEF_LOWSPEED;
4667 } else {
4668 CONSTWRITEMEM32_LE(&eqh->eqh_SplitCtrl, EQSF_MULTI_1);
4669 epcaps |= EQEF_HIGHSPEED;
4671 WRITEMEM32_LE(&eqh->eqh_EPCaps, epcaps);
4672 //eqh->eqh_CtrlStatus = eqh->eqh_CurrTD = 0;
4673 //eqh->eqh_AltNextTD = eqh->eqh_NextTD = setupetd->etd_Self;
4675 //termetd->etd_QueueHead = setupetd->etd_QueueHead = eqh;
4677 KPRINTF(1, ("SetupTD=%08lx, TermTD=%08lx\n", setupetd, termetd));
4679 // fill setup td
// SETUP stage: 8-byte setup packet, PID SETUP, DATA0 implied
4680 setupetd->etd_Length = 8;
4682 CONSTWRITEMEM32_LE(&setupetd->etd_CtrlStatus, (8<<ETSS_TRANSLENGTH)|ETCF_3ERRORSLIMIT|ETCF_ACTIVE|ETCF_PIDCODE_SETUP);
4683 phyaddr = (ULONG) pciGetPhysical(hc, &ioreq->iouh_SetupData);
4684 WRITEMEM32_LE(&setupetd->etd_BufferPtr[0], phyaddr);
4685 WRITEMEM32_LE(&setupetd->etd_BufferPtr[1], (phyaddr + 8) & EHCI_PAGE_MASK); // theoretically, setup data may cross one page
4686 setupetd->etd_BufferPtr[2] = 0; // clear for overlay bits
// DATA stage direction follows bmRequestType; toggle starts at DATA1
4688 ctrlstatus = (ioreq->iouh_SetupData.bmRequestType & URTF_IN) ? (ETCF_3ERRORSLIMIT|ETCF_ACTIVE|ETCF_PIDCODE_IN) : (ETCF_3ERRORSLIMIT|ETCF_ACTIVE|ETCF_PIDCODE_OUT);
4689 predetd = setupetd;
4690 if(ioreq->iouh_Length)
4692 phyaddr = (ULONG) pciGetPhysical(hc, ioreq->iouh_Data);
4695 dataetd = ehciAllocTD(hc);
4696 if(!dataetd)
4698 break;
4700 ctrlstatus ^= ETCF_DATA1; // toggle bit
4701 predetd->etd_Succ = dataetd;
4702 predetd->etd_NextTD = dataetd->etd_Self;
// short packet on a data TD skips ahead to the status TD
4703 dataetd->etd_AltNextTD = termetd->etd_Self;
4705 len = ioreq->iouh_Length - eqh->eqh_Actual;
4706 if(len > 4*EHCI_PAGE_SIZE)
4708 len = 4*EHCI_PAGE_SIZE;
4710 dataetd->etd_Length = len;
4711 WRITEMEM32_LE(&dataetd->etd_CtrlStatus, ctrlstatus|(len<<ETSS_TRANSLENGTH));
4712 // FIXME need quark scatter gather mechanism here
// assumes iouh_Data is physically contiguous across the 4 pages —
// only buffer page 0 comes from pciGetPhysical; TODO confirm
4713 WRITEMEM32_LE(&dataetd->etd_BufferPtr[0], phyaddr);
4714 WRITEMEM32_LE(&dataetd->etd_BufferPtr[1], (phyaddr & EHCI_PAGE_MASK) + (1*EHCI_PAGE_SIZE));
4715 WRITEMEM32_LE(&dataetd->etd_BufferPtr[2], (phyaddr & EHCI_PAGE_MASK) + (2*EHCI_PAGE_SIZE));
4716 WRITEMEM32_LE(&dataetd->etd_BufferPtr[3], (phyaddr & EHCI_PAGE_MASK) + (3*EHCI_PAGE_SIZE));
4717 WRITEMEM32_LE(&dataetd->etd_BufferPtr[4], (phyaddr & EHCI_PAGE_MASK) + (4*EHCI_PAGE_SIZE));
4718 phyaddr += len;
4719 eqh->eqh_Actual += len;
4720 predetd = dataetd;
4721 } while(eqh->eqh_Actual < ioreq->iouh_Length);
4722 if(!dataetd)
4724 // not enough dataetds? try again later
4725 ehciFreeQHandTDs(hc, eqh);
4726 ehciFreeTD(hc, termetd); // this one's not linked yet
4727 break;
4730 // TERM packet
// STATUS stage: always DATA1, opposite direction of the data stage,
// and raises the completion interrupt
4731 ctrlstatus |= ETCF_DATA1|ETCF_READYINTEN;
4732 ctrlstatus ^= (ETCF_PIDCODE_IN^ETCF_PIDCODE_OUT);
4734 predetd->etd_NextTD = termetd->etd_Self;
4735 predetd->etd_Succ = termetd;
4736 CONSTWRITEMEM32_LE(&termetd->etd_NextTD, EHCI_TERMINATE);
4737 CONSTWRITEMEM32_LE(&termetd->etd_AltNextTD, EHCI_TERMINATE);
4738 WRITEMEM32_LE(&termetd->etd_CtrlStatus, ctrlstatus);
4739 termetd->etd_Length = 0;
4740 termetd->etd_BufferPtr[0] = 0; // clear for overlay bits
4741 termetd->etd_BufferPtr[1] = 0; // clear for overlay bits
4742 termetd->etd_BufferPtr[2] = 0; // clear for overlay bits
4743 termetd->etd_Succ = NULL;
4745 // due to sillicon bugs, we fill in the first overlay ourselves.
4746 eqh->eqh_CurrTD = setupetd->etd_Self;
4747 eqh->eqh_NextTD = setupetd->etd_NextTD;
4748 eqh->eqh_AltNextTD = setupetd->etd_AltNextTD;
4749 eqh->eqh_CtrlStatus = setupetd->etd_CtrlStatus;
4750 eqh->eqh_BufferPtr[0] = setupetd->etd_BufferPtr[0];
4751 eqh->eqh_BufferPtr[1] = setupetd->etd_BufferPtr[1];
4752 eqh->eqh_BufferPtr[2] = 0;
4754 Remove(&ioreq->iouh_Req.io_Message.mn_Node);
4755 ioreq->iouh_DriverPrivate1 = eqh;
4757 // manage endpoint going busy
4758 unit->hu_DevBusyReq[devadrep] = ioreq;
// NAK timeout deadline in microframes (frame counter runs at 8x ms)
4759 unit->hu_NakTimeoutFrame[devadrep] = (ioreq->iouh_Flags & UHFF_NAKTIMEOUT) ? hc->hc_FrameCounter + (ioreq->iouh_NakTimeout<<3) : 0;
4761 Disable();
4762 AddTail(&hc->hc_TDQueue, (struct Node *) ioreq);
4764 // looks good to me, now enqueue this entry (just behind the asyncQH)
// link order matters: make the new QH point forward before the live
// schedule points at it; SYNC/EIEIO enforce memory write ordering
4765 eqh->eqh_Succ = hc->hc_EhciAsyncQH->eqh_Succ;
4766 eqh->eqh_NextQH = eqh->eqh_Succ->eqh_Self;
4767 SYNC;
4768 EIEIO;
4769 eqh->eqh_Pred = hc->hc_EhciAsyncQH;
4770 eqh->eqh_Succ->eqh_Pred = eqh;
4771 hc->hc_EhciAsyncQH->eqh_Succ = eqh;
4772 hc->hc_EhciAsyncQH->eqh_NextQH = eqh->eqh_Self;
4773 SYNC;
4774 EIEIO;
4775 Enable();
// restart from the queue head: scheduling removed the current node
4777 ioreq = (struct IOUsbHWReq *) hc->hc_CtrlXFerQueue.lh_Head;
4780 /* \\\ */
4782 /* /// "ehciScheduleIntTDs()" */
// Walks the pending interrupt transfer queue (hc_IntXFerQueue). For every
// request whose device/endpoint is idle it builds a queue head plus TD chain
// and links it into the periodic schedule at the interrupt-tree node
// (hc_EhciIntQH[n]) matching the requested polling interval. Split-transaction
// and high-speed paths program eqh_SplitCtrl differently (start/complete-split
// masks vs. microframe activity masks).
// NOTE(review): this listing is an extraction — the numeric prefixes are the
// original file's line numbers, and brace-only lines were dropped.
4783 void ehciScheduleIntTDs(struct PCIController *hc)
4785 struct PCIUnit *unit = hc->hc_Unit;
4786 struct IOUsbHWReq *ioreq;
4787 UWORD devadrep;
4788 UWORD cnt;
4789 struct EhciQH *eqh;
4790 struct EhciQH *inteqh;
4791 struct EhciTD *etd;
4792 struct EhciTD *predetd;
4793 ULONG epcaps;
4794 ULONG ctrlstatus;
4795 ULONG splitctrl;
4796 ULONG len;
4797 ULONG phyaddr;
4799 /* *** INT Transfers *** */
4800 KPRINTF(1, ("Scheduling new INT transfers...\n"));
4801 ioreq = (struct IOUsbHWReq *) hc->hc_IntXFerQueue.lh_Head;
4802 while(((struct Node *) ioreq)->ln_Succ)
// busy-endpoint key: devaddr*32 + endpoint, IN endpoints offset by 0x10
4804 devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint + ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
4805 KPRINTF(10, ("New INT transfer to %ld.%ld: %ld bytes\n", ioreq->iouh_DevAddr, ioreq->iouh_Endpoint, ioreq->iouh_Length));
4806 /* is endpoint already in use or do we have to wait for next transaction */
4807 if(unit->hu_DevBusyReq[devadrep])
4809 KPRINTF(5, ("Endpoint %02lx in use!\n", devadrep));
4810 ioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ;
4811 continue;
4814 eqh = ehciAllocQH(hc);
4815 if(!eqh)
4817 break;
4820 eqh->eqh_IOReq = ioreq;
4821 eqh->eqh_Actual = 0;
4823 epcaps = (0<<EQES_RELOAD)|(ioreq->iouh_MaxPktSize<<EQES_MAXPKTLEN)|(ioreq->iouh_DevAddr<<EQES_DEVADDR)|(ioreq->iouh_Endpoint<<EQES_ENDPOINT);
4824 if(ioreq->iouh_Flags & UHFF_SPLITTRANS)
4826 KPRINTF(10, ("*** SPLIT TRANSACTION to HubPort %ld at Addr %ld\n", ioreq->iouh_SplitHubPort, ioreq->iouh_SplitHubAddr));
4827 // full speed and low speed handling
4828 if(ioreq->iouh_Flags & UHFF_LOWSPEED)
4830 KPRINTF(10, ("*** LOW SPEED ***\n"));
4831 epcaps |= EQEF_LOWSPEED;
// start-split in uframe 0 (mask 0x01), complete-split in uframes 2-4 (0x1c)
4833 WRITEMEM32_LE(&eqh->eqh_SplitCtrl, (EQSF_MULTI_1|(0x01<<EQSS_MUSOFACTIVE)|(0x1c<<EQSS_MUSOFCSPLIT))|(ioreq->iouh_SplitHubPort<<EQSS_PORTNUMBER)|(ioreq->iouh_SplitHubAddr<<EQSS_HUBADDRESS));
// interval is in ms for split transfers; clamp to the deepest tree level
4834 if(ioreq->iouh_Interval >= 255)
4836 inteqh = hc->hc_EhciIntQH[8]; // 256ms interval
4837 } else {
4838 cnt = 0;
// pick the largest power-of-two level not exceeding the interval
4841 inteqh = hc->hc_EhciIntQH[cnt++];
4842 } while(ioreq->iouh_Interval >= (1<<cnt));
4844 } else {
4845 epcaps |= EQEF_HIGHSPEED;
4846 if(ioreq->iouh_Flags & UHFF_MULTI_3)
4848 splitctrl = EQSF_MULTI_3;
4850 else if(ioreq->iouh_Flags & UHFF_MULTI_2)
4852 splitctrl = EQSF_MULTI_2;
4853 } else {
4854 splitctrl = EQSF_MULTI_1;
// microframe activity mask: denser masks for shorter polling intervals
4856 if(ioreq->iouh_Interval < 2) // 0-1 µFrames
4858 splitctrl |= (0xff<<EQSS_MUSOFACTIVE);
4860 else if(ioreq->iouh_Interval < 4) // 2-3 µFrames
4862 splitctrl |= (0x55<<EQSS_MUSOFACTIVE);
4864 else if(ioreq->iouh_Interval < 8) // 4-7 µFrames
4866 splitctrl |= (0x22<<EQSS_MUSOFACTIVE);
4868 else if(ioreq->iouh_Interval > 511) // 64ms and higher
4870 splitctrl |= (0x10<<EQSS_MUSOFACTIVE);
4872 else //if(ioreq->iouh_Interval >= 8) // 1-64ms
4874 splitctrl |= (0x01<<EQSS_MUSOFACTIVE);
4876 WRITEMEM32_LE(&eqh->eqh_SplitCtrl, splitctrl);
// interval is in µframes for high speed; clamp to the deepest tree level
4877 if(ioreq->iouh_Interval >= 1024)
4879 inteqh = hc->hc_EhciIntQH[10]; // 1024µFrames interval
4880 } else {
4881 cnt = 0;
4884 inteqh = hc->hc_EhciIntQH[cnt++];
4885 } while(ioreq->iouh_Interval >= (1<<cnt));
4888 WRITEMEM32_LE(&eqh->eqh_EPCaps, epcaps);
4889 //eqh->eqh_CtrlStatus = eqh->eqh_CurrTD = 0;
4890 eqh->eqh_FirstTD = NULL; // clear for ehciFreeQHandTDs()
4892 ctrlstatus = (ioreq->iouh_Dir == UHDIR_IN) ? (ETCF_3ERRORSLIMIT|ETCF_ACTIVE|ETCF_PIDCODE_IN) : (ETCF_3ERRORSLIMIT|ETCF_ACTIVE|ETCF_PIDCODE_OUT);
4893 if(unit->hu_DevDataToggle[devadrep])
4895 // continue with data toggle 0
// NOTE(review): comment looks inverted — ETCF_DATA1 continues with toggle 1
4896 ctrlstatus |= ETCF_DATA1;
4898 predetd = NULL;
4899 phyaddr = (ULONG) pciGetPhysical(hc, ioreq->iouh_Data);
4902 etd = ehciAllocTD(hc);
4903 if(!etd)
4905 break;
4907 if(predetd)
4909 predetd->etd_Succ = etd;
4910 predetd->etd_NextTD = etd->etd_Self;
// short packet jumps straight to the common end-of-chain TD
4911 predetd->etd_AltNextTD = hc->hc_ShortPktEndTD->etd_Self;
4912 } else {
4913 eqh->eqh_FirstTD = etd;
4914 //eqh->eqh_AltNextTD = eqh->eqh_NextTD = etd->etd_Self;
4917 len = ioreq->iouh_Length - eqh->eqh_Actual;
4918 if(len > 4*EHCI_PAGE_SIZE)
4920 len = 4*EHCI_PAGE_SIZE;
4922 etd->etd_Length = len;
4923 WRITEMEM32_LE(&etd->etd_CtrlStatus, ctrlstatus|(len<<ETSS_TRANSLENGTH));
4924 // FIXME need quark scatter gather mechanism here
// assumes the data buffer is physically contiguous across 4 pages — TODO confirm
4925 WRITEMEM32_LE(&etd->etd_BufferPtr[0], phyaddr);
4926 WRITEMEM32_LE(&etd->etd_BufferPtr[1], (phyaddr & EHCI_PAGE_MASK) + (1*EHCI_PAGE_SIZE));
4927 WRITEMEM32_LE(&etd->etd_BufferPtr[2], (phyaddr & EHCI_PAGE_MASK) + (2*EHCI_PAGE_SIZE));
4928 WRITEMEM32_LE(&etd->etd_BufferPtr[3], (phyaddr & EHCI_PAGE_MASK) + (3*EHCI_PAGE_SIZE));
4929 WRITEMEM32_LE(&etd->etd_BufferPtr[4], (phyaddr & EHCI_PAGE_MASK) + (4*EHCI_PAGE_SIZE));
4930 phyaddr += len;
4931 eqh->eqh_Actual += len;
4932 predetd = etd;
4933 } while(eqh->eqh_Actual < ioreq->iouh_Length);
4935 if(!etd)
4937 // not enough etds? try again later
4938 ehciFreeQHandTDs(hc, eqh);
4939 break;
// last TD raises the completion interrupt and terminates the chain
4941 ctrlstatus |= ETCF_READYINTEN|(etd->etd_Length<<ETSS_TRANSLENGTH);
4942 WRITEMEM32_LE(&predetd->etd_CtrlStatus, ctrlstatus);
4944 CONSTWRITEMEM32_LE(&predetd->etd_NextTD, EHCI_TERMINATE);
4945 CONSTWRITEMEM32_LE(&predetd->etd_AltNextTD, EHCI_TERMINATE);
4946 predetd->etd_Succ = NULL;
4948 // due to sillicon bugs, we fill in the first overlay ourselves.
4949 etd = eqh->eqh_FirstTD;
4950 eqh->eqh_CurrTD = etd->etd_Self;
4951 eqh->eqh_NextTD = etd->etd_NextTD;
4952 eqh->eqh_AltNextTD = etd->etd_AltNextTD;
4953 eqh->eqh_CtrlStatus = etd->etd_CtrlStatus;
4954 eqh->eqh_BufferPtr[0] = etd->etd_BufferPtr[0];
4955 eqh->eqh_BufferPtr[1] = etd->etd_BufferPtr[1];
4956 eqh->eqh_BufferPtr[2] = etd->etd_BufferPtr[2];
4957 eqh->eqh_BufferPtr[3] = etd->etd_BufferPtr[3];
4958 eqh->eqh_BufferPtr[4] = etd->etd_BufferPtr[4];
4960 Remove(&ioreq->iouh_Req.io_Message.mn_Node);
4961 ioreq->iouh_DriverPrivate1 = eqh;
4963 // manage endpoint going busy
4964 unit->hu_DevBusyReq[devadrep] = ioreq;
// NAK timeout deadline in microframes (frame counter runs at 8x ms)
4965 unit->hu_NakTimeoutFrame[devadrep] = (ioreq->iouh_Flags & UHFF_NAKTIMEOUT) ? hc->hc_FrameCounter + (ioreq->iouh_NakTimeout<<3) : 0;
4967 Disable();
4968 AddTail(&hc->hc_PeriodicTDQueue, (struct Node *) ioreq);
4970 // looks good to me, now enqueue this entry in the right IntQH
// forward pointers first, then backlinks; SYNC/EIEIO order the writes
4971 eqh->eqh_Succ = inteqh->eqh_Succ;
4972 eqh->eqh_NextQH = eqh->eqh_Succ->eqh_Self;
4973 SYNC;
4974 EIEIO;
4975 eqh->eqh_Pred = inteqh;
4976 eqh->eqh_Succ->eqh_Pred = eqh;
4977 inteqh->eqh_Succ = eqh;
4978 inteqh->eqh_NextQH = eqh->eqh_Self;
4979 SYNC;
4980 EIEIO;
4981 Enable();
// rebuild the interrupt tree links after inserting the new QH
4983 ehciUpdateIntTree(hc);
4985 ioreq = (struct IOUsbHWReq *) hc->hc_IntXFerQueue.lh_Head;
4988 /* \\\ */
4990 /* /// "ehciScheduleBulkTDs()" */
4991 void ehciScheduleBulkTDs(struct PCIController *hc)
4993 struct PCIUnit *unit = hc->hc_Unit;
4994 struct IOUsbHWReq *ioreq;
4995 UWORD devadrep;
4996 struct EhciQH *eqh;
4997 struct EhciTD *etd = NULL;
4998 struct EhciTD *predetd;
4999 ULONG epcaps;
5000 ULONG ctrlstatus;
5001 ULONG splitctrl;
5002 ULONG len;
5003 ULONG phyaddr;
5005 /* *** BULK Transfers *** */
5006 KPRINTF(1, ("Scheduling new BULK transfers...\n"));
5007 ioreq = (struct IOUsbHWReq *) hc->hc_BulkXFerQueue.lh_Head;
5008 while(((struct Node *) ioreq)->ln_Succ)
5010 devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint + ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
5011 KPRINTF(10, ("New BULK transfer to %ld.%ld: %ld bytes\n", ioreq->iouh_DevAddr, ioreq->iouh_Endpoint, ioreq->iouh_Length));
5012 /* is endpoint already in use or do we have to wait for next transaction */
5013 if(unit->hu_DevBusyReq[devadrep])
5015 KPRINTF(5, ("Endpoint %02lx in use!\n", devadrep));
5016 ioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ;
5017 continue;
5020 eqh = ehciAllocQH(hc);
5021 if(!eqh)
5023 break;
5026 eqh->eqh_IOReq = ioreq;
5027 eqh->eqh_Actual = 0;
5029 epcaps = (0<<EQES_RELOAD)|(ioreq->iouh_MaxPktSize<<EQES_MAXPKTLEN)|(ioreq->iouh_DevAddr<<EQES_DEVADDR)|(ioreq->iouh_Endpoint<<EQES_ENDPOINT);
5030 if(ioreq->iouh_Flags & UHFF_SPLITTRANS)
5032 KPRINTF(10, ("*** SPLIT TRANSACTION to HubPort %ld at Addr %ld\n", ioreq->iouh_SplitHubPort, ioreq->iouh_SplitHubAddr));
5033 // full speed and low speed handling
5034 if(ioreq->iouh_Flags & UHFF_LOWSPEED)
5036 KPRINTF(10, ("*** LOW SPEED ***\n"));
5037 epcaps |= EQEF_LOWSPEED;
5039 WRITEMEM32_LE(&eqh->eqh_SplitCtrl, EQSF_MULTI_1|(ioreq->iouh_SplitHubPort<<EQSS_PORTNUMBER)|(ioreq->iouh_SplitHubAddr<<EQSS_HUBADDRESS));
5040 } else {
5041 epcaps |= EQEF_HIGHSPEED;
5042 if(ioreq->iouh_Flags & UHFF_MULTI_3)
5044 splitctrl = EQSF_MULTI_3;
5046 else if(ioreq->iouh_Flags & UHFF_MULTI_2)
5048 splitctrl = EQSF_MULTI_2;
5049 } else {
5050 splitctrl = EQSF_MULTI_1;
5052 WRITEMEM32_LE(&eqh->eqh_SplitCtrl, splitctrl);
5054 WRITEMEM32_LE(&eqh->eqh_EPCaps, epcaps);
5055 //eqh->eqh_CtrlStatus = eqh->eqh_CurrTD = 0;
5056 eqh->eqh_FirstTD = NULL; // clear for ehciFreeQHandTDs()
5058 ctrlstatus = (ioreq->iouh_Dir == UHDIR_IN) ? (ETCF_3ERRORSLIMIT|ETCF_ACTIVE|ETCF_PIDCODE_IN) : (ETCF_3ERRORSLIMIT|ETCF_ACTIVE|ETCF_PIDCODE_OUT);
5059 if(unit->hu_DevDataToggle[devadrep])
5061 // continue with data toggle 0
5062 ctrlstatus |= ETCF_DATA1;
5064 predetd = NULL;
5065 phyaddr = (ULONG) pciGetPhysical(hc, ioreq->iouh_Data);
5068 if((eqh->eqh_Actual >= EHCI_TD_BULK_LIMIT) && (eqh->eqh_Actual < ioreq->iouh_Length))
5070 KPRINTF(10, ("Bulk too large, splitting...\n"));
5071 break;
5073 etd = ehciAllocTD(hc);
5074 if(!etd)
5076 break;
5078 if(predetd)
5080 predetd->etd_Succ = etd;
5081 predetd->etd_NextTD = etd->etd_Self;
5082 predetd->etd_AltNextTD = hc->hc_ShortPktEndTD->etd_Self;
5083 } else {
5084 eqh->eqh_FirstTD = etd;
5085 //eqh->eqh_AltNextTD = eqh->eqh_NextTD = etd->etd_Self;
5088 len = ioreq->iouh_Length - eqh->eqh_Actual;
5089 if(len > 4*EHCI_PAGE_SIZE)
5091 len = 4*EHCI_PAGE_SIZE;
5093 etd->etd_Length = len;
5094 KPRINTF(1, ("Bulk TD %08lx len %ld (%ld/%ld) phy=%08lx\n",
5095 etd, len, eqh->eqh_Actual, ioreq->iouh_Length, phyaddr));
5096 WRITEMEM32_LE(&etd->etd_CtrlStatus, ctrlstatus|(len<<ETSS_TRANSLENGTH));
5097 // FIXME need quark scatter gather mechanism here
5098 WRITEMEM32_LE(&etd->etd_BufferPtr[0], phyaddr);
5099 WRITEMEM32_LE(&etd->etd_BufferPtr[1], (phyaddr & EHCI_PAGE_MASK) + (1*EHCI_PAGE_SIZE));
5100 WRITEMEM32_LE(&etd->etd_BufferPtr[2], (phyaddr & EHCI_PAGE_MASK) + (2*EHCI_PAGE_SIZE));
5101 WRITEMEM32_LE(&etd->etd_BufferPtr[3], (phyaddr & EHCI_PAGE_MASK) + (3*EHCI_PAGE_SIZE));
5102 WRITEMEM32_LE(&etd->etd_BufferPtr[4], (phyaddr & EHCI_PAGE_MASK) + (4*EHCI_PAGE_SIZE));
5103 phyaddr += len;
5104 eqh->eqh_Actual += len;
5106 predetd = etd;
5107 } while((eqh->eqh_Actual < ioreq->iouh_Length) || (len && (ioreq->iouh_Dir == UHDIR_OUT) && (eqh->eqh_Actual == ioreq->iouh_Length) && (!ioreq->iouh_Flags & UHFF_NOSHORTPKT) && ((eqh->eqh_Actual % ioreq->iouh_MaxPktSize) == 0)));
5109 if(!etd)
5111 // not enough etds? try again later
5112 ehciFreeQHandTDs(hc, eqh);
5113 break;
5115 ctrlstatus |= ETCF_READYINTEN|(predetd->etd_Length<<ETSS_TRANSLENGTH);
5116 WRITEMEM32_LE(&predetd->etd_CtrlStatus, ctrlstatus);
5118 predetd->etd_Succ = NULL;
5119 CONSTWRITEMEM32_LE(&predetd->etd_NextTD, EHCI_TERMINATE);
5120 CONSTWRITEMEM32_LE(&predetd->etd_AltNextTD, EHCI_TERMINATE);
5122 // due to sillicon bugs, we fill in the first overlay ourselves.
5123 etd = eqh->eqh_FirstTD;
5124 eqh->eqh_CurrTD = etd->etd_Self;
5125 eqh->eqh_NextTD = etd->etd_NextTD;
5126 eqh->eqh_AltNextTD = etd->etd_AltNextTD;
5127 eqh->eqh_CtrlStatus = etd->etd_CtrlStatus;
5128 eqh->eqh_BufferPtr[0] = etd->etd_BufferPtr[0];
5129 eqh->eqh_BufferPtr[1] = etd->etd_BufferPtr[1];
5130 eqh->eqh_BufferPtr[2] = etd->etd_BufferPtr[2];
5131 eqh->eqh_BufferPtr[3] = etd->etd_BufferPtr[3];
5132 eqh->eqh_BufferPtr[4] = etd->etd_BufferPtr[4];
5134 Remove(&ioreq->iouh_Req.io_Message.mn_Node);
5135 ioreq->iouh_DriverPrivate1 = eqh;
5137 // manage endpoint going busy
5138 unit->hu_DevBusyReq[devadrep] = ioreq;
5139 unit->hu_NakTimeoutFrame[devadrep] = (ioreq->iouh_Flags & UHFF_NAKTIMEOUT) ? hc->hc_FrameCounter + (ioreq->iouh_NakTimeout<<3) : 0;
5141 Disable();
5142 AddTail(&hc->hc_TDQueue, (struct Node *) ioreq);
5144 // looks good to me, now enqueue this entry (just behind the asyncQH)
5145 eqh->eqh_Succ = hc->hc_EhciAsyncQH->eqh_Succ;
5146 eqh->eqh_NextQH = eqh->eqh_Succ->eqh_Self;
5147 SYNC;
5148 EIEIO;
5149 eqh->eqh_Pred = hc->hc_EhciAsyncQH;
5150 eqh->eqh_Succ->eqh_Pred = eqh;
5151 hc->hc_EhciAsyncQH->eqh_Succ = eqh;
5152 hc->hc_EhciAsyncQH->eqh_NextQH = eqh->eqh_Self;
5153 SYNC;
5154 EIEIO;
5155 Enable();
5157 ioreq = (struct IOUsbHWReq *) hc->hc_BulkXFerQueue.lh_Head;
5160 /* \\\ */
5162 /* /// "ehciCompleteInt()" */
5163 void ehciCompleteInt(struct PCIController *hc)
5165 ULONG framecnt = READREG32_LE(hc->hc_RegBase, EHCI_FRAMECOUNT);
5167 KPRINTF(1, ("CompleteInt!\n"));
5168 hc->hc_FrameCounter = (hc->hc_FrameCounter & 0xffffc000) + framecnt;
5170 /* **************** PROCESS DONE TRANSFERS **************** */
5172 if(hc->hc_AsyncAdvanced)
5174 struct EhciQH *eqh;
5175 struct EhciTD *etd;
5176 struct EhciTD *nextetd;
5178 hc->hc_AsyncAdvanced = FALSE;
5180 KPRINTF(1, ("AsyncAdvance %08lx\n", hc->hc_EhciAsyncFreeQH));
5182 while((eqh = hc->hc_EhciAsyncFreeQH))
5184 KPRINTF(1, ("FreeQH %08lx\n", eqh));
5185 nextetd = eqh->eqh_FirstTD;
5186 while((etd = nextetd))
5188 KPRINTF(1, ("FreeTD %08lx\n", nextetd));
5189 nextetd = etd->etd_Succ;
5190 ehciFreeTD(hc, etd);
5192 hc->hc_EhciAsyncFreeQH = eqh->eqh_Succ;
5193 ehciFreeQH(hc, eqh);
5197 ehciHandleFinishedTDs(hc);
5199 if(hc->hc_CtrlXFerQueue.lh_Head->ln_Succ)
5201 ehciScheduleCtrlTDs(hc);
5204 if(hc->hc_IntXFerQueue.lh_Head->ln_Succ)
5206 ehciScheduleIntTDs(hc);
5209 if(hc->hc_BulkXFerQueue.lh_Head->ln_Succ)
5211 ehciScheduleBulkTDs(hc);
5214 KPRINTF(1, ("CompleteDone\n"));
5216 /* \\\ */
5218 /* /// "ehciIntCode()" */
// Top-half EHCI interrupt handler. Reads and acknowledges USBSTATUS, tracks
// frame counter rollovers, flags async-advance, collects per-port change
// bits into hc_PortChangeMap, and defers transfer completion work to
// ehciCompleteInt via SureCause().
// NOTE(review): this listing is an extraction — the numeric prefixes are the
// original file's line numbers, and brace-only lines were dropped.
5219 void ehciIntCode(HIDDT_IRQ_Handler *irq, HIDDT_IRQ_HwInfo *hw)
5221 struct PCIController *hc = (struct PCIController *) irq->h_Data;
5222 struct PCIDevice *base = hc->hc_Device;
5223 struct PCIUnit *unit = hc->hc_Unit;
5224 ULONG intr;
5226 KPRINTF(1, ("pciEhciInt()\n"));
5227 intr = READREG32_LE(hc->hc_RegBase, EHCI_USBSTATUS);
5228 if(intr & hc->hc_PCIIntEnMask)
// acknowledge by writing the status bits back before handling them
5230 WRITEREG32_LE(hc->hc_RegBase, EHCI_USBSTATUS, intr);
5231 KPRINTF(1, ("INT=%04lx\n", intr));
5232 if(!hc->hc_Online)
5234 return;
5236 if(intr & EHSF_FRAMECOUNTOVER)
// the hardware counter wrapped its 14-bit range; advance the upper bits
5238 ULONG framecnt = READREG32_LE(hc->hc_RegBase, EHCI_FRAMECOUNT);
5239 hc->hc_FrameCounter = (hc->hc_FrameCounter|0x3fff) + 1 + framecnt;
5240 KPRINTF(5, ("Frame Counter Rollover %ld\n", hc->hc_FrameCounter));
5242 if(intr & EHSF_ASYNCADVANCE)
// controller doorbell: unlinked async QHs may now be reclaimed (see
// ehciCompleteInt)
5244 KPRINTF(1, ("AsyncAdvance\n"));
5245 hc->hc_AsyncAdvanced = TRUE;
5247 if(intr & (EHSF_TDDONE|EHSF_TDERROR|EHSF_ASYNCADVANCE))
// defer the heavy lifting to the software interrupt
5249 SureCause(base, &hc->hc_CompleteInt);
5251 if(intr & EHSF_HOSTERROR)
// NOTE(review): host system error is only logged, no recovery attempted
5253 KPRINTF(200, ("Host ERROR!\n"));
5255 if(intr & EHSF_PORTCHANGED)
5257 UWORD hciport;
5258 ULONG oldval;
5259 UWORD portreg = EHCI_PORTSC1;
// PORTSC registers are consecutive 32-bit registers, hence += 4
5260 for(hciport = 0; hciport < hc->hc_NumPorts; hciport++, portreg += 4)
5262 oldval = READREG32_LE(hc->hc_RegBase, portreg);
5263 // reflect port ownership (shortcut without hc->hc_PortNum20[hciport], as usb 2.0 maps 1:1)
5264 unit->hu_EhciOwned[hciport] = (oldval & EHPF_NOTPORTOWNER) ? FALSE : TRUE;
5265 if(oldval & EHPF_ENABLECHANGE)
5267 hc->hc_PortChangeMap[hciport] |= UPSF_PORT_ENABLE;
5269 if(oldval & EHPF_CONNECTCHANGE)
5271 hc->hc_PortChangeMap[hciport] |= UPSF_PORT_CONNECTION;
5273 if(oldval & EHPF_RESUMEDTX)
5275 hc->hc_PortChangeMap[hciport] |= UPSF_PORT_SUSPEND|UPSF_PORT_ENABLE;
5277 if(oldval & EHPF_OVERCURRENTCHG)
5279 hc->hc_PortChangeMap[hciport] |= UPSF_PORT_OVER_CURRENT;
// write the value back: change bits are write-1-to-clear
5281 WRITEREG32_LE(hc->hc_RegBase, portreg, oldval);
5282 KPRINTF(20, ("PCI Int Port %ld Change %08lx\n", hciport + 1, oldval));
5283 if(hc->hc_PortChangeMap[hciport])
// root hub ports are 1-based in the change mask
5285 unit->hu_RootPortChanges |= 1UL<<(hciport + 1);
// notify a pending root hub interrupt transfer, if any
5288 uhwCheckRootHubChanges(unit);
5292 /* \\\ */
5294 /* /// "uhwNakTimeoutInt()" */
5295 AROS_UFH1(void, uhwNakTimeoutInt,
5296 AROS_UFHA(struct PCIUnit *, unit, A1))
5298 AROS_USERFUNC_INIT
5300 struct PCIDevice *base = unit->hu_Device;
5301 struct PCIController *hc;
5302 struct IOUsbHWReq *ioreq;
5303 struct UhciQH *uqh;
5304 struct UhciTD *utd;
5305 struct EhciQH *eqh;
5306 struct OhciED *oed;
5307 UWORD devadrep;
5308 UWORD cnt;
5309 ULONG linkelem;
5310 ULONG ctrlstatus;
5312 //KPRINTF(10, ("NakTimeoutInt()\n"));
5314 // check for port status change for UHCI and frame rollovers and NAK Timeouts
5315 hc = (struct PCIController *) unit->hu_Controllers.lh_Head;
5316 while(hc->hc_Node.ln_Succ)
5318 if(!hc->hc_Online)
5320 hc = (struct PCIController *) hc->hc_Node.ln_Succ;
5321 continue;
5323 switch(hc->hc_HCIType)
5325 case HCITYPE_UHCI:
5327 ULONG framecnt = READREG16_LE(hc->hc_RegBase, UHCI_FRAMECOUNT);
5329 if(framecnt < (hc->hc_FrameCounter & 0xffff))
5331 hc->hc_FrameCounter = (hc->hc_FrameCounter|0xffff) + 1 + framecnt;
5332 KPRINTF(10, ("Frame Counter Rollover %ld\n", hc->hc_FrameCounter));
5334 framecnt = hc->hc_FrameCounter;
5336 // NakTimeout
5337 ioreq = (struct IOUsbHWReq *) hc->hc_TDQueue.lh_Head;
5338 while(((struct Node *) ioreq)->ln_Succ)
5340 if(ioreq->iouh_Flags & UHFF_NAKTIMEOUT)
5342 uqh = (struct UhciQH *) ioreq->iouh_DriverPrivate1;
5343 if(uqh)
5345 KPRINTF(1, ("Examining IOReq=%08lx with UQH=%08lx\n", ioreq, uqh));
5346 devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint + ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
5347 linkelem = READMEM32_LE(&uqh->uqh_Element);
5348 if(linkelem & UHCI_TERMINATE)
5350 KPRINTF(1, ("UQH terminated %08lx\n", linkelem));
5351 if(framecnt > unit->hu_NakTimeoutFrame[devadrep])
5353 // give the thing the chance to exit gracefully
5354 KPRINTF(20, ("Terminated? NAK timeout %ld > %ld, IOReq=%08lx\n", framecnt, unit->hu_NakTimeoutFrame[devadrep], ioreq));
5355 SureCause(base, &hc->hc_CompleteInt);
5357 } else {
5358 utd = (struct UhciTD *) ((linkelem & UHCI_PTRMASK) - hc->hc_PCIVirtualAdjust - 16); // struct UhciTD starts 16 before physical TD
5359 ctrlstatus = READMEM32_LE(&utd->utd_CtrlStatus);
5360 if(ctrlstatus & UTCF_ACTIVE)
5362 if(framecnt > unit->hu_NakTimeoutFrame[devadrep])
5364 // give the thing the chance to exit gracefully
5365 KPRINTF(20, ("NAK timeout %ld > %ld, IOReq=%08lx\n", framecnt, unit->hu_NakTimeoutFrame[devadrep], ioreq));
5366 ctrlstatus &= ~UTCF_ACTIVE;
5367 WRITEMEM32_LE(&utd->utd_CtrlStatus, ctrlstatus);
5368 SureCause(base, &hc->hc_CompleteInt);
5370 } else {
5371 if(framecnt > unit->hu_NakTimeoutFrame[devadrep])
5373 // give the thing the chance to exit gracefully
5374 KPRINTF(20, ("Terminated? NAK timeout %ld > %ld, IOReq=%08lx\n", framecnt, unit->hu_NakTimeoutFrame[devadrep], ioreq));
5375 SureCause(base, &hc->hc_CompleteInt);
5381 ioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ;
5384 uhciCheckPortStatusChange(hc);
5385 break;
5388 case HCITYPE_OHCI:
5390 ULONG framecnt = READREG32_LE(hc->hc_RegBase, OHCI_FRAMECOUNT);
5391 framecnt = hc->hc_FrameCounter = (hc->hc_FrameCounter & 0xffff0000) + framecnt;
5392 // NakTimeout
5393 ioreq = (struct IOUsbHWReq *) hc->hc_TDQueue.lh_Head;
5394 while(((struct Node *) ioreq)->ln_Succ)
5396 if(ioreq->iouh_Flags & UHFF_NAKTIMEOUT)
5398 oed = (struct OhciED *) ioreq->iouh_DriverPrivate1;
5399 if(oed)
5401 KPRINTF(1, ("CTRL=%04lx, CMD=%01lx, F=%ld, hccaDH=%08lx, hcDH=%08lx, CH=%08lx, CCH=%08lx, IntEn=%08lx\n",
5402 READREG32_LE(hc->hc_RegBase, OHCI_CONTROL),
5403 READREG32_LE(hc->hc_RegBase, OHCI_CMDSTATUS),
5404 READREG32_LE(hc->hc_RegBase, OHCI_FRAMECOUNT),
5405 READMEM32_LE(&hc->hc_OhciHCCA->oha_DoneHead),
5406 READREG32_LE(hc->hc_RegBase, OHCI_DONEHEAD),
5407 READREG32_LE(hc->hc_RegBase, OHCI_CTRL_HEAD_ED),
5408 READREG32_LE(hc->hc_RegBase, OHCI_CTRL_ED),
5409 READREG32_LE(hc->hc_RegBase, OHCI_INTEN)));
5411 devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint + ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
5412 ctrlstatus = READMEM32_LE(&oed->oed_HeadPtr);
5413 KPRINTF(1, ("Examining IOReq=%08lx with OED=%08lx HeadPtr=%08lx\n", ioreq, oed, ctrlstatus));
5414 if(framecnt > unit->hu_NakTimeoutFrame[devadrep])
5416 //ohciDebugSchedule(hc);
5417 if(ctrlstatus & OEHF_HALTED)
5419 // give the thing the chance to exit gracefully
5420 KPRINTF(20, ("Terminated? NAK timeout %ld > %ld, IOReq=%08lx\n", framecnt, unit->hu_NakTimeoutFrame[devadrep], ioreq));
5421 SureCause(base, &hc->hc_CompleteInt);
5422 } else {
5423 // give the thing the chance to exit gracefully
5424 KPRINTF(20, ("NAK timeout %ld > %ld, IOReq=%08lx\n", framecnt, unit->hu_NakTimeoutFrame[devadrep], ioreq));
5425 ctrlstatus |= OEHF_HALTED;
5426 WRITEMEM32_LE(&oed->oed_HeadPtr, ctrlstatus);
5427 ioreq->iouh_Req.io_Error = UHIOERR_NAKTIMEOUT;
5428 unit->hu_DevBusyReq[devadrep] = NULL;
5429 Remove(&ioreq->iouh_Req.io_Message.mn_Node);
5430 ohciFreeEDContext(hc, oed);
5431 KPRINTF(1, ("Old Toggle %04lx:%ld\n", devadrep, unit->hu_DevDataToggle[devadrep]));
5432 unit->hu_DevDataToggle[devadrep] = (ctrlstatus & OEHF_DATA1) ? TRUE : FALSE;
5433 KPRINTF(1, ("Toggle now %04lx:%ld\n", devadrep, unit->hu_DevDataToggle[devadrep]));
5434 ReplyMsg(&ioreq->iouh_Req.io_Message);
5439 ioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ;
5441 break;
5444 case HCITYPE_EHCI:
5446 ULONG framecnt = READREG32_LE(hc->hc_RegBase, EHCI_FRAMECOUNT);
5447 framecnt = hc->hc_FrameCounter = (hc->hc_FrameCounter & 0xffffc000) + framecnt;
5448 // NakTimeout
5449 for(cnt = 0; cnt < 1; cnt++)
5451 ioreq = (struct IOUsbHWReq *) (cnt ? hc->hc_PeriodicTDQueue.lh_Head : hc->hc_TDQueue.lh_Head);
5452 while(((struct Node *) ioreq)->ln_Succ)
5454 if(ioreq->iouh_Flags & UHFF_NAKTIMEOUT)
5456 eqh = (struct EhciQH *) ioreq->iouh_DriverPrivate1;
5457 if(eqh)
5459 KPRINTF(1, ("Examining IOReq=%08lx with EQH=%08lx\n", ioreq, eqh));
5460 devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint + ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
5461 ctrlstatus = READMEM32_LE(&eqh->eqh_CtrlStatus);
5462 if(ctrlstatus & ETCF_ACTIVE)
5464 if(framecnt > unit->hu_NakTimeoutFrame[devadrep])
5466 // give the thing the chance to exit gracefully
5467 KPRINTF(20, ("NAK timeout %ld > %ld, IOReq=%08lx\n", framecnt, unit->hu_NakTimeoutFrame[devadrep], ioreq));
5468 ctrlstatus &= ~ETCF_ACTIVE;
5469 ctrlstatus |= ETSF_HALTED;
5470 WRITEMEM32_LE(&eqh->eqh_CtrlStatus, ctrlstatus);
5471 SureCause(base, &hc->hc_CompleteInt);
5473 } else {
5474 if(ctrlstatus & ETCF_READYINTEN)
5476 KPRINTF(10, ("INT missed?!? Manually causing it! %08lx, IOReq=%08lx\n",
5477 ctrlstatus, ioreq));
5478 SureCause(base, &hc->hc_CompleteInt);
5483 ioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ;
5486 break;
5489 hc = (struct PCIController *) hc->hc_Node.ln_Succ;
5492 uhwCheckRootHubChanges(unit);
5494 /* Update frame counter */
5495 unit->hu_NakTimeoutReq.tr_time.tv_micro = 150*1000;
5496 SendIO((APTR) &unit->hu_NakTimeoutReq);
5498 AROS_USERFUNC_EXIT
5500 /* \\\ */