/* uhwcmd.c - pciusb.device by Chris Hodges
*/

#include "uhwcmd.h"
#include <devices/usb_hub.h>
#include <strings.h>

#include <proto/utility.h>
#include <proto/exec.h>

#define NewList NEWLIST

/* Root hub data */
const struct UsbStdDevDesc RHDevDesc = { sizeof(struct UsbStdDevDesc), UDT_DEVICE, AROS_WORD2LE(0x0110), HUB_CLASSCODE, 0, 0, 8, AROS_WORD2LE(0x0000), AROS_WORD2LE(0x0000), AROS_WORD2LE(0x0100), 1, 2, 0, 1 };

const struct UsbStdCfgDesc RHCfgDesc = { 9, UDT_CONFIGURATION, AROS_WORD2LE(9+9+7), 1, 1, 3, USCAF_ONE|USCAF_SELF_POWERED, 0 };
const struct UsbStdIfDesc  RHIfDesc  = { 9, UDT_INTERFACE, 0, 0, 1, HUB_CLASSCODE, 0, 0, 4 };
const struct UsbStdEPDesc  RHEPDesc  = { 7, UDT_ENDPOINT, URTF_IN|1, USEAF_INTERRUPT, AROS_WORD2LE(1), 255 };
const struct UsbHubDesc    RHHubDesc = { 9, UDT_HUB, 0, AROS_WORD2LE(UHCF_INDIVID_POWER|UHCF_INDIVID_OVP), 0, 1, 1, 0 };

const CONST_STRPTR RHStrings[] = { "Chris Hodges", "PCI Root Hub", "Standard Config", "Hub interface" };
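
/* The constant descriptors above describe the emulated root hub: device,
   configuration, interface, status change interrupt endpoint and hub class
   descriptor. They are served verbatim (patched for highspeed where needed)
   by cmdControlXFerRootHub() below, so hub requests to hu_RootHubAddr never
   reach the actual bus. */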
/* /// "SureCause()" */
void SureCause(struct PCIDevice *base, struct Interrupt *interrupt)
{
    /* this is a workaround for the original Cause() function missing tailed calls */
    Disable();
    if((interrupt->is_Node.ln_Type == NT_SOFTINT) || (interrupt->is_Node.ln_Type == NT_USER))
    {
        // signal tailed call
        interrupt->is_Node.ln_Type = NT_USER;
    } else {
        do
        {
            interrupt->is_Node.ln_Type = NT_SOFTINT;
            Enable();
            (*((void (*)(struct Interrupt *)) (interrupt->is_Code)))(interrupt->is_Data);
            Disable();
        } while(interrupt->is_Node.ln_Type != NT_SOFTINT);
        interrupt->is_Node.ln_Type = NT_INTERRUPT;
    }
    Enable();
}
/* \\\ */
/* /// "uhwOpenTimer()" */
BOOL uhwOpenTimer(struct PCIUnit *unit, struct PCIDevice *base)
{
    if((unit->hu_MsgPort = CreateMsgPort()))
    {
        if((unit->hu_TimerReq = (struct timerequest *) CreateIORequest(unit->hu_MsgPort, sizeof(struct timerequest))))
        {
            if(!OpenDevice("timer.device", UNIT_MICROHZ, (struct IORequest *) unit->hu_TimerReq, 0))
            {
                unit->hu_TimerReq->tr_node.io_Message.mn_Node.ln_Name = "PCI hardware";
                unit->hu_TimerReq->tr_node.io_Command = TR_ADDREQUEST;
                KPRINTF(1, ("opened timer device\n"));
                return(TRUE);
            }
            DeleteIORequest((struct IORequest *) unit->hu_TimerReq);
            unit->hu_TimerReq = NULL;
        }
        DeleteMsgPort(unit->hu_MsgPort);
        unit->hu_MsgPort = NULL;
    }
    KPRINTF(5, ("failed to open timer.device\n"));
    return(FALSE);
}
/* \\\ */
/* /// "uhwDelayMS()" */
void uhwDelayMS(ULONG milli, struct PCIUnit *unit, struct PCIDevice *base)
{
    unit->hu_TimerReq->tr_time.tv_secs = 0;
    unit->hu_TimerReq->tr_time.tv_micro = milli * 1000;
    DoIO((struct IORequest *) unit->hu_TimerReq);
}
/* \\\ */
/* /// "uhwCloseTimer()" */
void uhwCloseTimer(struct PCIUnit *unit, struct PCIDevice *base)
{
    if(unit->hu_MsgPort)
    {
        if(unit->hu_TimerReq)
        {
            KPRINTF(1, ("closing timer.device\n"));
            CloseDevice((APTR) unit->hu_TimerReq);
            DeleteIORequest((struct IORequest *) unit->hu_TimerReq);
            unit->hu_TimerReq = NULL;
        }
        DeleteMsgPort(unit->hu_MsgPort);
        unit->hu_MsgPort = NULL;
    }
}
/* \\\ */
/* /// "uhwHWInit()" */
void uhwHWInit(struct PCIController *hc)
{
    KPRINTF(1, ("Reset\n"));
    //unit->hu_FrameCounter = 1;
    //unit->hu_RootHubAddr = 0;
}
/* \\\ */
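
/* Note on Open_Unit() below: the NakTimeout timer request replies to a
   PA_SOFTINT message port whose mp_SigTask points at hu_NakTimeoutInt, so
   each timeout expiry runs uhwNakTimeoutInt() as a software interrupt
   rather than signalling a task. */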
/* /// "Open_Unit()" */
struct Unit * Open_Unit(struct IOUsbHWReq *ioreq,
                        LONG unitnr,
                        struct PCIDevice *base)
{
    struct PCIUnit *unit = NULL;

    if(!base->hd_ScanDone)
    {
        base->hd_ScanDone = TRUE;
        if(!pciInit(base))
        {
            return NULL;
        }
    }
    unit = (struct PCIUnit *) base->hd_Units.lh_Head;
    while(((struct Node *) unit)->ln_Succ)
    {
        if(unit->hu_UnitNo == unitnr)
        {
            break;
        }
        unit = (struct PCIUnit *) ((struct Node *) unit)->ln_Succ;
    }
    if(!((struct Node *) unit)->ln_Succ)
    {
        KPRINTF(20, ("Unit %ld does not exist!\n", unitnr));
        return NULL;
    }
    if(unit->hu_UnitAllocated)
    {
        ioreq->iouh_Req.io_Error = IOERR_UNITBUSY;
        KPRINTF(5, ("Unit %ld already open!\n", unitnr));
        return NULL;
    }

    if(uhwOpenTimer(unit, base))
    {
        if(pciAllocUnit(unit)) // hardware self test
        {
            unit->hu_UnitAllocated = TRUE;
            unit->hu_NakTimeoutInt.is_Node.ln_Type = NT_INTERRUPT;
            unit->hu_NakTimeoutInt.is_Node.ln_Name = "PCI NakTimeout";
            unit->hu_NakTimeoutInt.is_Node.ln_Pri  = -16;
            unit->hu_NakTimeoutInt.is_Data = unit;
            unit->hu_NakTimeoutInt.is_Code = (void (*)(void)) &uhwNakTimeoutInt;

            CopyMem(unit->hu_TimerReq, &unit->hu_NakTimeoutReq, sizeof(struct timerequest));
            unit->hu_NakTimeoutReq.tr_node.io_Message.mn_ReplyPort = &unit->hu_NakTimeoutMsgPort;
            unit->hu_NakTimeoutMsgPort.mp_Node.ln_Type = NT_MSGPORT;
            unit->hu_NakTimeoutMsgPort.mp_Flags = PA_SOFTINT;
            unit->hu_NakTimeoutMsgPort.mp_SigTask = &unit->hu_NakTimeoutInt;
            NewList(&unit->hu_NakTimeoutMsgPort.mp_MsgList);
            Cause(&unit->hu_NakTimeoutInt);
            return(&unit->hu_Unit);
        } else {
            ioreq->iouh_Req.io_Error = IOERR_SELFTEST;
            KPRINTF(20, ("Hardware allocation failure!\n"));
        }
        uhwCloseTimer(unit, base);
    }
    return(NULL);
}
/* \\\ */
173 /* /// "Close_Unit()" */
174 void Close_Unit(struct PCIDevice *base,
175 struct PCIUnit *unit,
176 struct IOUsbHWReq *ioreq)
178 /* Disable all interrupts */
179 unit->hu_NakTimeoutMsgPort.mp_Flags = PA_IGNORE;
180 unit->hu_NakTimeoutInt.is_Node.ln_Type = NT_SOFTINT;
181 AbortIO((APTR) &unit->hu_NakTimeoutReq);
183 pciFreeUnit(unit);
185 uhwCloseTimer(unit, base);
186 unit->hu_UnitAllocated = FALSE;
188 /* \\\ */
/* /// "uhwGetUsbState()" */
UWORD uhwGetUsbState(struct IOUsbHWReq *ioreq,
                     struct PCIUnit *unit,
                     struct PCIDevice *base)
{
    return(ioreq->iouh_State = UHSF_OPERATIONAL);
}
/* \\\ */
/* /// "cmdReset()" */
/*
 *======================================================================
 * cmdReset(ioreq, unit, base)
 *======================================================================
 *
 * This is the device CMD_RESET routine.
 *
 * Resets the whole USB hardware. Goes into USBOperational mode right
 * after. Must NOT be called from an interrupt.
 *
 */

WORD cmdReset(struct IOUsbHWReq *ioreq,
              struct PCIUnit *unit,
              struct PCIDevice *base)
{
    KPRINTF(10, ("CMD_RESET ioreq: 0x%08lx\n", ioreq));
    //uhwHWInit(unit);

    uhwDelayMS(1, unit, base);
    uhwGetUsbState(ioreq, unit, base);

    if(ioreq->iouh_State & UHSF_OPERATIONAL)
    {
        return RC_OK;
    }
    return UHIOERR_USBOFFLINE;
}
/* \\\ */
/* /// "cmdUsbReset()" */
/*
 *======================================================================
 * cmdUsbReset(ioreq, unit, base)
 *======================================================================
 *
 * This is the device UHCMD_USBRESET routine.
 *
 * Resets the USB bus. Goes into USBOperational mode right after. Must
 * NOT be called from an interrupt.
 *
 */

WORD cmdUsbReset(struct IOUsbHWReq *ioreq,
                 struct PCIUnit *unit,
                 struct PCIDevice *base)
{
    KPRINTF(10, ("UHCMD_USBRESET ioreq: 0x%08lx\n", ioreq));

    /* FIXME */
    uhwGetUsbState(ioreq, unit, base);

    unit->hu_FrameCounter = 1;
    unit->hu_RootHubAddr = 0;

    if(ioreq->iouh_State & UHSF_OPERATIONAL)
    {
        return RC_OK;
    }
    return UHIOERR_USBOFFLINE;
}
/* \\\ */
/* /// "cmdUsbResume()" */
/*
 *======================================================================
 * cmdUsbResume(ioreq, unit, base)
 *======================================================================
 *
 * This is the device UHCMD_USBRESUME routine.
 *
 * Tries to resume from USBSuspend mode into USBOperational.
 * Must NOT be called from an interrupt.
 *
 */

WORD cmdUsbResume(struct IOUsbHWReq *ioreq,
                  struct PCIUnit *unit,
                  struct PCIDevice *base)
{
    KPRINTF(10, ("UHCMD_USBRESUME ioreq: 0x%08lx\n", ioreq));

    /* FIXME */
    uhwGetUsbState(ioreq, unit, base);
    if(ioreq->iouh_State & UHSF_OPERATIONAL)
    {
        return RC_OK;
    }
    return UHIOERR_USBOFFLINE;
}
/* \\\ */
/* /// "cmdUsbSuspend()" */
/*
 *======================================================================
 * cmdUsbSuspend(ioreq, unit, base)
 *======================================================================
 *
 * This is the device UHCMD_USBSUSPEND routine.
 *
 * Sets the USB into USBSuspend mode.
 * Must NOT be called from an interrupt.
 *
 */

WORD cmdUsbSuspend(struct IOUsbHWReq *ioreq,
                   struct PCIUnit *unit,
                   struct PCIDevice *base)
{
    KPRINTF(10, ("UHCMD_USBSUSPEND ioreq: 0x%08lx\n", ioreq));

    /* FIXME */
    uhwGetUsbState(ioreq, unit, base);
    if(ioreq->iouh_State & UHSF_SUSPENDED)
    {
        return RC_OK;
    }
    return UHIOERR_USBOFFLINE;
}
/* \\\ */
/* /// "cmdUsbOper()" */
/*
 *======================================================================
 * cmdUsbOper(ioreq, unit, base)
 *======================================================================
 *
 * This is the device UHCMD_USBOPER routine.
 *
 * Sets the USB into USBOperational mode.
 * Must NOT be called from an interrupt.
 *
 */

WORD cmdUsbOper(struct IOUsbHWReq *ioreq,
                struct PCIUnit *unit,
                struct PCIDevice *base)
{
    KPRINTF(10, ("UHCMD_USBOPER ioreq: 0x%08lx\n", ioreq));

    /* FIXME */
    uhwGetUsbState(ioreq, unit, base);
    if(ioreq->iouh_State & UHSF_OPERATIONAL)
    {
        return RC_OK;
    }
    return UHIOERR_USBOFFLINE;
}
/* \\\ */
/* /// "cmdQueryDevice()" */
/*
 *======================================================================
 * cmdQueryDevice(ioreq, unit, base)
 *======================================================================
 *
 * This is the device UHCMD_QUERYDEVICE routine.
 *
 * Returns information about the hardware.
 *
 */

WORD cmdQueryDevice(struct IOUsbHWReq *ioreq,
                    struct PCIUnit *unit,
                    struct PCIDevice *base)
{
    struct TagItem *taglist = (struct TagItem *) ioreq->iouh_Data;
    struct TagItem *tag;
    ULONG count = 0;

    KPRINTF(10, ("UHCMD_QUERYDEVICE ioreq: 0x%08lx, taglist: 0x%08lx\n", ioreq, taglist));

    if((tag = FindTagItem(UHA_State, taglist)))
    {
        *((ULONG *) tag->ti_Data) = (ULONG) uhwGetUsbState(ioreq, unit, base);
        count++;
    }
    if((tag = FindTagItem(UHA_Manufacturer, taglist)))
    {
        *((STRPTR *) tag->ti_Data) = "Chris Hodges";
        count++;
    }
    if((tag = FindTagItem(UHA_ProductName, taglist)))
    {
        *((STRPTR *) tag->ti_Data) = "PCI UHCI/OHCI/EHCI USB Host Controller";
        count++;
    }
    if((tag = FindTagItem(UHA_Description, taglist)))
    {
        *((STRPTR *) tag->ti_Data) = "Generic adaptive host controller driver for PCI cards";
        count++;
    }
    if((tag = FindTagItem(UHA_Copyright, taglist)))
    {
        *((STRPTR *) tag->ti_Data) = "©2007-2009 Chris Hodges";
        count++;
    }
    if((tag = FindTagItem(UHA_Version, taglist)))
    {
        *((ULONG *) tag->ti_Data) = VERSION_NUMBER;
        count++;
    }
    if((tag = FindTagItem(UHA_Revision, taglist)))
    {
        *((ULONG *) tag->ti_Data) = REVISION_NUMBER;
        count++;
    }
    if((tag = FindTagItem(UHA_DriverVersion, taglist)))
    {
        *((ULONG *) tag->ti_Data) = 0x220;
        count++;
    }
    if((tag = FindTagItem(UHA_Capabilities, taglist)))
    {
        *((ULONG *) tag->ti_Data) = UHCF_USB20;
        count++;
    }
    ioreq->iouh_Actual = count;
    return RC_OK;
}
/* \\\ */
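
/* cmdControlXFerRootHub() below implements the virtual root hub. It decodes
   the setup packet itself and pokes UHCI/OHCI/EHCI port registers directly.
   Global (1-based) root hub port numbers are translated to a controller and
   a local port via hu_PortMap20/hu_PortMap11 and hu_PortNum11, depending on
   whether the port is currently owned by the EHCI controller. */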
/* /// "cmdControlXFerRootHub()" */
WORD cmdControlXFerRootHub(struct IOUsbHWReq *ioreq,
                           struct PCIUnit *unit,
                           struct PCIDevice *base)
{
    struct PCIController *hc;
    struct PCIController *chc;
    UWORD rt = ioreq->iouh_SetupData.bmRequestType;
    UWORD req = ioreq->iouh_SetupData.bRequest;
    UWORD idx = AROS_WORD2LE(ioreq->iouh_SetupData.wIndex);
    UWORD val = AROS_WORD2LE(ioreq->iouh_SetupData.wValue);
    UWORD len = AROS_WORD2LE(ioreq->iouh_SetupData.wLength);
    UWORD hciport;
    ULONG numports = unit->hu_RootHubPorts;
    BOOL cmdgood;
    ULONG cnt;

    if(ioreq->iouh_Endpoint)
    {
        return(UHIOERR_STALL);
    }

    if(len != ioreq->iouh_Length)
    {
        KPRINTF(20, ("RH: Len (%ld != %ld) mismatch!\n", len, ioreq->iouh_Length));
        return(UHIOERR_STALL);
    }
    switch(rt)
    {
        case (URTF_STANDARD|URTF_DEVICE):
            switch(req)
            {
                case USR_SET_ADDRESS:
                    KPRINTF(1, ("RH: SetAddress = %ld\n", val));
                    unit->hu_RootHubAddr = val;
                    ioreq->iouh_Actual = len;
                    return(0);

                case USR_SET_CONFIGURATION:
                    KPRINTF(1, ("RH: SetConfiguration=%ld\n", val));
                    ioreq->iouh_Actual = len;
                    return(0);
            }
            break;
        case (URTF_IN|URTF_STANDARD|URTF_DEVICE):
            switch(req)
            {
                case USR_GET_DESCRIPTOR:
                    switch(val>>8)
                    {
                        case UDT_DEVICE:
                            KPRINTF(1, ("RH: GetDeviceDescriptor (%ld)\n", len));
                            ioreq->iouh_Actual = (len > sizeof(struct UsbStdDevDesc)) ? sizeof(struct UsbStdDevDesc) : len;
                            CopyMem((APTR) &RHDevDesc, ioreq->iouh_Data, ioreq->iouh_Actual);
                            if(ioreq->iouh_Length >= sizeof(struct UsbStdDevDesc))
                            {
                                if(unit->hu_RootHub20Ports)
                                {
                                    struct UsbStdDevDesc *usdd = (struct UsbStdDevDesc *) ioreq->iouh_Data;
                                    usdd->bcdUSB = AROS_WORD2LE(0x0200); // signal a highspeed root hub
                                    usdd->bDeviceProtocol = 1; // single TT
                                }
                            }
                            return(0);

                        case UDT_CONFIGURATION:
                        {
                            UBYTE tmpbuf[9+9+7];
                            KPRINTF(1, ("RH: GetConfigDescriptor (%ld)\n", len));
                            CopyMem((APTR) &RHCfgDesc, tmpbuf, 9);
                            CopyMem((APTR) &RHIfDesc, &tmpbuf[9], 9);
                            CopyMem((APTR) &RHEPDesc, &tmpbuf[9+9], 7);
                            if(unit->hu_RootHub20Ports)
                            {
                                struct UsbStdEPDesc *usepd = (struct UsbStdEPDesc *) &tmpbuf[9+9];
                                usepd->bInterval = 12; // 2048 µFrames
                            }
                            ioreq->iouh_Actual = (len > 9+9+7) ? 9+9+7 : len;
                            CopyMem(tmpbuf, ioreq->iouh_Data, ioreq->iouh_Actual);
                            return(0);
                        }
                        case UDT_STRING:
                            if(val & 0xff) /* nonzero index: get a specific string */
                            {
                                CONST_STRPTR source = NULL;
                                UWORD *mptr = ioreq->iouh_Data;
                                UWORD slen = 1;
                                KPRINTF(1, ("RH: GetString %04lx (%ld)\n", val, len));
                                if((val & 0xff) > 4) /* index too high? */
                                {
                                    return(UHIOERR_STALL);
                                }
                                source = RHStrings[(val & 0xff)-1];
                                if(len > 1)
                                {
                                    ioreq->iouh_Actual = 2;
                                    while(*source++)
                                    {
                                        slen++;
                                    }
                                    source = RHStrings[(val & 0xff)-1];
                                    *mptr++ = AROS_WORD2BE((slen<<9)|UDT_STRING);
                                    while(ioreq->iouh_Actual+1 < len)
                                    {
                                        *mptr++ = AROS_WORD2LE(*source);
                                        source++;
                                        ioreq->iouh_Actual += 2;
                                        if(!(*source))
                                        {
                                            break;
                                        }
                                    }
                                }
                            } else { /* index 0: get language ID array */
                                UWORD *mptr = ioreq->iouh_Data;
                                KPRINTF(1, ("RH: GetLangArray %04lx (%ld)\n", val, len));
                                if(len > 1)
                                {
                                    ioreq->iouh_Actual = 2;
                                    mptr[0] = AROS_WORD2BE((4<<8)|UDT_STRING);
                                    if(len > 3)
                                    {
                                        ioreq->iouh_Actual += 2;
                                        mptr[1] = AROS_WORD2LE(0x0409);
                                    }
                                }
                            }
                            return(0);

                        default:
                            KPRINTF(1, ("RH: Unsupported Descriptor %04lx\n", idx));
                    }
                    break;
                case USR_GET_CONFIGURATION:
                    if(len == 1)
                    {
                        KPRINTF(1, ("RH: GetConfiguration\n"));
                        ((UBYTE *) ioreq->iouh_Data)[0] = 1;
                        ioreq->iouh_Actual = len;
                        return(0);
                    }
                    break;
            }
            break;
        case (URTF_CLASS|URTF_OTHER):
            switch(req)
            {
                case USR_SET_FEATURE:
                    if((!idx) || (idx > numports))
                    {
                        KPRINTF(20, ("Port %ld out of range\n", idx));
                        return(UHIOERR_STALL);
                    }
                    chc = unit->hu_PortMap11[idx - 1];
                    if(unit->hu_EhciOwned[idx - 1])
                    {
                        hc = unit->hu_PortMap20[idx - 1];
                        hciport = idx - 1;
                    } else {
                        hc = chc;
                        hciport = unit->hu_PortNum11[idx - 1];
                    }
                    cmdgood = FALSE;
                    switch(hc->hc_HCIType)
                    {
                        case HCITYPE_UHCI:
                        {
                            UWORD portreg = hciport ? UHCI_PORT2STSCTRL : UHCI_PORT1STSCTRL;
                            ULONG oldval = READREG16_LE(hc->hc_RegBase, portreg) & ~(UHPF_ENABLECHANGE|UHPF_CONNECTCHANGE); // these are clear-on-write!
                            ULONG newval = oldval;
                            switch(val)
                            {
                                /* case UFS_PORT_CONNECTION: not possible */
                                case UFS_PORT_ENABLE:
                                    KPRINTF(10, ("Enabling Port (%s)\n", newval & UHPF_PORTENABLE ? "already" : "ok"));
                                    newval |= UHPF_PORTENABLE;
                                    cmdgood = TRUE;
                                    break;

                                case UFS_PORT_SUSPEND:
                                    newval |= UHPF_PORTSUSPEND;
                                    hc->hc_PortChangeMap[hciport] |= UPSF_PORT_SUSPEND; // manually fake suspend change
                                    cmdgood = TRUE;
                                    break;

                                /* case UFS_PORT_OVER_CURRENT: not possible */
                                case UFS_PORT_RESET:
                                    KPRINTF(10, ("Resetting Port (%s)\n", newval & UHPF_PORTRESET ? "already" : "ok"));

                                    // this is an ugly blocking workaround to the inability of UHCI to clear reset automatically
                                    newval &= ~(UHPF_PORTSUSPEND|UHPF_PORTENABLE);
                                    newval |= UHPF_PORTRESET;
                                    WRITEREG16_LE(hc->hc_RegBase, portreg, newval);
                                    uhwDelayMS(75, unit, base);
                                    newval = READREG16_LE(hc->hc_RegBase, portreg) & ~(UHPF_ENABLECHANGE|UHPF_CONNECTCHANGE|UHPF_PORTSUSPEND|UHPF_PORTENABLE);
                                    KPRINTF(10, ("Reset=%s\n", newval & UHPF_PORTRESET ? "GOOD" : "BAD!"));
                                    newval &= ~UHPF_PORTRESET;
                                    WRITEREG16_LE(hc->hc_RegBase, portreg, newval);
                                    uhwDelayMS(5, unit, base);
                                    newval = READREG16_LE(hc->hc_RegBase, portreg) & ~(UHPF_ENABLECHANGE|UHPF_CONNECTCHANGE|UHPF_PORTSUSPEND);
                                    KPRINTF(10, ("Reset=%s\n", newval & UHPF_PORTRESET ? "BAD!" : "GOOD"));
                                    newval &= ~UHPF_PORTRESET;
                                    newval |= UHPF_PORTENABLE;
                                    WRITEREG16_LE(hc->hc_RegBase, portreg, newval);
                                    hc->hc_PortChangeMap[hciport] |= UPSF_PORT_RESET|UPSF_PORT_ENABLE; // manually fake reset change
                                    uhwDelayMS(10, unit, base);

                                    cnt = 100;
                                    do
                                    {
                                        uhwDelayMS(1, unit, base);
                                        newval = READREG16_LE(hc->hc_RegBase, portreg) & ~(UHPF_ENABLECHANGE|UHPF_CONNECTCHANGE);
                                    } while(--cnt && (!(newval & UHPF_PORTENABLE)));
                                    if(cnt)
                                    {
                                        KPRINTF(10, ("Enabled after %ld ticks\n", 100-cnt));
                                    } else {
                                        KPRINTF(20, ("Port refuses to be enabled!\n"));
                                        return(UHIOERR_HOSTERROR);
                                    }
                                    // make enumeration possible
                                    unit->hu_DevControllers[0] = hc;
                                    cmdgood = TRUE;
                                    break;

                                case UFS_PORT_POWER:
                                    KPRINTF(10, ("Powering Port\n"));
                                    // ignore for UHCI, is always powered
                                    cmdgood = TRUE;
                                    break;

                                /* case UFS_PORT_LOW_SPEED: not possible */
                                /* case UFS_C_PORT_CONNECTION:
                                case UFS_C_PORT_ENABLE:
                                case UFS_C_PORT_SUSPEND:
                                case UFS_C_PORT_OVER_CURRENT:
                                case UFS_C_PORT_RESET: */
                            }
                            if(cmdgood)
                            {
                                KPRINTF(5, ("Port %ld SET_FEATURE %04lx->%04lx\n", idx, oldval, newval));
                                WRITEREG16_LE(hc->hc_RegBase, portreg, newval);
                                return(0);
                            }
                            break;
                        }
                        case HCITYPE_OHCI:
                        {
                            UWORD portreg = OHCI_PORTSTATUS + (hciport<<2);
                            ULONG oldval = READREG32_LE(hc->hc_RegBase, portreg);

                            switch(val)
                            {
                                /* case UFS_PORT_CONNECTION: not possible */
                                case UFS_PORT_ENABLE:
                                    KPRINTF(10, ("Enabling Port (%s)\n", oldval & OHPF_PORTENABLE ? "already" : "ok"));
                                    WRITEREG32_LE(hc->hc_RegBase, portreg, OHPF_PORTENABLE);
                                    cmdgood = TRUE;
                                    break;

                                case UFS_PORT_SUSPEND:
                                    KPRINTF(10, ("Suspending Port (%s)\n", oldval & OHPF_PORTSUSPEND ? "already" : "ok"));
                                    //hc->hc_PortChangeMap[hciport] |= UPSF_PORT_SUSPEND; // manually fake suspend change
                                    WRITEREG32_LE(hc->hc_RegBase, portreg, OHPF_PORTSUSPEND);
                                    cmdgood = TRUE;
                                    break;

                                /* case UFS_PORT_OVER_CURRENT: not possible */
                                case UFS_PORT_RESET:
                                    KPRINTF(10, ("Resetting Port (%s)\n", oldval & OHPF_PORTRESET ? "already" : "ok"));
                                    WRITEREG32_LE(hc->hc_RegBase, portreg, OHPF_PORTRESET);
                                    // make enumeration possible
                                    unit->hu_DevControllers[0] = hc;
                                    cmdgood = TRUE;
                                    break;

                                case UFS_PORT_POWER:
                                    KPRINTF(10, ("Powering Port (%s)\n", oldval & OHPF_PORTPOWER ? "already" : "ok"));
                                    WRITEREG32_LE(hc->hc_RegBase, portreg, OHPF_PORTPOWER);
                                    cmdgood = TRUE;
                                    break;

                                /* case UFS_PORT_LOW_SPEED: not possible */
                                /* case UFS_C_PORT_CONNECTION:
                                case UFS_C_PORT_ENABLE:
                                case UFS_C_PORT_SUSPEND:
                                case UFS_C_PORT_OVER_CURRENT:
                                case UFS_C_PORT_RESET: */
                            }
                            if(cmdgood)
                            {
                                return(0);
                            }
                            break;
                        }
                        case HCITYPE_EHCI:
                        {
                            UWORD portreg = EHCI_PORTSC1 + (hciport<<2);
                            ULONG oldval = READREG32_LE(hc->hc_RegBase, portreg) & ~(EHPF_OVERCURRENTCHG|EHPF_ENABLECHANGE|EHPF_CONNECTCHANGE); // these are clear-on-write!
                            ULONG newval = oldval;
                            switch(val)
                            {
                                /* case UFS_PORT_CONNECTION: not possible */
                                case UFS_PORT_ENABLE:
                                    KPRINTF(10, ("Enabling Port (%s)\n", newval & EHPF_PORTENABLE ? "already" : "ok"));
                                    newval |= EHPF_PORTENABLE;
                                    cmdgood = TRUE;
                                    break;

                                case UFS_PORT_SUSPEND:
                                    newval |= EHPF_PORTSUSPEND;
                                    hc->hc_PortChangeMap[hciport] |= UPSF_PORT_SUSPEND; // manually fake suspend change
                                    cmdgood = TRUE;
                                    break;

                                /* case UFS_PORT_OVER_CURRENT: not possible */
                                case UFS_PORT_RESET:
                                    KPRINTF(10, ("Resetting Port (%s)\n", newval & EHPF_PORTRESET ? "already" : "ok"));

                                    // this is an ugly blocking workaround to the inability of EHCI to clear reset automatically
                                    newval &= ~(EHPF_PORTSUSPEND|EHPF_PORTENABLE);
                                    newval |= EHPF_PORTRESET;
                                    WRITEREG32_LE(hc->hc_RegBase, portreg, newval);
                                    uhwDelayMS(75, unit, base);
                                    newval = READREG32_LE(hc->hc_RegBase, portreg) & ~(EHPF_OVERCURRENTCHG|EHPF_ENABLECHANGE|EHPF_CONNECTCHANGE|EHPF_PORTSUSPEND|EHPF_PORTENABLE);
                                    KPRINTF(10, ("Reset=%s\n", newval & EHPF_PORTRESET ? "GOOD" : "BAD!"));
                                    newval &= ~EHPF_PORTRESET;
                                    WRITEREG32_LE(hc->hc_RegBase, portreg, newval);
                                    uhwDelayMS(10, unit, base);
                                    newval = READREG32_LE(hc->hc_RegBase, portreg) & ~(EHPF_OVERCURRENTCHG|EHPF_ENABLECHANGE|EHPF_CONNECTCHANGE|EHPF_PORTSUSPEND);
                                    KPRINTF(10, ("Reset=%s\n", newval & EHPF_PORTRESET ? "BAD!" : "GOOD"));
                                    KPRINTF(10, ("Highspeed=%s\n", newval & EHPF_PORTENABLE ? "YES!" : "NO"));
                                    if(!(newval & EHPF_PORTENABLE))
                                    {
                                        // if not highspeed, release ownership
                                        KPRINTF(20, ("Transferring ownership to UHCI/OHCI port %ld\n", unit->hu_PortNum11[idx - 1]));
                                        KPRINTF(10, ("Device is %s\n", newval & EHPF_LINESTATUS_DM ? "LOWSPEED" : "FULLSPEED"));
                                        unit->hu_EhciOwned[idx - 1] = FALSE;
                                        newval |= EHPF_NOTPORTOWNER;
                                        WRITEREG32_LE(hc->hc_RegBase, portreg, newval);
                                        // enable companion controller port
                                        switch(chc->hc_HCIType)
                                        {
                                            case HCITYPE_UHCI:
                                            {
                                                UWORD uhcihciport = unit->hu_PortNum11[idx - 1];
                                                UWORD uhciportreg = uhcihciport ? UHCI_PORT2STSCTRL : UHCI_PORT1STSCTRL;
                                                ULONG uhcinewval;

                                                uhcinewval = READREG16_LE(chc->hc_RegBase, uhciportreg) & ~(UHPF_ENABLECHANGE|UHPF_CONNECTCHANGE|UHPF_PORTSUSPEND);
                                                KPRINTF(10, ("UHCI Reset=%s\n", uhcinewval & UHPF_PORTRESET ? "BAD!" : "GOOD"));
                                                if((uhcinewval & UHPF_PORTRESET))//|| (newval & EHPF_LINESTATUS_DM))
                                                {
                                                    // this is an ugly blocking workaround to the inability of UHCI to clear reset automatically
                                                    KPRINTF(20, ("Uhm, UHCI reset was bad!\n"));
                                                    uhcinewval &= ~(UHPF_PORTSUSPEND|UHPF_PORTENABLE);
                                                    uhcinewval |= UHPF_PORTRESET;
                                                    WRITEREG16_LE(chc->hc_RegBase, uhciportreg, uhcinewval);
                                                    uhwDelayMS(75, unit, base);
                                                    uhcinewval = READREG16_LE(chc->hc_RegBase, uhciportreg) & ~(UHPF_ENABLECHANGE|UHPF_CONNECTCHANGE|UHPF_PORTSUSPEND|UHPF_PORTENABLE);
                                                    KPRINTF(10, ("ReReset=%s\n", uhcinewval & UHPF_PORTRESET ? "GOOD" : "BAD!"));
                                                    uhcinewval &= ~UHPF_PORTRESET;
                                                    WRITEREG16_LE(chc->hc_RegBase, uhciportreg, uhcinewval);
                                                    uhwDelayMS(5, unit, base);
                                                    uhcinewval = READREG16_LE(chc->hc_RegBase, uhciportreg) & ~(UHPF_ENABLECHANGE|UHPF_CONNECTCHANGE|UHPF_PORTSUSPEND);
                                                    KPRINTF(10, ("ReReset=%s\n", uhcinewval & UHPF_PORTRESET ? "STILL BAD!" : "GOOD"));
                                                }
                                                uhcinewval &= ~UHPF_PORTRESET;
                                                uhcinewval |= UHPF_PORTENABLE;
                                                WRITEREG16_LE(chc->hc_RegBase, uhciportreg, uhcinewval);
                                                chc->hc_PortChangeMap[uhcihciport] |= UPSF_PORT_RESET|UPSF_PORT_ENABLE; // manually fake reset change
                                                uhwDelayMS(5, unit, base);
                                                cnt = 100;
                                                do
                                                {
                                                    uhwDelayMS(1, unit, base);
                                                    uhcinewval = READREG16_LE(chc->hc_RegBase, uhciportreg) & ~(UHPF_ENABLECHANGE|UHPF_CONNECTCHANGE);
                                                } while(--cnt && (!(uhcinewval & UHPF_PORTENABLE)));
                                                if(cnt)
                                                {
                                                    KPRINTF(10, ("Enabled after %ld ticks\n", 100-cnt));
                                                } else {
                                                    KPRINTF(20, ("Port refuses to be enabled!\n"));
                                                    return(UHIOERR_HOSTERROR);
                                                }
                                                break;
                                            }

                                            case HCITYPE_OHCI:
                                            {
                                                UWORD ohcihciport = unit->hu_PortNum11[idx - 1];
                                                UWORD ohciportreg = OHCI_PORTSTATUS + (ohcihciport<<2);
                                                ULONG ohcioldval = READREG32_LE(chc->hc_RegBase, ohciportreg);
                                                KPRINTF(10, ("OHCI Resetting Port (%s)\n", ohcioldval & OHPF_PORTRESET ? "already" : "ok"));
                                                WRITEREG32_LE(chc->hc_RegBase, ohciportreg, OHPF_PORTPOWER|OHPF_PORTRESET);
                                                break;
                                            }
                                        }
                                        // make enumeration possible
                                        unit->hu_DevControllers[0] = chc;
                                    } else {
                                        newval &= ~EHPF_PORTRESET;
                                        newval |= EHPF_PORTENABLE;
                                        WRITEREG32_LE(hc->hc_RegBase, portreg, newval);
                                        hc->hc_PortChangeMap[hciport] |= UPSF_PORT_RESET; // manually fake reset change
                                        uhwDelayMS(10, unit, base);
                                        cnt = 100;
                                        do
                                        {
                                            uhwDelayMS(1, unit, base);
                                            newval = READREG32_LE(hc->hc_RegBase, portreg) & ~(EHPF_OVERCURRENTCHG|EHPF_ENABLECHANGE|EHPF_CONNECTCHANGE);
                                        } while(--cnt && (!(newval & EHPF_PORTENABLE)));
                                        if(cnt)
                                        {
                                            KPRINTF(10, ("Enabled after %ld ticks\n", 100-cnt));
                                        } else {
                                            KPRINTF(20, ("Port refuses to be enabled!\n"));
                                            return(UHIOERR_HOSTERROR);
                                        }
                                        // make enumeration possible
                                        unit->hu_DevControllers[0] = hc;
                                    }
                                    cmdgood = TRUE;
                                    break;

                                case UFS_PORT_POWER:
                                    KPRINTF(10, ("Powering Port\n"));
                                    newval |= EHPF_PORTPOWER;
                                    cmdgood = TRUE;
                                    break;

                                /* case UFS_PORT_LOW_SPEED: not possible */
                                /* case UFS_C_PORT_CONNECTION:
                                case UFS_C_PORT_ENABLE:
                                case UFS_C_PORT_SUSPEND:
                                case UFS_C_PORT_OVER_CURRENT:
                                case UFS_C_PORT_RESET: */
                            }
                            if(cmdgood)
                            {
                                KPRINTF(5, ("Port %ld SET_FEATURE %04lx->%04lx\n", idx, oldval, newval));
                                WRITEREG32_LE(hc->hc_RegBase, portreg, newval);
                                return(0);
                            }
                            break;
                        }
                    }
                    break;
                case USR_CLEAR_FEATURE:
                    if((!idx) || (idx > numports))
                    {
                        KPRINTF(20, ("Port %ld out of range\n", idx));
                        return(UHIOERR_STALL);
                    }
                    if(unit->hu_EhciOwned[idx - 1])
                    {
                        hc = unit->hu_PortMap20[idx - 1];
                        hciport = idx - 1;
                    } else {
                        hc = unit->hu_PortMap11[idx - 1];
                        hciport = unit->hu_PortNum11[idx - 1];
                    }
892 KPRINTF(10, ("Clear Feature %ld maps from glob. Port %ld to local Port %ld\n", val, idx, hciport));
893 cmdgood = FALSE;
894 switch(hc->hc_HCIType)
896 case HCITYPE_UHCI:
898 UWORD portreg = hciport ? UHCI_PORT2STSCTRL : UHCI_PORT1STSCTRL;
899 ULONG oldval = READREG16_LE(hc->hc_RegBase, portreg) & ~(UHPF_ENABLECHANGE|UHPF_CONNECTCHANGE); // these are clear-on-write!
900 ULONG newval = oldval;
901 switch(val)
903 case UFS_PORT_ENABLE:
904 KPRINTF(10, ("Disabling Port (%s)\n", newval & UHPF_PORTENABLE ? "ok" : "already"));
905 newval &= ~UHPF_PORTENABLE;
906 cmdgood = TRUE;
907 // disable enumeration
908 unit->hu_DevControllers[0] = NULL;
909 break;
911 case UFS_PORT_SUSPEND:
912 newval &= ~UHPF_PORTSUSPEND;
913 cmdgood = TRUE;
914 break;
916 case UFS_PORT_POWER: // ignore for UHCI, there's no power control here
917 KPRINTF(10, ("Disabling Power\n"));
918 KPRINTF(10, ("Disabling Port (%s)\n", newval & UHPF_PORTENABLE ? "ok" : "already"));
919 newval &= ~UHPF_PORTENABLE;
920 cmdgood = TRUE;
921 break;
923 case UFS_C_PORT_CONNECTION:
924 newval |= UHPF_CONNECTCHANGE; // clear-on-write!
925 hc->hc_PortChangeMap[hciport] &= ~UPSF_PORT_CONNECTION;
926 cmdgood = TRUE;
927 break;
929 case UFS_C_PORT_ENABLE:
930 newval |= UHPF_ENABLECHANGE; // clear-on-write!
931 hc->hc_PortChangeMap[hciport] &= ~UPSF_PORT_ENABLE;
932 cmdgood = TRUE;
933 break;
935 case UFS_C_PORT_SUSPEND: // ignore for UHCI, there's no bit indicating this
936 hc->hc_PortChangeMap[hciport] &= ~UPSF_PORT_SUSPEND; // manually fake suspend change clearing
937 cmdgood = TRUE;
938 break;
940 case UFS_C_PORT_OVER_CURRENT: // ignore for UHCI, there's no bit indicating this
941 hc->hc_PortChangeMap[hciport] &= ~UPSF_PORT_OVER_CURRENT; // manually fake over current clearing
942 cmdgood = TRUE;
943 break;
945 case UFS_C_PORT_RESET: // ignore for UHCI, there's no bit indicating this
946 hc->hc_PortChangeMap[hciport] &= ~UPSF_PORT_RESET; // manually fake reset change clearing
947 cmdgood = TRUE;
948 break;
950 if(cmdgood)
952 KPRINTF(5, ("Port %ld CLEAR_FEATURE %04lx->%04lx\n", idx, oldval, newval));
953 WRITEREG16_LE(hc->hc_RegBase, portreg, newval);
954 if(hc->hc_PortChangeMap[hciport])
956 unit->hu_RootPortChanges |= 1UL<<idx;
957 } else {
958 unit->hu_RootPortChanges &= ~(1UL<<idx);
960 return(0);
962 break;
                        case HCITYPE_OHCI:
                        {
                            UWORD portreg = OHCI_PORTSTATUS + (hciport<<2);
                            ULONG oldval = READREG32_LE(hc->hc_RegBase, portreg);

                            switch(val)
                            {
                                case UFS_PORT_ENABLE:
                                    KPRINTF(10, ("Disabling Port (%s)\n", oldval & OHPF_PORTENABLE ? "ok" : "already"));
                                    WRITEREG32_LE(hc->hc_RegBase, portreg, OHPF_PORTDISABLE);
                                    cmdgood = TRUE;
                                    break;

                                case UFS_PORT_SUSPEND:
                                    KPRINTF(10, ("Resuming Port (%s)\n", oldval & OHPF_PORTSUSPEND ? "ok" : "already"));
                                    //hc->hc_PortChangeMap[hciport] &= ~UPSF_PORT_SUSPEND; // manually fake suspend change
                                    WRITEREG32_LE(hc->hc_RegBase, portreg, OHPF_RESUME);
                                    cmdgood = TRUE;
                                    break;

                                case UFS_PORT_POWER:
                                    KPRINTF(10, ("Unpowering Port (%s)\n", oldval & OHPF_PORTPOWER ? "ok" : "already"));
                                    WRITEREG32_LE(hc->hc_RegBase, portreg, OHPF_PORTUNPOWER);
                                    cmdgood = TRUE;
                                    break;

                                case UFS_C_PORT_CONNECTION:
                                    WRITEREG32_LE(hc->hc_RegBase, portreg, OHPF_CONNECTCHANGE);
                                    hc->hc_PortChangeMap[hciport] &= ~UPSF_PORT_CONNECTION;
                                    cmdgood = TRUE;
                                    break;

                                case UFS_C_PORT_ENABLE:
                                    WRITEREG32_LE(hc->hc_RegBase, portreg, OHPF_ENABLECHANGE);
                                    hc->hc_PortChangeMap[hciport] &= ~UPSF_PORT_ENABLE;
                                    cmdgood = TRUE;
                                    break;

                                case UFS_C_PORT_SUSPEND:
                                    WRITEREG32_LE(hc->hc_RegBase, portreg, OHPF_RESUMEDTX);
                                    hc->hc_PortChangeMap[hciport] &= ~UPSF_PORT_SUSPEND;
                                    cmdgood = TRUE;
                                    break;

                                case UFS_C_PORT_OVER_CURRENT:
                                    WRITEREG32_LE(hc->hc_RegBase, portreg, OHPF_OVERCURRENTCHG);
                                    hc->hc_PortChangeMap[hciport] &= ~UPSF_PORT_OVER_CURRENT;
                                    cmdgood = TRUE;
                                    break;

                                case UFS_C_PORT_RESET:
                                    WRITEREG32_LE(hc->hc_RegBase, portreg, OHPF_RESETCHANGE);
                                    hc->hc_PortChangeMap[hciport] &= ~UPSF_PORT_RESET;
                                    cmdgood = TRUE;
                                    break;
                            }
                            if(cmdgood)
                            {
                                return(0);
                            }
                            break;
                        }
                        case HCITYPE_EHCI:
                        {
                            UWORD portreg = EHCI_PORTSC1 + (hciport<<2);
                            ULONG oldval = READREG32_LE(hc->hc_RegBase, portreg) & ~(EHPF_OVERCURRENTCHG|EHPF_ENABLECHANGE|EHPF_CONNECTCHANGE); // these are clear-on-write!
                            ULONG newval = oldval;
                            switch(val)
                            {
                                case UFS_PORT_ENABLE:
                                    KPRINTF(10, ("Disabling Port (%s)\n", newval & EHPF_PORTENABLE ? "ok" : "already"));
                                    newval &= ~EHPF_PORTENABLE;
                                    cmdgood = TRUE;
                                    // disable enumeration
                                    unit->hu_DevControllers[0] = NULL;
                                    break;

                                case UFS_PORT_SUSPEND:
                                    newval &= ~EHPF_PORTSUSPEND;
                                    cmdgood = TRUE;
                                    break;

                                case UFS_PORT_POWER: // clears power if the EHCI implements port power control
                                    KPRINTF(10, ("Disabling Power (%s)\n", newval & EHPF_PORTPOWER ? "ok" : "already"));
                                    KPRINTF(10, ("Disabling Port (%s)\n", newval & EHPF_PORTENABLE ? "ok" : "already"));
                                    newval &= ~(EHPF_PORTENABLE|EHPF_PORTPOWER);
                                    cmdgood = TRUE;
                                    break;

                                case UFS_C_PORT_CONNECTION:
                                    newval |= EHPF_CONNECTCHANGE; // clear-on-write!
                                    hc->hc_PortChangeMap[hciport] &= ~UPSF_PORT_CONNECTION;
                                    cmdgood = TRUE;
                                    break;

                                case UFS_C_PORT_ENABLE:
                                    newval |= EHPF_ENABLECHANGE; // clear-on-write!
                                    hc->hc_PortChangeMap[hciport] &= ~UPSF_PORT_ENABLE;
                                    cmdgood = TRUE;
                                    break;

                                case UFS_C_PORT_SUSPEND: // ignore for EHCI, there's no bit indicating this
                                    hc->hc_PortChangeMap[hciport] &= ~UPSF_PORT_SUSPEND; // manually fake suspend change clearing
                                    cmdgood = TRUE;
                                    break;

                                case UFS_C_PORT_OVER_CURRENT:
                                    newval |= EHPF_OVERCURRENTCHG; // clear-on-write!
                                    hc->hc_PortChangeMap[hciport] &= ~UPSF_PORT_OVER_CURRENT; // manually fake over current clearing
                                    cmdgood = TRUE;
                                    break;

                                case UFS_C_PORT_RESET: // ignore for EHCI, there's no bit indicating this
                                    hc->hc_PortChangeMap[hciport] &= ~UPSF_PORT_RESET; // manually fake reset change clearing
                                    cmdgood = TRUE;
                                    break;
                            }
                            if(cmdgood)
                            {
                                KPRINTF(5, ("Port %ld CLEAR_FEATURE %08lx->%08lx\n", idx, oldval, newval));
                                WRITEREG32_LE(hc->hc_RegBase, portreg, newval);
                                if(hc->hc_PortChangeMap[hciport])
                                {
                                    unit->hu_RootPortChanges |= 1UL<<idx;
                                } else {
                                    unit->hu_RootPortChanges &= ~(1UL<<idx);
                                }
                                return(0);
                            }
                            break;
                        }
                    }
                    break;
            }
            break;
        case (URTF_IN|URTF_CLASS|URTF_OTHER):
            switch(req)
            {
                case USR_GET_STATUS:
                {
                    UWORD *mptr = ioreq->iouh_Data;
                    if(len != sizeof(struct UsbPortStatus))
                    {
                        return(UHIOERR_STALL);
                    }
                    if((!idx) || (idx > numports))
                    {
                        KPRINTF(20, ("Port %ld out of range\n", idx));
                        return(UHIOERR_STALL);
                    }
                    if(unit->hu_EhciOwned[idx - 1])
                    {
                        hc = unit->hu_PortMap20[idx - 1];
                        hciport = idx - 1;
                    } else {
                        hc = unit->hu_PortMap11[idx - 1];
                        hciport = unit->hu_PortNum11[idx - 1];
                    }
                    switch(hc->hc_HCIType)
                    {
                        case HCITYPE_UHCI:
                        {
                            UWORD portreg = hciport ? UHCI_PORT2STSCTRL : UHCI_PORT1STSCTRL;
                            UWORD oldval = READREG16_LE(hc->hc_RegBase, portreg);
                            *mptr = UPSF_PORT_POWER;
                            if(oldval & UHPF_PORTCONNECTED) *mptr |= UPSF_PORT_CONNECTION;
                            if(oldval & UHPF_PORTENABLE) *mptr |= UPSF_PORT_ENABLE;
                            if(oldval & UHPF_LOWSPEED) *mptr |= UPSF_PORT_LOW_SPEED;
                            if(oldval & UHPF_PORTRESET) *mptr |= UPSF_PORT_RESET;
                            if(oldval & UHPF_PORTSUSPEND) *mptr |= UPSF_PORT_SUSPEND;

                            KPRINTF(5, ("UHCI Port %ld is %s\n", idx, oldval & UHPF_LOWSPEED ? "LOWSPEED" : "FULLSPEED"));
                            KPRINTF(5, ("UHCI Port %ld Status %08lx\n", idx, *mptr));

                            mptr++;
                            if(oldval & UHPF_ENABLECHANGE)
                            {
                                hc->hc_PortChangeMap[hciport] |= UPSF_PORT_ENABLE;
                            }
                            if(oldval & UHPF_CONNECTCHANGE)
                            {
                                hc->hc_PortChangeMap[hciport] |= UPSF_PORT_CONNECTION;
                            }
                            if(oldval & UHPF_RESUMEDTX)
                            {
                                hc->hc_PortChangeMap[hciport] |= UPSF_PORT_SUSPEND|UPSF_PORT_ENABLE;
                            }
                            *mptr = hc->hc_PortChangeMap[hciport];
                            WRITEREG16_LE(hc->hc_RegBase, portreg, oldval);
                            KPRINTF(5, ("UHCI Port %ld Change %08lx\n", idx, *mptr));
                            return(0);
                        }
                        case HCITYPE_OHCI:
                        {
                            UWORD portreg = OHCI_PORTSTATUS + (hciport<<2);
                            ULONG oldval = READREG32_LE(hc->hc_RegBase, portreg);

                            *mptr = 0;
                            if(oldval & OHPF_PORTPOWER) *mptr |= UPSF_PORT_POWER;
                            if(oldval & OHPF_OVERCURRENT) *mptr |= UPSF_PORT_OVER_CURRENT;
                            if(oldval & OHPF_PORTCONNECTED) *mptr |= UPSF_PORT_CONNECTION;
                            if(oldval & OHPF_PORTENABLE) *mptr |= UPSF_PORT_ENABLE;
                            if(oldval & OHPF_LOWSPEED) *mptr |= UPSF_PORT_LOW_SPEED;
                            if(oldval & OHPF_PORTRESET) *mptr |= UPSF_PORT_RESET;
                            if(oldval & OHPF_PORTSUSPEND) *mptr |= UPSF_PORT_SUSPEND;

                            KPRINTF(5, ("OHCI Port %ld (glob. %ld) is %s\n", hciport, idx, oldval & OHPF_LOWSPEED ? "LOWSPEED" : "FULLSPEED"));
                            KPRINTF(5, ("OHCI Port %ld Status %08lx (%08lx)\n", idx, *mptr, oldval));

                            mptr++;
                            if(oldval & OHPF_OVERCURRENTCHG)
                            {
                                hc->hc_PortChangeMap[hciport] |= UPSF_PORT_OVER_CURRENT;
                            }
                            if(oldval & OHPF_RESETCHANGE)
                            {
                                hc->hc_PortChangeMap[hciport] |= UPSF_PORT_RESET;
                            }
                            if(oldval & OHPF_ENABLECHANGE)
                            {
                                hc->hc_PortChangeMap[hciport] |= UPSF_PORT_ENABLE;
                            }
                            if(oldval & OHPF_CONNECTCHANGE)
                            {
                                hc->hc_PortChangeMap[hciport] |= UPSF_PORT_CONNECTION;
                            }
                            if(oldval & OHPF_RESUMEDTX)
                            {
                                hc->hc_PortChangeMap[hciport] |= UPSF_PORT_SUSPEND;
                            }
                            *mptr = hc->hc_PortChangeMap[hciport];
                            KPRINTF(5, ("OHCI Port %ld Change %08lx\n", idx, *mptr));
                            return(0);
                        }
                        case HCITYPE_EHCI:
                        {
                            UWORD portreg = EHCI_PORTSC1 + (hciport<<2);
                            ULONG oldval = READREG32_LE(hc->hc_RegBase, portreg);

                            *mptr = 0;
                            if(oldval & EHPF_PORTCONNECTED) *mptr |= UPSF_PORT_CONNECTION;
                            if(oldval & EHPF_PORTENABLE) *mptr |= UPSF_PORT_ENABLE|UPSF_PORT_HIGH_SPEED;
                            if((oldval & (EHPF_LINESTATUS_DM|EHPF_PORTCONNECTED|EHPF_PORTENABLE)) ==
                               (EHPF_LINESTATUS_DM|EHPF_PORTCONNECTED))
                            {
                                KPRINTF(10, ("EHCI Port %ld is LOWSPEED\n", idx));
                                // we need to detect low speed devices prior to reset
                                *mptr |= UPSF_PORT_LOW_SPEED;
                            }

                            if(oldval & EHPF_PORTRESET) *mptr |= UPSF_PORT_RESET;
                            if(oldval & EHPF_PORTSUSPEND) *mptr |= UPSF_PORT_SUSPEND;
                            if(oldval & EHPF_PORTPOWER) *mptr |= UPSF_PORT_POWER;
                            if(oldval & EHPM_PORTINDICATOR) *mptr |= UPSF_PORT_INDICATOR;

                            KPRINTF(5, ("EHCI Port %ld Status %08lx\n", idx, *mptr));

                            mptr++;
                            if(oldval & EHPF_ENABLECHANGE)
                            {
                                hc->hc_PortChangeMap[hciport] |= UPSF_PORT_ENABLE;
                            }
                            if(oldval & EHPF_CONNECTCHANGE)
                            {
                                hc->hc_PortChangeMap[hciport] |= UPSF_PORT_CONNECTION;
                            }
                            if(oldval & EHPF_RESUMEDTX)
                            {
                                hc->hc_PortChangeMap[hciport] |= UPSF_PORT_SUSPEND|UPSF_PORT_ENABLE;
                            }
                            if(oldval & EHPF_OVERCURRENTCHG)
                            {
                                hc->hc_PortChangeMap[hciport] |= UPSF_PORT_OVER_CURRENT;
                            }
                            *mptr = hc->hc_PortChangeMap[hciport];
                            WRITEREG32_LE(hc->hc_RegBase, portreg, oldval);
                            KPRINTF(5, ("EHCI Port %ld Change %08lx\n", idx, *mptr));
                            return(0);
                        }
                    }
                    return(0);
                }
            }
            break;
        case (URTF_IN|URTF_CLASS|URTF_DEVICE):
            switch(req)
            {
                case USR_GET_STATUS:
                {
                    UWORD *mptr = ioreq->iouh_Data;
                    if(len < sizeof(struct UsbHubStatus))
                    {
                        return(UHIOERR_STALL);
                    }
                    *mptr++ = 0;
                    *mptr++ = 0;
                    ioreq->iouh_Actual = 4;
                    return(0);
                }
                case USR_GET_DESCRIPTOR:
                    switch(val>>8)
                    {
                        case UDT_HUB:
                        {
                            ULONG hubdesclen = 9;
                            ULONG powergood = 1;
                            struct UsbHubDesc *uhd = (struct UsbHubDesc *) ioreq->iouh_Data;
                            KPRINTF(1, ("RH: GetHubDescriptor (%ld)\n", len));
                            if(unit->hu_RootHubPorts > 7) // needs two bytes for port masks
                            {
                                hubdesclen += 2;
                            }
                            ioreq->iouh_Actual = (len > hubdesclen) ? hubdesclen : len;
                            CopyMem((APTR) &RHHubDesc, ioreq->iouh_Data, ioreq->iouh_Actual);
                            if(ioreq->iouh_Length)
                            {
                                uhd->bLength = hubdesclen;
                            }
                            if(ioreq->iouh_Length >= 6)
                            {
                                hc = (struct PCIController *) unit->hu_Controllers.lh_Head;
                                while(hc->hc_Node.ln_Succ)
                                {
                                    if(hc->hc_HCIType == HCITYPE_OHCI)
                                    {
                                        ULONG localpwgood = (READREG32_LE(hc->hc_RegBase, OHCI_HUBDESCA) & OHAM_POWERGOOD) >> OHAS_POWERGOOD;
                                        if(localpwgood > powergood)
                                        {
                                            powergood = localpwgood;
                                            KPRINTF(10, ("Increasing power good time to %ld\n", powergood));
                                        }
                                    }
                                    hc = (struct PCIController *) hc->hc_Node.ln_Succ;
                                }

                                uhd->bPwrOn2PwrGood = powergood;
                            }
                            if(ioreq->iouh_Length >= hubdesclen)
                            {
                                uhd->bNbrPorts = unit->hu_RootHubPorts;
                                if(hubdesclen == 9)
                                {
                                    uhd->DeviceRemovable = 0;
                                    uhd->PortPwrCtrlMask = (1<<(unit->hu_RootHubPorts+2))-2;
                                } else {
                                    // each field is now 16 bits wide
                                    uhd->DeviceRemovable = 0;
                                    uhd->PortPwrCtrlMask = 0;
                                    ((UBYTE *) ioreq->iouh_Data)[9] = (1<<(unit->hu_RootHubPorts+2))-2;
                                    ((UBYTE *) ioreq->iouh_Data)[10] = ((1<<(unit->hu_RootHubPorts+2))-2)>>8;
                                }
                            }
                            return(0);
                        }
                        default:
                            KPRINTF(20, ("RH: Unsupported Descriptor %04lx\n", idx));
                    }
                    break;
            }
    }
    KPRINTF(20, ("RH: Unsupported command %02lx %02lx %04lx %04lx %04lx!\n", rt, req, idx, val, len));
    return(UHIOERR_STALL);
}
/* \\\ */
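
/* Interrupt transfers to the root hub status endpoint are answered at once
   if a port change is already pending; otherwise the request is parked on
   hu_RHIOQueue until uhwCheckRootHubChanges() replies to it. */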
/* /// "cmdIntXFerRootHub()" */
WORD cmdIntXFerRootHub(struct IOUsbHWReq *ioreq,
                       struct PCIUnit *unit,
                       struct PCIDevice *base)
{
    if((ioreq->iouh_Endpoint != 1) || (!ioreq->iouh_Length))
    {
        return(UHIOERR_STALL);
    }

    if(unit->hu_RootPortChanges)
    {
        KPRINTF(1, ("Immediate Portchange map %04lx\n", unit->hu_RootPortChanges));
        if((unit->hu_RootHubPorts < 8) || (ioreq->iouh_Length == 1))
        {
            *((UBYTE *) ioreq->iouh_Data) = unit->hu_RootPortChanges;
            ioreq->iouh_Actual = 1;
        } else {
            ((UBYTE *) ioreq->iouh_Data)[0] = unit->hu_RootPortChanges;
            ((UBYTE *) ioreq->iouh_Data)[1] = unit->hu_RootPortChanges>>8;
            ioreq->iouh_Actual = 2;
        }
        unit->hu_RootPortChanges = 0;
        return(0);
    }
    ioreq->iouh_Req.io_Flags &= ~IOF_QUICK;
    Disable();
    AddTail(&unit->hu_RHIOQueue, (struct Node *) ioreq);
    Enable();
    return(RC_DONTREPLY);
}
/* \\\ */
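
/* The four transfer commands below share one pattern: check the USB state,
   divert root hub addresses to the emulation above, then queue the request
   on the owning controller and kick its completion interrupt through
   SureCause(), which processes the transfer outside the caller's context. */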
/* /// "cmdControlXFer()" */
/*
 *======================================================================
 * cmdControlXFer(ioreq, unit, base)
 *======================================================================
 *
 * This is the device UHCMD_CONTROLXFER routine.
 *
 * First it checks if the USB is in a proper state and if the arguments
 * passed by the user are valid. If everything is OK, the request is
 * linked into the queue of pending transfer requests.
 *
 */

WORD cmdControlXFer(struct IOUsbHWReq *ioreq,
                    struct PCIUnit *unit,
                    struct PCIDevice *base)
{
    struct PCIController *hc;

    KPRINTF(10, ("UHCMD_CONTROLXFER ioreq: 0x%08lx\n", ioreq));
    uhwGetUsbState(ioreq, unit, base);
    if(!(ioreq->iouh_State & UHSF_OPERATIONAL))
    {
        return(UHIOERR_USBOFFLINE);
    }

    /* Root hub emulation */
    if(ioreq->iouh_DevAddr == unit->hu_RootHubAddr)
    {
        return(cmdControlXFerRootHub(ioreq, unit, base));
    }

    hc = unit->hu_DevControllers[ioreq->iouh_DevAddr];
    if(!hc)
    {
        KPRINTF(20, ("No host controller assigned to device address %ld\n", ioreq->iouh_DevAddr));
        return(UHIOERR_HOSTERROR);
    }

    ioreq->iouh_Req.io_Flags &= ~IOF_QUICK;
    ioreq->iouh_Actual = 0;

    Disable();
    AddTail(&hc->hc_CtrlXFerQueue, (struct Node *) ioreq);
    Enable();
    SureCause(base, &hc->hc_CompleteInt);

    KPRINTF(10, ("UHCMD_CONTROLXFER processed ioreq: 0x%08lx\n", ioreq));
    return(RC_DONTREPLY);
}
/* \\\ */
/* /// "cmdBulkXFer()" */
/*
 *======================================================================
 * cmdBulkXFer(ioreq, unit, base)
 *======================================================================
 *
 * This is the device UHCMD_BULKXFER routine.
 *
 * First it checks if the USB is in a proper state and if the arguments
 * passed by the user are valid. If everything is OK, the request is
 * linked into the queue of pending transfer requests.
 *
 */

WORD cmdBulkXFer(struct IOUsbHWReq *ioreq,
                 struct PCIUnit *unit,
                 struct PCIDevice *base)
{
    struct PCIController *hc;

    KPRINTF(10, ("UHCMD_BULKXFER ioreq: 0x%08lx\n", ioreq));
    uhwGetUsbState(ioreq, unit, base);
    if(!(ioreq->iouh_State & UHSF_OPERATIONAL))
    {
        return(UHIOERR_USBOFFLINE);
    }

    if(ioreq->iouh_Flags & UHFF_LOWSPEED)
    {
        return(UHIOERR_BADPARAMS);
    }

    hc = unit->hu_DevControllers[ioreq->iouh_DevAddr];
    if(!hc)
    {
        return(UHIOERR_HOSTERROR);
    }

    ioreq->iouh_Req.io_Flags &= ~IOF_QUICK;
    ioreq->iouh_Actual = 0;

    Disable();
    AddTail(&hc->hc_BulkXFerQueue, (struct Node *) ioreq);
    Enable();
    SureCause(base, &hc->hc_CompleteInt);

    KPRINTF(10, ("UHCMD_BULKXFER processed ioreq: 0x%08lx\n", ioreq));
    return(RC_DONTREPLY);
}
/* \\\ */
/* /// "cmdIsoXFer()" */
/*
 *======================================================================
 * cmdIsoXFer(ioreq, unit, base)
 *======================================================================
 *
 * This is the device UHCMD_ISOXFER routine.
 *
 * First it checks if the USB is in a proper state and if the arguments
 * passed by the user are valid. If everything is OK, the request is
 * linked into the queue of pending transfer requests.
 *
 */

WORD cmdIsoXFer(struct IOUsbHWReq *ioreq,
                struct PCIUnit *unit,
                struct PCIDevice *base)
{
    struct PCIController *hc;

    KPRINTF(10, ("UHCMD_ISOXFER ioreq: 0x%08lx\n", ioreq));
    uhwGetUsbState(ioreq, unit, base);
    if(!(ioreq->iouh_State & UHSF_OPERATIONAL))
    {
        return(UHIOERR_USBOFFLINE);
    }

    if(ioreq->iouh_Flags & UHFF_LOWSPEED)
    {
        return(UHIOERR_BADPARAMS);
    }

    hc = unit->hu_DevControllers[ioreq->iouh_DevAddr];
    if(!hc)
    {
        return(UHIOERR_HOSTERROR);
    }

    ioreq->iouh_Req.io_Flags &= ~IOF_QUICK;
    ioreq->iouh_Actual = 0;

    Disable();
    AddTail(&hc->hc_IsoXFerQueue, (struct Node *) ioreq);
    Enable();
    SureCause(base, &hc->hc_CompleteInt);

    KPRINTF(10, ("UHCMD_ISOXFER processed ioreq: 0x%08lx\n", ioreq));
    return(RC_DONTREPLY);
}
/* \\\ */
/* /// "cmdIntXFer()" */
/*
 *======================================================================
 * cmdIntXFer(ioreq, unit, base)
 *======================================================================
 *
 * This is the device UHCMD_INTXFER routine.
 *
 * First it checks if the USB is in a proper state and if the arguments
 * passed by the user are valid. If everything is OK, the request is
 * linked into the queue of pending transfer requests.
 *
 */

WORD cmdIntXFer(struct IOUsbHWReq *ioreq,
                struct PCIUnit *unit,
                struct PCIDevice *base)
{
    struct PCIController *hc;

    KPRINTF(10, ("UHCMD_INTXFER ioreq: 0x%08lx\n", ioreq));
    //uhwDelayMS(1000, unit, base); /* Wait 1000 ms */
    uhwGetUsbState(ioreq, unit, base);
    if(!(ioreq->iouh_State & UHSF_OPERATIONAL))
    {
        return(UHIOERR_USBOFFLINE);
    }

    /* Root Hub Emulation */
    if(ioreq->iouh_DevAddr == unit->hu_RootHubAddr)
    {
        return(cmdIntXFerRootHub(ioreq, unit, base));
    }

    hc = unit->hu_DevControllers[ioreq->iouh_DevAddr];
    if(!hc)
    {
        return(UHIOERR_HOSTERROR);
    }

    ioreq->iouh_Req.io_Flags &= ~IOF_QUICK;
    ioreq->iouh_Actual = 0;

    Disable();
    AddTail(&hc->hc_IntXFerQueue, (struct Node *) ioreq);
    Enable();
    SureCause(base, &hc->hc_CompleteInt);

    KPRINTF(10, ("UHCMD_INTXFER processed ioreq: 0x%08lx\n", ioreq));
    return(RC_DONTREPLY);
}
/* \\\ */
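
/* CMD_FLUSH below first empties the software queues (root hub queue, then
   the Ctrl/Int/Iso/Bulk queues of every controller) and then the queues of
   transfers already set up in hardware, where the QH contexts must be freed
   and the DevBusy bookkeeping cleared before replying. Note that the
   hardware part of this work-in-progress only covers UHCI and EHCI yet. */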
/* /// "cmdFlush()" */
/*
 *======================================================================
 * cmdFlush(ioreq, unit, base)
 *======================================================================
 *
 * This is the device CMD_FLUSH routine.
 *
 * This routine aborts all pending transfer requests.
 *
 */

WORD cmdFlush(struct IOUsbHWReq *ioreq,
              struct PCIUnit *unit,
              struct PCIDevice *base)
{
    struct IOUsbHWReq *cmpioreq;
    struct PCIController *hc;
    UWORD devadrep;

    KPRINTF(10, ("CMD_FLUSH ioreq: 0x%08lx\n", ioreq));

    Disable();
    cmpioreq = (struct IOUsbHWReq *) unit->hu_RHIOQueue.lh_Head;
    while(((struct Node *) cmpioreq)->ln_Succ)
    {
        Remove(&cmpioreq->iouh_Req.io_Message.mn_Node);
        cmpioreq->iouh_Req.io_Error = IOERR_ABORTED;
        ReplyMsg(&cmpioreq->iouh_Req.io_Message);
        cmpioreq = (struct IOUsbHWReq *) unit->hu_RHIOQueue.lh_Head;
    }
    hc = (struct PCIController *) unit->hu_Controllers.lh_Head;
    while(hc->hc_Node.ln_Succ)
    {
        cmpioreq = (struct IOUsbHWReq *) hc->hc_CtrlXFerQueue.lh_Head;
        while(((struct Node *) cmpioreq)->ln_Succ)
        {
            Remove(&cmpioreq->iouh_Req.io_Message.mn_Node);
            cmpioreq->iouh_Req.io_Error = IOERR_ABORTED;
            ReplyMsg(&cmpioreq->iouh_Req.io_Message);
            cmpioreq = (struct IOUsbHWReq *) hc->hc_CtrlXFerQueue.lh_Head;
        }
        cmpioreq = (struct IOUsbHWReq *) hc->hc_IntXFerQueue.lh_Head;
        while(((struct Node *) cmpioreq)->ln_Succ)
        {
            Remove(&cmpioreq->iouh_Req.io_Message.mn_Node);
            cmpioreq->iouh_Req.io_Error = IOERR_ABORTED;
            ReplyMsg(&cmpioreq->iouh_Req.io_Message);
            cmpioreq = (struct IOUsbHWReq *) hc->hc_IntXFerQueue.lh_Head;
        }
        cmpioreq = (struct IOUsbHWReq *) hc->hc_IsoXFerQueue.lh_Head;
        while(((struct Node *) cmpioreq)->ln_Succ)
        {
            Remove(&cmpioreq->iouh_Req.io_Message.mn_Node);
            cmpioreq->iouh_Req.io_Error = IOERR_ABORTED;
            ReplyMsg(&cmpioreq->iouh_Req.io_Message);
            cmpioreq = (struct IOUsbHWReq *) hc->hc_IsoXFerQueue.lh_Head;
        }
        cmpioreq = (struct IOUsbHWReq *) hc->hc_BulkXFerQueue.lh_Head;
        while(((struct Node *) cmpioreq)->ln_Succ)
        {
            Remove(&cmpioreq->iouh_Req.io_Message.mn_Node);
            cmpioreq->iouh_Req.io_Error = IOERR_ABORTED;
            ReplyMsg(&cmpioreq->iouh_Req.io_Message);
            cmpioreq = (struct IOUsbHWReq *) hc->hc_BulkXFerQueue.lh_Head;
        }
        switch(hc->hc_HCIType)
        {
            case HCITYPE_UHCI:
                cmpioreq = (struct IOUsbHWReq *) hc->hc_TDQueue.lh_Head;
                while(((struct Node *) cmpioreq)->ln_Succ)
                {
                    Remove(&cmpioreq->iouh_Req.io_Message.mn_Node);
                    devadrep = (cmpioreq->iouh_DevAddr<<5) + cmpioreq->iouh_Endpoint + ((cmpioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
                    unit->hu_DevBusyReq[devadrep] = NULL;
                    uhciFreeQContext(hc, (struct UhciQH *) cmpioreq->iouh_DriverPrivate1);
                    cmpioreq->iouh_Req.io_Error = IOERR_ABORTED;
                    ReplyMsg(&cmpioreq->iouh_Req.io_Message);
                    cmpioreq = (struct IOUsbHWReq *) hc->hc_TDQueue.lh_Head;
                }
                break;

            case HCITYPE_EHCI:
                cmpioreq = (struct IOUsbHWReq *) hc->hc_TDQueue.lh_Head;
                while(((struct Node *) cmpioreq)->ln_Succ)
                {
                    Remove(&cmpioreq->iouh_Req.io_Message.mn_Node);
                    devadrep = (cmpioreq->iouh_DevAddr<<5) + cmpioreq->iouh_Endpoint + ((cmpioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
                    unit->hu_DevBusyReq[devadrep] = NULL;
                    ehciFreeAsyncContext(hc, (struct EhciQH *) cmpioreq->iouh_DriverPrivate1);
                    cmpioreq->iouh_Req.io_Error = IOERR_ABORTED;
                    ReplyMsg(&cmpioreq->iouh_Req.io_Message);
                    cmpioreq = (struct IOUsbHWReq *) hc->hc_TDQueue.lh_Head;
                }
                cmpioreq = (struct IOUsbHWReq *) hc->hc_PeriodicTDQueue.lh_Head;
                while(((struct Node *) cmpioreq)->ln_Succ)
                {
                    Remove(&cmpioreq->iouh_Req.io_Message.mn_Node);
                    devadrep = (cmpioreq->iouh_DevAddr<<5) + cmpioreq->iouh_Endpoint + ((cmpioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
                    unit->hu_DevBusyReq[devadrep] = NULL;
                    ehciFreePeriodicContext(hc, (struct EhciQH *) cmpioreq->iouh_DriverPrivate1);
                    cmpioreq->iouh_Req.io_Error = IOERR_ABORTED;
                    ReplyMsg(&cmpioreq->iouh_Req.io_Message);
                    cmpioreq = (struct IOUsbHWReq *) hc->hc_PeriodicTDQueue.lh_Head;
                }
                break;
        }
        hc = (struct PCIController *) hc->hc_Node.ln_Succ;
    }
    Enable();
    /* Return success
    */
    return RC_OK;
}
/* \\\ */
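
/* NewStyleDevice support: NSDSupported lists every command the dispatcher
   accepts, terminated by 0 as the NSD convention requires. */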
/* /// "NSD stuff" */

static
const UWORD NSDSupported[] =
{
    CMD_FLUSH, CMD_RESET,
    UHCMD_QUERYDEVICE, UHCMD_USBRESET,
    UHCMD_USBRESUME, UHCMD_USBSUSPEND,
    UHCMD_USBOPER, UHCMD_CONTROLXFER,
    UHCMD_ISOXFER, UHCMD_INTXFER,
    UHCMD_BULKXFER,
    NSCMD_DEVICEQUERY, 0
};

WORD cmdNSDeviceQuery(struct IOStdReq *ioreq,
                      struct PCIUnit *unit,
                      struct PCIDevice *base)
{
    struct my_NSDeviceQueryResult *query;

    query = (struct my_NSDeviceQueryResult *) ioreq->io_Data;

    KPRINTF(10, ("NSCMD_DEVICEQUERY ioreq: 0x%08lx query: 0x%08lx\n", ioreq, query));

    /* NULL ptr?
       Enough data?
       Valid request? */
    if((!query) ||
       (ioreq->io_Length < sizeof(struct my_NSDeviceQueryResult)) ||
       (query->DevQueryFormat != 0) ||
       (query->SizeAvailable != 0))
    {
        /* Return error. This is special handling, since the iorequest is
           only guaranteed to be sizeof(struct IOStdReq). If we let our
           devBeginIO dispatcher return the error, it would trash some
           memory past the end of the iorequest (the ios2_WireError field). */
        ioreq->io_Error = IOERR_NOCMD;
        TermIO((struct IOUsbHWReq *) ioreq, base);

        /* Don't reply, we already did. */
        return RC_DONTREPLY;
    }

    ioreq->io_Actual = query->SizeAvailable
                     = sizeof(struct my_NSDeviceQueryResult);
    query->DeviceType = NSDEVTYPE_USBHARDWARE;
    query->DeviceSubType = 0;
    query->SupportedCommands = NSDSupported;

    /* Return success (note that this will NOT poke ios2_WireError). */
    return RC_OK;
}
/* \\\ */
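
/* TermIO() below honours exec's quick I/O convention: if IOF_QUICK is still
   set, the caller never waits on the reply port, so the message must not be
   replied. */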
/* /// "TermIO()" */
/*
 *===========================================================
 * TermIO(ioreq, base)
 *===========================================================
 *
 * Return completed ioreq to sender.
 *
 */

void TermIO(struct IOUsbHWReq *ioreq,
            struct PCIDevice *base)
{
    ioreq->iouh_Req.io_Message.mn_Node.ln_Type = NT_FREEMSG;

    /* If not quick I/O, reply the message */
    if(!(ioreq->iouh_Req.io_Flags & IOF_QUICK))
    {
        ReplyMsg(&ioreq->iouh_Req.io_Message);
    }
}
/* \\\ */
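
/* cmdAbortIO() below first scans the software queues, where unlinking the
   request is enough; if it is already active in hardware, the matching
   UHCI/OHCI/EHCI transfer context is freed and the per-endpoint busy entry
   cleared before the abort is acknowledged. */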
/* /// "cmdAbortIO()" */
BOOL cmdAbortIO(struct IOUsbHWReq *ioreq, struct PCIDevice *base)
{
    struct PCIUnit *unit = (struct PCIUnit *) ioreq->iouh_Req.io_Unit;
    struct IOUsbHWReq *cmpioreq;
    struct PCIController *hc;
    UWORD devadrep;
    BOOL foundit = FALSE;

    KPRINTF(10, ("cmdAbort(%08lx)\n", ioreq));

    Disable();
    cmpioreq = (struct IOUsbHWReq *) unit->hu_RHIOQueue.lh_Head;
    while(((struct Node *) cmpioreq)->ln_Succ)
    {
        if(ioreq == cmpioreq)
        {
            Remove(&ioreq->iouh_Req.io_Message.mn_Node);
            Enable();
            return TRUE;
        }
        cmpioreq = (struct IOUsbHWReq *) cmpioreq->iouh_Req.io_Message.mn_Node.ln_Succ;
    }

    hc = (struct PCIController *) unit->hu_Controllers.lh_Head;
    while(hc->hc_Node.ln_Succ)
    {
        cmpioreq = (struct IOUsbHWReq *) hc->hc_CtrlXFerQueue.lh_Head;
        while(((struct Node *) cmpioreq)->ln_Succ)
        {
            if(ioreq == cmpioreq)
            {
                foundit = TRUE;
                break;
            }
            cmpioreq = (struct IOUsbHWReq *) cmpioreq->iouh_Req.io_Message.mn_Node.ln_Succ;
        }
        if(!foundit)
        {
            cmpioreq = (struct IOUsbHWReq *) hc->hc_IntXFerQueue.lh_Head;
            while(((struct Node *) cmpioreq)->ln_Succ)
            {
                if(ioreq == cmpioreq)
                {
                    foundit = TRUE;
                    break;
                }
                cmpioreq = (struct IOUsbHWReq *) cmpioreq->iouh_Req.io_Message.mn_Node.ln_Succ;
            }
        }
        if(!foundit)
        {
            cmpioreq = (struct IOUsbHWReq *) hc->hc_IsoXFerQueue.lh_Head;
            while(((struct Node *) cmpioreq)->ln_Succ)
            {
                if(ioreq == cmpioreq)
                {
                    foundit = TRUE;
                    break;
                }
                cmpioreq = (struct IOUsbHWReq *) cmpioreq->iouh_Req.io_Message.mn_Node.ln_Succ;
            }
        }
        if(!foundit)
        {
            cmpioreq = (struct IOUsbHWReq *) hc->hc_BulkXFerQueue.lh_Head;
            while(((struct Node *) cmpioreq)->ln_Succ)
            {
                if(ioreq == cmpioreq)
                {
                    foundit = TRUE;
                    break;
                }
                cmpioreq = (struct IOUsbHWReq *) cmpioreq->iouh_Req.io_Message.mn_Node.ln_Succ;
            }
        }
        if(foundit)
        {
            Remove(&ioreq->iouh_Req.io_Message.mn_Node);
            break;
        } else {
            // IOReq is probably pending in some transfer structure
            devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint + ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
            switch(hc->hc_HCIType)
            {
                case HCITYPE_UHCI:
                    cmpioreq = (struct IOUsbHWReq *) hc->hc_TDQueue.lh_Head;
                    while(((struct Node *) cmpioreq)->ln_Succ)
                    {
                        if(ioreq == cmpioreq)
                        {
                            foundit = TRUE;
                            unit->hu_DevBusyReq[devadrep] = NULL;
                            uhciFreeQContext(hc, (struct UhciQH *) ioreq->iouh_DriverPrivate1);
                            break;
                        }
                        cmpioreq = (struct IOUsbHWReq *) cmpioreq->iouh_Req.io_Message.mn_Node.ln_Succ;
                    }
                    break;

                case HCITYPE_OHCI:
                    cmpioreq = (struct IOUsbHWReq *) hc->hc_TDQueue.lh_Head;
                    while(((struct Node *) cmpioreq)->ln_Succ)
                    {
                        if(ioreq == cmpioreq)
                        {
                            foundit = TRUE;
                            unit->hu_DevBusyReq[devadrep] = NULL;
                            ohciFreeEDContext(hc, (struct OhciED *) ioreq->iouh_DriverPrivate1);
                            break;
                        }
                        cmpioreq = (struct IOUsbHWReq *) cmpioreq->iouh_Req.io_Message.mn_Node.ln_Succ;
                    }
                    break;

                case HCITYPE_EHCI:
                    cmpioreq = (struct IOUsbHWReq *) hc->hc_TDQueue.lh_Head;
                    while(((struct Node *) cmpioreq)->ln_Succ)
                    {
                        if(ioreq == cmpioreq)
                        {
                            foundit = TRUE;
                            unit->hu_DevBusyReq[devadrep] = NULL;
                            ehciFreeAsyncContext(hc, (struct EhciQH *) ioreq->iouh_DriverPrivate1);
                            break;
                        }
                        cmpioreq = (struct IOUsbHWReq *) cmpioreq->iouh_Req.io_Message.mn_Node.ln_Succ;
                    }
                    cmpioreq = (struct IOUsbHWReq *) hc->hc_PeriodicTDQueue.lh_Head;
                    while(((struct Node *) cmpioreq)->ln_Succ)
                    {
                        if(ioreq == cmpioreq)
                        {
                            foundit = TRUE;
                            unit->hu_DevBusyReq[devadrep] = NULL;
                            ehciFreePeriodicContext(hc, (struct EhciQH *) ioreq->iouh_DriverPrivate1);
                            break;
                        }
                        cmpioreq = (struct IOUsbHWReq *) cmpioreq->iouh_Req.io_Message.mn_Node.ln_Succ;
                    }
                    break;
            }
            if(foundit)
            {
                Remove(&ioreq->iouh_Req.io_Message.mn_Node);
                break;
            }
        }
        hc = (struct PCIController *) hc->hc_Node.ln_Succ;
    }
    Enable();
    if(!foundit)
    {
        KPRINTF(20, ("WARNING, could not abort unknown IOReq %08lx\n", ioreq));
    }
    return(foundit);
}
/* \\\ */
/* /// "uhwCheckRootHubChanges()" */
void uhwCheckRootHubChanges(struct PCIUnit *unit)
{
    struct IOUsbHWReq *ioreq;

    if(unit->hu_RootPortChanges && unit->hu_RHIOQueue.lh_Head->ln_Succ)
    {
        KPRINTF(1, ("Portchange map %04lx\n", unit->hu_RootPortChanges));
        Disable();
        ioreq = (struct IOUsbHWReq *) unit->hu_RHIOQueue.lh_Head;
        while(((struct Node *) ioreq)->ln_Succ)
        {
            Remove(&ioreq->iouh_Req.io_Message.mn_Node);
            if((ioreq->iouh_Length == 1) || (unit->hu_RootHubPorts < 8))
            {
                *((UBYTE *) ioreq->iouh_Data) = unit->hu_RootPortChanges;
                ioreq->iouh_Actual = 1;
            }
            else if(ioreq->iouh_Length > 1)
            {
                ((UBYTE *) ioreq->iouh_Data)[0] = unit->hu_RootPortChanges;
                ((UBYTE *) ioreq->iouh_Data)[1] = unit->hu_RootPortChanges>>8;
                ioreq->iouh_Actual = 2;
            }
            ReplyMsg(&ioreq->iouh_Req.io_Message);
            ioreq = (struct IOUsbHWReq *) unit->hu_RHIOQueue.lh_Head;
        }
        unit->hu_RootPortChanges = 0;
        Enable();
    }
}
/* \\\ */
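
/* uhwCheckSpecialCtrlTransfers() below snoops control transfers with side
   effects on driver state: clearing an endpoint halt resets the stored data
   toggle, SET_ADDRESS wipes the toggles and moves the controller assignment
   to the new device address, and a hub-issued port reset claims device
   address 0 for this controller in anticipation of enumeration. */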
1971 /* /// "uhwCheckSpecialCtrlTransfers()" */
1972 void uhwCheckSpecialCtrlTransfers(struct PCIController *hc, struct IOUsbHWReq *ioreq)
1974 struct PCIUnit *unit = hc->hc_Unit;
1976 /* Clear Feature(Endpoint halt) */
1977 if((ioreq->iouh_SetupData.bmRequestType == (URTF_STANDARD|URTF_ENDPOINT)) &&
1978 (ioreq->iouh_SetupData.bRequest == USR_CLEAR_FEATURE) &&
1979 (ioreq->iouh_SetupData.wValue == AROS_WORD2LE(UFS_ENDPOINT_HALT)))
1981 KPRINTF(10, ("Resetting toggle bit for endpoint %ld\n", AROS_WORD2LE(ioreq->iouh_SetupData.wIndex) & 0xf));
1982 unit->hu_DevDataToggle[(ioreq->iouh_DevAddr<<5)|(AROS_WORD2LE(ioreq->iouh_SetupData.wIndex) & 0xf)|((AROS_WORD2LE(ioreq->iouh_SetupData.wIndex) & 0x80)>>3)] = 0;
1984 else if((ioreq->iouh_SetupData.bmRequestType == (URTF_STANDARD|URTF_DEVICE)) &&
1985 (ioreq->iouh_SetupData.bRequest == USR_SET_ADDRESS))
1987 /* Set Address -> clear all endpoints */
1988 ULONG epnum;
1989 ULONG adr = ioreq->iouh_SetupData.wValue>>3;
1990 KPRINTF(10, ("Resetting toggle bits for device address %ld\n", adr>>5));
1991 for(epnum = 0; epnum < 32; epnum++)
1993 unit->hu_DevDataToggle[adr+epnum] = 0;
1995 // transfer host controller ownership
1996 unit->hu_DevControllers[ioreq->iouh_DevAddr] = NULL;
1997 unit->hu_DevControllers[adr>>5] = hc;
1999 else if((ioreq->iouh_SetupData.bmRequestType == (URTF_CLASS|URTF_OTHER)) &&
2000 (ioreq->iouh_SetupData.bRequest == USR_SET_FEATURE) &&
2001 (ioreq->iouh_SetupData.wValue == AROS_WORD2LE(UFS_PORT_RESET)))
2003 // a hub will be enumerating a device on this host controller soon!
2004 KPRINTF(10, ("Hub RESET caught, assigning Dev0 to %08lx!\n", hc));
2005 unit->hu_DevControllers[0] = hc;
2008 /* \\\ */
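/* Endpoint bookkeeping sketch: hu_DevBusyReq and hu_DevDataToggle are indexed
   by "devadrep" = (DevAddr<<5) + Endpoint, plus 0x10 for the IN direction of
   non-control endpoints, i.e. 32 slots per device: EP0-15 OUT at 0x00-0x0f,
   EP0-15 IN at 0x10-0x1f. A hypothetical helper (not part of this driver)
   would look like:
     #define DEVADREP(addr, ep, in) (((addr)<<5) + (ep) + ((in) ? 0x10 : 0))
   e.g. DEVADREP(2, 1, TRUE) == 0x51. */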
2010 /* ---------------------------------------------------------------------- *
2011 * UHCI Specific Stuff *
2012 * ---------------------------------------------------------------------- */
2014 /* /// "uhciFreeQContext()" */
2015 void uhciFreeQContext(struct PCIController *hc, struct UhciQH *uqh)
2017 struct UhciTD *utd = NULL;
2018 struct UhciTD *nextutd;
2020 KPRINTF(5, ("Unlinking QContext %08lx\n", uqh));
2021 // unlink from schedule
2022 uqh->uqh_Pred->uxx_Link = uqh->uqh_Succ->uxx_Self;
2023 SYNC;
2024 EIEIO;
2025 uqh->uqh_Succ->uxx_Pred = uqh->uqh_Pred;
2026 uqh->uqh_Pred->uxx_Succ = uqh->uqh_Succ;
2027 SYNC;
2028 EIEIO;
2030 nextutd = uqh->uqh_FirstTD;
2031 while(nextutd)
2033 KPRINTF(1, ("FreeTD %08lx\n", nextutd));
2034 utd = nextutd;
2035 nextutd = (struct UhciTD *) utd->utd_Succ;
2036 uhciFreeTD(hc, utd);
2038 uhciFreeQH(hc, uqh);
2040 /* \\\ */
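/* Note on the unlink order above: the hardware link pointer (uxx_Link) is bent
   around the QH first and only then the software list is fixed up, with
   SYNC/EIEIO as write barriers in between, so the host controller can never
   chase a link into a QH that is already half dismantled. */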
2042 /* /// "uhciAllocQH()" */
2043 inline struct UhciQH * uhciAllocQH(struct PCIController *hc)
2045 struct UhciQH *uqh = hc->hc_UhciQHPool;
2047 if(!uqh)
2049 // out of QHs!
2050 KPRINTF(20, ("Out of QHs!\n"));
2051 return NULL;
2054 hc->hc_UhciQHPool = (struct UhciQH *) uqh->uqh_Succ;
2055 return(uqh);
2057 /* \\\ */
2059 /* /// "uhciFreeQH()" */
2060 inline void uhciFreeQH(struct PCIController *hc, struct UhciQH *uqh)
2062 uqh->uqh_Succ = (struct UhciXX *) hc->hc_UhciQHPool;
2063 hc->hc_UhciQHPool = uqh;
2065 /* \\\ */
2067 /* /// "uhciAllocTD()" */
2068 inline struct UhciTD * uhciAllocTD(struct PCIController *hc)
2070 struct UhciTD *utd = hc->hc_UhciTDPool;
2072 if(!utd)
2074 // out of TDs!
2075 KPRINTF(20, ("Out of TDs!\n"));
2076 return NULL;
2079 hc->hc_UhciTDPool = (struct UhciTD *) utd->utd_Succ;
2080 return(utd);
2082 /* \\\ */
2084 /* /// "uhciFreeTD()" */
2085 inline void uhciFreeTD(struct PCIController *hc, struct UhciTD *utd)
2087 utd->utd_Succ = (struct UhciXX *) hc->hc_UhciTDPool;
2088 hc->hc_UhciTDPool = utd;
2090 /* \\\ */
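/* The four allocators above are plain LIFO free lists popped and pushed via
   the Succ pointers, presumably filled from a contiguous pool at init time:
   O(1), no locking needed inside the completion interrupt, and running dry is
   harmless because the schedulers simply retry on a later interrupt. */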
2092 /* /// "uhciUpdateIntTree()" */
2093 void uhciUpdateIntTree(struct PCIController *hc)
2095 struct UhciXX *uxx;
2096 struct UhciXX *preduxx;
2097 struct UhciXX *lastuseduxx;
2098 UWORD cnt;
2100 // optimize linkage between queue heads
2101 preduxx = lastuseduxx = (struct UhciXX *) hc->hc_UhciCtrlQH; //hc->hc_UhciIsoTD;
2102 for(cnt = 0; cnt < 9; cnt++)
2104 uxx = (struct UhciXX *) hc->hc_UhciIntQH[cnt];
2105 if(uxx->uxx_Succ != preduxx)
2107 lastuseduxx = uxx->uxx_Succ;
2109 uxx->uxx_Link = lastuseduxx->uxx_Self;
2110 preduxx = uxx;
2113 /* \\\ */
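/* hc_UhciIntQH[0..8] form the interrupt polling tree: level n is serviced
   every 2^n ms (level 8 = 256ms, see uhciScheduleIntTDs). The pass above
   re-links every level to the last level that actually holds transfers, so
   the controller skips the empty ones. */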
2115 /* /// "uhciCheckPortStatusChange()" */
2116 void uhciCheckPortStatusChange(struct PCIController *hc)
2118 struct PCIUnit *unit = hc->hc_Unit;
2119 UWORD oldval;
2120 UWORD hciport;
2122 // check for port status change for UHCI and frame rollovers
2124 for(hciport = 0; hciport < 2; hciport++)
2126 UWORD portreg;
2127 UWORD idx = hc->hc_PortNum20[hciport];
2128 // don't pay attention to UHCI port changes while the port is owned by EHCI
2129 if(!unit->hu_EhciOwned[idx])
2131 portreg = hciport ? UHCI_PORT2STSCTRL : UHCI_PORT1STSCTRL;
2132 oldval = READREG16_LE(hc->hc_RegBase, portreg);
2133 if(oldval & UHPF_ENABLECHANGE)
2135 KPRINTF(10, ("Port %ld (%ld) Enable changed\n", idx, hciport));
2136 hc->hc_PortChangeMap[hciport] |= UPSF_PORT_ENABLE;
2138 if(oldval & UHPF_CONNECTCHANGE)
2140 KPRINTF(10, ("Port %ld (%ld) Connect changed\n", idx, hciport));
2141 hc->hc_PortChangeMap[hciport] |= UPSF_PORT_CONNECTION;
2142 if(!(oldval & UHPF_PORTCONNECTED))
2144 if(unit->hu_PortMap20[idx])
2146 KPRINTF(20, ("Transferring Port %ld back to EHCI\n", idx));
2147 unit->hu_EhciOwned[idx] = TRUE;
2151 if(oldval & UHPF_RESUMEDTX)
2153 KPRINTF(10, ("Port %ld (%ld) Resume changed\n", idx, hciport));
2154 hc->hc_PortChangeMap[hciport] |= UPSF_PORT_SUSPEND|UPSF_PORT_ENABLE;
2155 oldval &= ~UHPF_RESUMEDTX;
2157 if(hc->hc_PortChangeMap[hciport])
2159 unit->hu_RootPortChanges |= 1UL<<(idx+1);
2160 /*KPRINTF(10, ("Port %ld (%ld) contributes %04lx to portmap %04lx\n",
2161 idx, hciport, hc->hc_PortChangeMap[hciport], unit->hu_RootPortChanges));*/
2163 WRITEREG16_LE(hc->hc_RegBase, portreg, oldval);
2167 /* \\\ */
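/* Port ownership note: on USB 2.0 boards the EHCI controller routes low/full
   speed devices to its companion UHCI ports. The code above returns a port to
   EHCI (hu_EhciOwned[idx] = TRUE) as soon as its device disconnects, so that
   the next attachment is negotiated at high speed first. */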
2169 /* /// "uhciHandleFinishedTDs()" */
2170 void uhciHandleFinishedTDs(struct PCIController *hc)
2172 struct PCIUnit *unit = hc->hc_Unit;
2173 struct IOUsbHWReq *ioreq;
2174 struct IOUsbHWReq *nextioreq;
2175 struct UhciQH *uqh;
2176 struct UhciTD *utd;
2177 UWORD devadrep;
2178 ULONG len;
2179 ULONG linkelem;
2180 UWORD inspect;
2181 BOOL shortpkt;
2182 ULONG ctrlstatus;
2183 ULONG token = 0;
2184 ULONG actual;
2185 BOOL updatetree = FALSE;
2187 KPRINTF(1, ("Checking for work done...\n"));
2188 ioreq = (struct IOUsbHWReq *) hc->hc_TDQueue.lh_Head;
2189 while((nextioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ))
2191 uqh = (struct UhciQH *) ioreq->iouh_DriverPrivate1;
2192 if(uqh)
2194 KPRINTF(1, ("Examining IOReq=%08lx with UQH=%08lx\n", ioreq, uqh));
2195 linkelem = READMEM32_LE(&uqh->uqh_Element);
2196 inspect = 0;
2197 devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint + ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
2198 if(linkelem & UHCI_TERMINATE)
2200 KPRINTF(1, ("UQH terminated %08lx\n", linkelem));
2201 inspect = 2;
2202 } else {
2203 utd = (struct UhciTD *) ((linkelem & UHCI_PTRMASK) - hc->hc_PCIVirtualAdjust - 16); // struct UhciTD starts 16 bytes before physical TD
2204 ctrlstatus = READMEM32_LE(&utd->utd_CtrlStatus);
2205 if(!(ctrlstatus & UTCF_ACTIVE))
2207 KPRINTF(1, ("CtrlStatus inactive %08lx\n", ctrlstatus));
2208 inspect = 1;
2210 else if(unit->hu_NakTimeoutFrame[devadrep] && (hc->hc_FrameCounter > unit->hu_NakTimeoutFrame[devadrep]))
2212 ioreq->iouh_Req.io_Error = UHIOERR_NAKTIMEOUT;
2213 inspect = 1;
2216 if(inspect)
2218 shortpkt = FALSE;
2219 if(inspect < 2) // if all went okay, don't traverse list, assume all bytes successfully transferred
2221 utd = uqh->uqh_FirstTD;
2222 actual = 0;
2225 ctrlstatus = READMEM32_LE(&utd->utd_CtrlStatus);
2226 if(ctrlstatus & UTCF_ACTIVE)
2228 KPRINTF(20, ("Internal error! Still active?!\n"));
2229 if(ctrlstatus & UTSF_BABBLE)
2231 KPRINTF(200, ("HOST CONTROLLER IS DEAD!!!\n"));
2232 ioreq->iouh_Req.io_Error = UHIOERR_HOSTERROR;
2233 CONSTWRITEREG16_LE(hc->hc_RegBase, UHCI_USBCMD, UHCF_HCRESET|UHCF_MAXPACKET64|UHCF_CONFIGURE|UHCF_RUNSTOP);
2234 inspect = 0;
2235 break;
2237 break;
2239 token = READMEM32_LE(&utd->utd_Token);
2240 KPRINTF(1, ("TD=%08lx CS=%08lx Token=%08lx\n", utd, ctrlstatus, token));
2241 if(ctrlstatus & (UTSF_BABBLE|UTSF_STALLED|UTSF_CRCTIMEOUT|UTSF_DATABUFFERERR|UTSF_BITSTUFFERR))
2243 if(ctrlstatus & UTSF_BABBLE)
2245 KPRINTF(20, ("Babble error %08lx/%08lx\n", ctrlstatus, token));
2246 ioreq->iouh_Req.io_Error = UHIOERR_OVERFLOW;
2247 #if 0
2248 // VIA chipset seems to die on babble!?!
2249 KPRINTF(10, ("HW Regs USBCMD=%04lx\n", READREG16_LE(hc->hc_RegBase, UHCI_USBCMD)));
2250 CONSTWRITEREG16_LE(hc->hc_RegBase, UHCI_USBCMD, UHCF_MAXPACKET64|UHCF_CONFIGURE|UHCF_RUNSTOP);
2251 SYNC;
2252 EIEIO;
2253 #endif
2254 //retry
2255 //ctrlstatus &= ~(UTSF_BABBLE|UTSF_STALLED|UTSF_CRCTIMEOUT|UTSF_DATABUFFERERR|UTSF_BITSTUFFERR|UTSF_NAK);
2256 ctrlstatus |= UTCF_ACTIVE;
2257 WRITEMEM32_LE(&utd->utd_CtrlStatus, ctrlstatus);
2258 SYNC;
2259 EIEIO;
2260 inspect = 3;
2261 break;
2263 else if(ctrlstatus & UTSF_CRCTIMEOUT)
2265 KPRINTF(20, ("CRC/Timeout error IOReq=%08lx DIR=%ld\n", ioreq, ioreq->iouh_Dir));
2266 if(ctrlstatus & UTSF_STALLED)
2268 ioreq->iouh_Req.io_Error = UHIOERR_TIMEOUT;
2269 } else {
2270 ioreq->iouh_Req.io_Error = (ioreq->iouh_Dir == UHDIR_IN) ? UHIOERR_CRCERROR : UHIOERR_TIMEOUT;
2273 else if(ctrlstatus & UTSF_STALLED)
2275 KPRINTF(20, ("STALLED!\n"));
2276 ioreq->iouh_Req.io_Error = UHIOERR_STALL;
2278 else if(ctrlstatus & UTSF_BITSTUFFERR)
2280 KPRINTF(20, ("Bitstuff error\n"));
2281 ioreq->iouh_Req.io_Error = UHIOERR_CRCERROR;
2283 else if(ctrlstatus & UTSF_DATABUFFERERR)
2285 KPRINTF(20, ("Databuffer error\n"));
2286 ioreq->iouh_Req.io_Error = UHIOERR_HOSTERROR;
2288 inspect = 0;
2289 break;
2291 if(unit->hu_NakTimeoutFrame[devadrep] && (hc->hc_FrameCounter > unit->hu_NakTimeoutFrame[devadrep]) && (ctrlstatus & UTSF_NAK))
2293 ioreq->iouh_Req.io_Error = UHIOERR_NAKTIMEOUT;
2294 inspect = 0;
2297 len = (ctrlstatus & UTSM_ACTUALLENGTH)>>UTSS_ACTUALLENGTH;
2298 if((len != (token & UTTM_TRANSLENGTH)>>UTTS_TRANSLENGTH))
2300 shortpkt = TRUE;
2302 len = (len+1) & 0x7ff; // get real length
2303 if((token & UTTM_PID)>>UTTS_PID != PID_SETUP) // don't count setup packet
2305 actual += len;
2307 if(shortpkt)
2309 break;
2311 } while((utd = (struct UhciTD *) utd->utd_Succ));
2312 if(inspect == 3)
2314 // bail out from babble
2315 ioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ;
2316 continue;
2318 if((actual < uqh->uqh_Actual) && (!ioreq->iouh_Req.io_Error) && (!(ioreq->iouh_Flags & UHFF_ALLOWRUNTPKTS)))
2320 KPRINTF(10, ("Short packet: %ld < %ld\n", actual, uqh->uqh_Actual));
2321 ioreq->iouh_Req.io_Error = UHIOERR_RUNTPACKET;
2323 ioreq->iouh_Actual += actual;
2324 } else {
2325 KPRINTF(10, ("all %ld bytes transferred\n", uqh->uqh_Actual));
2326 ioreq->iouh_Actual += uqh->uqh_Actual;
2328 // this is actually not a short packet, but a result of the VIA babble fix
2329 if(shortpkt && (ioreq->iouh_Actual == ioreq->iouh_Length))
2331 shortpkt = FALSE;
2333 unit->hu_DevBusyReq[devadrep] = NULL;
2334 Remove(&ioreq->iouh_Req.io_Message.mn_Node);
2335 uhciFreeQContext(hc, uqh);
2336 if(ioreq->iouh_Req.io_Command == UHCMD_INTXFER)
2338 updatetree = TRUE;
2340 if(inspect)
2342 if(inspect < 2) // otherwise, toggle will be right already
2344 // use next data toggle bit based on last successful transaction
2345 unit->hu_DevDataToggle[devadrep] = (token & UTTF_DATA1) ? FALSE : TRUE;
2347 if(!shortpkt && (ioreq->iouh_Actual < ioreq->iouh_Length))
2349 // fragmented, do some more work
2350 switch(ioreq->iouh_Req.io_Command)
2352 case UHCMD_CONTROLXFER:
2353 KPRINTF(10, ("Rescheduling CtrlTransfer at %ld of %ld\n", ioreq->iouh_Actual, ioreq->iouh_Length));
2354 AddHead(&hc->hc_CtrlXFerQueue, (struct Node *) ioreq);
2355 break;
2357 case UHCMD_INTXFER:
2358 KPRINTF(10, ("Rescheduling IntTransfer at %ld of %ld\n", ioreq->iouh_Actual, ioreq->iouh_Length));
2359 AddHead(&hc->hc_IntXFerQueue, (struct Node *) ioreq);
2360 break;
2362 case UHCMD_BULKXFER:
2363 KPRINTF(10, ("Rescheduling BulkTransfer at %ld of %ld\n", ioreq->iouh_Actual, ioreq->iouh_Length));
2364 AddHead(&hc->hc_BulkXFerQueue, (struct Node *) ioreq);
2365 break;
2367 default:
2368 KPRINTF(10, ("Uhm, internal error, dunno where to queue this req\n"));
2369 ReplyMsg(&ioreq->iouh_Req.io_Message);
2371 } else {
2372 // check for successful clear feature and set address ctrl transfers
2373 if(ioreq->iouh_Req.io_Command == UHCMD_CONTROLXFER)
2375 uhwCheckSpecialCtrlTransfers(hc, ioreq);
2377 ReplyMsg(&ioreq->iouh_Req.io_Message);
2379 } else {
2380 // be sure to save the data toggle bit where the error occurred
2381 unit->hu_DevDataToggle[devadrep] = (token & UTTF_DATA1) ? TRUE : FALSE;
2382 ReplyMsg(&ioreq->iouh_Req.io_Message);
2385 } else {
2386 KPRINTF(20, ("IOReq=%08lx has no UQH!\n", ioreq));
2388 ioreq = nextioreq;
2390 if(updatetree)
2392 KPRINTF(10, ("Updating Tree\n"));
2393 uhciUpdateIntTree(hc);
2396 /* \\\ */
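/* UHCI length encoding used in the loop above: both ACTUALLENGTH and
   TRANSLENGTH hold n-1, with 0x7ff denoting a zero-length packet. Comparing
   the two raw fields detects a short packet, and len = (len+1) & 0x7ff
   recovers the true byte count afterwards. */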
2398 /* /// "uhciScheduleCtrlTDs()" */
2399 void uhciScheduleCtrlTDs(struct PCIController *hc)
2401 struct PCIUnit *unit = hc->hc_Unit;
2402 struct IOUsbHWReq *ioreq;
2403 UWORD devadrep;
2404 struct UhciQH *uqh;
2405 struct UhciTD *setuputd;
2406 struct UhciTD *datautd;
2407 struct UhciTD *termutd;
2408 struct UhciTD *predutd;
2409 ULONG actual;
2410 ULONG ctrlstatus;
2411 ULONG token;
2412 ULONG len;
2413 ULONG phyaddr;
2414 BOOL cont;
2416 /* *** CTRL Transfers *** */
2417 KPRINTF(1, ("Scheduling new CTRL transfers...\n"));
2418 ioreq = (struct IOUsbHWReq *) hc->hc_CtrlXFerQueue.lh_Head;
2419 while(((struct Node *) ioreq)->ln_Succ)
2421 devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint;
2422 KPRINTF(10, ("New CTRL transfer to %ld.%ld: %ld bytes\n", ioreq->iouh_DevAddr, ioreq->iouh_Endpoint, ioreq->iouh_Length));
2423 /* is endpoint already in use or do we have to wait for next transaction */
2424 if(unit->hu_DevBusyReq[devadrep])
2426 KPRINTF(5, ("Endpoint %02lx in use!\n", devadrep));
2427 ioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ;
2428 continue;
2431 uqh = uhciAllocQH(hc);
2432 if(!uqh)
2434 break;
2437 setuputd = uhciAllocTD(hc);
2438 if(!setuputd)
2440 uhciFreeQH(hc, uqh);
2441 break;
2443 termutd = uhciAllocTD(hc);
2444 if(!termutd)
2446 uhciFreeTD(hc, setuputd);
2447 uhciFreeQH(hc, uqh);
2448 break;
2450 uqh->uqh_IOReq = ioreq;
2452 //termutd->utd_QueueHead = setuputd->utd_QueueHead = uqh;
2454 KPRINTF(1, ("SetupTD=%08lx, TermTD=%08lx\n", setuputd, termutd));
2456 // fill setup td
2457 ctrlstatus = UTCF_ACTIVE|UTCF_3ERRORSLIMIT;
2458 if(ioreq->iouh_Flags & UHFF_LOWSPEED)
2460 KPRINTF(5, ("*** LOW SPEED ***\n"));
2461 ctrlstatus |= UTCF_LOWSPEED;
2463 token = (ioreq->iouh_DevAddr<<UTTS_DEVADDR)|(ioreq->iouh_Endpoint<<UTTS_ENDPOINT);
2464 //setuputd->utd_Pred = NULL;
2465 if(ioreq->iouh_Actual)
2467 // this is a continuation of a fragmented ctrl transfer!
2468 KPRINTF(1, ("Continuing FRAGMENT at %ld of %ld\n", ioreq->iouh_Actual, ioreq->iouh_Length));
2469 cont = TRUE;
2470 } else {
2471 cont = FALSE;
2472 uqh->uqh_FirstTD = setuputd;
2473 uqh->uqh_Element = setuputd->utd_Self; // start of queue
2474 WRITEMEM32_LE(&setuputd->utd_CtrlStatus, ctrlstatus);
2475 WRITEMEM32_LE(&setuputd->utd_Token, (PID_SETUP<<UTTS_PID)|token|(7<<UTTS_TRANSLENGTH)|UTTF_DATA0);
2476 WRITEMEM32_LE(&setuputd->utd_BufferPtr, (ULONG) pciGetPhysical(hc, &ioreq->iouh_SetupData));
2479 token |= (ioreq->iouh_SetupData.bmRequestType & URTF_IN) ? PID_IN : PID_OUT;
2480 predutd = setuputd;
2481 actual = ioreq->iouh_Actual;
2482 if(ioreq->iouh_Length)
2484 ctrlstatus |= UTCF_SHORTPACKET;
2485 if(cont)
2487 phyaddr = (ULONG) pciGetPhysical(hc, &(((UBYTE *) ioreq->iouh_Data)[ioreq->iouh_Actual]));
2488 if(!unit->hu_DevDataToggle[devadrep])
2490 // continue with data toggle 0
2491 token |= UTTF_DATA1;
2493 } else {
2494 phyaddr = (ULONG) pciGetPhysical(hc, ioreq->iouh_Data);
2498 datautd = uhciAllocTD(hc);
2499 if(!datautd)
2501 break;
2503 token ^= UTTF_DATA1; // toggle bit
2504 predutd->utd_Link = datautd->utd_Self;
2505 predutd->utd_Succ = (struct UhciXX *) datautd;
2506 //datautd->utd_Pred = (struct UhciXX *) predutd;
2507 //datautd->utd_QueueHead = uqh;
2508 len = ioreq->iouh_Length - actual;
2509 if(len > ioreq->iouh_MaxPktSize)
2511 len = ioreq->iouh_MaxPktSize;
2513 WRITEMEM32_LE(&datautd->utd_CtrlStatus, ctrlstatus);
2514 #if 1
2515 #warning "this workaround for a VIA babble bug will potentially overwrite innocent memory (very rarely), but will avoid the host controller dropping dead completely."
2516 if((len < ioreq->iouh_MaxPktSize) && (ioreq->iouh_SetupData.bmRequestType & URTF_IN))
2518 WRITEMEM32_LE(&datautd->utd_Token, token|((ioreq->iouh_MaxPktSize-1)<<UTTS_TRANSLENGTH)); // no masking needed here as len is always >= 1
2519 } else {
2520 WRITEMEM32_LE(&datautd->utd_Token, token|((len-1)<<UTTS_TRANSLENGTH)); // no masking needed here as len is always >= 1
2522 #else
2523 WRITEMEM32_LE(&datautd->utd_Token, token|((len-1)<<UTTS_TRANSLENGTH)); // no masking needed here as len is always >= 1
2524 #endif
2525 WRITEMEM32_LE(&datautd->utd_BufferPtr, phyaddr);
2526 phyaddr += len;
2527 actual += len;
2528 predutd = datautd;
2529 } while((actual < ioreq->iouh_Length) && (actual - ioreq->iouh_Actual < UHCI_TD_CTRL_LIMIT));
2530 if(actual == ioreq->iouh_Actual)
2532 // couldn't get even one data TD? try again later
2533 uhciFreeTD(hc, setuputd);
2534 uhciFreeTD(hc, termutd);
2535 uhciFreeQH(hc, uqh);
2536 break;
2538 if(cont)
2540 // free Setup packet
2541 KPRINTF(1, ("Freeing setup\n"));
2542 uqh->uqh_FirstTD = (struct UhciTD *) setuputd->utd_Succ;
2543 //uqh->uqh_FirstTD->utd_Pred = NULL;
2544 uqh->uqh_Element = setuputd->utd_Succ->uxx_Self; // start of queue after setup packet
2545 uhciFreeTD(hc, setuputd);
2546 // set toggle for next batch
2547 unit->hu_DevDataToggle[devadrep] = (token & UTTF_DATA1) ? FALSE : TRUE;
2550 uqh->uqh_Actual = actual - ioreq->iouh_Actual;
2551 ctrlstatus |= UTCF_READYINTEN;
2552 if(actual == ioreq->iouh_Length)
2554 // TERM packet
2555 KPRINTF(1, ("Activating TERM\n"));
2556 token |= UTTF_DATA1;
2557 token ^= (PID_IN^PID_OUT)<<UTTS_PID;
2559 predutd->utd_Link = termutd->utd_Self;
2560 predutd->utd_Succ = (struct UhciXX *) termutd;
2561 //termutd->utd_Pred = (struct UhciXX *) predutd;
2562 WRITEMEM32_LE(&termutd->utd_CtrlStatus, ctrlstatus);
2563 WRITEMEM32_LE(&termutd->utd_Token, token|(0x7ff<<UTTS_TRANSLENGTH));
2564 CONSTWRITEMEM32_LE(&termutd->utd_Link, UHCI_TERMINATE);
2565 termutd->utd_Succ = NULL;
2566 //uqh->uqh_LastTD = termutd;
2567 } else {
2568 KPRINTF(1, ("Setup data phase fragmented\n"));
2569 // don't create TERM, we don't know the final data toggle bit
2570 // but mark the last data TD for interrupt generation
2571 WRITEMEM32_LE(&predutd->utd_CtrlStatus, ctrlstatus);
2572 uhciFreeTD(hc, termutd);
2573 CONSTWRITEMEM32_LE(&predutd->utd_Link, UHCI_TERMINATE);
2574 predutd->utd_Succ = NULL;
2575 //uqh->uqh_LastTD = predutd;
2578 Remove(&ioreq->iouh_Req.io_Message.mn_Node);
2579 ioreq->iouh_DriverPrivate1 = uqh;
2581 // manage endpoint going busy
2582 unit->hu_DevBusyReq[devadrep] = ioreq;
2583 unit->hu_NakTimeoutFrame[devadrep] = (ioreq->iouh_Flags & UHFF_NAKTIMEOUT) ? hc->hc_FrameCounter + ioreq->iouh_NakTimeout : 0;
2585 Disable();
2586 AddTail(&hc->hc_TDQueue, (struct Node *) ioreq);
2588 // looks good to me, now enqueue this entry (just behind the CtrlQH)
2589 uqh->uqh_Succ = hc->hc_UhciCtrlQH->uqh_Succ;
2590 uqh->uqh_Link = uqh->uqh_Succ->uxx_Self;
2591 SYNC;
2592 EIEIO;
2593 uqh->uqh_Pred = (struct UhciXX *) hc->hc_UhciCtrlQH;
2594 uqh->uqh_Succ->uxx_Pred = (struct UhciXX *) uqh;
2595 hc->hc_UhciCtrlQH->uqh_Succ = (struct UhciXX *) uqh;
2596 hc->hc_UhciCtrlQH->uqh_Link = uqh->uqh_Self;
2597 SYNC;
2598 EIEIO;
2599 Enable();
2601 ioreq = (struct IOUsbHWReq *) hc->hc_CtrlXFerQueue.lh_Head;
2604 /* \\\ */
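/* Shape of the control transfer queued above, in USB terms: one SETUP TD with
   a fixed DATA0 toggle carrying the 8-byte iouh_SetupData, data stage TDs
   alternating DATA1/DATA0, and a status ("TERM") TD in the opposite direction
   with forced DATA1. Transfers larger than UHCI_TD_CTRL_LIMIT omit the status
   TD and get rescheduled from uhciHandleFinishedTDs() until done. */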
2606 /* /// "uhciScheduleIntTDs()" */
2607 void uhciScheduleIntTDs(struct PCIController *hc)
2609 struct PCIUnit *unit = hc->hc_Unit;
2610 struct IOUsbHWReq *ioreq;
2611 UWORD cnt;
2612 UWORD devadrep;
2613 struct UhciQH *uqh;
2614 struct UhciQH *intuqh;
2615 struct UhciTD *utd;
2616 struct UhciTD *predutd;
2617 ULONG actual;
2618 ULONG ctrlstatus;
2619 ULONG token;
2620 ULONG len;
2621 ULONG phyaddr;
2623 /* *** INT Transfers *** */
2624 KPRINTF(1, ("Scheduling new INT transfers...\n"));
2625 ioreq = (struct IOUsbHWReq *) hc->hc_IntXFerQueue.lh_Head;
2626 while(((struct Node *) ioreq)->ln_Succ)
2628 devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint + ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
2629 KPRINTF(10, ("New INT transfer to %ld.%ld: %ld bytes\n", ioreq->iouh_DevAddr, ioreq->iouh_Endpoint, ioreq->iouh_Length));
2630 /* is endpoint already in use or do we have to wait for next transaction */
2631 if(unit->hu_DevBusyReq[devadrep])
2633 KPRINTF(5, ("Endpoint %02lx in use!\n", devadrep));
2634 ioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ;
2635 continue;
2638 uqh = uhciAllocQH(hc);
2639 if(!uqh)
2641 break;
2644 uqh->uqh_IOReq = ioreq;
2646 ctrlstatus = UTCF_ACTIVE|UTCF_1ERRORLIMIT;
2647 if(ioreq->iouh_Flags & UHFF_LOWSPEED)
2649 KPRINTF(5, ("*** LOW SPEED ***\n"));
2650 ctrlstatus |= UTCF_LOWSPEED;
2652 token = (ioreq->iouh_DevAddr<<UTTS_DEVADDR)|(ioreq->iouh_Endpoint<<UTTS_ENDPOINT);
2653 token |= (ioreq->iouh_Dir == UHDIR_IN) ? PID_IN : PID_OUT;
2654 predutd = NULL;
2655 actual = ioreq->iouh_Actual;
2656 ctrlstatus |= UTCF_SHORTPACKET;
2657 phyaddr = (ULONG) pciGetPhysical(hc, &(((UBYTE *) ioreq->iouh_Data)[ioreq->iouh_Actual]));
2658 if(unit->hu_DevDataToggle[devadrep])
2660 // continue with data toggle 1
2661 KPRINTF(1, ("Data1\n"));
2662 token |= UTTF_DATA1;
2663 } else {
2664 KPRINTF(1, ("Data0\n"));
2668 utd = uhciAllocTD(hc);
2669 if(!utd)
2671 break;
2673 if(predutd)
2675 WRITEMEM32_LE(&predutd->utd_Link, READMEM32_LE(utd->utd_Self)|UHCI_DFS);
2676 predutd->utd_Succ = (struct UhciXX *) utd;
2677 //utd->utd_Pred = (struct UhciXX *) predutd;
2678 } else {
2679 uqh->uqh_FirstTD = utd;
2680 uqh->uqh_Element = utd->utd_Self;
2681 //utd->utd_Pred = NULL;
2683 //utd->utd_QueueHead = uqh;
2684 len = ioreq->iouh_Length - actual;
2685 if(len > ioreq->iouh_MaxPktSize)
2687 len = ioreq->iouh_MaxPktSize;
2690 WRITEMEM32_LE(&utd->utd_CtrlStatus, ctrlstatus);
2691 WRITEMEM32_LE(&utd->utd_Token, token|(((len-1) & 0x7ff)<<UTTS_TRANSLENGTH));
2692 WRITEMEM32_LE(&utd->utd_BufferPtr, phyaddr);
2693 phyaddr += len;
2694 actual += len;
2695 predutd = utd;
2696 token ^= UTTF_DATA1; // toggle bit
2697 } while((actual < ioreq->iouh_Length) && (actual - ioreq->iouh_Actual < UHCI_TD_INT_LIMIT));
2699 if(!utd)
2701 // couldn't get even one data TD? try again later
2702 uhciFreeQH(hc, uqh);
2703 break;
2706 uqh->uqh_Actual = actual - ioreq->iouh_Actual;
2707 // set toggle for next batch / successful transfer
2708 unit->hu_DevDataToggle[devadrep] = (token & UTTF_DATA1) ? TRUE : FALSE;
2709 if(unit->hu_DevDataToggle[devadrep])
2711 // continue with data toggle 1
2712 KPRINTF(1, ("NewData1\n"));
2713 } else {
2714 KPRINTF(1, ("NewData0\n"));
2716 ctrlstatus |= UTCF_READYINTEN;
2717 WRITEMEM32_LE(&predutd->utd_CtrlStatus, ctrlstatus);
2718 CONSTWRITEMEM32_LE(&utd->utd_Link, UHCI_TERMINATE);
2719 utd->utd_Succ = NULL;
2720 //uqh->uqh_LastTD = utd;
2722 if(ioreq->iouh_Interval >= 255)
2724 intuqh = hc->hc_UhciIntQH[8]; // 256ms interval
2725 } else {
2726 cnt = 0;
2729 intuqh = hc->hc_UhciIntQH[cnt++];
2730 } while(ioreq->iouh_Interval > (1<<cnt));
2731 KPRINTF(1, ("Scheduled at level %ld\n", cnt));
2734 Remove(&ioreq->iouh_Req.io_Message.mn_Node);
2735 ioreq->iouh_DriverPrivate1 = uqh;
2737 // manage endpoint going busy
2738 unit->hu_DevBusyReq[devadrep] = ioreq;
2739 unit->hu_NakTimeoutFrame[devadrep] = (ioreq->iouh_Flags & UHFF_NAKTIMEOUT) ? hc->hc_FrameCounter + ioreq->iouh_NakTimeout : 0;
2741 Disable();
2742 AddTail(&hc->hc_TDQueue, (struct Node *) ioreq);
2744 // looks good to me, now enqueue this entry (just behind the right IntQH)
2745 uqh->uqh_Succ = intuqh->uqh_Succ;
2746 uqh->uqh_Link = intuqh->uqh_Self;
2747 SYNC;
2748 EIEIO;
2749 uqh->uqh_Pred = (struct UhciXX *) intuqh;
2750 uqh->uqh_Succ->uxx_Pred = (struct UhciXX *) uqh;
2751 intuqh->uqh_Succ = (struct UhciXX *) uqh;
2752 intuqh->uqh_Link = uqh->uqh_Self;
2753 SYNC;
2754 EIEIO;
2755 Enable();
2757 uhciUpdateIntTree(hc);
2759 ioreq = (struct IOUsbHWReq *) hc->hc_IntXFerQueue.lh_Head;
2762 /* \\\ */
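/* Interval mapping example for the scheduler above: iouh_Interval = 10 ms
   leaves the do/while at hc_UhciIntQH[3] because 10 > 8 but 10 <= 16, i.e. an
   8 ms polling level - intervals are always rounded down to a power of two,
   never up. */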
2764 /* /// "uhciScheduleBulkTDs()" */
2765 void uhciScheduleBulkTDs(struct PCIController *hc)
2767 struct PCIUnit *unit = hc->hc_Unit;
2768 struct IOUsbHWReq *ioreq;
2769 UWORD devadrep;
2770 struct UhciQH *uqh;
2771 struct UhciTD *utd;
2772 struct UhciTD *predutd;
2773 ULONG actual;
2774 ULONG ctrlstatus;
2775 ULONG token;
2776 ULONG len;
2777 ULONG phyaddr;
2778 BOOL forcezero;
2780 /* *** BULK Transfers *** */
2781 KPRINTF(1, ("Scheduling new BULK transfers...\n"));
2782 ioreq = (struct IOUsbHWReq *) hc->hc_BulkXFerQueue.lh_Head;
2783 while(((struct Node *) ioreq)->ln_Succ)
2785 devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint + ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
2786 KPRINTF(10, ("New BULK transfer to %ld.%ld: %ld bytes\n", ioreq->iouh_DevAddr, ioreq->iouh_Endpoint, ioreq->iouh_Length));
2787 /* is endpoint already in use or do we have to wait for next transaction */
2788 if(unit->hu_DevBusyReq[devadrep])
2790 KPRINTF(5, ("Endpoint %02lx in use!\n", devadrep));
2791 ioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ;
2792 continue;
2795 uqh = uhciAllocQH(hc);
2796 if(!uqh)
2798 break;
2801 uqh->uqh_IOReq = ioreq;
2803 // fill setup td
2804 ctrlstatus = UTCF_ACTIVE|UTCF_1ERRORLIMIT;
2805 token = (ioreq->iouh_DevAddr<<UTTS_DEVADDR)|(ioreq->iouh_Endpoint<<UTTS_ENDPOINT);
2806 token |= (ioreq->iouh_Dir == UHDIR_IN) ? PID_IN : PID_OUT;
2807 predutd = NULL;
2808 actual = ioreq->iouh_Actual;
2809 ctrlstatus |= UTCF_SHORTPACKET;
2810 phyaddr = (ULONG) pciGetPhysical(hc, &(((UBYTE *) ioreq->iouh_Data)[ioreq->iouh_Actual]));
2811 if(unit->hu_DevDataToggle[devadrep])
2813 // continue with data toggle 1
2814 token |= UTTF_DATA1;
2818 utd = uhciAllocTD(hc);
2819 if(!utd)
2821 break;
2823 forcezero = FALSE;
2824 if(predutd)
2826 WRITEMEM32_LE(&predutd->utd_Link, READMEM32_LE(utd->utd_Self)|UHCI_DFS);
2827 predutd->utd_Succ = (struct UhciXX *) utd;
2828 //utd->utd_Pred = (struct UhciXX *) predutd;
2829 } else {
2830 uqh->uqh_FirstTD = utd;
2831 uqh->uqh_Element = utd->utd_Self;
2832 //utd->utd_Pred = NULL;
2834 //utd->utd_QueueHead = uqh;
2835 len = ioreq->iouh_Length - actual;
2836 if(len > ioreq->iouh_MaxPktSize)
2838 len = ioreq->iouh_MaxPktSize;
2841 WRITEMEM32_LE(&utd->utd_CtrlStatus, ctrlstatus);
2842 WRITEMEM32_LE(&utd->utd_Token, token|(((len-1) & 0x7ff)<<UTTS_TRANSLENGTH));
2843 WRITEMEM32_LE(&utd->utd_BufferPtr, phyaddr);
2844 phyaddr += len;
2845 actual += len;
2846 predutd = utd;
2847 token ^= UTTF_DATA1; // toggle bit
2848 if((actual == ioreq->iouh_Length) && len)
2850 if((ioreq->iouh_Flags & UHFF_NOSHORTPKT) || (ioreq->iouh_Dir == UHDIR_IN) || (actual % ioreq->iouh_MaxPktSize))
2852 // no last zero byte packet
2853 break;
2854 } else {
2855 // avoid rare case that the zero byte packet is reached on TD_BULK_LIMIT
2856 forcezero = TRUE;
2859 } while(forcezero || (len && (actual <= ioreq->iouh_Length) && (actual - ioreq->iouh_Actual < UHCI_TD_BULK_LIMIT)));
2861 if(!utd)
2863 // couldn't get even one data TD? try again later
2864 uhciFreeQH(hc, uqh);
2865 break;
2867 uqh->uqh_Actual = actual - ioreq->iouh_Actual;
2868 // set toggle for next batch / successful transfer
2869 unit->hu_DevDataToggle[devadrep] = (token & UTTF_DATA1) ? TRUE : FALSE;
2871 ctrlstatus |= UTCF_READYINTEN;
2872 WRITEMEM32_LE(&predutd->utd_CtrlStatus, ctrlstatus);
2873 CONSTWRITEMEM32_LE(&utd->utd_Link, UHCI_TERMINATE);
2874 utd->utd_Succ = NULL;
2875 //uqh->uqh_LastTD = utd;
2877 Remove(&ioreq->iouh_Req.io_Message.mn_Node);
2878 ioreq->iouh_DriverPrivate1 = uqh;
2880 // manage endpoint going busy
2881 unit->hu_DevBusyReq[devadrep] = ioreq;
2882 unit->hu_NakTimeoutFrame[devadrep] = (ioreq->iouh_Flags & UHFF_NAKTIMEOUT) ? hc->hc_FrameCounter + ioreq->iouh_NakTimeout : 0;
2884 Disable();
2885 AddTail(&hc->hc_TDQueue, (struct Node *) ioreq);
2887 // looks good to me, now enqueue this entry (just behind the BulkQH)
2888 uqh->uqh_Succ = hc->hc_UhciBulkQH->uqh_Succ;
2889 uqh->uqh_Link = uqh->uqh_Succ->uxx_Self;
2890 SYNC;
2891 EIEIO;
2892 uqh->uqh_Pred = (struct UhciXX *) hc->hc_UhciBulkQH;
2893 uqh->uqh_Succ->uxx_Pred = (struct UhciXX *) uqh;
2894 hc->hc_UhciBulkQH->uqh_Succ = (struct UhciXX *) uqh;
2895 hc->hc_UhciBulkQH->uqh_Link = uqh->uqh_Self;
2896 SYNC;
2897 EIEIO;
2898 Enable();
2900 ioreq = (struct IOUsbHWReq *) hc->hc_BulkXFerQueue.lh_Head;
2903 /* \\\ */
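/* Zero-length packet rule applied above: a bulk OUT transfer whose total
   length is an exact multiple of MaxPktSize is terminated by one extra 0-byte
   TD unless UHFF_NOSHORTPKT is set; the 'forcezero' flag covers the corner
   case where that terminating packet would fall exactly on the
   UHCI_TD_BULK_LIMIT split boundary. */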
2905 /* /// "uhciCompleteInt()" */
2906 void uhciCompleteInt(struct PCIController *hc)
2908 ULONG framecnt = READREG16_LE(hc->hc_RegBase, UHCI_FRAMECOUNT);
2910 KPRINTF(1, ("CompleteInt!\n"));
2911 if(framecnt < (hc->hc_FrameCounter & 0xffff))
2913 hc->hc_FrameCounter |= 0xffff;
2914 hc->hc_FrameCounter++;
2915 hc->hc_FrameCounter += framecnt;
2916 KPRINTF(10, ("Frame Counter Rollover %ld\n", hc->hc_FrameCounter));
} else {
hc->hc_FrameCounter = (hc->hc_FrameCounter & 0xffff0000)|framecnt; // keep the low 16 bits current (mirrors ohciCompleteInt)
2919 /* **************** PROCESS DONE TRANSFERS **************** */
2921 uhciCheckPortStatusChange(hc);
2922 uhwCheckRootHubChanges(hc->hc_Unit);
2924 uhciHandleFinishedTDs(hc);
2926 if(hc->hc_CtrlXFerQueue.lh_Head->ln_Succ)
2928 uhciScheduleCtrlTDs(hc);
2931 if(hc->hc_IntXFerQueue.lh_Head->ln_Succ)
2933 uhciScheduleIntTDs(hc);
2936 if(hc->hc_BulkXFerQueue.lh_Head->ln_Succ)
2938 uhciScheduleBulkTDs(hc);
2941 KPRINTF(1, ("CompleteDone\n"));
2943 /* \\\ */
2945 /* /// "uhciIntCode()" */
2946 void uhciIntCode(HIDDT_IRQ_Handler *irq, HIDDT_IRQ_HwInfo *hw)
2948 struct PCIController *hc = (struct PCIController *) irq->h_Data;
2949 struct PCIDevice *base = hc->hc_Device;
2950 UWORD intr;
2952 //KPRINTF(10, ("pciUhciInt()\n"));
2953 intr = READREG16_LE(hc->hc_RegBase, UHCI_USBSTATUS);
2954 if(intr & (UHSF_USBINT|UHSF_USBERRORINT|UHSF_RESUMEDTX|UHSF_HCSYSERROR|UHSF_HCPROCERROR|UHSF_HCHALTED))
2956 WRITEREG16_LE(hc->hc_RegBase, UHCI_USBSTATUS, intr);
2957 KPRINTF(1, ("INT=%04lx\n", intr));
2958 if(intr & (UHSF_HCSYSERROR|UHSF_HCPROCERROR|UHSF_HCHALTED))
2960 KPRINTF(200, ("Host ERROR!\n"));
2961 CONSTWRITEREG16_LE(hc->hc_RegBase, UHCI_USBCMD, UHCF_HCRESET|UHCF_GLOBALRESET|UHCF_MAXPACKET64|UHCF_CONFIGURE);
2962 //CONSTWRITEREG16_LE(hc->hc_RegBase, UHCI_USBINTEN, 0);
2964 if(!hc->hc_Online)
2966 return;
2968 if(intr & (UHSF_USBINT|UHSF_USBERRORINT))
2970 SureCause(base, &hc->hc_CompleteInt);
2974 /* \\\ */
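/* Interrupt acknowledge pattern above: UHCI status bits are write-1-to-clear,
   so writing the freshly read value straight back acks exactly the conditions
   that were observed. The actual completion work then runs in the
   hc_CompleteInt soft interrupt via SureCause(), keeping this hardware
   handler short. */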
2976 /* ---------------------------------------------------------------------- *
2977 * OHCI Specific Stuff *
2978 * ---------------------------------------------------------------------- */
2980 /* /// "ohciDebugSchedule()" */
2981 void ohciDebugSchedule(struct PCIController *hc)
2983 ULONG ctrlhead;
2984 ULONG hced;
2985 ULONG epcaps;
2986 ULONG headptr;
2987 ULONG headptrbits;
2988 ULONG tailptr;
2989 ULONG nexted;
2990 ULONG ctrl;
2991 ULONG currptr;
2992 ULONG nexttd;
2993 ULONG buffend;
2994 KPRINTF(10, ("*** Schedule debug!!! ***\n"));
2995 ctrlhead = READREG32_LE(hc->hc_RegBase, OHCI_CTRL_HEAD_ED) - hc->hc_PCIVirtualAdjust;
2996 KPRINTF(10, ("CtrlHead = %08lx, should be %08lx\n", ctrlhead, &hc->hc_OhciCtrlHeadED->oed_EPCaps));
2997 hced = ctrlhead;
3000 epcaps = READMEM32_LE(hced);
3001 tailptr = READMEM32_LE(hced+4);
3002 headptr = headptrbits = READMEM32_LE(hced+8);
3003 headptr &= OHCI_PTRMASK;
3004 nexted = READMEM32_LE(hced+12);
3005 KPRINTF(10, ("ED %08lx: EPCaps=%08lx, HeadP=%08lx, TailP=%08lx, NextED=%08lx\n",
3006 hced, epcaps, headptrbits, tailptr, nexted));
3007 if((!(epcaps & OECF_SKIP)) && (tailptr != headptr) && (!(headptrbits & OEHF_HALTED)))
3009 while(tailptr != headptr)
3011 headptr -= hc->hc_PCIVirtualAdjust;
3012 ctrl = READMEM32_LE(headptr);
3013 currptr = READMEM32_LE(headptr+4);
3014 nexttd = READMEM32_LE(headptr+8);
3015 buffend = READMEM32_LE(headptr+12);
3017 KPRINTF(5, (" TD %08lx: Ctrl=%08lx, CurrPtr=%08lx, NextTD=%08lx, BuffEnd=%08lx\n",
3018 headptr, ctrl, currptr, nexttd, buffend));
3019 headptr = nexttd;
3022 if(!nexted)
3024 break;
3026 hced = nexted - hc->hc_PCIVirtualAdjust;
3027 } while(TRUE);
3029 /* \\\ */
3031 /* /// "ohciFreeEDContext()" */
3032 void ohciFreeEDContext(struct PCIController *hc, struct OhciED *oed)
3034 struct OhciTD *otd;
3035 struct OhciTD *nextotd;
3037 KPRINTF(5, ("Unlinking EDContext %08lx\n", oed));
3039 // unlink from schedule
3040 oed->oed_Succ->oed_Pred = oed->oed_Pred;
3041 oed->oed_Pred->oed_Succ = oed->oed_Succ;
3042 oed->oed_Pred->oed_NextED = oed->oed_Succ->oed_Self;
3043 SYNC
3044 EIEIO;
3046 #if 0
3047 // need to make sure that the endpoint is no longer in use by the host controller before recycling it
3048 Disable();
3049 oed->oed_Succ = hc->hc_OhciAsyncFreeED;
3050 hc->hc_OhciAsyncFreeED = oed;
3051 Enable();
3052 #else
3053 Disable();
3054 nextotd = oed->oed_FirstTD;
3055 while(nextotd)
3057 KPRINTF(1, ("FreeTD %08lx\n", nextotd));
3058 otd = nextotd;
3059 nextotd = (struct OhciTD *) otd->otd_Succ;
3060 ohciFreeTD(hc, otd);
3063 ohciFreeED(hc, oed);
3064 Enable();
3065 #endif
3067 /* \\\ */
3069 /* /// "ohciAllocED()" */
3070 inline struct OhciED * ohciAllocED(struct PCIController *hc)
3072 struct OhciED *oed = hc->hc_OhciEDPool;
3074 if(!oed)
3076 // out of EDs!
3077 KPRINTF(20, ("Out of EDs!\n"));
3078 return NULL;
3081 hc->hc_OhciEDPool = oed->oed_Succ;
3082 return(oed);
3084 /* \\\ */
3086 /* /// "ohciFreeED()" */
3087 inline void ohciFreeED(struct PCIController *hc, struct OhciED *oed)
3089 oed->oed_Succ = hc->hc_OhciEDPool;
3090 hc->hc_OhciEDPool = oed;
3092 /* \\\ */
3094 /* /// "ohciAllocTD()" */
3095 inline struct OhciTD * ohciAllocTD(struct PCIController *hc)
3097 struct OhciTD *otd = hc->hc_OhciTDPool;
3099 if(!otd)
3101 // out of TDs!
3102 KPRINTF(20, ("Out of TDs!\n"));
3103 return NULL;
3106 hc->hc_OhciTDPool = otd->otd_Succ;
3107 return(otd);
3109 /* \\\ */
3111 /* /// "ohciFreeTD()" */
3112 inline void ohciFreeTD(struct PCIController *hc, struct OhciTD *otd)
3114 otd->otd_Succ = hc->hc_OhciTDPool;
3115 hc->hc_OhciTDPool = otd;
3117 /* \\\ */
3119 /* /// "ohciUpdateIntTree()" */
3120 void ohciUpdateIntTree(struct PCIController *hc)
3122 struct OhciED *oed;
3123 struct OhciED *predoed;
3124 struct OhciED *lastusedoed;
3125 UWORD cnt;
3127 // optimize linkage between queue heads
3128 predoed = lastusedoed = hc->hc_OhciTermED;
3129 for(cnt = 0; cnt < 5; cnt++)
3131 oed = hc->hc_OhciIntED[cnt];
3132 if(oed->oed_Succ != predoed)
3134 lastusedoed = oed->oed_Succ;
3136 oed->oed_NextED = lastusedoed->oed_Self;
3137 predoed = oed;
3140 /* \\\ */
3142 /* /// "ohciHandleFinishedTDs()" */
3143 void ohciHandleFinishedTDs(struct PCIController *hc)
3145 struct PCIUnit *unit = hc->hc_Unit;
3146 struct IOUsbHWReq *ioreq;
3147 struct IOUsbHWReq *nextioreq;
3148 struct OhciED *oed;
3149 struct OhciTD *otd;
3150 UWORD devadrep;
3151 ULONG len;
3152 ULONG ctrlstatus;
3153 BOOL updatetree = FALSE;
3154 ULONG donehead;
3155 BOOL retire;
3157 KPRINTF(1, ("Checking for work done...\n"));
3158 Disable();
3159 donehead = hc->hc_OhciDoneQueue;
3160 hc->hc_OhciDoneQueue = 0UL;
3161 Enable();
3162 if(!donehead)
3164 KPRINTF(1, ("Nothing to do!\n"));
3165 return;
3167 otd = (struct OhciTD *) (donehead - hc->hc_PCIVirtualAdjust - 16);
3168 KPRINTF(10, ("DoneHead=%08lx, OTD=%08lx, Frame=%ld\n", donehead, otd, READREG32_LE(hc->hc_RegBase, OHCI_FRAMECOUNT)));
3171 oed = otd->otd_ED;
3172 ctrlstatus = READMEM32_LE(&otd->otd_Ctrl);
3173 if(otd->otd_BufferPtr)
3175 // FIXME this will blow up if physical memory is ever going to be discontinuous
3176 len = READMEM32_LE(&otd->otd_BufferEnd) - READMEM32_LE(&otd->otd_BufferPtr) + 1;
3177 } else {
3178 len = otd->otd_Length;
3180 ioreq = oed->oed_IOReq;
3181 KPRINTF(1, ("Examining TD %08lx for ED %08lx (IOReq=%08lx), Status %08lx, len=%ld\n", otd, oed, ioreq, ctrlstatus, len));
3182 ioreq->iouh_Actual += len;
3183 retire = (ioreq->iouh_Actual == ioreq->iouh_Length);
3184 if((ctrlstatus & OTCM_DELAYINT) != OTCF_NOINT)
3186 retire = TRUE;
3188 switch((ctrlstatus & OTCM_COMPLETIONCODE)>>OTCS_COMPLETIONCODE)
3190 case (OTCF_CC_NOERROR>>OTCS_COMPLETIONCODE):
3191 break;
3193 case (OTCF_CC_CRCERROR>>OTCS_COMPLETIONCODE):
3194 KPRINTF(200, ("CRC Error!\n"));
3195 ioreq->iouh_Req.io_Error = UHIOERR_CRCERROR;
3196 retire = TRUE;
3197 break;
3199 case (OTCF_CC_BABBLE>>OTCS_COMPLETIONCODE):
3200 KPRINTF(200, ("Babble/Bitstuffing Error!\n"));
3201 ioreq->iouh_Req.io_Error = UHIOERR_CRCERROR;
3202 retire = TRUE;
3203 break;
3205 case (OTCF_CC_WRONGTOGGLE>>OTCS_COMPLETIONCODE):
3206 KPRINTF(200, ("Data toggle mismatch length = %ld\n", len));
3207 break;
3209 case (OTCF_CC_STALL>>OTCS_COMPLETIONCODE):
3210 KPRINTF(200, ("STALLED!\n"));
3211 ioreq->iouh_Req.io_Error = UHIOERR_STALL;
3212 retire = TRUE;
3213 break;
3215 case (OTCF_CC_TIMEOUT>>OTCS_COMPLETIONCODE):
3216 KPRINTF(200, ("TIMEOUT!\n"));
3217 ioreq->iouh_Req.io_Error = UHIOERR_TIMEOUT;
3218 retire = TRUE;
3219 break;
3221 case (OTCF_CC_PIDCORRUPT>>OTCS_COMPLETIONCODE):
3222 KPRINTF(200, ("PID Error!\n"));
3223 ioreq->iouh_Req.io_Error = UHIOERR_CRCERROR;
3224 retire = TRUE;
3225 break;
3227 case (OTCF_CC_WRONGPID>>OTCS_COMPLETIONCODE):
3228 KPRINTF(200, ("Illegal PID!\n"));
3229 ioreq->iouh_Req.io_Error = UHIOERR_CRCERROR;
3230 retire = TRUE;
3231 break;
3233 case (OTCF_CC_OVERFLOW>>OTCS_COMPLETIONCODE):
3234 KPRINTF(200, ("Overflow Error!\n"));
3235 ioreq->iouh_Req.io_Error = UHIOERR_OVERFLOW;
3236 retire = TRUE;
3237 break;
3239 case (OTCF_CC_SHORTPKT>>OTCS_COMPLETIONCODE):
3240 KPRINTF(10, ("Short packet %ld < %ld\n", len, otd->otd_Length));
3241 if((!ioreq->iouh_Req.io_Error) && (!(ioreq->iouh_Flags & UHFF_ALLOWRUNTPKTS)))
3243 ioreq->iouh_Req.io_Error = UHIOERR_RUNTPACKET;
3245 retire = TRUE;
3246 break;
3248 case (OTCF_CC_OVERRUN>>OTCS_COMPLETIONCODE):
3249 KPRINTF(200, ("Data Overrun Error!\n"));
3250 ioreq->iouh_Req.io_Error = UHIOERR_HOSTERROR;
3251 retire = TRUE;
3252 break;
3254 case (OTCF_CC_UNDERRUN>>OTCS_COMPLETIONCODE):
3255 KPRINTF(200, ("Data Underrun Error!\n"));
3256 ioreq->iouh_Req.io_Error = UHIOERR_HOSTERROR;
3257 retire = TRUE;
3258 break;
3260 case (OTCF_CC_INVALID>>OTCS_COMPLETIONCODE):
3261 KPRINTF(200, ("Not touched?!?\n"));
3262 break;
3264 if(READMEM32_LE(&oed->oed_HeadPtr) & OEHF_HALTED)
3266 KPRINTF(100, ("OED halted!\n"));
3267 retire = TRUE;
3270 if(retire)
3272 Remove(&ioreq->iouh_Req.io_Message.mn_Node);
3273 AddHead(&hc->hc_OhciRetireQueue, &ioreq->iouh_Req.io_Message.mn_Node);
3276 if(!otd->otd_NextTD)
3278 break;
3280 KPRINTF(1, ("NextTD=%08lx\n", otd->otd_NextTD));
3281 otd = (struct OhciTD *) (READMEM32_LE(&otd->otd_NextTD) - hc->hc_PCIVirtualAdjust - 16);
3282 KPRINTF(1, ("NextOTD = %08lx\n", otd));
3283 } while(TRUE);
3285 ioreq = (struct IOUsbHWReq *) hc->hc_OhciRetireQueue.lh_Head;
3286 while((nextioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ))
3288 Remove(&ioreq->iouh_Req.io_Message.mn_Node);
3289 oed = (struct OhciED *) ioreq->iouh_DriverPrivate1;
3290 if(oed)
3292 KPRINTF(10, ("Retiring IOReq=%08lx ED=%08lx, Frame=%ld\n", ioreq, oed, READREG32_LE(hc->hc_RegBase, OHCI_FRAMECOUNT)));
3294 if(oed->oed_Continue)
3296 ULONG actual = ioreq->iouh_Actual;
3297 ULONG oldenables;
3298 ULONG phyaddr;
3299 struct OhciTD *predotd = NULL;
3301 KPRINTF(10, ("Reloading Bulk transfer at %ld of %ld\n", ioreq->iouh_Actual, ioreq->iouh_Length));
3302 otd = oed->oed_FirstTD;
3303 phyaddr = (ULONG) pciGetPhysical(hc, &(((UBYTE *) ioreq->iouh_Data)[actual]));
3306 len = ioreq->iouh_Length - actual;
3307 if(len > OHCI_PAGE_SIZE)
3309 len = OHCI_PAGE_SIZE;
3311 if((!otd->otd_Succ) && (actual + len == ioreq->iouh_Length) && (!(ioreq->iouh_Flags & UHFF_NOSHORTPKT)) && ((actual % ioreq->iouh_MaxPktSize) == 0))
3313 // special case -- zero padding would not fit in this run,
3314 // and next time, we would forget about it. So rather abort
3315 // reload now, so the zero padding goes with the next reload
3316 break;
3318 predotd = otd;
3319 otd->otd_Length = len;
3320 KPRINTF(1, ("TD with %ld bytes\n", len));
3321 CONSTWRITEMEM32_LE(&otd->otd_Ctrl, OTCF_CC_INVALID|OTCF_NOINT);
3322 if(otd->otd_Succ)
3324 otd->otd_NextTD = otd->otd_Succ->otd_Self;
3326 if(len)
3328 WRITEMEM32_LE(&otd->otd_BufferPtr, phyaddr);
3329 phyaddr += len - 1;
3330 WRITEMEM32_LE(&otd->otd_BufferEnd, phyaddr);
3331 phyaddr++;
3332 } else {
3333 CONSTWRITEMEM32_LE(&otd->otd_BufferPtr, 0);
3334 CONSTWRITEMEM32_LE(&otd->otd_BufferEnd, 0);
3336 actual += len;
3337 otd = otd->otd_Succ;
3338 } while(otd && ((actual < ioreq->iouh_Length) || (len && (ioreq->iouh_Dir == UHDIR_OUT) && (actual == ioreq->iouh_Length) && (!(ioreq->iouh_Flags & UHFF_NOSHORTPKT)) && ((actual % ioreq->iouh_MaxPktSize) == 0))));
3339 oed->oed_Continue = (actual < ioreq->iouh_Length);
3340 predotd->otd_NextTD = hc->hc_OhciTermTD->otd_Self;
3342 CONSTWRITEMEM32_LE(&predotd->otd_Ctrl, OTCF_CC_INVALID);
3344 Disable();
3345 AddTail(&hc->hc_TDQueue, &ioreq->iouh_Req.io_Message.mn_Node);
3347 // keep toggle bit
3348 ctrlstatus = READMEM32_LE(&oed->oed_HeadPtr) & OEHF_DATA1;
3349 WRITEMEM32_LE(&oed->oed_HeadPtr, READMEM32_LE(oed->oed_FirstTD->otd_Self)|ctrlstatus);
3351 oldenables = READREG32_LE(hc->hc_RegBase, OHCI_CMDSTATUS);
3352 oldenables |= OCSF_BULKENABLE;
3353 WRITEREG32_LE(hc->hc_RegBase, OHCI_CMDSTATUS, oldenables);
3354 SYNC;
3355 EIEIO;
3356 Enable();
3357 } else {
3358 // disable ED
3359 ctrlstatus = READMEM32_LE(&oed->oed_HeadPtr);
3360 ctrlstatus |= OEHF_HALTED;
3361 WRITEMEM32_LE(&oed->oed_HeadPtr, ctrlstatus);
3363 devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint + ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
3364 unit->hu_DevBusyReq[devadrep] = NULL;
3365 unit->hu_DevDataToggle[devadrep] = (ctrlstatus & OEHF_DATA1) ? TRUE : FALSE;
3367 ohciFreeEDContext(hc, oed);
3368 if(ioreq->iouh_Req.io_Command == UHCMD_INTXFER)
3370 updatetree = TRUE;
3372 // check for successful clear feature and set address ctrl transfers
3373 if((!ioreq->iouh_Req.io_Error) && (ioreq->iouh_Req.io_Command == UHCMD_CONTROLXFER))
3375 uhwCheckSpecialCtrlTransfers(hc, ioreq);
3377 ReplyMsg(&ioreq->iouh_Req.io_Message);
3379 } else {
3380 KPRINTF(20, ("IOReq=%08lx has no OED!\n", ioreq));
3382 ioreq = nextioreq;
3384 if(updatetree)
3386 ohciUpdateIntTree(hc);
3389 /* \\\ */
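/* OHCI completion recap: the controller retires TDs onto a single done list
   published through the HCCA; the interrupt handler splices that onto
   hc_OhciDoneQueue and the routine above walks it. Physical TD addresses are
   mapped back to driver structures by subtracting hc_PCIVirtualAdjust plus
   the 16 bytes of software bookkeeping that precede each hardware TD. */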
3391 /* /// "ohciScheduleCtrlTDs()" */
3392 void ohciScheduleCtrlTDs(struct PCIController *hc)
3394 struct PCIUnit *unit = hc->hc_Unit;
3395 struct IOUsbHWReq *ioreq;
3396 UWORD devadrep;
3397 struct OhciED *oed;
3398 struct OhciTD *setupotd;
3399 struct OhciTD *dataotd;
3400 struct OhciTD *termotd;
3401 struct OhciTD *predotd;
3402 ULONG actual;
3403 ULONG epcaps;
3404 ULONG ctrl;
3405 ULONG len;
3406 ULONG phyaddr;
3407 ULONG oldenables;
3409 /* *** CTRL Transfers *** */
3410 KPRINTF(1, ("Scheduling new CTRL transfers...\n"));
3411 ioreq = (struct IOUsbHWReq *) hc->hc_CtrlXFerQueue.lh_Head;
3412 while(((struct Node *) ioreq)->ln_Succ)
3414 devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint;
3415 KPRINTF(10, ("New CTRL transfer to %ld.%ld: %ld bytes\n", ioreq->iouh_DevAddr, ioreq->iouh_Endpoint, ioreq->iouh_Length));
3416 /* is endpoint already in use or do we have to wait for next transaction */
3417 if(unit->hu_DevBusyReq[devadrep])
3419 KPRINTF(5, ("Endpoint %02lx in use!\n", devadrep));
3420 ioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ;
3421 continue;
3424 oed = ohciAllocED(hc);
3425 if(!oed)
3427 break;
3430 setupotd = ohciAllocTD(hc);
3431 if(!setupotd)
3433 ohciFreeED(hc, oed);
3434 break;
3436 termotd = ohciAllocTD(hc);
3437 if(!termotd)
3439 ohciFreeTD(hc, setupotd);
3440 ohciFreeED(hc, oed);
3441 break;
3443 oed->oed_IOReq = ioreq;
3445 KPRINTF(1, ("SetupTD=%08lx, TermTD=%08lx\n", setupotd, termotd));
3447 // fill setup td
3448 epcaps = (ioreq->iouh_DevAddr<<OECS_DEVADDR)|(ioreq->iouh_Endpoint<<OECS_ENDPOINT)|(ioreq->iouh_MaxPktSize<<OECS_MAXPKTLEN)|OECF_DIRECTION_TD;
3450 if(ioreq->iouh_Flags & UHFF_LOWSPEED)
3452 KPRINTF(5, ("*** LOW SPEED ***\n"));
3453 epcaps |= OECF_LOWSPEED;
3456 WRITEMEM32_LE(&oed->oed_EPCaps, epcaps);
3458 oed->oed_TailPtr = hc->hc_OhciTermTD->otd_Self;
3459 oed->oed_HeadPtr = setupotd->otd_Self;
3460 oed->oed_FirstTD = setupotd;
3462 setupotd->otd_ED = oed;
3463 setupotd->otd_Length = 0; // don't increase io_Actual for that transfer
3464 CONSTWRITEMEM32_LE(&setupotd->otd_Ctrl, OTCF_PIDCODE_SETUP|OTCF_CC_INVALID|OTCF_NOINT);
3465 WRITEMEM32_LE(&setupotd->otd_BufferPtr, (ULONG) pciGetPhysical(hc, &ioreq->iouh_SetupData));
3466 WRITEMEM32_LE(&setupotd->otd_BufferEnd, (ULONG) pciGetPhysical(hc, ((UBYTE *) (&ioreq->iouh_SetupData)) + 7));
3468 ctrl = (ioreq->iouh_SetupData.bmRequestType & URTF_IN) ? (OTCF_PIDCODE_IN|OTCF_CC_INVALID|OTCF_NOINT) : (OTCF_PIDCODE_OUT|OTCF_CC_INVALID|OTCF_NOINT);
3470 predotd = setupotd;
3471 if(ioreq->iouh_Length)
3473 phyaddr = (ULONG) pciGetPhysical(hc, ioreq->iouh_Data);
3474 actual = 0;
3477 dataotd = ohciAllocTD(hc);
3478 if(!dataotd)
3480 predotd->otd_Succ = NULL;
3481 break;
3483 dataotd->otd_ED = oed;
3484 predotd->otd_Succ = dataotd;
3485 predotd->otd_NextTD = dataotd->otd_Self;
3486 len = ioreq->iouh_Length - actual;
3487 if(len > OHCI_PAGE_SIZE)
3489 len = OHCI_PAGE_SIZE;
3491 dataotd->otd_Length = len;
3492 KPRINTF(1, ("TD with %ld bytes\n", len));
3493 WRITEMEM32_LE(&dataotd->otd_Ctrl, ctrl);
3494 WRITEMEM32_LE(&dataotd->otd_BufferPtr, phyaddr);
3495 phyaddr += len - 1;
3496 WRITEMEM32_LE(&dataotd->otd_BufferEnd, phyaddr);
3497 phyaddr++;
3498 actual += len;
3499 predotd = dataotd;
3500 } while(actual < ioreq->iouh_Length);
3502 if(actual != ioreq->iouh_Length)
3504 // out of TDs
3505 KPRINTF(200, ("Out of TDs for Ctrl Transfer!\n"));
3506 dataotd = setupotd->otd_Succ;
3507 ohciFreeTD(hc, setupotd);
3508 while(dataotd)
3510 predotd = dataotd;
3511 dataotd = dataotd->otd_Succ;
3512 ohciFreeTD(hc, predotd);
3514 ohciFreeTD(hc, termotd);
3515 ohciFreeED(hc, oed);
3516 break;
3518 predotd->otd_Succ = termotd;
3519 predotd->otd_NextTD = termotd->otd_Self;
3520 } else {
3521 setupotd->otd_Succ = termotd;
3522 setupotd->otd_NextTD = termotd->otd_Self;
3525 ctrl ^= (OTCF_PIDCODE_IN^OTCF_PIDCODE_OUT)|OTCF_NOINT|OTCF_DATA1|OTCF_TOGGLEFROMTD;
3527 termotd->otd_Length = 0;
3528 termotd->otd_ED = oed;
3529 termotd->otd_Succ = NULL;
3530 termotd->otd_NextTD = hc->hc_OhciTermTD->otd_Self;
3531 CONSTWRITEMEM32_LE(&termotd->otd_Ctrl, ctrl);
3532 CONSTWRITEMEM32_LE(&termotd->otd_BufferPtr, 0);
3533 CONSTWRITEMEM32_LE(&termotd->otd_BufferEnd, 0);
3535 Remove(&ioreq->iouh_Req.io_Message.mn_Node);
3536 ioreq->iouh_DriverPrivate1 = oed;
3538 // manage endpoint going busy
3539 unit->hu_DevBusyReq[devadrep] = ioreq;
3540 unit->hu_NakTimeoutFrame[devadrep] = (ioreq->iouh_Flags & UHFF_NAKTIMEOUT) ? hc->hc_FrameCounter + ioreq->iouh_NakTimeout : 0;
3542 Disable();
3543 AddTail(&hc->hc_TDQueue, (struct Node *) ioreq);
3545 // looks good to me, now enqueue this entry
3546 oed->oed_Succ = hc->hc_OhciCtrlTailED;
3547 oed->oed_NextED = oed->oed_Succ->oed_Self;
3548 oed->oed_Pred = hc->hc_OhciCtrlTailED->oed_Pred;
3549 oed->oed_Pred->oed_Succ = oed;
3550 oed->oed_Pred->oed_NextED = oed->oed_Self;
3551 oed->oed_Succ->oed_Pred = oed;
3552 SYNC;
3553 EIEIO;
3555 KPRINTF(5, ("ED: EPCaps=%08lx, HeadPtr=%08lx, TailPtr=%08lx, NextED=%08lx\n",
3556 READMEM32_LE(&oed->oed_EPCaps),
3557 READMEM32_LE(&oed->oed_HeadPtr),
3558 READMEM32_LE(&oed->oed_TailPtr),
3559 READMEM32_LE(&oed->oed_NextED)));
3561 oldenables = READREG32_LE(hc->hc_RegBase, OHCI_CMDSTATUS);
3562 if(!(oldenables & OCSF_CTRLENABLE))
3564 CONSTWRITEREG32_LE(hc->hc_RegBase, OHCI_CTRL_ED, 0);
3566 oldenables |= OCSF_CTRLENABLE;
3567 WRITEREG32_LE(hc->hc_RegBase, OHCI_CMDSTATUS, oldenables);
3568 SYNC;
3569 EIEIO;
3570 Enable();
3572 ioreq = (struct IOUsbHWReq *) hc->hc_CtrlXFerQueue.lh_Head;
3575 /* \\\ */
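/* ED queue model used above: an OHCI ED carries HeadPtr/TailPtr and the
   controller executes TDs until head meets tail, so the shared dummy
   hc_OhciTermTD is linked in as the tail and never runs. New EDs are spliced
   into both the software ring (oed_Succ/oed_Pred) and the hardware ring
   (oed_NextED) before OCSF_CTRLENABLE is (re)asserted. */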
3577 /* /// "ohciScheduleIntTDs()" */
3578 void ohciScheduleIntTDs(struct PCIController *hc)
3580 struct PCIUnit *unit = hc->hc_Unit;
3581 struct IOUsbHWReq *ioreq;
3582 UWORD devadrep;
3583 struct OhciED *intoed;
3584 struct OhciED *oed;
3585 struct OhciTD *otd;
3586 struct OhciTD *predotd;
3587 ULONG actual;
3588 ULONG epcaps;
3589 ULONG len;
3590 ULONG phyaddr;
3592 /* *** INT Transfers *** */
3593 KPRINTF(1, ("Scheduling new INT transfers...\n"));
3594 ioreq = (struct IOUsbHWReq *) hc->hc_IntXFerQueue.lh_Head;
3595 while(((struct Node *) ioreq)->ln_Succ)
3597 devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint + ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
3598 KPRINTF(10, ("New INT transfer to %ld.%ld: %ld bytes\n", ioreq->iouh_DevAddr, ioreq->iouh_Endpoint, ioreq->iouh_Length));
3599 /* is endpoint already in use or do we have to wait for next transaction */
3600 if(unit->hu_DevBusyReq[devadrep])
3602 KPRINTF(5, ("Endpoint %02lx in use!\n", devadrep));
3603 ioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ;
3604 continue;
3607 oed = ohciAllocED(hc);
3608 if(!oed)
3610 break;
3613 oed->oed_IOReq = ioreq;
3615 epcaps = (ioreq->iouh_DevAddr<<OECS_DEVADDR)|(ioreq->iouh_Endpoint<<OECS_ENDPOINT)|(ioreq->iouh_MaxPktSize<<OECS_MAXPKTLEN);
3616 epcaps |= (ioreq->iouh_Dir == UHDIR_IN) ? OECF_DIRECTION_IN : OECF_DIRECTION_OUT;
3618 if(ioreq->iouh_Flags & UHFF_LOWSPEED)
3620 KPRINTF(5, ("*** LOW SPEED ***\n"));
3621 epcaps |= OECF_LOWSPEED;
3624 WRITEMEM32_LE(&oed->oed_EPCaps, epcaps);
3625 oed->oed_TailPtr = hc->hc_OhciTermTD->otd_Self;
3627 predotd = NULL;
3628 phyaddr = (ULONG) pciGetPhysical(hc, ioreq->iouh_Data);
3629 actual = 0;
3632 otd = ohciAllocTD(hc);
3633 if(!otd)
3635 if(predotd) predotd->otd_Succ = NULL; // first TD allocation may fail before predotd is set
3636 break;
3638 otd->otd_ED = oed;
3639 if(predotd)
3641 predotd->otd_Succ = otd;
3642 predotd->otd_NextTD = otd->otd_Self;
3643 } else {
3644 WRITEMEM32_LE(&oed->oed_HeadPtr, READMEM32_LE(otd->otd_Self)|(unit->hu_DevDataToggle[devadrep] ? OEHF_DATA1 : 0));
3645 oed->oed_FirstTD = otd;
3647 len = ioreq->iouh_Length - actual;
3648 if(len > OHCI_PAGE_SIZE)
3650 len = OHCI_PAGE_SIZE;
3652 otd->otd_Length = len;
3653 KPRINTF(1, ("TD with %ld bytes\n", len));
3654 CONSTWRITEMEM32_LE(&otd->otd_Ctrl, OTCF_CC_INVALID|OTCF_NOINT);
3655 if(len)
3657 WRITEMEM32_LE(&otd->otd_BufferPtr, phyaddr);
3658 phyaddr += len - 1;
3659 WRITEMEM32_LE(&otd->otd_BufferEnd, phyaddr);
3660 phyaddr++;
3661 } else {
3662 CONSTWRITEMEM32_LE(&otd->otd_BufferPtr, 0);
3663 CONSTWRITEMEM32_LE(&otd->otd_BufferEnd, 0);
3665 actual += len;
3666 predotd = otd;
3667 } while(actual < ioreq->iouh_Length);
3669 if(actual != ioreq->iouh_Length)
3671 // out of TDs
3672 KPRINTF(200, ("Out of TDs for Int Transfer!\n"));
3673 otd = oed->oed_FirstTD;
3674 while(otd)
3676 predotd = otd;
3677 otd = otd->otd_Succ;
3678 ohciFreeTD(hc, predotd);
3680 ohciFreeED(hc, oed);
3681 break;
3683 predotd->otd_Succ = NULL;
3684 predotd->otd_NextTD = hc->hc_OhciTermTD->otd_Self;
3686 CONSTWRITEMEM32_LE(&predotd->otd_Ctrl, OTCF_CC_INVALID);
3688 if(ioreq->iouh_Interval >= 31)
3690 intoed = hc->hc_OhciIntED[4]; // 32ms interval
3691 } else {
3692 UWORD cnt = 0;
3695 intoed = hc->hc_OhciIntED[cnt++];
3696 } while(ioreq->iouh_Interval > (1<<cnt));
3699 Remove(&ioreq->iouh_Req.io_Message.mn_Node);
3700 ioreq->iouh_DriverPrivate1 = oed;
3702 // manage endpoint going busy
3703 unit->hu_DevBusyReq[devadrep] = ioreq;
3704 unit->hu_NakTimeoutFrame[devadrep] = (ioreq->iouh_Flags & UHFF_NAKTIMEOUT) ? hc->hc_FrameCounter + ioreq->iouh_NakTimeout : 0;
3706 Disable();
3707 AddTail(&hc->hc_TDQueue, (struct Node *) ioreq);
3709 // looks good to me, now enqueue this entry (behind Int head)
3710 oed->oed_Succ = intoed->oed_Succ;
3711 oed->oed_NextED = intoed->oed_Succ->oed_Self;
3712 oed->oed_Pred = intoed;
3713 intoed->oed_Succ = oed;
3714 intoed->oed_NextED = oed->oed_Self;
3715 oed->oed_Succ->oed_Pred = oed;
3716 SYNC;
3717 EIEIO;
3719 KPRINTF(5, ("ED: EPCaps=%08lx, HeadPtr=%08lx, TailPtr=%08lx, NextED=%08lx\n",
3720 READMEM32_LE(&oed->oed_EPCaps),
3721 READMEM32_LE(&oed->oed_HeadPtr),
3722 READMEM32_LE(&oed->oed_TailPtr),
3723 READMEM32_LE(&oed->oed_NextED)));
3724 Enable();
3726 ioreq = (struct IOUsbHWReq *) hc->hc_IntXFerQueue.lh_Head;
3729 /* \\\ */
3731 /* /// "ohciScheduleBulkTDs()" */
3732 void ohciScheduleBulkTDs(struct PCIController *hc)
3734 struct PCIUnit *unit = hc->hc_Unit;
3735 struct IOUsbHWReq *ioreq;
3736 UWORD devadrep;
3737 struct OhciED *oed;
3738 struct OhciTD *otd;
3739 struct OhciTD *predotd;
3740 ULONG actual;
3741 ULONG epcaps;
3742 ULONG len;
3743 ULONG phyaddr;
3744 ULONG oldenables;
3746 /* *** BULK Transfers *** */
3747 KPRINTF(1, ("Scheduling new BULK transfers...\n"));
3748 ioreq = (struct IOUsbHWReq *) hc->hc_BulkXFerQueue.lh_Head;
3749 while(((struct Node *) ioreq)->ln_Succ)
3751 devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint + ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
3752 KPRINTF(10, ("New BULK transfer to %ld.%ld: %ld bytes\n", ioreq->iouh_DevAddr, ioreq->iouh_Endpoint, ioreq->iouh_Length));
3753 /* is endpoint already in use or do we have to wait for next transaction */
3754 if(unit->hu_DevBusyReq[devadrep])
3756 KPRINTF(5, ("Endpoint %02lx in use!\n", devadrep));
3757 ioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ;
3758 continue;
3761 oed = ohciAllocED(hc);
3762 if(!oed)
3764 break;
3767 oed->oed_IOReq = ioreq;
3769 epcaps = (ioreq->iouh_DevAddr<<OECS_DEVADDR)|(ioreq->iouh_Endpoint<<OECS_ENDPOINT)|(ioreq->iouh_MaxPktSize<<OECS_MAXPKTLEN);
3770 epcaps |= (ioreq->iouh_Dir == UHDIR_IN) ? OECF_DIRECTION_IN : OECF_DIRECTION_OUT;
3772 if(ioreq->iouh_Flags & UHFF_LOWSPEED)
3774 KPRINTF(5, ("*** LOW SPEED ***\n"));
3775 epcaps |= OECF_LOWSPEED;
3778 WRITEMEM32_LE(&oed->oed_EPCaps, epcaps);
3779 oed->oed_TailPtr = hc->hc_OhciTermTD->otd_Self;
3781 predotd = NULL;
3782 phyaddr = (ULONG) pciGetPhysical(hc, ioreq->iouh_Data);
3783 actual = 0;
3786 if((actual >= OHCI_TD_BULK_LIMIT) && (actual < ioreq->iouh_Length))
3788 KPRINTF(10, ("Bulk too large, splitting...\n"));
3789 break;
3791 otd = ohciAllocTD(hc);
3792 if(!otd)
3794 if(predotd) predotd->otd_Succ = NULL; // first TD allocation may fail before predotd is set
3795 break;
3797 otd->otd_ED = oed;
3798 if(predotd)
3800 predotd->otd_Succ = otd;
3801 predotd->otd_NextTD = otd->otd_Self;
3802 } else {
3803 WRITEMEM32_LE(&oed->oed_HeadPtr, READMEM32_LE(otd->otd_Self)|(unit->hu_DevDataToggle[devadrep] ? OEHF_DATA1 : 0));
3804 oed->oed_FirstTD = otd;
3806 len = ioreq->iouh_Length - actual;
3807 if(len > OHCI_PAGE_SIZE)
3809 len = OHCI_PAGE_SIZE;
3811 otd->otd_Length = len;
3812 KPRINTF(1, ("TD with %ld bytes\n", len));
3813 CONSTWRITEMEM32_LE(&otd->otd_Ctrl, OTCF_CC_INVALID|OTCF_NOINT);
3814 if(len)
3816 WRITEMEM32_LE(&otd->otd_BufferPtr, phyaddr);
3817 phyaddr += len - 1;
3818 WRITEMEM32_LE(&otd->otd_BufferEnd, phyaddr);
3819 phyaddr++;
3820 } else {
3821 CONSTWRITEMEM32_LE(&otd->otd_BufferPtr, 0);
3822 CONSTWRITEMEM32_LE(&otd->otd_BufferEnd, 0);
3824 actual += len;
3826 predotd = otd;
3827 } while((actual < ioreq->iouh_Length) || (len && (ioreq->iouh_Dir == UHDIR_OUT) && (actual == ioreq->iouh_Length) && (!(ioreq->iouh_Flags & UHFF_NOSHORTPKT)) && ((actual % ioreq->iouh_MaxPktSize) == 0)));
3829 if(!actual)
3831 // out of TDs
3832 KPRINTF(200, ("Out of TDs for Bulk Transfer!\n"));
3833 otd = oed->oed_FirstTD;
3834 while(otd)
3836 predotd = otd;
3837 otd = otd->otd_Succ;
3838 ohciFreeTD(hc, predotd);
3840 ohciFreeED(hc, oed);
3841 break;
3843 oed->oed_Continue = (actual < ioreq->iouh_Length);
3844 predotd->otd_Succ = NULL;
3845 predotd->otd_NextTD = hc->hc_OhciTermTD->otd_Self;
3847 CONSTWRITEMEM32_LE(&predotd->otd_Ctrl, OTCF_CC_INVALID);
3849 Remove(&ioreq->iouh_Req.io_Message.mn_Node);
3850 ioreq->iouh_DriverPrivate1 = oed;
3852 // manage endpoint going busy
3853 unit->hu_DevBusyReq[devadrep] = ioreq;
3854 unit->hu_NakTimeoutFrame[devadrep] = (ioreq->iouh_Flags & UHFF_NAKTIMEOUT) ? hc->hc_FrameCounter + ioreq->iouh_NakTimeout : 0;
3856 Disable();
3857 AddTail(&hc->hc_TDQueue, (struct Node *) ioreq);
3859 // looks good to me, now enqueue this entry
3860 oed->oed_Succ = hc->hc_OhciBulkTailED;
3861 oed->oed_NextED = oed->oed_Succ->oed_Self;
3862 oed->oed_Pred = hc->hc_OhciBulkTailED->oed_Pred;
3863 oed->oed_Pred->oed_Succ = oed;
3864 oed->oed_Pred->oed_NextED = oed->oed_Self;
3865 oed->oed_Succ->oed_Pred = oed;
3866 SYNC;
3867 EIEIO;
3869 KPRINTF(10, ("Activating BULK at %ld\n", READREG32_LE(hc->hc_RegBase, OHCI_FRAMECOUNT)));
3871 KPRINTF(5, ("ED: EPCaps=%08lx, HeadPtr=%08lx, TailPtr=%08lx, NextED=%08lx\n",
3872 READMEM32_LE(&oed->oed_EPCaps),
3873 READMEM32_LE(&oed->oed_HeadPtr),
3874 READMEM32_LE(&oed->oed_TailPtr),
3875 READMEM32_LE(&oed->oed_NextED)));
3877 oldenables = READREG32_LE(hc->hc_RegBase, OHCI_CMDSTATUS);
3878 if(!(oldenables & OCSF_BULKENABLE))
3880 CONSTWRITEREG32_LE(hc->hc_RegBase, OHCI_BULK_ED, 0);
3882 oldenables |= OCSF_BULKENABLE;
3883 WRITEREG32_LE(hc->hc_RegBase, OHCI_CMDSTATUS, oldenables);
3884 SYNC;
3885 EIEIO;
3886 Enable();
3887 ioreq = (struct IOUsbHWReq *) hc->hc_BulkXFerQueue.lh_Head;
3890 /* \\\ */
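/* Bulk requests larger than OHCI_TD_BULK_LIMIT are queued in chunks:
   oed_Continue marks the partially built ED so that ohciHandleFinishedTDs()
   refills the same TD chain with the next chunk instead of retiring the
   IOReq. */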
3892 /* /// "ohciCompleteInt()" */
3893 void ohciCompleteInt(struct PCIController *hc)
3895 ULONG framecnt = READREG32_LE(hc->hc_RegBase, OHCI_FRAMECOUNT);
3897 KPRINTF(1, ("CompleteInt!\n"));
3898 if(framecnt < (hc->hc_FrameCounter & 0xffff))
3900 hc->hc_FrameCounter |= 0xffff;
3901 hc->hc_FrameCounter++;
3902 hc->hc_FrameCounter += framecnt;
3903 KPRINTF(10, ("Frame Counter Rollover %ld\n", hc->hc_FrameCounter));
3904 } else {
3905 hc->hc_FrameCounter = (hc->hc_FrameCounter & 0xffff0000)|framecnt;
3908 /* **************** PROCESS DONE TRANSFERS **************** */
3910 if(hc->hc_OhciDoneQueue)
3912 ohciHandleFinishedTDs(hc);
3915 if(hc->hc_CtrlXFerQueue.lh_Head->ln_Succ)
3917 ohciScheduleCtrlTDs(hc);
3920 if(hc->hc_IntXFerQueue.lh_Head->ln_Succ)
3922 ohciScheduleIntTDs(hc);
3925 if(hc->hc_BulkXFerQueue.lh_Head->ln_Succ)
3927 ohciScheduleBulkTDs(hc);
3930 KPRINTF(1, ("CompleteDone\n"));
3932 /* \\\ */
3934 /* /// "ohciIntCode()" */
3935 void ohciIntCode(HIDDT_IRQ_Handler *irq, HIDDT_IRQ_HwInfo *hw)
3937 struct PCIController *hc = (struct PCIController *) irq->h_Data;
3938 struct PCIDevice *base = hc->hc_Device;
3939 struct PCIUnit *unit = hc->hc_Unit;
3940 ULONG intr = 0;
3941 ULONG donehead = READMEM32_LE(&hc->hc_OhciHCCA->oha_DoneHead);
3943 if(donehead)
3945 intr = OISF_DONEHEAD;
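    // per the OHCI spec, the LSB of HccaDoneHead is set when further
    // unmasked interrupt status bits are pending besides the done head update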
3946 if(donehead & 1)
3948 intr |= READREG32_LE(hc->hc_RegBase, OHCI_INTSTATUS);
3950 donehead &= OHCI_PTRMASK;
3951 KPRINTF(5, ("New Donehead %08lx for old %08lx\n", donehead, hc->hc_OhciDoneQueue));
3952 if(hc->hc_OhciDoneQueue)
3954 struct OhciTD *donetd = (struct OhciTD *) (hc->hc_OhciDoneQueue - hc->hc_PCIVirtualAdjust - 16);
3955 WRITEMEM32_LE(&donetd->otd_NextTD, donehead);
3957 hc->hc_OhciDoneQueue = donehead;
3958 CONSTWRITEMEM32_LE(&hc->hc_OhciHCCA->oha_DoneHead, 0);
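    // the new done head is chained onto the TDs still pending in
    // hc_OhciDoneQueue; the pointer arithmetic above converts the physical
    // done head back into a struct OhciTD pointer (the structure starts
    // 16 bytes before the physical TD, as with the UHCI TDs)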
3959 } else {
3960 intr = READREG32_LE(hc->hc_RegBase, OHCI_INTSTATUS);
3962 if(intr & hc->hc_PCIIntEnMask)
3964 WRITEREG32_LE(hc->hc_RegBase, OHCI_INTSTATUS, intr);
3965 KPRINTF(1, ("INT=%02lx\n", intr));
3966 if(intr & OISF_HOSTERROR)
3968 KPRINTF(200, ("Host ERROR!\n"));
3970 if(intr & OISF_SCHEDOVERRUN)
3972 KPRINTF(200, ("Schedule overrun!\n"));
3974 if(!hc->hc_Online)
3976 return;
3978 if(intr & OISF_FRAMECOUNTOVER)
3980 ULONG framecnt = READREG32_LE(hc->hc_RegBase, OHCI_FRAMECOUNT);
3981 hc->hc_FrameCounter |= 0x7fff;
3982 hc->hc_FrameCounter++;
3983 hc->hc_FrameCounter |= framecnt;
3984 KPRINTF(10, ("Frame Counter Rollover %ld\n", hc->hc_FrameCounter));
3986 if(intr & OISF_HUBCHANGE)
3988 UWORD hciport;
3989 ULONG oldval;
3990 UWORD portreg = OHCI_PORTSTATUS;
3991 for(hciport = 0; hciport < hc->hc_NumPorts; hciport++, portreg += 4)
3993 oldval = READREG32_LE(hc->hc_RegBase, portreg);
3994 if(oldval & OHPF_OVERCURRENTCHG)
3996 hc->hc_PortChangeMap[hciport] |= UPSF_PORT_OVER_CURRENT;
3998 if(oldval & OHPF_RESETCHANGE)
4000 hc->hc_PortChangeMap[hciport] |= UPSF_PORT_RESET;
4002 if(oldval & OHPF_ENABLECHANGE)
4004 hc->hc_PortChangeMap[hciport] |= UPSF_PORT_ENABLE;
4006 if(oldval & OHPF_CONNECTCHANGE)
4008 hc->hc_PortChangeMap[hciport] |= UPSF_PORT_CONNECTION;
4010 if(oldval & OHPF_RESUMEDTX)
4012 hc->hc_PortChangeMap[hciport] |= UPSF_PORT_SUSPEND;
4014 KPRINTF(20, ("PCI Int Port %ld (glob %ld) Change %08lx\n", hciport, hc->hc_PortNum20[hciport] + 1, oldval));
4015 if(hc->hc_PortChangeMap[hciport])
4017 unit->hu_RootPortChanges |= 1UL<<(hc->hc_PortNum20[hciport] + 1);
4020 uhwCheckRootHubChanges(unit);
4022 if(intr & OISF_DONEHEAD)
4024 KPRINTF(10, ("DoneHead %ld\n", READREG32_LE(hc->hc_RegBase, OHCI_FRAMECOUNT)));
4025 SureCause(base, &hc->hc_CompleteInt);
4029 /* \\\ */
4031 /* ---------------------------------------------------------------------- *
4032 * EHCI Specific Stuff *
4033 * ---------------------------------------------------------------------- */
4035 /* /// "ehciFreeAsyncContext()" */
4036 void ehciFreeAsyncContext(struct PCIController *hc, struct EhciQH *eqh)
4038 KPRINTF(5, ("Unlinking AsyncContext %08lx\n", eqh));
4039 // unlink from schedule
4040 eqh->eqh_Pred->eqh_NextQH = eqh->eqh_Succ->eqh_Self;
4041 SYNC;
4042 EIEIO;
4043 eqh->eqh_Succ->eqh_Pred = eqh->eqh_Pred;
4044 eqh->eqh_Pred->eqh_Succ = eqh->eqh_Succ;
4045 SYNC;
4046 EIEIO;
4048 // need to wait until an async schedule rollover before freeing these
4049 Disable();
4050 eqh->eqh_Succ = hc->hc_EhciAsyncFreeQH;
4051 hc->hc_EhciAsyncFreeQH = eqh;
4052 // activate doorbell
4053 WRITEREG32_LE(hc->hc_RegBase, EHCI_USBCMD, hc->hc_EhciUsbCmd|EHUF_ASYNCDOORBELL);
4054 Enable();
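    // freed queue heads are parked on hc_EhciAsyncFreeQH and the "async
    // advance" doorbell is rung; the controller raises an interrupt once it
    // no longer holds cached pointers to the unlinked QH, and only then does
    // ehciCompleteInt() actually recycle the memory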
4056 /* \\\ */
4058 /* /// "ehciFreePeriodicContext()" */
4059 void ehciFreePeriodicContext(struct PCIController *hc, struct EhciQH *eqh)
4061 struct EhciTD *etd;
4062 struct EhciTD *nextetd;
4064 KPRINTF(5, ("Unlinking PeriodicContext %08lx\n", eqh));
4065 // unlink from schedule
4066 eqh->eqh_Pred->eqh_NextQH = eqh->eqh_Succ->eqh_Self;
4067 SYNC;
4068 EIEIO;
4069 eqh->eqh_Succ->eqh_Pred = eqh->eqh_Pred;
4070 eqh->eqh_Pred->eqh_Succ = eqh->eqh_Succ;
4071 SYNC;
4072 EIEIO;
4074 Disable(); // avoid race condition with interrupt
4075 nextetd = eqh->eqh_FirstTD;
4076 while((etd = nextetd))
4078 KPRINTF(1, ("FreeTD %08lx\n", nextetd));
4079 nextetd = etd->etd_Succ;
4080 ehciFreeTD(hc, etd);
4082 ehciFreeQH(hc, eqh);
4083 Enable();
4085 /* \\\ */
4087 /* /// "ehciFreeQHandTDs()" */
4088 void ehciFreeQHandTDs(struct PCIController *hc, struct EhciQH *eqh)
4090 struct EhciTD *etd = NULL;
4091 struct EhciTD *nextetd;
4093 KPRINTF(5, ("Unlinking QContext %08lx\n", eqh));
4094 nextetd = eqh->eqh_FirstTD;
4095 while(nextetd)
4097 KPRINTF(1, ("FreeTD %08lx\n", nextetd));
4098 etd = nextetd;
4099 nextetd = (struct EhciTD *) etd->etd_Succ;
4100 ehciFreeTD(hc, etd);
4103 ehciFreeQH(hc, eqh);
4105 /* \\\ */
4107 /* /// "ehciAllocQH()" */
4108 inline struct EhciQH * ehciAllocQH(struct PCIController *hc)
4110 struct EhciQH *eqh = hc->hc_EhciQHPool;
4112 if(!eqh)
4114 // out of QHs!
4115 KPRINTF(20, ("Out of QHs!\n"));
4116 return NULL;
4119 hc->hc_EhciQHPool = (struct EhciQH *) eqh->eqh_Succ;
4120 return(eqh);
4122 /* \\\ */
4124 /* /// "ehciFreeQH()" */
4125 inline void ehciFreeQH(struct PCIController *hc, struct EhciQH *eqh)
4127 eqh->eqh_Succ = hc->hc_EhciQHPool;
4128 hc->hc_EhciQHPool = eqh;
4130 /* \\\ */
4132 /* /// "ehciAllocTD()" */
4133 inline struct EhciTD * ehciAllocTD(struct PCIController *hc)
4135 struct EhciTD *etd = hc->hc_EhciTDPool;
4137 if(!etd)
4139 // out of TDs!
4140 KPRINTF(20, ("Out of TDs!\n"));
4141 return NULL;
4144 hc->hc_EhciTDPool = (struct EhciTD *) etd->etd_Succ;
4145 return(etd);
4147 /* \\\ */
4149 /* /// "ehciFreeTD()" */
4150 inline void ehciFreeTD(struct PCIController *hc, struct EhciTD *etd)
4152 etd->etd_Succ = hc->hc_EhciTDPool;
4153 hc->hc_EhciTDPool = etd;
4155 /* \\\ */
4157 /* /// "ehciUpdateIntTree()" */
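// Relinks the static interrupt QH skeleton (11 levels, one per power-of-two
// poll interval) so that each level points to the next level that actually
// has transfer QHs attached, letting the controller skip idle levels.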
4158 void ehciUpdateIntTree(struct PCIController *hc)
4160 struct EhciQH *eqh;
4161 struct EhciQH *predeqh;
4162 struct EhciQH *lastusedeqh;
4163 UWORD cnt;
4165 // optimize linkage between queue heads
4166 predeqh = lastusedeqh = hc->hc_EhciTermQH;
4167 for(cnt = 0; cnt < 11; cnt++)
4169 eqh = hc->hc_EhciIntQH[cnt];
4170 if(eqh->eqh_Succ != predeqh)
4172 lastusedeqh = eqh->eqh_Succ;
4174 eqh->eqh_NextQH = lastusedeqh->eqh_Self;
4175 predeqh = eqh;
4178 /* \\\ */
4180 /* /// "ehciHandleFinishedTDs()" */
4181 void ehciHandleFinishedTDs(struct PCIController *hc)
4183 struct PCIUnit *unit = hc->hc_Unit;
4184 struct IOUsbHWReq *ioreq;
4185 struct IOUsbHWReq *nextioreq;
4186 struct EhciQH *eqh;
4187 struct EhciTD *etd;
4188 struct EhciTD *predetd;
4189 UWORD devadrep;
4190 ULONG len;
4191 UWORD inspect;
4192 ULONG nexttd;
4193 BOOL shortpkt;
4194 ULONG ctrlstatus;
4195 ULONG epctrlstatus;
4196 ULONG actual;
4197 BOOL halted;
4198 BOOL updatetree = FALSE;
4199 BOOL zeroterm;
4200 ULONG phyaddr;
4202 KPRINTF(1, ("Checking for Async work done...\n"));
4203 ioreq = (struct IOUsbHWReq *) hc->hc_TDQueue.lh_Head;
4204 while((nextioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ))
4206 eqh = (struct EhciQH *) ioreq->iouh_DriverPrivate1;
4207 if(eqh)
4209 KPRINTF(1, ("Examining IOReq=%08lx with EQH=%08lx\n", ioreq, eqh));
4210 SYNC;
4211 EIEIO;
4212 epctrlstatus = READMEM32_LE(&eqh->eqh_CtrlStatus);
4213 nexttd = READMEM32_LE(&eqh->eqh_NextTD);
4214 devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint + ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
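    // devadrep folds device address and endpoint into a single array
    // index: (DevAddr << 5) + Endpoint, with bit 4 set for the IN direction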
4215 halted = ((epctrlstatus & (ETCF_ACTIVE|ETSF_HALTED)) == ETSF_HALTED);
4216 if(halted || (!(epctrlstatus & ETCF_ACTIVE) && (nexttd & EHCI_TERMINATE)))
4218 KPRINTF(1, ("AS: CS=%08lx CP=%08lx NX=%08lx\n", epctrlstatus, READMEM32_LE(&eqh->eqh_CurrTD), nexttd));
4219 shortpkt = FALSE;
4220 actual = 0;
4221 inspect = 1;
4222 etd = eqh->eqh_FirstTD;
4225 ctrlstatus = READMEM32_LE(&etd->etd_CtrlStatus);
4226 KPRINTF(1, ("AS: CS=%08lx SL=%08lx TD=%08lx\n", ctrlstatus, READMEM32_LE(&etd->etd_Self), etd));
4227 if(ctrlstatus & ETCF_ACTIVE)
4229 if(halted)
4231 KPRINTF(20, ("Async: Halted before TD\n"));
4232 //ctrlstatus = eqh->eqh_CtrlStatus;
4233 inspect = 0;
4234 if(unit->hu_NakTimeoutFrame[devadrep] && (hc->hc_FrameCounter > unit->hu_NakTimeoutFrame[devadrep]))
4236 KPRINTF(20, ("NAK timeout\n"));
4237 ioreq->iouh_Req.io_Error = UHIOERR_NAKTIMEOUT;
4239 break;
4240 } else {
4241 // what happened here? The host controller was just updating the fields and has not finished yet
4242 ctrlstatus = epctrlstatus;
4244 /*KPRINTF(20, ("AS: CS=%08lx CP=%08lx NX=%08lx\n", epctrlstatus, READMEM32_LE(&eqh->eqh_CurrTD), nexttd));
4245 KPRINTF(20, ("AS: CS=%08lx CP=%08lx NX=%08lx\n", READMEM32_LE(&eqh->eqh_CtrlStatus), READMEM32_LE(&eqh->eqh_CurrTD), READMEM32_LE(&eqh->eqh_NextTD)));
4246 KPRINTF(20, ("AS: CS=%08lx SL=%08lx TD=%08lx\n", ctrlstatus, READMEM32_LE(&etd->etd_Self), etd));
4247 etd = eqh->eqh_FirstTD;
4250 KPRINTF(20, ("XX: CS=%08lx SL=%08lx TD=%08lx\n", READMEM32_LE(&etd->etd_CtrlStatus), READMEM32_LE(&etd->etd_Self), etd));
4251 } while(etd = etd->etd_Succ);
4252 KPRINTF(20, ("Async: Internal error! Still active?!\n"));
4253 inspect = 2;
4254 break;*/
4258 if(ctrlstatus & (ETSF_HALTED|ETSF_TRANSERR|ETSF_BABBLE|ETSF_DATABUFFERERR))
4260 if(ctrlstatus & ETSF_BABBLE)
4262 KPRINTF(20, ("Babble error %08lx\n", ctrlstatus));
4263 ioreq->iouh_Req.io_Error = UHIOERR_OVERFLOW;
4265 else if(ctrlstatus & ETSF_DATABUFFERERR)
4267 KPRINTF(20, ("Databuffer error\n"));
4268 ioreq->iouh_Req.io_Error = UHIOERR_HOSTERROR;
4270 else if(ctrlstatus & ETSF_TRANSERR)
4272 if((ctrlstatus & ETCM_ERRORLIMIT)>>ETCS_ERRORLIMIT)
4274 KPRINTF(20, ("STALLED!\n"));
4275 ioreq->iouh_Req.io_Error = UHIOERR_STALL;
4276 } else {
4277 KPRINTF(20, ("TIMEOUT!\n"));
4278 ioreq->iouh_Req.io_Error = UHIOERR_TIMEOUT;
4281 inspect = 0;
4282 break;
4285 len = etd->etd_Length - ((ctrlstatus & ETSM_TRANSLENGTH)>>ETSS_TRANSLENGTH);
4286 if((ctrlstatus & ETCM_PIDCODE) != ETCF_PIDCODE_SETUP) // don't count setup packet
4288 actual += len;
4290 if(ctrlstatus & ETSM_TRANSLENGTH)
4292 KPRINTF(10, ("Short packet: %ld < %ld\n", len, etd->etd_Length));
4293 shortpkt = TRUE;
4294 break;
4296 etd = etd->etd_Succ;
4297 } while(etd && (!(ctrlstatus & ETCF_READYINTEN)));
4298 /*if(inspect == 2)
4300 // phantom halted
4301 ioreq = nextioreq;
4302 continue;
4305 if(((actual + ioreq->iouh_Actual) < eqh->eqh_Actual) && (!ioreq->iouh_Req.io_Error) && (!(ioreq->iouh_Flags & UHFF_ALLOWRUNTPKTS)))
4307 ioreq->iouh_Req.io_Error = UHIOERR_RUNTPACKET;
4309 ioreq->iouh_Actual += actual;
4310 if(inspect && (!shortpkt) && (eqh->eqh_Actual < ioreq->iouh_Length))
4312 KPRINTF(10, ("Reloading BULK at %ld/%ld\n", eqh->eqh_Actual, ioreq->iouh_Length));
4313 // reload
4314 ctrlstatus = (ioreq->iouh_Dir == UHDIR_IN) ? (ETCF_3ERRORSLIMIT|ETCF_ACTIVE|ETCF_PIDCODE_IN) : (ETCF_3ERRORSLIMIT|ETCF_ACTIVE|ETCF_PIDCODE_OUT);
4315 phyaddr = (ULONG) pciGetPhysical(hc, &(((UBYTE *) ioreq->iouh_Data)[ioreq->iouh_Actual]));
4316 predetd = etd = eqh->eqh_FirstTD;
4318 CONSTWRITEMEM32_LE(&eqh->eqh_CurrTD, EHCI_TERMINATE);
4319 CONSTWRITEMEM32_LE(&eqh->eqh_NextTD, EHCI_TERMINATE);
4320 CONSTWRITEMEM32_LE(&eqh->eqh_AltNextTD, EHCI_TERMINATE);
4323 len = ioreq->iouh_Length - eqh->eqh_Actual;
4324 if(len > 4*EHCI_PAGE_SIZE)
4326 len = 4*EHCI_PAGE_SIZE;
4328 etd->etd_Length = len;
4329 KPRINTF(1, ("Reload Bulk TD %08lx len %ld (%ld/%ld) phy=%08lx\n",
4330 etd, len, eqh->eqh_Actual, ioreq->iouh_Length, phyaddr));
4331 WRITEMEM32_LE(&etd->etd_CtrlStatus, ctrlstatus|(len<<ETSS_TRANSLENGTH));
4332 // FIXME need quark scatter gather mechanism here
4333 WRITEMEM32_LE(&etd->etd_BufferPtr[0], phyaddr);
4334 WRITEMEM32_LE(&etd->etd_BufferPtr[1], (phyaddr & EHCI_PAGE_MASK) + (1*EHCI_PAGE_SIZE));
4335 WRITEMEM32_LE(&etd->etd_BufferPtr[2], (phyaddr & EHCI_PAGE_MASK) + (2*EHCI_PAGE_SIZE));
4336 WRITEMEM32_LE(&etd->etd_BufferPtr[3], (phyaddr & EHCI_PAGE_MASK) + (3*EHCI_PAGE_SIZE));
4337 WRITEMEM32_LE(&etd->etd_BufferPtr[4], (phyaddr & EHCI_PAGE_MASK) + (4*EHCI_PAGE_SIZE));
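    // an EHCI qTD carries five page pointers: BufferPtr[0] holds the start
    // address including its page offset, the remaining entries map the
    // following pages - which is why len is capped to 4*EHCI_PAGE_SIZE above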
4338 phyaddr += len;
4339 eqh->eqh_Actual += len;
4340 zeroterm = (len && (ioreq->iouh_Dir == UHDIR_OUT) && (eqh->eqh_Actual == ioreq->iouh_Length) && (!(ioreq->iouh_Flags & UHFF_NOSHORTPKT)) && ((eqh->eqh_Actual % ioreq->iouh_MaxPktSize) == 0));
4341 predetd = etd;
4342 etd = etd->etd_Succ;
4343 if((!etd) && zeroterm)
4345 // rare case where the zero packet would be lost, allocate etd and append zero packet.
4346 etd = ehciAllocTD(hc);
4347 if(!etd)
4349 KPRINTF(200, ("INTERNAL ERROR! This should not happen! Could not allocate zero packet TD\n"));
4350 break;
4352 predetd->etd_Succ = etd;
4353 predetd->etd_NextTD = etd->etd_Self;
4354 predetd->etd_AltNextTD = hc->hc_ShortPktEndTD->etd_Self;
4355 etd->etd_Succ = NULL;
4356 CONSTWRITEMEM32_LE(&etd->etd_NextTD, EHCI_TERMINATE);
4357 CONSTWRITEMEM32_LE(&etd->etd_AltNextTD, EHCI_TERMINATE);
4359 } while(etd && ((eqh->eqh_Actual < ioreq->iouh_Length) || zeroterm));
4360 ctrlstatus |= ETCF_READYINTEN|(predetd->etd_Length<<ETSS_TRANSLENGTH);
4361 WRITEMEM32_LE(&predetd->etd_CtrlStatus, ctrlstatus);
4362 CONSTWRITEMEM32_LE(&predetd->etd_NextTD, EHCI_TERMINATE);
4363 CONSTWRITEMEM32_LE(&predetd->etd_AltNextTD, EHCI_TERMINATE);
4364 SYNC;
4365 EIEIO;
4366 etd = eqh->eqh_FirstTD;
4367 eqh->eqh_NextTD = etd->etd_Self;
4368 SYNC;
4369 EIEIO;
4370 unit->hu_NakTimeoutFrame[devadrep] = (ioreq->iouh_Flags & UHFF_NAKTIMEOUT) ? hc->hc_FrameCounter + (ioreq->iouh_NakTimeout<<3) : 0;
4371 } else {
4372 unit->hu_DevBusyReq[devadrep] = NULL;
4373 Remove(&ioreq->iouh_Req.io_Message.mn_Node);
4374 ehciFreeAsyncContext(hc, eqh);
4375 // use next data toggle bit based on last successful transaction
4376 KPRINTF(1, ("Old Toggle %04lx:%ld\n", devadrep, unit->hu_DevDataToggle[devadrep]));
4377 unit->hu_DevDataToggle[devadrep] = (ctrlstatus & ETCF_DATA1) ? TRUE : FALSE;
4378 KPRINTF(1, ("Toggle now %04lx:%ld\n", devadrep, unit->hu_DevDataToggle[devadrep]));
4379 if(inspect)
4381 if(ioreq->iouh_Req.io_Command == UHCMD_CONTROLXFER)
4383 // check for successful clear feature and set address ctrl transfers
4384 uhwCheckSpecialCtrlTransfers(hc, ioreq);
4387 ReplyMsg(&ioreq->iouh_Req.io_Message);
4390 } else {
4391 KPRINTF(20, ("IOReq=%08lx has no UQH!\n", ioreq));
4393 ioreq = nextioreq;
4396 KPRINTF(1, ("Checking for Periodic work done...\n"));
4397 ioreq = (struct IOUsbHWReq *) hc->hc_PeriodicTDQueue.lh_Head;
4398 while((nextioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ))
4400 eqh = (struct EhciQH *) ioreq->iouh_DriverPrivate1;
4401 if(eqh)
4403 KPRINTF(1, ("Examining IOReq=%08lx with EQH=%08lx\n", ioreq, eqh));
4404 nexttd = READMEM32_LE(&eqh->eqh_NextTD);
4405 etd = eqh->eqh_FirstTD;
4406 ctrlstatus = READMEM32_LE(&eqh->eqh_CtrlStatus);
4407 devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint + ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
4408 halted = ((ctrlstatus & (ETCF_ACTIVE|ETSF_HALTED)) == ETSF_HALTED);
4409 if(halted || (!(ctrlstatus & ETCF_ACTIVE) && (nexttd & EHCI_TERMINATE)))
4411 KPRINTF(1, ("EQH not active %08lx\n", ctrlstatus));
4412 shortpkt = FALSE;
4413 actual = 0;
4414 inspect = 1;
4417 ctrlstatus = READMEM32_LE(&etd->etd_CtrlStatus);
4418 KPRINTF(1, ("Periodic: TD=%08lx CS=%08lx\n", etd, ctrlstatus));
4419 if(ctrlstatus & ETCF_ACTIVE)
4421 if(halted)
4423 KPRINTF(20, ("Periodic: Halted before TD\n"));
4424 //ctrlstatus = eqh->eqh_CtrlStatus;
4425 inspect = 0;
4426 if(unit->hu_NakTimeoutFrame[devadrep] && (hc->hc_FrameCounter > unit->hu_NakTimeoutFrame[devadrep]))
4428 KPRINTF(20, ("NAK timeout\n"));
4429 ioreq->iouh_Req.io_Error = UHIOERR_NAKTIMEOUT;
4431 break;
4432 } else {
4433 KPRINTF(20, ("Periodic: Internal error! Still active?!\n"));
4434 break;
4438 if(ctrlstatus & (ETSF_HALTED|ETSF_TRANSERR|ETSF_BABBLE|ETSF_DATABUFFERERR|ETSF_MISSEDCSPLIT))
4440 if(ctrlstatus & ETSF_BABBLE)
4442 KPRINTF(20, ("Babble error %08lx\n", ctrlstatus));
4443 ioreq->iouh_Req.io_Error = UHIOERR_OVERFLOW;
4445 else if(ctrlstatus & ETSF_MISSEDCSPLIT)
4447 KPRINTF(20, ("Missed CSplit %08lx\n", ctrlstatus));
4448 ioreq->iouh_Req.io_Error = UHIOERR_STALL;
4450 else if(ctrlstatus & ETSF_DATABUFFERERR)
4452 KPRINTF(20, ("Databuffer error\n"));
4453 ioreq->iouh_Req.io_Error = UHIOERR_HOSTERROR;
4455 else if(ctrlstatus & ETSF_TRANSERR)
4457 if((ctrlstatus & ETCM_ERRORLIMIT)>>ETCS_ERRORLIMIT)
4459 KPRINTF(20, ("STALLED!\n"));
4460 ioreq->iouh_Req.io_Error = UHIOERR_STALL;
4461 } else {
4462 KPRINTF(20, ("TIMEOUT!\n"));
4463 ioreq->iouh_Req.io_Error = UHIOERR_TIMEOUT;
4466 else if(unit->hu_NakTimeoutFrame[devadrep] && (hc->hc_FrameCounter > unit->hu_NakTimeoutFrame[devadrep]))
4468 ioreq->iouh_Req.io_Error = UHIOERR_NAKTIMEOUT;
4470 inspect = 0;
4471 break;
4474 len = etd->etd_Length - ((ctrlstatus & ETSM_TRANSLENGTH)>>ETSS_TRANSLENGTH);
4475 actual += len;
4476 if(ctrlstatus & ETSM_TRANSLENGTH)
4478 KPRINTF(10, ("Short packet: %ld < %ld\n", len, etd->etd_Length));
4479 shortpkt = TRUE;
4480 break;
4482 etd = etd->etd_Succ;
4483 } while(etd);
4484 if((actual < eqh->eqh_Actual) && (!ioreq->iouh_Req.io_Error) && (!(ioreq->iouh_Flags & UHFF_ALLOWRUNTPKTS)))
4486 ioreq->iouh_Req.io_Error = UHIOERR_RUNTPACKET;
4488 ioreq->iouh_Actual += actual;
4489 unit->hu_DevBusyReq[devadrep] = NULL;
4490 Remove(&ioreq->iouh_Req.io_Message.mn_Node);
4491 ehciFreePeriodicContext(hc, eqh);
4492 updatetree = TRUE;
4493 // use next data toggle bit based on last successful transaction
4494 KPRINTF(1, ("Old Toggle %04lx:%ld\n", devadrep, unit->hu_DevDataToggle[devadrep]));
4495 unit->hu_DevDataToggle[devadrep] = (ctrlstatus & ETCF_DATA1) ? TRUE : FALSE;
4496 KPRINTF(1, ("Toggle now %04lx:%ld\n", devadrep, unit->hu_DevDataToggle[devadrep]));
4497 ReplyMsg(&ioreq->iouh_Req.io_Message);
4499 } else {
4500 KPRINTF(20, ("IOReq=%08lx has no UQH!\n", ioreq));
4502 ioreq = nextioreq;
4504 if(updatetree)
4506 ehciUpdateIntTree(hc);
4509 /* \\\ */
4511 /* /// "ehciScheduleCtrlTDs()" */
4512 void ehciScheduleCtrlTDs(struct PCIController *hc)
4514 struct PCIUnit *unit = hc->hc_Unit;
4515 struct IOUsbHWReq *ioreq;
4516 UWORD devadrep;
4517 struct EhciQH *eqh;
4518 struct EhciTD *setupetd;
4519 struct EhciTD *dataetd;
4520 struct EhciTD *termetd;
4521 struct EhciTD *predetd;
4522 ULONG epcaps;
4523 ULONG ctrlstatus;
4524 ULONG len;
4525 ULONG phyaddr;
4527 /* *** CTRL Transfers *** */
4528 KPRINTF(1, ("Scheduling new CTRL transfers...\n"));
4529 ioreq = (struct IOUsbHWReq *) hc->hc_CtrlXFerQueue.lh_Head;
4530 while(((struct Node *) ioreq)->ln_Succ)
4532 devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint;
4533 KPRINTF(10, ("New CTRL transfer to %ld.%ld: %ld bytes\n", ioreq->iouh_DevAddr, ioreq->iouh_Endpoint, ioreq->iouh_Length));
4534 /* is endpoint already in use or do we have to wait for next transaction */
4535 if(unit->hu_DevBusyReq[devadrep])
4537 KPRINTF(5, ("Endpoint %02lx in use!\n", devadrep));
4538 ioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ;
4539 continue;
4542 eqh = ehciAllocQH(hc);
4543 if(!eqh)
4545 break;
4548 setupetd = ehciAllocTD(hc);
4549 if(!setupetd)
4551 ehciFreeQH(hc, eqh);
4552 break;
4554 termetd = ehciAllocTD(hc);
4555 if(!termetd)
4557 ehciFreeTD(hc, setupetd);
4558 ehciFreeQH(hc, eqh);
4559 break;
4561 eqh->eqh_IOReq = ioreq;
4562 eqh->eqh_FirstTD = setupetd;
4563 eqh->eqh_Actual = 0;
4565 epcaps = ((0<<EQES_RELOAD)|EQEF_TOGGLEFROMTD)|(ioreq->iouh_MaxPktSize<<EQES_MAXPKTLEN)|(ioreq->iouh_DevAddr<<EQES_DEVADDR)|(ioreq->iouh_Endpoint<<EQES_ENDPOINT);
4566 if(ioreq->iouh_Flags & UHFF_SPLITTRANS)
4568 KPRINTF(10, ("*** SPLIT TRANSACTION to HubPort %ld at Addr %ld\n", ioreq->iouh_SplitHubPort, ioreq->iouh_SplitHubAddr));
4569 // full speed and low speed handling
4570 WRITEMEM32_LE(&eqh->eqh_SplitCtrl, EQSF_MULTI_1|(ioreq->iouh_SplitHubPort<<EQSS_PORTNUMBER)|(ioreq->iouh_SplitHubAddr<<EQSS_HUBADDRESS));
4571 epcaps |= EQEF_SPLITCTRLEP;
4572 if(ioreq->iouh_Flags & UHFF_LOWSPEED)
4574 KPRINTF(10, ("*** LOW SPEED ***\n"));
4575 epcaps |= EQEF_LOWSPEED;
4577 } else {
4578 CONSTWRITEMEM32_LE(&eqh->eqh_SplitCtrl, EQSF_MULTI_1);
4579 epcaps |= EQEF_HIGHSPEED;
4581 WRITEMEM32_LE(&eqh->eqh_EPCaps, epcaps);
4582 //eqh->eqh_CtrlStatus = eqh->eqh_CurrTD = 0;
4583 //eqh->eqh_AltNextTD = eqh->eqh_NextTD = setupetd->etd_Self;
4585 //termetd->etd_QueueHead = setupetd->etd_QueueHead = eqh;
4587 KPRINTF(1, ("SetupTD=%08lx, TermTD=%08lx\n", setupetd, termetd));
4589 // fill setup td
4590 setupetd->etd_Length = 8;
4592 CONSTWRITEMEM32_LE(&setupetd->etd_CtrlStatus, (8<<ETSS_TRANSLENGTH)|ETCF_3ERRORSLIMIT|ETCF_ACTIVE|ETCF_PIDCODE_SETUP);
4593 phyaddr = (ULONG) pciGetPhysical(hc, &ioreq->iouh_SetupData);
4594 WRITEMEM32_LE(&setupetd->etd_BufferPtr[0], phyaddr);
4595 WRITEMEM32_LE(&setupetd->etd_BufferPtr[1], (phyaddr + 8) & EHCI_PAGE_MASK); // theoretically, setup data may cross a page boundary
4596 setupetd->etd_BufferPtr[2] = 0; // clear for overlay bits
4598 ctrlstatus = (ioreq->iouh_SetupData.bmRequestType & URTF_IN) ? (ETCF_3ERRORSLIMIT|ETCF_ACTIVE|ETCF_PIDCODE_IN) : (ETCF_3ERRORSLIMIT|ETCF_ACTIVE|ETCF_PIDCODE_OUT);
4599 predetd = setupetd;
4600 if(ioreq->iouh_Length)
4602 phyaddr = (ULONG) pciGetPhysical(hc, ioreq->iouh_Data);
4605 dataetd = ehciAllocTD(hc);
4606 if(!dataetd)
4608 break;
4610 ctrlstatus ^= ETCF_DATA1; // toggle bit
4611 predetd->etd_Succ = dataetd;
4612 predetd->etd_NextTD = dataetd->etd_Self;
4613 dataetd->etd_AltNextTD = termetd->etd_Self;
4615 len = ioreq->iouh_Length - eqh->eqh_Actual;
4616 if(len > 4*EHCI_PAGE_SIZE)
4618 len = 4*EHCI_PAGE_SIZE;
4620 dataetd->etd_Length = len;
4621 WRITEMEM32_LE(&dataetd->etd_CtrlStatus, ctrlstatus|(len<<ETSS_TRANSLENGTH));
4622 // FIXME need quark scatter gather mechanism here
4623 WRITEMEM32_LE(&dataetd->etd_BufferPtr[0], phyaddr);
4624 WRITEMEM32_LE(&dataetd->etd_BufferPtr[1], (phyaddr & EHCI_PAGE_MASK) + (1*EHCI_PAGE_SIZE));
4625 WRITEMEM32_LE(&dataetd->etd_BufferPtr[2], (phyaddr & EHCI_PAGE_MASK) + (2*EHCI_PAGE_SIZE));
4626 WRITEMEM32_LE(&dataetd->etd_BufferPtr[3], (phyaddr & EHCI_PAGE_MASK) + (3*EHCI_PAGE_SIZE));
4627 WRITEMEM32_LE(&dataetd->etd_BufferPtr[4], (phyaddr & EHCI_PAGE_MASK) + (4*EHCI_PAGE_SIZE));
4628 phyaddr += len;
4629 eqh->eqh_Actual += len;
4630 predetd = dataetd;
4631 } while(eqh->eqh_Actual < ioreq->iouh_Length);
4632 if(!dataetd)
4634 // not enough dataetds? try again later
4635 ehciFreeQHandTDs(hc, eqh);
4636 ehciFreeTD(hc, termetd); // this one's not linked yet
4637 break;
4640 // TERM packet
4641 ctrlstatus |= ETCF_DATA1|ETCF_READYINTEN;
4642 ctrlstatus ^= (ETCF_PIDCODE_IN^ETCF_PIDCODE_OUT);
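    // per the USB spec, the status stage always uses DATA1 and runs in the
    // opposite direction of the data stage (IN for zero-length control
    // transfers, as ctrlstatus still holds the bmRequestType direction then)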
4644 predetd->etd_NextTD = termetd->etd_Self;
4645 predetd->etd_Succ = termetd;
4646 CONSTWRITEMEM32_LE(&termetd->etd_NextTD, EHCI_TERMINATE);
4647 CONSTWRITEMEM32_LE(&termetd->etd_AltNextTD, EHCI_TERMINATE);
4648 WRITEMEM32_LE(&termetd->etd_CtrlStatus, ctrlstatus);
4649 termetd->etd_Length = 0;
4650 termetd->etd_BufferPtr[0] = 0; // clear for overlay bits
4651 termetd->etd_BufferPtr[1] = 0; // clear for overlay bits
4652 termetd->etd_BufferPtr[2] = 0; // clear for overlay bits
4653 termetd->etd_Succ = NULL;
4655 // due to silicon bugs, we fill in the first overlay ourselves.
4656 eqh->eqh_CurrTD = setupetd->etd_Self;
4657 eqh->eqh_NextTD = setupetd->etd_NextTD;
4658 eqh->eqh_AltNextTD = setupetd->etd_AltNextTD;
4659 eqh->eqh_CtrlStatus = setupetd->etd_CtrlStatus;
4660 eqh->eqh_BufferPtr[0] = setupetd->etd_BufferPtr[0];
4661 eqh->eqh_BufferPtr[1] = setupetd->etd_BufferPtr[1];
4662 eqh->eqh_BufferPtr[2] = 0;
4664 Remove(&ioreq->iouh_Req.io_Message.mn_Node);
4665 ioreq->iouh_DriverPrivate1 = eqh;
4667 // manage endpoint going busy
4668 unit->hu_DevBusyReq[devadrep] = ioreq;
4669 unit->hu_NakTimeoutFrame[devadrep] = (ioreq->iouh_Flags & UHFF_NAKTIMEOUT) ? hc->hc_FrameCounter + (ioreq->iouh_NakTimeout<<3) : 0;
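    // iouh_NakTimeout is given in 1ms frames; the EHCI frame counter advances
    // once per microframe (8 per ms), hence the <<3 when computing the
    // timeout frame (same conversion is used for INT and BULK below)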
4671 Disable();
4672 AddTail(&hc->hc_TDQueue, (struct Node *) ioreq);
4674 // looks good to me, now enqueue this entry (just behind the asyncQH)
4675 eqh->eqh_Succ = hc->hc_EhciAsyncQH->eqh_Succ;
4676 eqh->eqh_NextQH = eqh->eqh_Succ->eqh_Self;
4677 SYNC;
4678 EIEIO;
4679 eqh->eqh_Pred = hc->hc_EhciAsyncQH;
4680 eqh->eqh_Succ->eqh_Pred = eqh;
4681 hc->hc_EhciAsyncQH->eqh_Succ = eqh;
4682 hc->hc_EhciAsyncQH->eqh_NextQH = eqh->eqh_Self;
4683 SYNC;
4684 EIEIO;
4685 Enable();
4687 ioreq = (struct IOUsbHWReq *) hc->hc_CtrlXFerQueue.lh_Head;
4690 /* \\\ */
4692 /* /// "ehciScheduleIntTDs()" */
4693 void ehciScheduleIntTDs(struct PCIController *hc)
4695 struct PCIUnit *unit = hc->hc_Unit;
4696 struct IOUsbHWReq *ioreq;
4697 UWORD devadrep;
4698 UWORD cnt;
4699 struct EhciQH *eqh;
4700 struct EhciQH *inteqh;
4701 struct EhciTD *etd;
4702 struct EhciTD *predetd;
4703 ULONG epcaps;
4704 ULONG ctrlstatus;
4705 ULONG splitctrl;
4706 ULONG len;
4707 ULONG phyaddr;
4709 /* *** INT Transfers *** */
4710 KPRINTF(1, ("Scheduling new INT transfers...\n"));
4711 ioreq = (struct IOUsbHWReq *) hc->hc_IntXFerQueue.lh_Head;
4712 while(((struct Node *) ioreq)->ln_Succ)
4714 devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint + ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
4715 KPRINTF(10, ("New INT transfer to %ld.%ld: %ld bytes\n", ioreq->iouh_DevAddr, ioreq->iouh_Endpoint, ioreq->iouh_Length));
4716 /* is endpoint already in use or do we have to wait for next transaction */
4717 if(unit->hu_DevBusyReq[devadrep])
4719 KPRINTF(5, ("Endpoint %02lx in use!\n", devadrep));
4720 ioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ;
4721 continue;
4724 eqh = ehciAllocQH(hc);
4725 if(!eqh)
4727 break;
4730 eqh->eqh_IOReq = ioreq;
4731 eqh->eqh_Actual = 0;
4733 epcaps = (0<<EQES_RELOAD)|(ioreq->iouh_MaxPktSize<<EQES_MAXPKTLEN)|(ioreq->iouh_DevAddr<<EQES_DEVADDR)|(ioreq->iouh_Endpoint<<EQES_ENDPOINT);
4734 if(ioreq->iouh_Flags & UHFF_SPLITTRANS)
4736 KPRINTF(10, ("*** SPLIT TRANSACTION to HubPort %ld at Addr %ld\n", ioreq->iouh_SplitHubPort, ioreq->iouh_SplitHubAddr));
4737 // full speed and low speed handling
4738 if(ioreq->iouh_Flags & UHFF_LOWSPEED)
4740 KPRINTF(10, ("*** LOW SPEED ***\n"));
4741 epcaps |= EQEF_LOWSPEED;
4743 WRITEMEM32_LE(&eqh->eqh_SplitCtrl, (EQSF_MULTI_1|(0x01<<EQSS_MUSOFACTIVE)|(0x1c<<EQSS_MUSOFCSPLIT))|(ioreq->iouh_SplitHubPort<<EQSS_PORTNUMBER)|(ioreq->iouh_SplitHubAddr<<EQSS_HUBADDRESS));
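    // select the interrupt QH level matching the poll interval; for split
    // transactions the levels are spaced in powers of two milliseconds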
4744 if(ioreq->iouh_Interval >= 255)
4746 inteqh = hc->hc_EhciIntQH[8]; // 256ms interval
4747 } else {
4748 cnt = 0;
4751 inteqh = hc->hc_EhciIntQH[cnt++];
4752 } while(ioreq->iouh_Interval > (1<<cnt));
4754 } else {
4755 epcaps |= EQEF_HIGHSPEED;
4756 if(ioreq->iouh_Flags & UHFF_MULTI_3)
4758 splitctrl = EQSF_MULTI_3;
4760 else if(ioreq->iouh_Flags & UHFF_MULTI_2)
4762 splitctrl = EQSF_MULTI_2;
4763 } else {
4764 splitctrl = EQSF_MULTI_1;
4766 if(ioreq->iouh_Interval < 2) // 0-1 µFrames
4768 splitctrl |= (0xff<<EQSS_MUSOFACTIVE);
4770 else if(ioreq->iouh_Interval < 4) // 2-3 µFrames
4772 splitctrl |= (0x55<<EQSS_MUSOFACTIVE);
4774 else if(ioreq->iouh_Interval < 8) // 4-7 µFrames
4776 splitctrl |= (0x22<<EQSS_MUSOFACTIVE);
4778 else if(ioreq->iouh_Interval > 511) // 64ms and higher
4780 splitctrl |= (0x10<<EQSS_MUSOFACTIVE);
4782 else //if(ioreq->iouh_Interval >= 8) // 1-64ms
4784 splitctrl |= (0x01<<EQSS_MUSOFACTIVE);
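    // the S-mask chosen above selects which of the eight microframes per
    // frame get polled: 0xff = every µframe, 0x55 = every other,
    // 0x22 = every fourth, 0x10/0x01 = once per frame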
4786 WRITEMEM32_LE(&eqh->eqh_SplitCtrl, splitctrl);
4787 if(ioreq->iouh_Interval >= 1024)
4789 inteqh = hc->hc_EhciIntQH[10]; // 1024µFrames interval
4790 } else {
4791 cnt = 0;
4794 inteqh = hc->hc_EhciIntQH[cnt++];
4795 } while(ioreq->iouh_Interval > (1<<cnt));
4798 WRITEMEM32_LE(&eqh->eqh_EPCaps, epcaps);
4799 //eqh->eqh_CtrlStatus = eqh->eqh_CurrTD = 0;
4800 eqh->eqh_FirstTD = NULL; // clear for ehciFreeQHandTDs()
4802 ctrlstatus = (ioreq->iouh_Dir == UHDIR_IN) ? (ETCF_3ERRORSLIMIT|ETCF_ACTIVE|ETCF_PIDCODE_IN) : (ETCF_3ERRORSLIMIT|ETCF_ACTIVE|ETCF_PIDCODE_OUT);
4803 if(unit->hu_DevDataToggle[devadrep])
4805 // continue with data toggle 0
4806 ctrlstatus |= ETCF_DATA1;
4808 predetd = NULL;
4809 phyaddr = (ULONG) pciGetPhysical(hc, ioreq->iouh_Data);
4812 etd = ehciAllocTD(hc);
4813 if(!etd)
4815 break;
4817 if(predetd)
4819 predetd->etd_Succ = etd;
4820 predetd->etd_NextTD = etd->etd_Self;
4821 predetd->etd_AltNextTD = hc->hc_ShortPktEndTD->etd_Self;
4822 } else {
4823 eqh->eqh_FirstTD = etd;
4824 //eqh->eqh_AltNextTD = eqh->eqh_NextTD = etd->etd_Self;
4827 len = ioreq->iouh_Length - eqh->eqh_Actual;
4828 if(len > 4*EHCI_PAGE_SIZE)
4830 len = 4*EHCI_PAGE_SIZE;
4832 etd->etd_Length = len;
4833 WRITEMEM32_LE(&etd->etd_CtrlStatus, ctrlstatus|(len<<ETSS_TRANSLENGTH));
4834 // FIXME need quark scatter gather mechanism here
4835 WRITEMEM32_LE(&etd->etd_BufferPtr[0], phyaddr);
4836 WRITEMEM32_LE(&etd->etd_BufferPtr[1], (phyaddr & EHCI_PAGE_MASK) + (1*EHCI_PAGE_SIZE));
4837 WRITEMEM32_LE(&etd->etd_BufferPtr[2], (phyaddr & EHCI_PAGE_MASK) + (2*EHCI_PAGE_SIZE));
4838 WRITEMEM32_LE(&etd->etd_BufferPtr[3], (phyaddr & EHCI_PAGE_MASK) + (3*EHCI_PAGE_SIZE));
4839 WRITEMEM32_LE(&etd->etd_BufferPtr[4], (phyaddr & EHCI_PAGE_MASK) + (4*EHCI_PAGE_SIZE));
4840 phyaddr += len;
4841 eqh->eqh_Actual += len;
4842 predetd = etd;
4843 } while(eqh->eqh_Actual < ioreq->iouh_Length);
4845 if(!etd)
4847 // not enough etds? try again later
4848 ehciFreeQHandTDs(hc, eqh);
4849 break;
4851 ctrlstatus |= ETCF_READYINTEN|(etd->etd_Length<<ETSS_TRANSLENGTH);
4852 WRITEMEM32_LE(&predetd->etd_CtrlStatus, ctrlstatus);
4854 CONSTWRITEMEM32_LE(&predetd->etd_NextTD, EHCI_TERMINATE);
4855 CONSTWRITEMEM32_LE(&predetd->etd_AltNextTD, EHCI_TERMINATE);
4856 predetd->etd_Succ = NULL;
4858 // due to silicon bugs, we fill in the first overlay ourselves.
4859 etd = eqh->eqh_FirstTD;
4860 eqh->eqh_CurrTD = etd->etd_Self;
4861 eqh->eqh_NextTD = etd->etd_NextTD;
4862 eqh->eqh_AltNextTD = etd->etd_AltNextTD;
4863 eqh->eqh_CtrlStatus = etd->etd_CtrlStatus;
4864 eqh->eqh_BufferPtr[0] = etd->etd_BufferPtr[0];
4865 eqh->eqh_BufferPtr[1] = etd->etd_BufferPtr[1];
4866 eqh->eqh_BufferPtr[2] = etd->etd_BufferPtr[2];
4867 eqh->eqh_BufferPtr[3] = etd->etd_BufferPtr[3];
4868 eqh->eqh_BufferPtr[4] = etd->etd_BufferPtr[4];
4870 Remove(&ioreq->iouh_Req.io_Message.mn_Node);
4871 ioreq->iouh_DriverPrivate1 = eqh;
4873 // manage endpoint going busy
4874 unit->hu_DevBusyReq[devadrep] = ioreq;
4875 unit->hu_NakTimeoutFrame[devadrep] = (ioreq->iouh_Flags & UHFF_NAKTIMEOUT) ? hc->hc_FrameCounter + (ioreq->iouh_NakTimeout<<3) : 0;
4877 Disable();
4878 AddTail(&hc->hc_PeriodicTDQueue, (struct Node *) ioreq);
4880 // looks good to me, now enqueue this entry in the right IntQH
4881 eqh->eqh_Succ = inteqh->eqh_Succ;
4882 eqh->eqh_NextQH = eqh->eqh_Succ->eqh_Self;
4883 SYNC;
4884 EIEIO;
4885 eqh->eqh_Pred = inteqh;
4886 eqh->eqh_Succ->eqh_Pred = eqh;
4887 inteqh->eqh_Succ = eqh;
4888 inteqh->eqh_NextQH = eqh->eqh_Self;
4889 SYNC;
4890 EIEIO;
4891 Enable();
4893 ehciUpdateIntTree(hc);
4895 ioreq = (struct IOUsbHWReq *) hc->hc_IntXFerQueue.lh_Head;
4898 /* \\\ */
4900 /* /// "ehciScheduleBulkTDs()" */
4901 void ehciScheduleBulkTDs(struct PCIController *hc)
4903 struct PCIUnit *unit = hc->hc_Unit;
4904 struct IOUsbHWReq *ioreq;
4905 UWORD devadrep;
4906 struct EhciQH *eqh;
4907 struct EhciTD *etd = NULL;
4908 struct EhciTD *predetd;
4909 ULONG epcaps;
4910 ULONG ctrlstatus;
4911 ULONG splitctrl;
4912 ULONG len;
4913 ULONG phyaddr;
4915 /* *** BULK Transfers *** */
4916 KPRINTF(1, ("Scheduling new BULK transfers...\n"));
4917 ioreq = (struct IOUsbHWReq *) hc->hc_BulkXFerQueue.lh_Head;
4918 while(((struct Node *) ioreq)->ln_Succ)
4920 devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint + ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
4921 KPRINTF(10, ("New BULK transfer to %ld.%ld: %ld bytes\n", ioreq->iouh_DevAddr, ioreq->iouh_Endpoint, ioreq->iouh_Length));
4922 /* is endpoint already in use or do we have to wait for next transaction */
4923 if(unit->hu_DevBusyReq[devadrep])
4925 KPRINTF(5, ("Endpoint %02lx in use!\n", devadrep));
4926 ioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ;
4927 continue;
4930 eqh = ehciAllocQH(hc);
4931 if(!eqh)
4933 break;
4936 eqh->eqh_IOReq = ioreq;
4937 eqh->eqh_Actual = 0;
4939 epcaps = (0<<EQES_RELOAD)|(ioreq->iouh_MaxPktSize<<EQES_MAXPKTLEN)|(ioreq->iouh_DevAddr<<EQES_DEVADDR)|(ioreq->iouh_Endpoint<<EQES_ENDPOINT);
4940 if(ioreq->iouh_Flags & UHFF_SPLITTRANS)
4942 KPRINTF(10, ("*** SPLIT TRANSACTION to HubPort %ld at Addr %ld\n", ioreq->iouh_SplitHubPort, ioreq->iouh_SplitHubAddr));
4943 // full speed and low speed handling
4944 if(ioreq->iouh_Flags & UHFF_LOWSPEED)
4946 KPRINTF(10, ("*** LOW SPEED ***\n"));
4947 epcaps |= EQEF_LOWSPEED;
4949 WRITEMEM32_LE(&eqh->eqh_SplitCtrl, EQSF_MULTI_1|(ioreq->iouh_SplitHubPort<<EQSS_PORTNUMBER)|(ioreq->iouh_SplitHubAddr<<EQSS_HUBADDRESS));
4950 } else {
4951 epcaps |= EQEF_HIGHSPEED;
4952 if(ioreq->iouh_Flags & UHFF_MULTI_3)
4954 splitctrl = EQSF_MULTI_3;
4956 else if(ioreq->iouh_Flags & UHFF_MULTI_2)
4958 splitctrl = EQSF_MULTI_2;
4959 } else {
4960 splitctrl = EQSF_MULTI_1;
4962 WRITEMEM32_LE(&eqh->eqh_SplitCtrl, splitctrl);
4964 WRITEMEM32_LE(&eqh->eqh_EPCaps, epcaps);
4965 //eqh->eqh_CtrlStatus = eqh->eqh_CurrTD = 0;
4966 eqh->eqh_FirstTD = NULL; // clear for ehciFreeQHandTDs()
4968 ctrlstatus = (ioreq->iouh_Dir == UHDIR_IN) ? (ETCF_3ERRORSLIMIT|ETCF_ACTIVE|ETCF_PIDCODE_IN) : (ETCF_3ERRORSLIMIT|ETCF_ACTIVE|ETCF_PIDCODE_OUT);
4969 if(unit->hu_DevDataToggle[devadrep])
4971 // continue with data toggle 0
4972 ctrlstatus |= ETCF_DATA1;
4974 predetd = NULL;
4975 phyaddr = (ULONG) pciGetPhysical(hc, ioreq->iouh_Data);
4978 if((eqh->eqh_Actual >= EHCI_TD_BULK_LIMIT) && (eqh->eqh_Actual < ioreq->iouh_Length))
4980 KPRINTF(10, ("Bulk too large, splitting...\n"));
4981 break;
4983 etd = ehciAllocTD(hc);
4984 if(!etd)
4986 break;
4988 if(predetd)
4990 predetd->etd_Succ = etd;
4991 predetd->etd_NextTD = etd->etd_Self;
4992 predetd->etd_AltNextTD = hc->hc_ShortPktEndTD->etd_Self;
4993 } else {
4994 eqh->eqh_FirstTD = etd;
4995 //eqh->eqh_AltNextTD = eqh->eqh_NextTD = etd->etd_Self;
4998 len = ioreq->iouh_Length - eqh->eqh_Actual;
4999 if(len > 4*EHCI_PAGE_SIZE)
5001 len = 4*EHCI_PAGE_SIZE;
5003 etd->etd_Length = len;
5004 KPRINTF(1, ("Bulk TD %08lx len %ld (%ld/%ld) phy=%08lx\n",
5005 etd, len, eqh->eqh_Actual, ioreq->iouh_Length, phyaddr));
5006 WRITEMEM32_LE(&etd->etd_CtrlStatus, ctrlstatus|(len<<ETSS_TRANSLENGTH));
5007 // FIXME need quark scatter gather mechanism here
5008 WRITEMEM32_LE(&etd->etd_BufferPtr[0], phyaddr);
5009 WRITEMEM32_LE(&etd->etd_BufferPtr[1], (phyaddr & EHCI_PAGE_MASK) + (1*EHCI_PAGE_SIZE));
5010 WRITEMEM32_LE(&etd->etd_BufferPtr[2], (phyaddr & EHCI_PAGE_MASK) + (2*EHCI_PAGE_SIZE));
5011 WRITEMEM32_LE(&etd->etd_BufferPtr[3], (phyaddr & EHCI_PAGE_MASK) + (3*EHCI_PAGE_SIZE));
5012 WRITEMEM32_LE(&etd->etd_BufferPtr[4], (phyaddr & EHCI_PAGE_MASK) + (4*EHCI_PAGE_SIZE));
5013 phyaddr += len;
5014 eqh->eqh_Actual += len;
5016 predetd = etd;
5017 } while((eqh->eqh_Actual < ioreq->iouh_Length) || (len && (ioreq->iouh_Dir == UHDIR_OUT) && (eqh->eqh_Actual == ioreq->iouh_Length) && (!(ioreq->iouh_Flags & UHFF_NOSHORTPKT)) && ((eqh->eqh_Actual % ioreq->iouh_MaxPktSize) == 0)));
5019 if(!etd)
5021 // not enough etds? try again later
5022 ehciFreeQHandTDs(hc, eqh);
5023 break;
5025 ctrlstatus |= ETCF_READYINTEN|(predetd->etd_Length<<ETSS_TRANSLENGTH);
5026 WRITEMEM32_LE(&predetd->etd_CtrlStatus, ctrlstatus);
5028 predetd->etd_Succ = NULL;
5029 CONSTWRITEMEM32_LE(&predetd->etd_NextTD, EHCI_TERMINATE);
5030 CONSTWRITEMEM32_LE(&predetd->etd_AltNextTD, EHCI_TERMINATE);
5032 // due to silicon bugs, we fill in the first overlay ourselves.
5033 etd = eqh->eqh_FirstTD;
5034 eqh->eqh_CurrTD = etd->etd_Self;
5035 eqh->eqh_NextTD = etd->etd_NextTD;
5036 eqh->eqh_AltNextTD = etd->etd_AltNextTD;
5037 eqh->eqh_CtrlStatus = etd->etd_CtrlStatus;
5038 eqh->eqh_BufferPtr[0] = etd->etd_BufferPtr[0];
5039 eqh->eqh_BufferPtr[1] = etd->etd_BufferPtr[1];
5040 eqh->eqh_BufferPtr[2] = etd->etd_BufferPtr[2];
5041 eqh->eqh_BufferPtr[3] = etd->etd_BufferPtr[3];
5042 eqh->eqh_BufferPtr[4] = etd->etd_BufferPtr[4];
5044 Remove(&ioreq->iouh_Req.io_Message.mn_Node);
5045 ioreq->iouh_DriverPrivate1 = eqh;
5047 // manage endpoint going busy
5048 unit->hu_DevBusyReq[devadrep] = ioreq;
5049 unit->hu_NakTimeoutFrame[devadrep] = (ioreq->iouh_Flags & UHFF_NAKTIMEOUT) ? hc->hc_FrameCounter + (ioreq->iouh_NakTimeout<<3) : 0;
5051 Disable();
5052 AddTail(&hc->hc_TDQueue, (struct Node *) ioreq);
5054 // looks good to me, now enqueue this entry (just behind the asyncQH)
5055 eqh->eqh_Succ = hc->hc_EhciAsyncQH->eqh_Succ;
5056 eqh->eqh_NextQH = eqh->eqh_Succ->eqh_Self;
5057 SYNC;
5058 EIEIO;
5059 eqh->eqh_Pred = hc->hc_EhciAsyncQH;
5060 eqh->eqh_Succ->eqh_Pred = eqh;
5061 hc->hc_EhciAsyncQH->eqh_Succ = eqh;
5062 hc->hc_EhciAsyncQH->eqh_NextQH = eqh->eqh_Self;
5063 SYNC;
5064 EIEIO;
5065 Enable();
5067 ioreq = (struct IOUsbHWReq *) hc->hc_BulkXFerQueue.lh_Head;
5070 /* \\\ */
5072 /* /// "ehciCompleteInt()" */
5073 void ehciCompleteInt(struct PCIController *hc)
5075 ULONG framecnt = READREG32_LE(hc->hc_RegBase, EHCI_FRAMECOUNT);
5077 KPRINTF(1, ("CompleteInt!\n"));
5078 hc->hc_FrameCounter = (hc->hc_FrameCounter & 0xffffc000) + framecnt;
5080 /* **************** PROCESS DONE TRANSFERS **************** */
5082 if(hc->hc_AsyncAdvanced)
5084 struct EhciQH *eqh;
5085 struct EhciTD *etd;
5086 struct EhciTD *nextetd;
5088 hc->hc_AsyncAdvanced = FALSE;
5090 KPRINTF(1, ("AsyncAdvance %08lx\n", hc->hc_EhciAsyncFreeQH));
5092 while((eqh = hc->hc_EhciAsyncFreeQH))
5094 KPRINTF(1, ("FreeQH %08lx\n", eqh));
5095 nextetd = eqh->eqh_FirstTD;
5096 while((etd = nextetd))
5098 KPRINTF(1, ("FreeTD %08lx\n", nextetd));
5099 nextetd = etd->etd_Succ;
5100 ehciFreeTD(hc, etd);
5102 hc->hc_EhciAsyncFreeQH = eqh->eqh_Succ;
5103 ehciFreeQH(hc, eqh);
5107 ehciHandleFinishedTDs(hc);
5109 if(hc->hc_CtrlXFerQueue.lh_Head->ln_Succ)
5111 ehciScheduleCtrlTDs(hc);
5114 if(hc->hc_IntXFerQueue.lh_Head->ln_Succ)
5116 ehciScheduleIntTDs(hc);
5119 if(hc->hc_BulkXFerQueue.lh_Head->ln_Succ)
5121 ehciScheduleBulkTDs(hc);
5124 KPRINTF(1, ("CompleteDone\n"));
5126 /* \\\ */
5128 /* /// "ehciIntCode()" */
5129 void ehciIntCode(HIDDT_IRQ_Handler *irq, HIDDT_IRQ_HwInfo *hw)
5131 struct PCIController *hc = (struct PCIController *) irq->h_Data;
5132 struct PCIDevice *base = hc->hc_Device;
5133 struct PCIUnit *unit = hc->hc_Unit;
5134 ULONG intr;
5136 KPRINTF(1, ("pciEhciInt()\n"));
5137 intr = READREG32_LE(hc->hc_RegBase, EHCI_USBSTATUS);
5138 if(intr & hc->hc_PCIIntEnMask)
5140 WRITEREG32_LE(hc->hc_RegBase, EHCI_USBSTATUS, intr);
5141 KPRINTF(1, ("INT=%04lx\n", intr));
5142 if(!hc->hc_Online)
5144 return;
5146 if(intr & EHSF_FRAMECOUNTOVER)
5148 ULONG framecnt = READREG32_LE(hc->hc_RegBase, EHCI_FRAMECOUNT);
5149 hc->hc_FrameCounter = (hc->hc_FrameCounter|0x3fff) + 1 + framecnt;
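    // EHCI's FRINDEX register holds 14 bits of microframe count, hence the
    // 0x3fff mask when extending the counter to 32 bits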
5150 KPRINTF(5, ("Frame Counter Rollover %ld\n", hc->hc_FrameCounter));
5152 if(intr & EHSF_ASYNCADVANCE)
5154 KPRINTF(1, ("AsyncAdvance\n"));
5155 hc->hc_AsyncAdvanced = TRUE;
5157 if(intr & (EHSF_TDDONE|EHSF_TDERROR|EHSF_ASYNCADVANCE))
5159 SureCause(base, &hc->hc_CompleteInt);
5161 if(intr & EHSF_HOSTERROR)
5163 KPRINTF(200, ("Host ERROR!\n"));
5165 if(intr & EHSF_PORTCHANGED)
5167 UWORD hciport;
5168 ULONG oldval;
5169 UWORD portreg = EHCI_PORTSC1;
5170 for(hciport = 0; hciport < hc->hc_NumPorts; hciport++, portreg += 4)
5172 oldval = READREG32_LE(hc->hc_RegBase, portreg);
5173 // reflect port ownership (shortcut without hc->hc_PortNum20[hciport], as usb 2.0 maps 1:1)
5174 unit->hu_EhciOwned[hciport] = (oldval & EHPF_NOTPORTOWNER) ? FALSE : TRUE;
5175 if(oldval & EHPF_ENABLECHANGE)
5177 hc->hc_PortChangeMap[hciport] |= UPSF_PORT_ENABLE;
5179 if(oldval & EHPF_CONNECTCHANGE)
5181 hc->hc_PortChangeMap[hciport] |= UPSF_PORT_CONNECTION;
5183 if(oldval & EHPF_RESUMEDTX)
5185 hc->hc_PortChangeMap[hciport] |= UPSF_PORT_SUSPEND|UPSF_PORT_ENABLE;
5187 if(oldval & EHPF_OVERCURRENTCHG)
5189 hc->hc_PortChangeMap[hciport] |= UPSF_PORT_OVER_CURRENT;
5191 WRITEREG32_LE(hc->hc_RegBase, portreg, oldval);
5192 KPRINTF(20, ("PCI Int Port %ld Change %08lx\n", hciport + 1, oldval));
5193 if(hc->hc_PortChangeMap[hciport])
5195 unit->hu_RootPortChanges |= 1UL<<(hciport + 1);
5198 uhwCheckRootHubChanges(unit);
5202 /* \\\ */
5204 /* /// "uhwNakTimeoutInt()" */
5205 AROS_UFH1(void, uhwNakTimeoutInt,
5206 AROS_UFHA(struct PCIUnit *, unit, A1))
5208 AROS_USERFUNC_INIT
5210 struct PCIDevice *base = unit->hu_Device;
5211 struct PCIController *hc;
5212 struct IOUsbHWReq *ioreq;
5213 struct UhciQH *uqh;
5214 struct UhciTD *utd;
5215 struct EhciQH *eqh;
5216 struct OhciED *oed;
5217 UWORD devadrep;
5218 UWORD cnt;
5219 ULONG linkelem;
5220 ULONG ctrlstatus;
5222 //KPRINTF(10, ("NakTimeoutInt()\n"));
5224 // check for port status change for UHCI and frame rollovers and NAK Timeouts
5225 hc = (struct PCIController *) unit->hu_Controllers.lh_Head;
5226 while(hc->hc_Node.ln_Succ)
5228 if(!hc->hc_Online)
5230 hc = (struct PCIController *) hc->hc_Node.ln_Succ;
5231 continue;
5233 switch(hc->hc_HCIType)
5235 case HCITYPE_UHCI:
5237 ULONG framecnt = READREG16_LE(hc->hc_RegBase, UHCI_FRAMECOUNT);
5239 if(framecnt < (hc->hc_FrameCounter & 0xffff))
5241 hc->hc_FrameCounter = (hc->hc_FrameCounter|0xffff) + 1 + framecnt;
5242 KPRINTF(10, ("Frame Counter Rollover %ld\n", hc->hc_FrameCounter));
5244 framecnt = hc->hc_FrameCounter;
5246 // NakTimeout
5247 ioreq = (struct IOUsbHWReq *) hc->hc_TDQueue.lh_Head;
5248 while(((struct Node *) ioreq)->ln_Succ)
5250 if(ioreq->iouh_Flags & UHFF_NAKTIMEOUT)
5252 uqh = (struct UhciQH *) ioreq->iouh_DriverPrivate1;
5253 if(uqh)
5255 KPRINTF(1, ("Examining IOReq=%08lx with UQH=%08lx\n", ioreq, uqh));
5256 devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint + ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
5257 linkelem = READMEM32_LE(&uqh->uqh_Element);
5258 if(linkelem & UHCI_TERMINATE)
5260 KPRINTF(1, ("UQH terminated %08lx\n", linkelem));
5261 if(framecnt > unit->hu_NakTimeoutFrame[devadrep])
5263 // give the thing the chance to exit gracefully
5264 KPRINTF(20, ("Terminated? NAK timeout %ld > %ld, IOReq=%08lx\n", framecnt, unit->hu_NakTimeoutFrame[devadrep], ioreq));
5265 SureCause(base, &hc->hc_CompleteInt);
5267 } else {
5268 utd = (struct UhciTD *) ((linkelem & UHCI_PTRMASK) - hc->hc_PCIVirtualAdjust - 16); // struct UhciTD starts 16 before physical TD
5269 ctrlstatus = READMEM32_LE(&utd->utd_CtrlStatus);
5270 if(ctrlstatus & UTCF_ACTIVE)
5272 if(framecnt > unit->hu_NakTimeoutFrame[devadrep])
5274 // give the thing the chance to exit gracefully
5275 KPRINTF(20, ("NAK timeout %ld > %ld, IOReq=%08lx\n", framecnt, unit->hu_NakTimeoutFrame[devadrep], ioreq));
5276 ctrlstatus &= ~UTCF_ACTIVE;
5277 WRITEMEM32_LE(&utd->utd_CtrlStatus, ctrlstatus);
5278 SureCause(base, &hc->hc_CompleteInt);
5280 } else {
5281 if(framecnt > unit->hu_NakTimeoutFrame[devadrep])
5283 // give the thing the chance to exit gracefully
5284 KPRINTF(20, ("Terminated? NAK timeout %ld > %ld, IOReq=%08lx\n", framecnt, unit->hu_NakTimeoutFrame[devadrep], ioreq));
5285 SureCause(base, &hc->hc_CompleteInt);
5291 ioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ;
5294 uhciCheckPortStatusChange(hc);
5295 break;
5298 case HCITYPE_OHCI:
5300 ULONG framecnt = READREG32_LE(hc->hc_RegBase, OHCI_FRAMECOUNT);
5301 framecnt = hc->hc_FrameCounter = (hc->hc_FrameCounter & 0xffff0000) + framecnt;
5302 // NakTimeout
5303 ioreq = (struct IOUsbHWReq *) hc->hc_TDQueue.lh_Head;
5304 while(((struct Node *) ioreq)->ln_Succ)
5306 if(ioreq->iouh_Flags & UHFF_NAKTIMEOUT)
5308 oed = (struct OhciED *) ioreq->iouh_DriverPrivate1;
5309 if(oed)
5311 KPRINTF(1, ("CTRL=%04lx, CMD=%01lx, F=%ld, hccaDH=%08lx, hcDH=%08lx, CH=%08lx, CCH=%08lx, IntEn=%08lx\n",
5312 READREG32_LE(hc->hc_RegBase, OHCI_CONTROL),
5313 READREG32_LE(hc->hc_RegBase, OHCI_CMDSTATUS),
5314 READREG32_LE(hc->hc_RegBase, OHCI_FRAMECOUNT),
5315 READMEM32_LE(&hc->hc_OhciHCCA->oha_DoneHead),
5316 READREG32_LE(hc->hc_RegBase, OHCI_DONEHEAD),
5317 READREG32_LE(hc->hc_RegBase, OHCI_CTRL_HEAD_ED),
5318 READREG32_LE(hc->hc_RegBase, OHCI_CTRL_ED),
5319 READREG32_LE(hc->hc_RegBase, OHCI_INTEN)));
5321 devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint + ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
5322 ctrlstatus = READMEM32_LE(&oed->oed_HeadPtr);
5323 KPRINTF(1, ("Examining IOReq=%08lx with OED=%08lx HeadPtr=%08lx\n", ioreq, oed, ctrlstatus));
5324 if(framecnt > unit->hu_NakTimeoutFrame[devadrep])
5326 //ohciDebugSchedule(hc);
5327 if(ctrlstatus & OEHF_HALTED)
5329 // give the thing the chance to exit gracefully
5330 KPRINTF(20, ("Terminated? NAK timeout %ld > %ld, IOReq=%08lx\n", framecnt, unit->hu_NakTimeoutFrame[devadrep], ioreq));
5331 SureCause(base, &hc->hc_CompleteInt);
5332 } else {
5333 // give the thing the chance to exit gracefully
5334 KPRINTF(20, ("NAK timeout %ld > %ld, IOReq=%08lx\n", framecnt, unit->hu_NakTimeoutFrame[devadrep], ioreq));
5335 ctrlstatus |= OEHF_HALTED;
5336 WRITEMEM32_LE(&oed->oed_HeadPtr, ctrlstatus);
5337 ioreq->iouh_Req.io_Error = UHIOERR_NAKTIMEOUT;
5338 unit->hu_DevBusyReq[devadrep] = NULL;
5339 Remove(&ioreq->iouh_Req.io_Message.mn_Node);
5340 ohciFreeEDContext(hc, oed);
5341 KPRINTF(1, ("Old Toggle %04lx:%ld\n", devadrep, unit->hu_DevDataToggle[devadrep]));
5342 unit->hu_DevDataToggle[devadrep] = (ctrlstatus & OEHF_DATA1) ? TRUE : FALSE;
5343 KPRINTF(1, ("Toggle now %04lx:%ld\n", devadrep, unit->hu_DevDataToggle[devadrep]));
5344 ReplyMsg(&ioreq->iouh_Req.io_Message);
5349 ioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ;
5351 break;
5354 case HCITYPE_EHCI:
5356 ULONG framecnt = READREG32_LE(hc->hc_RegBase, EHCI_FRAMECOUNT);
5357 framecnt = hc->hc_FrameCounter = (hc->hc_FrameCounter & 0xffffc000) + framecnt;
5358 // NakTimeout
5359 for(cnt = 0; cnt < 2; cnt++) // check both the async and the periodic TD queue
5361 ioreq = (struct IOUsbHWReq *) (cnt ? hc->hc_PeriodicTDQueue.lh_Head : hc->hc_TDQueue.lh_Head);
5362 while(((struct Node *) ioreq)->ln_Succ)
5364 if(ioreq->iouh_Flags & UHFF_NAKTIMEOUT)
5366 eqh = (struct EhciQH *) ioreq->iouh_DriverPrivate1;
5367 if(eqh)
5369 KPRINTF(1, ("Examining IOReq=%08lx with EQH=%08lx\n", ioreq, eqh));
5370 devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint + ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
5371 ctrlstatus = READMEM32_LE(&eqh->eqh_CtrlStatus);
5372 if(ctrlstatus & ETCF_ACTIVE)
5374 if(framecnt > unit->hu_NakTimeoutFrame[devadrep])
5376 // give the thing the chance to exit gracefully
5377 KPRINTF(20, ("NAK timeout %ld > %ld, IOReq=%08lx\n", framecnt, unit->hu_NakTimeoutFrame[devadrep], ioreq));
5378 ctrlstatus &= ~ETCF_ACTIVE;
5379 ctrlstatus |= ETSF_HALTED;
5380 WRITEMEM32_LE(&eqh->eqh_CtrlStatus, ctrlstatus);
5381 SureCause(base, &hc->hc_CompleteInt);
5383 } else {
5384 if(ctrlstatus & ETCF_READYINTEN)
5386 KPRINTF(10, ("INT missed?!? Manually causing it! %08lx, IOReq=%08lx\n",
5387 ctrlstatus, ioreq));
5388 SureCause(base, &hc->hc_CompleteInt);
5393 ioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ;
5396 break;
5399 hc = (struct PCIController *) hc->hc_Node.ln_Succ;
5402 uhwCheckRootHubChanges(unit);
5404 /* Requeue the NAK timeout check (fires every 150ms) */
5405 unit->hu_NakTimeoutReq.tr_time.tv_micro = 150*1000;
5406 SendIO((APTR) &unit->hu_NakTimeoutReq);
5408 AROS_USERFUNC_EXIT
5410 /* \\\ */