2 Copyright © 2010-2011, The AROS Development Team. All rights reserved
6 #include <proto/exec.h>
10 #include <devices/usb_hub.h>
14 #undef HiddPCIDeviceAttrBase
15 #define HiddPCIDeviceAttrBase (hd->hd_HiddPCIDeviceAB)
17 #define HiddAttrBase (hd->hd_HiddAB)
/*
 * UhciResetHandler — AROS reset-callback interrupt handler.
 * Quiesces the UHCI controller on system reset: clears the command
 * register (stops the schedule) and masks all interrupt enables.
 * NOTE(review): this extract is a lossy line sample (embedded original
 * line numbers skip); the AROS_INTFUNC prologue/epilogue lines of this
 * handler are not visible here — do not compile this fragment as-is.
 */
19 static AROS_INTH1(UhciResetHandler
, struct PCIController
*, hc
)
23 // stop controller and disable all interrupts
24 WRITEIO16_LE(hc
->hc_RegBase
, UHCI_USBCMD
, 0);
25 WRITEIO16_LE(hc
->hc_RegBase
, UHCI_USBINTEN
, 0);
/*
 * uhciFreeQContext — unlink a queue head (and walk its TD chain) from the
 * hardware schedule.
 * hc:  controller the QH belongs to.
 * uqh: queue head to remove.
 * The QH is spliced out of the doubly-linked software list (uqh_Pred/
 * uqh_Succ) and the hardware link pointer of its predecessor is rewired
 * to the successor's self-pointer. The TD walk that frees each TD is only
 * partially visible in this lossy extract (loop opener lines are missing).
 */
30 void uhciFreeQContext(struct PCIController
*hc
, struct UhciQH
*uqh
) {
32 struct UhciTD
*utd
= NULL
;
33 struct UhciTD
*nextutd
;
35 KPRINTF(5, ("Unlinking QContext %08lx\n", uqh
));
36 // unlink from schedule
/* hardware link: predecessor now points past this QH */
37 uqh
->uqh_Pred
->uxx_Link
= uqh
->uqh_Succ
->uxx_Self
;
/* software double-link splice */
40 uqh
->uqh_Succ
->uxx_Pred
= uqh
->uqh_Pred
;
41 uqh
->uqh_Pred
->uxx_Succ
= uqh
->uqh_Succ
;
/* start of TD chain walk (loop construct lines missing from extract) */
44 nextutd
= uqh
->uqh_FirstTD
;
47 KPRINTF(1, ("FreeTD %08lx\n", nextutd
));
49 nextutd
= (struct UhciTD
*) utd
->utd_Succ
;
/*
 * uhciUpdateIntTree — re-optimize the hardware link pointers of the nine
 * interrupt queue-head levels (hc_UhciIntQH[0..8]) so each level links to
 * the last level that actually has work queued, with the control QH as
 * the fallback target.
 * NOTE(review): declarations of cnt/uxx and the loop braces are missing
 * from this lossy extract.
 */
55 void uhciUpdateIntTree(struct PCIController
*hc
) {
58 struct UhciXX
*preduxx
;
59 struct UhciXX
*lastuseduxx
;
62 // optimize linkage between queue heads
63 preduxx
= lastuseduxx
= (struct UhciXX
*) hc
->hc_UhciCtrlQH
; //hc->hc_UhciIsoTD;
64 for(cnt
= 0; cnt
< 9; cnt
++)
66 uxx
= (struct UhciXX
*) hc
->hc_UhciIntQH
[cnt
];
/* level has a private successor => remember it as the new link target */
67 if(uxx
->uxx_Succ
!= preduxx
)
69 lastuseduxx
= uxx
->uxx_Succ
;
71 uxx
->uxx_Link
= lastuseduxx
->uxx_Self
;
/*
 * uhciCheckPortStatusChange — poll both UHCI root-hub ports for status
 * changes (enable / connect / resume), accumulate them in
 * hc_PortChangeMap[] and flag the unit's hu_RootPortChanges bitmask, then
 * write the (possibly modified) status value back to clear change bits.
 * NOTE(review): lossy extract — declarations of hciport/portreg/oldval
 * and several closing braces are missing.
 */
76 void uhciCheckPortStatusChange(struct PCIController
*hc
) {
78 struct PCIUnit
*unit
= hc
->hc_Unit
;
82 // check for port status change for UHCI and frame rollovers
84 for(hciport
= 0; hciport
< 2; hciport
++) {
/* global (unit-level) port index for this controller-local port */
86 UWORD idx
= hc
->hc_PortNumGlobal
[hciport
];
88 portreg
= hciport
? UHCI_PORT2STSCTRL
: UHCI_PORT1STSCTRL
;
89 oldval
= READIO16_LE(hc
->hc_RegBase
, portreg
);
91 if(oldval
& UHPF_ENABLECHANGE
) {
92 KPRINTF(200, ("Port %ld Enable changed\n", hciport
));
93 hc
->hc_PortChangeMap
[hciport
] |= UPSF_PORT_ENABLE
;
96 if(oldval
& UHPF_CONNECTCHANGE
) {
97 KPRINTF(200, ("Port %ld Connect changed\n", hciport
));
98 hc
->hc_PortChangeMap
[hciport
] |= UPSF_PORT_CONNECTION
;
100 if(!(oldval
& UHPF_PORTCONNECTED
)) {
101 KPRINTF(200, ("Device removed on port %ld \n", hciport
));
105 if(oldval
& UHPF_RESUMEDTX
) {
106 KPRINTF(200, ("Port %ld Resume changed\n", hciport
));
107 hc
->hc_PortChangeMap
[hciport
] |= UPSF_PORT_SUSPEND
|UPSF_PORT_ENABLE
;
/* resume bit is cleared before the write-back below */
108 oldval
&= ~UHPF_RESUMEDTX
;
111 if(hc
->hc_PortChangeMap
[hciport
]) {
112 unit
->hu_RootPortChanges
|= 1UL<<(idx
+1);
113 /*KPRINTF(10, ("Port %ld (%ld) contributes %04lx to portmap %04lx\n", idx, hciport, hc->hc_PortChangeMap[hciport], unit->hu_RootPortChanges));*/
116 WRITEIO16_LE(hc
->hc_RegBase
, portreg
, oldval
);
/*
 * uhciHandleFinishedTDs — scan the controller's pending-transfer queue
 * (hc_TDQueue) for IO requests whose TD chains have completed or failed.
 * For each finished request: translate the TD status bits into an
 * io_Error code (babble/overflow, CRC/timeout, stall, bitstuff,
 * data-buffer, NAK timeout), accumulate the transferred byte count,
 * update the device data-toggle state, free the queue context, and
 * either reschedule the request (fragmented transfers go back on the
 * Ctrl/Int/Bulk queues) or reply it to the sender.
 * NOTE(review): this extract is a lossy line sample — many lines
 * (braces, loop openers, some declarations such as uqh/utd/ctrlstatus/
 * token/len/actual/devadrep/linkelem/inspect/shortpkt) are missing; do
 * not compile as-is.
 */
121 void uhciHandleFinishedTDs(struct PCIController
*hc
) {
123 struct PCIUnit
*unit
= hc
->hc_Unit
;
124 struct IOUsbHWReq
*ioreq
;
125 struct IOUsbHWReq
*nextioreq
;
128 struct UhciTD
*nextutd
;
135 ULONG nextctrlstatus
= 0;
138 BOOL updatetree
= FALSE
;
139 BOOL fixsetupterm
= FALSE
;
141 KPRINTF(1, ("Checking for work done...\n"));
142 ioreq
= (struct IOUsbHWReq
*) hc
->hc_TDQueue
.lh_Head
;
/* iterate with nextioreq saved up-front: the current node may be
   Remove()d and replied inside the loop */
143 while((nextioreq
= (struct IOUsbHWReq
*) ((struct Node
*) ioreq
)->ln_Succ
))
145 uqh
= (struct UhciQH
*) ioreq
->iouh_DriverPrivate1
;
148 KPRINTF(1, ("Examining IOReq=%08lx with UQH=%08lx\n", ioreq
, uqh
));
149 linkelem
= READMEM32_LE(&uqh
->uqh_Element
);
/* devadrep: (address<<5)+endpoint, +0x10 for IN direction */
151 devadrep
= (ioreq
->iouh_DevAddr
<<5) + ioreq
->iouh_Endpoint
+ ((ioreq
->iouh_Dir
== UHDIR_IN
) ? 0x10 : 0);
152 if(linkelem
& UHCI_TERMINATE
)
154 KPRINTF(1, ("UQH terminated %08lx\n", linkelem
));
/* translate physical element pointer back into the virtual TD */
157 utd
= (struct UhciTD
*) (IPTR
) ((linkelem
& UHCI_PTRMASK
) - hc
->hc_PCIVirtualAdjust
- 16); // struct UhciTD starts 16 bytes before physical TD
158 ctrlstatus
= READMEM32_LE(&utd
->utd_CtrlStatus
);
159 nextutd
= (struct UhciTD
*)utd
->utd_Succ
;
160 if(!(ctrlstatus
& UTCF_ACTIVE
) && nextutd
)
162 /* OK, it's not active. Does it look like it's done? Code copied from below.
163 If not done, check the next TD too. */
164 if(ctrlstatus
& (UTSF_BABBLE
|UTSF_STALLED
|UTSF_CRCTIMEOUT
|UTSF_DATABUFFERERR
|UTSF_BITSTUFFERR
))
170 token
= READMEM32_LE(&utd
->utd_Token
);
171 len
= (ctrlstatus
& UTSM_ACTUALLENGTH
) >> UTSS_ACTUALLENGTH
;
172 if((len
!= (token
& UTTM_TRANSLENGTH
) >> UTTS_TRANSLENGTH
))
179 nextctrlstatus
= READMEM32_LE(&nextutd
->utd_CtrlStatus
);
182 /* Now, did the element link pointer change while we fetched the status for the pointed at TD?
183 If so, disregard the gathered information and assume still active. */
184 if(READMEM32_LE(&uqh
->uqh_Element
) != linkelem
)
186 /* Oh well, probably still active */
187 KPRINTF(1, ("Link Element changed, still active.\n"));
189 else if(!(ctrlstatus
& UTCF_ACTIVE
) && (nextutd
== 0 || !(nextctrlstatus
& UTCF_ACTIVE
)))
191 KPRINTF(1, ("CtrlStatus inactive %08lx\n", ctrlstatus
));
/* still active but NAK-timeout frame exceeded => give up on it */
194 else if(unit
->hu_NakTimeoutFrame
[devadrep
] && (hc
->hc_FrameCounter
> unit
->hu_NakTimeoutFrame
[devadrep
]))
196 ioreq
->iouh_Req
.io_Error
= UHIOERR_NAKTIMEOUT
;
200 fixsetupterm
= FALSE
;
204 if(inspect
< 2) // if all went okay, don't traverse list, assume all bytes successfully transferred
/* walk the whole TD chain to classify the error / count bytes */
206 utd
= uqh
->uqh_FirstTD
;
210 ctrlstatus
= READMEM32_LE(&utd
->utd_CtrlStatus
);
211 if(ctrlstatus
& UTCF_ACTIVE
)
213 KPRINTF(20, ("Internal error! Still active?!\n"));
214 if(ctrlstatus
& UTSF_BABBLE
)
216 KPRINTF(200, ("HOST CONTROLLER IS DEAD!!!\n"));
217 ioreq
->iouh_Req
.io_Error
= UHIOERR_HOSTERROR
;
/* hard-reset the HC to recover from the dead state */
218 WRITEIO16_LE(hc
->hc_RegBase
, UHCI_USBCMD
, UHCF_HCRESET
|UHCF_MAXPACKET64
|UHCF_CONFIGURE
|UHCF_RUNSTOP
);
224 token
= READMEM32_LE(&utd
->utd_Token
);
225 KPRINTF(1, ("TD=%08lx CS=%08lx Token=%08lx\n", utd
, ctrlstatus
, token
));
226 if(ctrlstatus
& (UTSF_BABBLE
|UTSF_STALLED
|UTSF_CRCTIMEOUT
|UTSF_DATABUFFERERR
|UTSF_BITSTUFFERR
))
228 if(ctrlstatus
& UTSF_BABBLE
)
230 KPRINTF(20, ("Babble error %08lx/%08lx\n", ctrlstatus
, token
));
231 ioreq
->iouh_Req
.io_Error
= UHIOERR_OVERFLOW
;
233 // VIA chipset seems to die on babble!?!
234 KPRINTF(10, ("HW Regs USBCMD=%04lx\n", READIO16_LE(hc
->hc_RegBase
, UHCI_USBCMD
)));
/* restart the schedule (without reset) after babble */
235 WRITEIO16_LE(hc
->hc_RegBase
, UHCI_USBCMD
, UHCF_MAXPACKET64
|UHCF_CONFIGURE
|UHCF_RUNSTOP
);
239 //ctrlstatus &= ~(UTSF_BABBLE|UTSF_STALLED|UTSF_CRCTIMEOUT|UTSF_DATABUFFERERR|UTSF_BITSTUFFERR|UTSF_NAK);
/* re-arm the TD so the transfer can continue */
240 ctrlstatus
|= UTCF_ACTIVE
;
241 WRITEMEM32_LE(&utd
->utd_CtrlStatus
, ctrlstatus
);
246 else if(ctrlstatus
& UTSF_CRCTIMEOUT
)
248 KPRINTF(20, ("CRC/Timeout error IOReq=%08lx DIR=%ld\n", ioreq
, ioreq
->iouh_Dir
));
249 if(ctrlstatus
& UTSF_STALLED
)
251 ioreq
->iouh_Req
.io_Error
= UHIOERR_TIMEOUT
;
/* CRC+timeout without stall: IN => CRC error, OUT => timeout */
253 ioreq
->iouh_Req
.io_Error
= (ioreq
->iouh_Dir
== UHDIR_IN
) ? UHIOERR_CRCERROR
: UHIOERR_TIMEOUT
;
256 else if(ctrlstatus
& UTSF_STALLED
)
258 KPRINTF(20, ("STALLED!\n"));
259 ioreq
->iouh_Req
.io_Error
= UHIOERR_STALL
;
261 else if(ctrlstatus
& UTSF_BITSTUFFERR
)
263 KPRINTF(20, ("Bitstuff error\n"));
264 ioreq
->iouh_Req
.io_Error
= UHIOERR_CRCERROR
;
266 else if(ctrlstatus
& UTSF_DATABUFFERERR
)
268 KPRINTF(20, ("Databuffer error\n"));
269 ioreq
->iouh_Req
.io_Error
= UHIOERR_HOSTERROR
;
274 if(unit
->hu_NakTimeoutFrame
[devadrep
] && (hc
->hc_FrameCounter
> unit
->hu_NakTimeoutFrame
[devadrep
]) && (ctrlstatus
& UTSF_NAK
))
276 ioreq
->iouh_Req
.io_Error
= UHIOERR_NAKTIMEOUT
;
280 len
= (ctrlstatus
& UTSM_ACTUALLENGTH
)>>UTSS_ACTUALLENGTH
;
281 if((len
!= (token
& UTTM_TRANSLENGTH
)>>UTTS_TRANSLENGTH
))
285 len
= (len
+1) & 0x7ff; // get real length
286 if((token
& UTTM_PID
)>>UTTS_PID
!= PID_SETUP
) // don't count setup packet
289 // due to the VIA babble bug workaround, actually more bytes can
290 // be received than requested, limit the actual value to the upper limit
291 if(actual
> uqh
->uqh_Actual
)
293 actual
= uqh
->uqh_Actual
;
300 } while((utd
= (struct UhciTD
*) utd
->utd_Succ
));
303 // bail out from babble
304 ioreq
= (struct IOUsbHWReq
*) ((struct Node
*) ioreq
)->ln_Succ
;
/* runt-packet check: short result without error and without permission */
307 if((actual
< uqh
->uqh_Actual
) && (!ioreq
->iouh_Req
.io_Error
) && (!(ioreq
->iouh_Flags
& UHFF_ALLOWRUNTPKTS
)))
309 KPRINTF(10, ("Short packet: %ld < %ld\n", actual
, ioreq
->iouh_Length
));
310 ioreq
->iouh_Req
.io_Error
= UHIOERR_RUNTPACKET
;
312 ioreq
->iouh_Actual
+= actual
;
314 KPRINTF(10, ("all %ld bytes transferred\n", uqh
->uqh_Actual
));
315 ioreq
->iouh_Actual
+= uqh
->uqh_Actual
;
317 // due to the short packet, the terminal of a setup packet has not been sent. Please do so.
318 if(shortpkt
&& (ioreq
->iouh_Req
.io_Command
== UHCMD_CONTROLXFER
))
322 // this is actually no short packet but result of the VIA babble fix
323 if(shortpkt
&& (ioreq
->iouh_Actual
== ioreq
->iouh_Length
))
/* release the endpoint and detach the request from the queue */
327 unit
->hu_DevBusyReq
[devadrep
] = NULL
;
328 Remove(&ioreq
->iouh_Req
.io_Message
.mn_Node
);
329 uhciFreeQContext(hc
, uqh
);
330 if(ioreq
->iouh_Req
.io_Command
== UHCMD_INTXFER
)
336 if(inspect
< 2) // otherwise, toggle will be right already
338 // use next data toggle bit based on last successful transaction
339 unit
->hu_DevDataToggle
[devadrep
] = (token
& UTTF_DATA1
) ? FALSE
: TRUE
;
341 if((!shortpkt
&& (ioreq
->iouh_Actual
< ioreq
->iouh_Length
)) || fixsetupterm
)
343 // fragmented, do some more work
344 switch(ioreq
->iouh_Req
.io_Command
)
346 case UHCMD_CONTROLXFER
:
347 KPRINTF(10, ("Rescheduling CtrlTransfer at %ld of %ld\n", ioreq
->iouh_Actual
, ioreq
->iouh_Length
));
348 AddHead(&hc
->hc_CtrlXFerQueue
, (struct Node
*) ioreq
);
352 KPRINTF(10, ("Rescheduling IntTransfer at %ld of %ld\n", ioreq
->iouh_Actual
, ioreq
->iouh_Length
));
353 AddHead(&hc
->hc_IntXFerQueue
, (struct Node
*) ioreq
);
357 KPRINTF(10, ("Rescheduling BulkTransfer at %ld of %ld\n", ioreq
->iouh_Actual
, ioreq
->iouh_Length
));
358 AddHead(&hc
->hc_BulkXFerQueue
, (struct Node
*) ioreq
);
362 KPRINTF(10, ("Uhm, internal error, dunno where to queue this req\n"));
363 ReplyMsg(&ioreq
->iouh_Req
.io_Message
);
366 // check for sucessful clear feature and set address ctrl transfers
367 if(ioreq
->iouh_Req
.io_Command
== UHCMD_CONTROLXFER
)
369 uhwCheckSpecialCtrlTransfers(hc
, ioreq
);
371 ReplyMsg(&ioreq
->iouh_Req
.io_Message
);
374 // be sure to save the data toggle bit where the error occurred
375 unit
->hu_DevDataToggle
[devadrep
] = (token
& UTTF_DATA1
) ? TRUE
: FALSE
;
376 ReplyMsg(&ioreq
->iouh_Req
.io_Message
);
380 KPRINTF(20, ("IOReq=%08lx has no UQH!\n", ioreq
));
386 KPRINTF(10, ("Updating Tree\n"));
387 uhciUpdateIntTree(hc
);
/*
 * uhciScheduleCtrlTDs — move pending control transfers from
 * hc_CtrlXFerQueue onto the hardware schedule.
 * For each free endpoint: allocate a QH plus SETUP and TERM TDs, build
 * the data-stage TD chain (fragmenting at UHCI_TD_CTRL_LIMIT bytes),
 * mark the endpoint busy, arm the NAK timeout and splice the QH in
 * directly behind hc_UhciCtrlQH.
 * NOTE(review): lossy extract — declarations (uqh, ctrlstatus, token,
 * actual, phyaddr, len, devadrep), several braces and the data-stage
 * `do {` opener are missing; do not compile as-is.
 */
391 void uhciScheduleCtrlTDs(struct PCIController
*hc
) {
393 struct PCIUnit
*unit
= hc
->hc_Unit
;
394 struct IOUsbHWReq
*ioreq
;
397 struct UhciTD
*setuputd
;
398 struct UhciTD
*datautd
;
399 struct UhciTD
*termutd
;
400 struct UhciTD
*predutd
;
408 /* *** CTRL Transfers *** */
409 KPRINTF(1, ("Scheduling new CTRL transfers...\n"));
410 ioreq
= (struct IOUsbHWReq
*) hc
->hc_CtrlXFerQueue
.lh_Head
;
411 while(((struct Node
*) ioreq
)->ln_Succ
)
/* control endpoints: no direction bit in the busy index */
413 devadrep
= (ioreq
->iouh_DevAddr
<<5) + ioreq
->iouh_Endpoint
;
414 KPRINTF(10, ("New CTRL transfer to %ld.%ld: %ld bytes\n", ioreq
->iouh_DevAddr
, ioreq
->iouh_Endpoint
, ioreq
->iouh_Length
));
415 /* is endpoint already in use or do we have to wait for next transaction */
416 if(unit
->hu_DevBusyReq
[devadrep
])
418 KPRINTF(5, ("Endpoint %02lx in use!\n", devadrep
));
419 ioreq
= (struct IOUsbHWReq
*) ((struct Node
*) ioreq
)->ln_Succ
;
423 uqh
= uhciAllocQH(hc
);
429 setuputd
= uhciAllocTD(hc
);
435 termutd
= uhciAllocTD(hc
);
/* TERM allocation failed => roll back the SETUP TD */
438 uhciFreeTD(hc
, setuputd
);
442 uqh
->uqh_IOReq
= ioreq
;
444 //termutd->utd_QueueHead = setuputd->utd_QueueHead = uqh;
446 KPRINTF(1, ("SetupTD=%08lx, TermTD=%08lx\n", setuputd
, termutd
));
449 ctrlstatus
= UTCF_ACTIVE
|UTCF_3ERRORSLIMIT
;
450 if(ioreq
->iouh_Flags
& UHFF_LOWSPEED
)
452 KPRINTF(5, ("*** LOW SPEED ***\n"));
453 ctrlstatus
|= UTCF_LOWSPEED
;
455 token
= (ioreq
->iouh_DevAddr
<<UTTS_DEVADDR
)|(ioreq
->iouh_Endpoint
<<UTTS_ENDPOINT
);
456 //setuputd->utd_Pred = NULL;
457 if(ioreq
->iouh_Actual
)
459 // this is a continuation of a fragmented ctrl transfer!
460 KPRINTF(1, ("Continuing FRAGMENT at %ld of %ld\n", ioreq
->iouh_Actual
, ioreq
->iouh_Length
));
464 uqh
->uqh_FirstTD
= setuputd
;
465 uqh
->uqh_Element
= setuputd
->utd_Self
; // start of queue
466 WRITEMEM32_LE(&setuputd
->utd_CtrlStatus
, ctrlstatus
);
/* SETUP stage: 8-byte packet, always DATA0 */
467 WRITEMEM32_LE(&setuputd
->utd_Token
, (PID_SETUP
<<UTTS_PID
)|token
|(7<<UTTS_TRANSLENGTH
)|UTTF_DATA0
);
468 WRITEMEM32_LE(&setuputd
->utd_BufferPtr
, (IPTR
) pciGetPhysical(hc
, &ioreq
->iouh_SetupData
));
/* data stage direction follows bmRequestType */
471 token
|= (ioreq
->iouh_SetupData
.bmRequestType
& URTF_IN
) ? PID_IN
: PID_OUT
;
473 actual
= ioreq
->iouh_Actual
;
474 if(ioreq
->iouh_Length
- actual
)
476 ctrlstatus
|= UTCF_SHORTPACKET
;
479 phyaddr
= (IPTR
) pciGetPhysical(hc
, &(((UBYTE
*) ioreq
->iouh_Data
)[ioreq
->iouh_Actual
]));
480 if(!unit
->hu_DevDataToggle
[devadrep
])
482 // continue with data toggle 0
486 phyaddr
= (IPTR
) pciGetPhysical(hc
, ioreq
->iouh_Data
);
490 datautd
= uhciAllocTD(hc
);
495 token
^= UTTF_DATA1
; // toggle bit
496 predutd
->utd_Link
= datautd
->utd_Self
;
497 predutd
->utd_Succ
= (struct UhciXX
*) datautd
;
498 //datautd->utd_Pred = (struct UhciXX *) predutd;
499 //datautd->utd_QueueHead = uqh;
500 len
= ioreq
->iouh_Length
- actual
;
501 if(len
> ioreq
->iouh_MaxPktSize
)
503 len
= ioreq
->iouh_MaxPktSize
;
505 WRITEMEM32_LE(&datautd
->utd_CtrlStatus
, ctrlstatus
);
507 /* FIXME: This workaround for a VIA babble bug will potentially overwrite innocent memory (very rarely), but will avoid the host controller dropping dead completely. */
508 if((len
< ioreq
->iouh_MaxPktSize
) && (ioreq
->iouh_SetupData
.bmRequestType
& URTF_IN
))
510 WRITEMEM32_LE(&datautd
->utd_Token
, token
|((ioreq
->iouh_MaxPktSize
-1)<<UTTS_TRANSLENGTH
)); // no masking need here as len is always >= 1
512 WRITEMEM32_LE(&datautd
->utd_Token
, token
|((len
-1)<<UTTS_TRANSLENGTH
)); // no masking need here as len is always >= 1
515 WRITEMEM32_LE(&datautd
->utd_Token
, token
|((len
-1)<<UTTS_TRANSLENGTH
)); // no masking need here as len is always >= 1
517 WRITEMEM32_LE(&datautd
->utd_BufferPtr
, phyaddr
);
/* fragment the data stage at UHCI_TD_CTRL_LIMIT bytes per batch */
521 } while((actual
< ioreq
->iouh_Length
) && (actual
- ioreq
->iouh_Actual
< UHCI_TD_CTRL_LIMIT
));
522 if(actual
== ioreq
->iouh_Actual
)
524 // not at least one data TD? try again later
525 uhciFreeTD(hc
, setuputd
);
526 uhciFreeTD(hc
, termutd
);
533 KPRINTF(1, ("Freeing setup\n"));
534 uqh
->uqh_FirstTD
= (struct UhciTD
*) setuputd
->utd_Succ
;
535 //uqh->uqh_FirstTD->utd_Pred = NULL;
536 uqh
->uqh_Element
= setuputd
->utd_Succ
->uxx_Self
; // start of queue after setup packet
537 uhciFreeTD(hc
, setuputd
);
538 // set toggle for next batch
539 unit
->hu_DevDataToggle
[devadrep
] = (token
& UTTF_DATA1
) ? FALSE
: TRUE
;
544 // free Setup packet, assign termination as first packet (no data)
545 KPRINTF(1, ("Freeing setup (term only)\n"));
546 uqh
->uqh_FirstTD
= (struct UhciTD
*) termutd
;
547 uqh
->uqh_Element
= termutd
->utd_Self
; // start of queue after setup packet
548 uhciFreeTD(hc
, setuputd
);
552 uqh
->uqh_Actual
= actual
- ioreq
->iouh_Actual
;
553 ctrlstatus
|= UTCF_READYINTEN
;
554 if(actual
== ioreq
->iouh_Length
)
557 KPRINTF(1, ("Activating TERM\n"));
/* status stage uses the opposite direction of the data stage */
559 token
^= (PID_IN
^PID_OUT
)<<UTTS_PID
;
563 predutd
->utd_Link
= termutd
->utd_Self
;
564 predutd
->utd_Succ
= (struct UhciXX
*) termutd
;
566 //termutd->utd_Pred = (struct UhciXX *) predutd;
567 WRITEMEM32_LE(&termutd
->utd_CtrlStatus
, ctrlstatus
);
568 WRITEMEM32_LE(&termutd
->utd_Token
, token
|(0x7ff<<UTTS_TRANSLENGTH
));
569 CONSTWRITEMEM32_LE(&termutd
->utd_Link
, UHCI_TERMINATE
);
570 termutd
->utd_Succ
= NULL
;
571 //uqh->uqh_LastTD = termutd;
573 KPRINTF(1, ("Setup data phase fragmented\n"));
574 // don't create TERM, we don't know the final data toggle bit
575 // but mark the last data TD for interrupt generation
576 WRITEMEM32_LE(&predutd
->utd_CtrlStatus
, ctrlstatus
);
577 uhciFreeTD(hc
, termutd
);
578 CONSTWRITEMEM32_LE(&predutd
->utd_Link
, UHCI_TERMINATE
);
579 predutd
->utd_Succ
= NULL
;
580 //uqh->uqh_LastTD = predutd;
583 Remove(&ioreq
->iouh_Req
.io_Message
.mn_Node
);
584 ioreq
->iouh_DriverPrivate1
= uqh
;
586 // manage endpoint going busy
587 unit
->hu_DevBusyReq
[devadrep
] = ioreq
;
588 unit
->hu_NakTimeoutFrame
[devadrep
] = (ioreq
->iouh_Flags
& UHFF_NAKTIMEOUT
) ? hc
->hc_FrameCounter
+ ioreq
->iouh_NakTimeout
: 0;
591 AddTail(&hc
->hc_TDQueue
, (struct Node
*) ioreq
);
593 // looks good to me, now enqueue this entry (just behind the CtrlQH)
594 uqh
->uqh_Succ
= hc
->hc_UhciCtrlQH
->uqh_Succ
;
595 uqh
->uqh_Link
= uqh
->uqh_Succ
->uxx_Self
;
598 uqh
->uqh_Pred
= (struct UhciXX
*) hc
->hc_UhciCtrlQH
;
599 uqh
->uqh_Succ
->uxx_Pred
= (struct UhciXX
*) uqh
;
600 hc
->hc_UhciCtrlQH
->uqh_Succ
= (struct UhciXX
*) uqh
;
601 hc
->hc_UhciCtrlQH
->uqh_Link
= uqh
->uqh_Self
;
605 ioreq
= (struct IOUsbHWReq
*) hc
->hc_CtrlXFerQueue
.lh_Head
;
/*
 * uhciScheduleIntTDs — move pending interrupt transfers from
 * hc_IntXFerQueue onto the hardware schedule.
 * Builds a TD chain per request (capped at UHCI_TD_INT_LIMIT bytes per
 * batch), picks the interrupt QH level matching iouh_Interval (level 8 =
 * 256 ms cap), marks the endpoint busy and splices the new QH behind the
 * chosen level's QH, then refreshes the interrupt tree linkage.
 * NOTE(review): lossy extract — declarations (uqh, utd, ctrlstatus,
 * token, actual, phyaddr, len, devadrep, cnt), braces and the TD-chain
 * `do {` opener are missing; do not compile as-is.
 */
609 void uhciScheduleIntTDs(struct PCIController
*hc
) {
611 struct PCIUnit
*unit
= hc
->hc_Unit
;
612 struct IOUsbHWReq
*ioreq
;
616 struct UhciQH
*intuqh
;
618 struct UhciTD
*predutd
;
625 /* *** INT Transfers *** */
626 KPRINTF(1, ("Scheduling new INT transfers...\n"));
627 ioreq
= (struct IOUsbHWReq
*) hc
->hc_IntXFerQueue
.lh_Head
;
628 while(((struct Node
*) ioreq
)->ln_Succ
)
/* busy index includes direction bit (+0x10 for IN) */
630 devadrep
= (ioreq
->iouh_DevAddr
<<5) + ioreq
->iouh_Endpoint
+ ((ioreq
->iouh_Dir
== UHDIR_IN
) ? 0x10 : 0);
631 KPRINTF(10, ("New INT transfer to %ld.%ld: %ld bytes\n", ioreq
->iouh_DevAddr
, ioreq
->iouh_Endpoint
, ioreq
->iouh_Length
));
632 /* is endpoint already in use or do we have to wait for next transaction */
633 if(unit
->hu_DevBusyReq
[devadrep
]) {
634 KPRINTF(5, ("Endpoint %02lx in use!\n", devadrep
));
635 ioreq
= (struct IOUsbHWReq
*) ((struct Node
*) ioreq
)->ln_Succ
;
639 uqh
= uhciAllocQH(hc
);
644 uqh
->uqh_IOReq
= ioreq
;
646 ctrlstatus
= UTCF_ACTIVE
|UTCF_1ERRORLIMIT
|UTCF_SHORTPACKET
;
647 if(ioreq
->iouh_Flags
& UHFF_LOWSPEED
) {
648 KPRINTF(5, ("*** LOW SPEED ***\n"));
649 ctrlstatus
|= UTCF_LOWSPEED
;
651 token
= (ioreq
->iouh_DevAddr
<<UTTS_DEVADDR
)|(ioreq
->iouh_Endpoint
<<UTTS_ENDPOINT
);
652 token
|= (ioreq
->iouh_Dir
== UHDIR_IN
) ? PID_IN
: PID_OUT
;
654 actual
= ioreq
->iouh_Actual
;
655 phyaddr
= (IPTR
) pciGetPhysical(hc
, &(((UBYTE
*) ioreq
->iouh_Data
)[ioreq
->iouh_Actual
]));
656 if(unit
->hu_DevDataToggle
[devadrep
]) {
657 // continue with data toggle 1
658 KPRINTF(1, ("Data1\n"));
661 KPRINTF(1, ("Data0\n"));
664 utd
= uhciAllocTD(hc
);
/* chain onto predecessor; UHCI_DFS = depth-first traversal hint */
669 WRITEMEM32_LE(&predutd
->utd_Link
, READMEM32_LE(&utd
->utd_Self
)|UHCI_DFS
);
670 predutd
->utd_Succ
= (struct UhciXX
*) utd
;
671 //utd->utd_Pred = (struct UhciXX *) predutd;
673 uqh
->uqh_FirstTD
= utd
;
674 uqh
->uqh_Element
= utd
->utd_Self
;
675 //utd->utd_Pred = NULL;
677 //utd->utd_QueueHead = uqh;
678 len
= ioreq
->iouh_Length
- actual
;
679 if(len
> ioreq
->iouh_MaxPktSize
) {
680 len
= ioreq
->iouh_MaxPktSize
;
683 WRITEMEM32_LE(&utd
->utd_CtrlStatus
, ctrlstatus
);
684 WRITEMEM32_LE(&utd
->utd_Token
, token
|(((len
-1) & 0x7ff)<<UTTS_TRANSLENGTH
));
685 WRITEMEM32_LE(&utd
->utd_BufferPtr
, phyaddr
);
689 token
^= UTTF_DATA1
; // toggle bit
690 } while((actual
< ioreq
->iouh_Length
) && (actual
- ioreq
->iouh_Actual
< UHCI_TD_INT_LIMIT
));
693 // not at least one data TD? try again later
698 uqh
->uqh_Actual
= actual
- ioreq
->iouh_Actual
;
699 // set toggle for next batch / succesful transfer
700 unit
->hu_DevDataToggle
[devadrep
] = (token
& UTTF_DATA1
) ? TRUE
: FALSE
;
701 if(unit
->hu_DevDataToggle
[devadrep
]) {
702 // continue with data toggle 1
703 KPRINTF(1, ("NewData1\n"));
705 KPRINTF(1, ("NewData0\n"));
/* last TD raises the completion interrupt and terminates the chain */
707 ctrlstatus
|= UTCF_READYINTEN
;
708 WRITEMEM32_LE(&predutd
->utd_CtrlStatus
, ctrlstatus
);
709 CONSTWRITEMEM32_LE(&utd
->utd_Link
, UHCI_TERMINATE
);
710 utd
->utd_Succ
= NULL
;
711 //uqh->uqh_LastTD = utd;
713 if(ioreq
->iouh_Interval
>= 255) {
714 intuqh
= hc
->hc_UhciIntQH
[8]; // 256ms interval
718 intuqh
= hc
->hc_UhciIntQH
[cnt
++];
719 } while(ioreq
->iouh_Interval
>= (1<<cnt
));
720 KPRINTF(1, ("Scheduled at level %ld\n", cnt
));
723 Remove(&ioreq
->iouh_Req
.io_Message
.mn_Node
);
724 ioreq
->iouh_DriverPrivate1
= uqh
;
726 // manage endpoint going busy
727 unit
->hu_DevBusyReq
[devadrep
] = ioreq
;
728 unit
->hu_NakTimeoutFrame
[devadrep
] = (ioreq
->iouh_Flags
& UHFF_NAKTIMEOUT
) ? hc
->hc_FrameCounter
+ ioreq
->iouh_NakTimeout
: 0;
731 AddTail(&hc
->hc_TDQueue
, (struct Node
*) ioreq
);
733 // looks good to me, now enqueue this entry (just behind the right IntQH)
734 uqh
->uqh_Succ
= intuqh
->uqh_Succ
;
735 uqh
->uqh_Link
= uqh
->uqh_Succ
->uxx_Self
;
738 uqh
->uqh_Pred
= (struct UhciXX
*) intuqh
;
739 uqh
->uqh_Succ
->uxx_Pred
= (struct UhciXX
*) uqh
;
740 intuqh
->uqh_Succ
= (struct UhciXX
*) uqh
;
741 intuqh
->uqh_Link
= uqh
->uqh_Self
;
745 uhciUpdateIntTree(hc
);
747 ioreq
= (struct IOUsbHWReq
*) hc
->hc_IntXFerQueue
.lh_Head
;
/*
 * uhciScheduleBulkTDs — move pending bulk transfers from
 * hc_BulkXFerQueue onto the hardware schedule.
 * Builds a TD chain per request (capped at UHCI_TD_BULK_LIMIT bytes per
 * batch, with optional trailing zero-length packet for OUT transfers
 * unless UHFF_NOSHORTPKT), marks the endpoint busy, arms the NAK timeout
 * and splices the QH in directly behind hc_UhciBulkQH.
 * NOTE(review): lossy extract — declarations (uqh, utd, ctrlstatus,
 * token, actual, phyaddr, len, devadrep, forcezero), braces and the
 * TD-chain `do {` opener are missing; do not compile as-is.
 */
751 void uhciScheduleBulkTDs(struct PCIController
*hc
) {
753 struct PCIUnit
*unit
= hc
->hc_Unit
;
754 struct IOUsbHWReq
*ioreq
;
758 struct UhciTD
*predutd
;
766 /* *** BULK Transfers *** */
767 KPRINTF(1, ("Scheduling new BULK transfers...\n"));
768 ioreq
= (struct IOUsbHWReq
*) hc
->hc_BulkXFerQueue
.lh_Head
;
769 while(((struct Node
*) ioreq
)->ln_Succ
)
771 devadrep
= (ioreq
->iouh_DevAddr
<<5) + ioreq
->iouh_Endpoint
+ ((ioreq
->iouh_Dir
== UHDIR_IN
) ? 0x10 : 0);
772 KPRINTF(10, ("New BULK transfer to %ld.%ld: %ld bytes\n", ioreq
->iouh_DevAddr
, ioreq
->iouh_Endpoint
, ioreq
->iouh_Length
));
773 /* is endpoint already in use or do we have to wait for next transaction */
774 if(unit
->hu_DevBusyReq
[devadrep
])
776 KPRINTF(5, ("Endpoint %02lx in use!\n", devadrep
));
777 ioreq
= (struct IOUsbHWReq
*) ((struct Node
*) ioreq
)->ln_Succ
;
781 uqh
= uhciAllocQH(hc
);
787 uqh
->uqh_IOReq
= ioreq
;
790 ctrlstatus
= UTCF_ACTIVE
|UTCF_1ERRORLIMIT
|UTCF_SHORTPACKET
;
791 token
= (ioreq
->iouh_DevAddr
<<UTTS_DEVADDR
)|(ioreq
->iouh_Endpoint
<<UTTS_ENDPOINT
);
792 token
|= (ioreq
->iouh_Dir
== UHDIR_IN
) ? PID_IN
: PID_OUT
;
794 actual
= ioreq
->iouh_Actual
;
795 phyaddr
= (IPTR
) pciGetPhysical(hc
, &(((UBYTE
*) ioreq
->iouh_Data
)[ioreq
->iouh_Actual
]));
796 if(unit
->hu_DevDataToggle
[devadrep
])
798 // continue with data toggle 1
803 utd
= uhciAllocTD(hc
);
/* chain onto predecessor; UHCI_DFS = depth-first traversal hint */
811 WRITEMEM32_LE(&predutd
->utd_Link
, READMEM32_LE(&utd
->utd_Self
)|UHCI_DFS
);
812 predutd
->utd_Succ
= (struct UhciXX
*) utd
;
813 //utd->utd_Pred = (struct UhciXX *) predutd;
815 uqh
->uqh_FirstTD
= utd
;
816 uqh
->uqh_Element
= utd
->utd_Self
;
817 //utd->utd_Pred = NULL;
819 //utd->utd_QueueHead = uqh;
820 len
= ioreq
->iouh_Length
- actual
;
821 if(len
> ioreq
->iouh_MaxPktSize
)
823 len
= ioreq
->iouh_MaxPktSize
;
826 WRITEMEM32_LE(&utd
->utd_CtrlStatus
, ctrlstatus
);
827 WRITEMEM32_LE(&utd
->utd_Token
, token
|(((len
-1) & 0x7ff)<<UTTS_TRANSLENGTH
));
828 WRITEMEM32_LE(&utd
->utd_BufferPtr
, phyaddr
);
832 token
^= UTTF_DATA1
; // toggle bit
/* decide whether a trailing zero-length packet is needed */
833 if((actual
== ioreq
->iouh_Length
) && len
)
835 if((ioreq
->iouh_Flags
& UHFF_NOSHORTPKT
) || (ioreq
->iouh_Dir
== UHDIR_IN
) || (actual
% ioreq
->iouh_MaxPktSize
))
837 // no last zero byte packet
840 // avoid rare case that the zero byte packet is reached on TD_BULK_LIMIT
844 } while(forcezero
|| (len
&& (actual
<= ioreq
->iouh_Length
) && (actual
- ioreq
->iouh_Actual
< UHCI_TD_BULK_LIMIT
)));
848 // not at least one data TD? try again later
852 uqh
->uqh_Actual
= actual
- ioreq
->iouh_Actual
;
853 // set toggle for next batch / succesful transfer
854 unit
->hu_DevDataToggle
[devadrep
] = (token
& UTTF_DATA1
) ? TRUE
: FALSE
;
/* last TD raises the completion interrupt and terminates the chain */
856 ctrlstatus
|= UTCF_READYINTEN
;
857 WRITEMEM32_LE(&predutd
->utd_CtrlStatus
, ctrlstatus
);
858 CONSTWRITEMEM32_LE(&utd
->utd_Link
, UHCI_TERMINATE
);
859 utd
->utd_Succ
= NULL
;
860 //uqh->uqh_LastTD = utd;
862 Remove(&ioreq
->iouh_Req
.io_Message
.mn_Node
);
863 ioreq
->iouh_DriverPrivate1
= uqh
;
865 // manage endpoint going busy
866 unit
->hu_DevBusyReq
[devadrep
] = ioreq
;
867 unit
->hu_NakTimeoutFrame
[devadrep
] = (ioreq
->iouh_Flags
& UHFF_NAKTIMEOUT
) ? hc
->hc_FrameCounter
+ ioreq
->iouh_NakTimeout
: 0;
870 AddTail(&hc
->hc_TDQueue
, (struct Node
*) ioreq
);
872 // looks good to me, now enqueue this entry (just behind the BulkQH)
873 uqh
->uqh_Succ
= hc
->hc_UhciBulkQH
->uqh_Succ
;
874 uqh
->uqh_Link
= uqh
->uqh_Succ
->uxx_Self
;
877 uqh
->uqh_Pred
= (struct UhciXX
*) hc
->hc_UhciBulkQH
;
878 uqh
->uqh_Succ
->uxx_Pred
= (struct UhciXX
*) uqh
;
879 hc
->hc_UhciBulkQH
->uqh_Succ
= (struct UhciXX
*) uqh
;
880 hc
->hc_UhciBulkQH
->uqh_Link
= uqh
->uqh_Self
;
884 ioreq
= (struct IOUsbHWReq
*) hc
->hc_BulkXFerQueue
.lh_Head
;
/*
 * uhciUpdateFrameCounter — extend the hardware 11-bit frame number
 * (UHCI_FRAMECOUNT register) into the wider software counter
 * hc_FrameCounter, detecting rollover when the new low 11 bits are
 * smaller than the stored low 11 bits.
 * NOTE(review): lossy extract — the framecnt declaration and branch
 * braces are missing.
 */
888 void uhciUpdateFrameCounter(struct PCIController
*hc
) {
892 framecnt
= READIO16_LE(hc
->hc_RegBase
, UHCI_FRAMECOUNT
) & 0x07ff;
893 if(framecnt
< (hc
->hc_FrameCounter
& 0x07ff))
/* rollover: saturate low bits, bump into the next 2048-frame epoch,
   then merge the new hardware value */
895 hc
->hc_FrameCounter
|= 0x07ff;
896 hc
->hc_FrameCounter
++;
897 hc
->hc_FrameCounter
|= framecnt
;
898 KPRINTF(10, ("Frame Counter Rollover %ld\n", hc
->hc_FrameCounter
));
/* no rollover: just replace the low 11 bits */
900 hc
->hc_FrameCounter
= (hc
->hc_FrameCounter
& 0xfffff800)|framecnt
;
/*
 * uhciCompleteInt — AROS software interrupt that performs the bulk of
 * per-interrupt housekeeping: refresh the frame counter, process port
 * status changes and root-hub notifications, reap finished TDs, then
 * schedule any queued control/interrupt/bulk transfers.
 * NOTE(review): lossy extract — AROS_INTFUNC epilogue and several
 * closing braces are missing.
 */
905 static AROS_INTH1(uhciCompleteInt
, struct PCIController
*, hc
) {
909 KPRINTF(100, ("CompleteInt!\n"));
910 uhciUpdateFrameCounter(hc
);
912 /* **************** PROCESS DONE TRANSFERS **************** */
914 uhciCheckPortStatusChange(hc
);
915 uhwCheckRootHubChanges(hc
->hc_Unit
);
917 uhciHandleFinishedTDs(hc
);
/* only call the schedulers when their queues are non-empty */
919 if(hc
->hc_CtrlXFerQueue
.lh_Head
->ln_Succ
) {
920 uhciScheduleCtrlTDs(hc
);
923 if(hc
->hc_IntXFerQueue
.lh_Head
->ln_Succ
) {
924 uhciScheduleIntTDs(hc
);
927 if(hc
->hc_BulkXFerQueue
.lh_Head
->ln_Succ
) {
928 uhciScheduleBulkTDs(hc
);
931 KPRINTF(1, ("CompleteDone\n"));
/*
 * uhciIntCode — hardware PCI interrupt handler for the UHCI controller.
 * Reads USBSTATUS, acknowledges the asserted bits by writing them back,
 * resets the controller on fatal conditions (system error, process
 * error, halted), and on normal USB/error interrupts defers the real
 * work (presumably to hc_CompleteInt — the dispatch call is commented
 * out / not visible in this lossy extract).
 */
938 static AROS_INTH1(uhciIntCode
, struct PCIController
*, hc
)
942 // struct PCIDevice *base = hc->hc_Device;
945 intr
= READIO16_LE(hc
->hc_RegBase
, UHCI_USBSTATUS
);
946 if(intr
& (UHSF_USBINT
|UHSF_USBERRORINT
|UHSF_RESUMEDTX
|UHSF_HCSYSERROR
|UHSF_HCPROCERROR
|UHSF_HCHALTED
)) {
/* write-back acknowledges the status bits */
947 WRITEIO16_LE(hc
->hc_RegBase
, UHCI_USBSTATUS
, intr
);
949 if(intr
& (UHSF_HCSYSERROR
|UHSF_HCPROCERROR
|UHSF_HCHALTED
)) {
950 KPRINTF(200, ("Host ERROR!\n"));
951 WRITEIO16_LE(hc
->hc_RegBase
, UHCI_USBCMD
, UHCF_HCRESET
|UHCF_GLOBALRESET
|UHCF_MAXPACKET64
|UHCF_CONFIGURE
);
954 if (!(hc
->hc_Flags
& HCF_ONLINE
)) {
958 if(intr
& (UHSF_USBINT
|UHSF_USBERRORINT
)) {
959 //SureCause(base, &hc->hc_CompleteInt);
969 BOOL
uhciInit(struct PCIController
*hc
, struct PCIUnit
*hu
) {
971 struct PCIDevice
*hd
= hu
->hu_Device
;
974 struct UhciQH
*preduqh
;
982 struct TagItem pciActivateIO
[] = {
983 { aHidd_PCIDevice_isIO
, TRUE
},
987 struct TagItem pciActivateBusmaster
[] = {
988 { aHidd_PCIDevice_isMaster
, TRUE
},
992 struct TagItem pciDeactivateBusmaster
[] = {
993 { aHidd_PCIDevice_isMaster
, FALSE
},
997 hc
->hc_NumPorts
= 2; // UHCI always uses 2 ports per controller
998 KPRINTF(20, ("Found UHCI Controller %08lx FuncNum=%ld with %ld ports\n", hc
->hc_PCIDeviceObject
, hc
->hc_FunctionNum
, hc
->hc_NumPorts
));
1000 hc
->hc_CompleteInt
.is_Node
.ln_Type
= NT_INTERRUPT
;
1001 hc
->hc_CompleteInt
.is_Node
.ln_Name
= "UHCI CompleteInt";
1002 hc
->hc_CompleteInt
.is_Node
.ln_Pri
= 0;
1003 hc
->hc_CompleteInt
.is_Data
= hc
;
1004 hc
->hc_CompleteInt
.is_Code
= uhciCompleteInt
;
1006 hc
->hc_PCIMemSize
= sizeof(ULONG
) * UHCI_FRAMELIST_SIZE
+ UHCI_FRAMELIST_ALIGNMENT
+ 1;
1007 hc
->hc_PCIMemSize
+= sizeof(struct UhciQH
) * UHCI_QH_POOLSIZE
;
1008 hc
->hc_PCIMemSize
+= sizeof(struct UhciTD
) * UHCI_TD_POOLSIZE
;
1010 memptr
= HIDD_PCIDriver_AllocPCIMem(hc
->hc_PCIDriverObject
, hc
->hc_PCIMemSize
);
1011 hc
->hc_PCIMem
= (APTR
) memptr
;
1014 // PhysicalAddress - VirtualAdjust = VirtualAddress
1015 // VirtualAddress + VirtualAdjust = PhysicalAddress
1016 hc
->hc_PCIVirtualAdjust
= ((IPTR
) pciGetPhysical(hc
, memptr
)) - ((IPTR
) memptr
);
1017 KPRINTF(10, ("VirtualAdjust 0x%08lx\n", hc
->hc_PCIVirtualAdjust
));
1020 memptr
= (UBYTE
*) ((((IPTR
) hc
->hc_PCIMem
) + UHCI_FRAMELIST_ALIGNMENT
) & (~UHCI_FRAMELIST_ALIGNMENT
));
1021 hc
->hc_UhciFrameList
= (ULONG
*) memptr
;
1022 KPRINTF(10, ("FrameListBase 0x%08lx\n", hc
->hc_UhciFrameList
));
1023 memptr
+= sizeof(APTR
) * UHCI_FRAMELIST_SIZE
;
1026 uqh
= (struct UhciQH
*) memptr
;
1027 hc
->hc_UhciQHPool
= uqh
;
1028 cnt
= UHCI_QH_POOLSIZE
- 1;
1030 // minimal initalization
1031 uqh
->uqh_Succ
= (struct UhciXX
*) (uqh
+ 1);
1032 WRITEMEM32_LE(&uqh
->uqh_Self
, (IPTR
) (&uqh
->uqh_Link
) + hc
->hc_PCIVirtualAdjust
+ UHCI_QHSELECT
);
1035 uqh
->uqh_Succ
= NULL
;
1036 WRITEMEM32_LE(&uqh
->uqh_Self
, (IPTR
) (&uqh
->uqh_Link
) + hc
->hc_PCIVirtualAdjust
+ UHCI_QHSELECT
);
1037 memptr
+= sizeof(struct UhciQH
) * UHCI_QH_POOLSIZE
;
1040 utd
= (struct UhciTD
*) memptr
;
1041 hc
->hc_UhciTDPool
= utd
;
1042 cnt
= UHCI_TD_POOLSIZE
- 1;
1044 utd
->utd_Succ
= (struct UhciXX
*) (utd
+ 1);
1045 WRITEMEM32_LE(&utd
->utd_Self
, (IPTR
) (&utd
->utd_Link
) + hc
->hc_PCIVirtualAdjust
+ UHCI_TDSELECT
);
1048 utd
->utd_Succ
= NULL
;
1049 WRITEMEM32_LE(&utd
->utd_Self
, (IPTR
) (&utd
->utd_Link
) + hc
->hc_PCIVirtualAdjust
+ UHCI_TDSELECT
);
1050 memptr
+= sizeof(struct UhciTD
) * UHCI_TD_POOLSIZE
;
1053 hc
->hc_UhciTermQH
= preduqh
= uqh
= uhciAllocQH(hc
);
1054 uqh
->uqh_Succ
= NULL
;
1055 CONSTWRITEMEM32_LE(&uqh
->uqh_Link
, UHCI_TERMINATE
);
1056 CONSTWRITEMEM32_LE(&uqh
->uqh_Element
, UHCI_TERMINATE
);
1059 hc
->hc_UhciBulkQH
= uqh
= uhciAllocQH(hc
);
1060 uqh
->uqh_Succ
= (struct UhciXX
*) preduqh
;
1061 preduqh
->uqh_Pred
= (struct UhciXX
*) uqh
;
1062 uqh
->uqh_Link
= preduqh
->uqh_Self
; // link to terminating QH
1063 CONSTWRITEMEM32_LE(&uqh
->uqh_Element
, UHCI_TERMINATE
);
1067 hc
->hc_UhciCtrlQH
= uqh
= uhciAllocQH(hc
);
1068 uqh
->uqh_Succ
= (struct UhciXX
*) preduqh
;
1069 preduqh
->uqh_Pred
= (struct UhciXX
*) uqh
;
1070 uqh
->uqh_Link
= preduqh
->uqh_Self
; // link to Bulk QH
1071 CONSTWRITEMEM32_LE(&uqh
->uqh_Element
, UHCI_TERMINATE
);
1074 hc
->hc_UhciIsoTD
= utd
= uhciAllocTD(hc
);
1075 utd
->utd_Succ
= (struct UhciXX
*) uqh
;
1076 //utd->utd_Pred = NULL; // no certain linkage above this level
1077 uqh
->uqh_Pred
= (struct UhciXX
*) utd
;
1078 utd
->utd_Link
= uqh
->uqh_Self
; // link to Ctrl QH
1080 CONSTWRITEMEM32_LE(&utd
->utd_CtrlStatus
, 0);
1083 hc
->hc_UhciIntQH
[0] = uqh
= uhciAllocQH(hc
);
1084 uqh
->uqh_Succ
= (struct UhciXX
*) utd
;
1085 uqh
->uqh_Pred
= NULL
; // who knows...
1086 //uqh->uqh_Link = utd->utd_Self; // link to ISO
1087 CONSTWRITEMEM32_LE(&uqh
->uqh_Element
, UHCI_TERMINATE
);
1090 // make 9 levels of QH interrupts
1091 for(cnt
= 1; cnt
< 9; cnt
++) {
1092 hc
->hc_UhciIntQH
[cnt
] = uqh
= uhciAllocQH(hc
);
1093 uqh
->uqh_Succ
= (struct UhciXX
*) preduqh
;
1094 uqh
->uqh_Pred
= NULL
; // who knows...
1095 //uqh->uqh_Link = preduqh->uqh_Self; // link to previous int level
1096 CONSTWRITEMEM32_LE(&uqh
->uqh_Element
, UHCI_TERMINATE
);
1100 uhciUpdateIntTree(hc
);
1102 // fill in framelist with IntQH entry points based on interval
1103 tabptr
= hc
->hc_UhciFrameList
;
1104 for(cnt
= 0; cnt
< UHCI_FRAMELIST_SIZE
; cnt
++) {
1105 uqh
= hc
->hc_UhciIntQH
[8];
1108 if(cnt
& (1UL<<bitcnt
)) {
1109 uqh
= hc
->hc_UhciIntQH
[bitcnt
];
1112 } while(++bitcnt
< 9);
1113 *tabptr
++ = uqh
->uqh_Self
;
1116 // this will cause more PCI memory access, but faster USB transfers as well
1117 //WRITEMEM32_LE(&hc->hc_UhciTermQH->uqh_Link, AROS_LONG2LE(hc->hc_UhciBulkQH->uqh_Self));
1119 // time to initialize hardware...
1120 OOP_GetAttr(hc
->hc_PCIDeviceObject
, aHidd_PCIDevice_Base4
, (IPTR
*) &hc
->hc_RegBase
);
1121 hc
->hc_RegBase
= (APTR
) (((IPTR
) hc
->hc_RegBase
) & (~0xf));
1122 KPRINTF(10, ("RegBase = 0x%08lx\n", hc
->hc_RegBase
));
1124 OOP_SetAttrs(hc
->hc_PCIDeviceObject
, (struct TagItem
*) pciActivateIO
);
1126 // disable BIOS legacy support
1127 KPRINTF(10, ("Turning off BIOS legacy support (old value=%04lx)\n", HIDD_PCIDevice_ReadConfigWord(hc
->hc_PCIDeviceObject
, UHCI_USBLEGSUP
) ));
1128 HIDD_PCIDevice_WriteConfigWord(hc
->hc_PCIDeviceObject
, UHCI_USBLEGSUP
, 0x8f00);
1130 KPRINTF(10, ("Resetting UHCI HC\n"));
1131 WRITEIO16_LE(hc
->hc_RegBase
, UHCI_USBCMD
, UHCF_GLOBALRESET
);
1134 OOP_SetAttrs(hc
->hc_PCIDeviceObject
, (struct TagItem
*) pciDeactivateBusmaster
); // no busmaster yet
1136 WRITEIO16_LE(hc
->hc_RegBase
, UHCI_USBCMD
, UHCF_HCRESET
);
1140 if(!(READIO16_LE(hc
->hc_RegBase
, UHCI_USBCMD
) & UHCF_HCRESET
)) {
1146 KPRINTF(20, ("Reset Timeout!\n"));
1147 WRITEIO16_LE(hc
->hc_RegBase
, UHCI_USBCMD
, UHCF_HCRESET
);
1150 KPRINTF(20, ("Reset finished after %ld ticks\n", 100-cnt
));
1153 // stop controller and disable all interrupts first
1154 KPRINTF(10, ("Stopping controller and enabling busmaster\n"));
1155 WRITEIO16_LE(hc
->hc_RegBase
, UHCI_USBCMD
, 0);
1156 WRITEIO16_LE(hc
->hc_RegBase
, UHCI_USBINTEN
, 0);
1158 OOP_SetAttrs(hc
->hc_PCIDeviceObject
, (struct TagItem
*) pciActivateBusmaster
); // enable busmaster
1160 // Fix for VIA Babble problem
1161 cnt
= HIDD_PCIDevice_ReadConfigByte(hc
->hc_PCIDeviceObject
, 0x40);
1163 KPRINTF(20, ("Applying VIA Babble workaround\n"));
1164 HIDD_PCIDevice_WriteConfigByte(hc
->hc_PCIDeviceObject
, 0x40, cnt
|0x40);
1167 KPRINTF(10, ("Configuring UHCI HC\n"));
1168 WRITEIO16_LE(hc
->hc_RegBase
, UHCI_USBCMD
, UHCF_MAXPACKET64
|UHCF_CONFIGURE
);
1170 WRITEIO16_LE(hc
->hc_RegBase
, UHCI_FRAMECOUNT
, 0);
1172 WRITEIO32_LE(hc
->hc_RegBase
, UHCI_FRAMELISTADDR
, (IPTR
) pciGetPhysical(hc
, hc
->hc_UhciFrameList
));
1174 WRITEIO16_LE(hc
->hc_RegBase
, UHCI_USBSTATUS
, UHIF_TIMEOUTCRC
|UHIF_INTONCOMPLETE
|UHIF_SHORTPACKET
);
1176 // install reset handler
1177 hc
->hc_ResetInt
.is_Code
= UhciResetHandler
;
1178 hc
->hc_ResetInt
.is_Data
= hc
;
1179 AddResetCallback(&hc
->hc_ResetInt
);
1182 hc
->hc_PCIIntHandler
.is_Node
.ln_Name
= "UHCI PCI (pciuhci.device)";
1183 hc
->hc_PCIIntHandler
.is_Node
.ln_Pri
= 5;
1184 hc
->hc_PCIIntHandler
.is_Node
.ln_Type
= NT_INTERRUPT
;
1185 hc
->hc_PCIIntHandler
.is_Code
= uhciIntCode
;
1186 hc
->hc_PCIIntHandler
.is_Data
= hc
;
1187 AddIntServer(INTB_KERNEL
+ hc
->hc_PCIIntLine
, &hc
->hc_PCIIntHandler
);
1189 WRITEIO16_LE(hc
->hc_RegBase
, UHCI_USBINTEN
, UHIF_TIMEOUTCRC
|UHIF_INTONCOMPLETE
|UHIF_SHORTPACKET
);
1191 // clear all port bits (both ports)
1192 WRITEIO32_LE(hc
->hc_RegBase
, UHCI_PORT1STSCTRL
, 0);
1195 KPRINTF(10, ("Enabling PIRQ (old value=%04lx)\n", HIDD_PCIDevice_ReadConfigWord(hc
->hc_PCIDeviceObject
, UHCI_USBLEGSUP
) ));
1196 HIDD_PCIDevice_WriteConfigWord(hc
->hc_PCIDeviceObject
, UHCI_USBLEGSUP
, 0x2000);
1198 WRITEIO16_LE(hc
->hc_RegBase
, UHCI_USBCMD
, UHCF_MAXPACKET64
|UHCF_CONFIGURE
|UHCF_RUNSTOP
);
1201 KPRINTF(20, ("HW Init done\n"));
1203 KPRINTF(10, ("HW Regs USBCMD=%04lx\n", READIO16_LE(hc
->hc_RegBase
, UHCI_USBCMD
)));
1204 KPRINTF(10, ("HW Regs USBSTS=%04lx\n", READIO16_LE(hc
->hc_RegBase
, UHCI_USBSTATUS
)));
1205 KPRINTF(10, ("HW Regs FRAMECOUNT=%04lx\n", READIO16_LE(hc
->hc_RegBase
, UHCI_FRAMECOUNT
)));
1207 KPRINTF(20, ("uhciInit returns TRUE...\n"));
1212 FIXME: What would the appropriate debug level be?
1214 KPRINTF(1000, ("uhciInit returns FALSE...\n"));
1218 void uhciFree(struct PCIController
*hc
, struct PCIUnit
*hu
) {
1220 hc
= (struct PCIController
*) hu
->hu_Controllers
.lh_Head
;
1221 while(hc
->hc_Node
.ln_Succ
) {
1222 KPRINTF(20, ("Shutting down UHCI %08lx\n", hc
));
1223 WRITEIO16_LE(hc
->hc_RegBase
, UHCI_USBINTEN
, 0);
1225 HIDD_PCIDevice_WriteConfigWord(hc
->hc_PCIDeviceObject
, UHCI_USBLEGSUP
, 0);
1226 // disable all ports
1227 WRITEIO32_LE(hc
->hc_RegBase
, UHCI_PORT1STSCTRL
, 0);
1229 //WRITEIO16_LE(hc->hc_RegBase, UHCI_USBCMD, UHCF_MAXPACKET64|UHCF_CONFIGURE);
1230 //uhwDelayMS(50, hu);
1231 KPRINTF(20, ("Stopping UHCI %08lx\n", hc
));
1232 WRITEIO16_LE(hc
->hc_RegBase
, UHCI_USBCMD
, 0);
1235 //KPRINTF(20, ("Reset done UHCI %08lx\n", hc));
1238 KPRINTF(20, ("Resetting UHCI %08lx\n", hc
));
1239 WRITEIO16_LE(hc
->hc_RegBase
, UHCI_USBCMD
, UHCF_HCRESET
);
1243 WRITEIO16_LE(hc
->hc_RegBase
, UHCI_USBCMD
, 0);
1246 KPRINTF(20, ("Shutting down UHCI done.\n"));
1247 hc
= (struct PCIController
*) hc
->hc_Node
.ln_Succ
;