/*
 * Copyright (c) 2008 Rodolfo Giometti <giometti@linux.it>
 * Copyright (c) 2008 Eurotech S.p.A. <info@eurtech.it>
 *
 * This code is *strongly* based on EHCI-HCD code by David Brownell since
 * the chip is quasi EHCI-compatible.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>

#include <asm/system.h>
#include <asm/unaligned.h>

#include <linux/irq.h>
#include <linux/platform_device.h>
#define DRIVER_VERSION "0.0.50"

#define oxu_dbg(oxu, fmt, args...) \
		dev_dbg(oxu_to_hcd(oxu)->self.controller, fmt, ## args)
#define oxu_err(oxu, fmt, args...) \
		dev_err(oxu_to_hcd(oxu)->self.controller, fmt, ## args)
#define oxu_info(oxu, fmt, args...) \
		dev_info(oxu_to_hcd(oxu)->self.controller, fmt, ## args)
static inline struct usb_hcd *oxu_to_hcd(struct oxu_hcd *oxu)
{
	return container_of((void *) oxu, struct usb_hcd, hcd_priv);
}

static inline struct oxu_hcd *hcd_to_oxu(struct usb_hcd *hcd)
{
	return (struct oxu_hcd *) (hcd->hcd_priv);
}
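/* Note: the private oxu_hcd state lives in the usb_hcd's hcd_priv[] storage,
 * so the two converters above are exact inverses; for any hcd allocated by
 * this driver, hcd_to_oxu(oxu_to_hcd(oxu)) == oxu.
 */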
#undef OXU_VERBOSE_DEBUG

#ifdef OXU_VERBOSE_DEBUG
#define oxu_vdbg			oxu_dbg
#else
#define oxu_vdbg(oxu, fmt, args...)	/* Nop */
#endif
#ifdef DEBUG

static int __attribute__((__unused__))
dbg_status_buf(char *buf, unsigned len, const char *label, u32 status)
{
	return scnprintf(buf, len,
		"%s%sstatus %04x%s%s%s%s%s%s%s%s%s%s",
		label, label[0] ? " " : "", status,
		(status & STS_ASS) ? " Async" : "",
		(status & STS_PSS) ? " Periodic" : "",
		(status & STS_RECL) ? " Recl" : "",
		(status & STS_HALT) ? " Halt" : "",
		(status & STS_IAA) ? " IAA" : "",
		(status & STS_FATAL) ? " FATAL" : "",
		(status & STS_FLR) ? " FLR" : "",
		(status & STS_PCD) ? " PCD" : "",
		(status & STS_ERR) ? " ERR" : "",
		(status & STS_INT) ? " INT" : ""
		);
}
static int __attribute__((__unused__))
dbg_intr_buf(char *buf, unsigned len, const char *label, u32 enable)
{
	return scnprintf(buf, len,
		"%s%sintrenable %02x%s%s%s%s%s%s",
		label, label[0] ? " " : "", enable,
		(enable & STS_IAA) ? " IAA" : "",
		(enable & STS_FATAL) ? " FATAL" : "",
		(enable & STS_FLR) ? " FLR" : "",
		(enable & STS_PCD) ? " PCD" : "",
		(enable & STS_ERR) ? " ERR" : "",
		(enable & STS_INT) ? " INT" : ""
		);
}
static const char *const fls_strings[] =
	{ "1024", "512", "256", "??" };
static int dbg_command_buf(char *buf, unsigned len,
				const char *label, u32 command)
{
	return scnprintf(buf, len,
		"%s%scommand %06x %s=%d ithresh=%d%s%s%s%s period=%s%s %s",
		label, label[0] ? " " : "", command,
		(command & CMD_PARK) ? "park" : "(park)",
		CMD_PARK_CNT(command),
		(command >> 16) & 0x3f,
		(command & CMD_LRESET) ? " LReset" : "",
		(command & CMD_IAAD) ? " IAAD" : "",
		(command & CMD_ASE) ? " Async" : "",
		(command & CMD_PSE) ? " Periodic" : "",
		fls_strings[(command >> 2) & 0x3],
		(command & CMD_RESET) ? " Reset" : "",
		(command & CMD_RUN) ? "RUN" : "HALT"
		);
}
static int dbg_port_buf(char *buf, unsigned len, const char *label,
				int port, u32 status)
{
	char	*sig;

	/* signaling state */
	switch (status & (3 << 10)) {
	case 0 << 10:
		sig = "se0";
		break;
	case 1 << 10:
		sig = "k";	/* low speed */
		break;
	case 2 << 10:
		sig = "j";
		break;
	default:
		sig = "?";
		break;
	}

	return scnprintf(buf, len,
		"%s%sport %d status %06x%s%s sig=%s%s%s%s%s%s%s%s%s%s",
		label, label[0] ? " " : "", port, status,
		(status & PORT_POWER) ? " POWER" : "",
		(status & PORT_OWNER) ? " OWNER" : "",
		sig,
		(status & PORT_RESET) ? " RESET" : "",
		(status & PORT_SUSPEND) ? " SUSPEND" : "",
		(status & PORT_RESUME) ? " RESUME" : "",
		(status & PORT_OCC) ? " OCC" : "",
		(status & PORT_OC) ? " OC" : "",
		(status & PORT_PEC) ? " PEC" : "",
		(status & PORT_PE) ? " PE" : "",
		(status & PORT_CSC) ? " CSC" : "",
		(status & PORT_CONNECT) ? " CONNECT" : ""
		);
}
#else

static inline int __attribute__((__unused__))
dbg_status_buf(char *buf, unsigned len, const char *label, u32 status)
{ return 0; }

static inline int __attribute__((__unused__))
dbg_command_buf(char *buf, unsigned len, const char *label, u32 command)
{ return 0; }

static inline int __attribute__((__unused__))
dbg_intr_buf(char *buf, unsigned len, const char *label, u32 enable)
{ return 0; }

static inline int __attribute__((__unused__))
dbg_port_buf(char *buf, unsigned len, const char *label,
				int port, u32 status)
{ return 0; }

#endif /* DEBUG */
/* functions have the "wrong" filename when they're output... */
#define dbg_status(oxu, label, status) { \
	char _buf[80]; \
	dbg_status_buf(_buf, sizeof _buf, label, status); \
	oxu_dbg(oxu, "%s\n", _buf); \
}

#define dbg_cmd(oxu, label, command) { \
	char _buf[80]; \
	dbg_command_buf(_buf, sizeof _buf, label, command); \
	oxu_dbg(oxu, "%s\n", _buf); \
}

#define dbg_port(oxu, label, port, status) { \
	char _buf[80]; \
	dbg_port_buf(_buf, sizeof _buf, label, port, status); \
	oxu_dbg(oxu, "%s\n", _buf); \
}
/* Initial IRQ latency: faster than hw default */
static int log2_irq_thresh;		/* 0 to 6 */
module_param(log2_irq_thresh, int, S_IRUGO);
MODULE_PARM_DESC(log2_irq_thresh, "log2 IRQ latency, 1-64 microframes");

/* Initial park setting: slower than hw default */
static unsigned park;
module_param(park, uint, S_IRUGO);
MODULE_PARM_DESC(park, "park setting; 1-3 back-to-back async packets");

/* For flakey hardware, ignore overcurrent indicators */
static int ignore_oc;
module_param(ignore_oc, bool, S_IRUGO);
MODULE_PARM_DESC(ignore_oc, "ignore bogus hardware overcurrent indications");
static void ehci_work(struct oxu_hcd *oxu);
static int oxu_hub_control(struct usb_hcd *hcd,
				u16 typeReq, u16 wValue, u16 wIndex,
				char *buf, u16 wLength);
/* Low level read/write registers functions */
static inline u32 oxu_readl(void *base, u32 reg)
{
	return readl(base + reg);
}

static inline void oxu_writel(void *base, u32 reg, u32 val)
{
	writel(val, base + reg);
}
static inline void timer_action_done(struct oxu_hcd *oxu,
					enum ehci_timer_action action)
{
	clear_bit(action, &oxu->actions);
}
static inline void timer_action(struct oxu_hcd *oxu,
				enum ehci_timer_action action)
{
	if (!test_and_set_bit(action, &oxu->actions)) {
		unsigned long t;

		switch (action) {
		case TIMER_IAA_WATCHDOG:
			t = EHCI_IAA_JIFFIES;
			break;
		case TIMER_IO_WATCHDOG:
			t = EHCI_IO_JIFFIES;
			break;
		case TIMER_ASYNC_OFF:
			t = EHCI_ASYNC_JIFFIES;
			break;
		case TIMER_ASYNC_SHRINK:
		default:
			t = EHCI_SHRINK_JIFFIES;
			break;
		}
		t += jiffies;
		/* all timings except IAA watchdog can be overridden.
		 * async queue SHRINK often precedes IAA.  while it's ready
		 * to go OFF neither can matter, and afterwards the IO
		 * watchdog stops unless there's still periodic traffic.
		 */
		if (action != TIMER_IAA_WATCHDOG
				&& t > oxu->watchdog.expires
				&& timer_pending(&oxu->watchdog))
			return;
		mod_timer(&oxu->watchdog, t);
	}
}
/*
 * handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done).  There are two failure modes: "usec" have passed (major
 * hardware flakeout), or the register reads as all-ones (hardware removed).
 *
 * That last failure should only happen in cases like physical cardbus eject
 * before driver shutdown.  But it also seems to be caused by bugs in cardbus
 * bridge shutdown: shutting down the bridge before the devices using it.
 */
static int handshake(struct oxu_hcd *oxu, void __iomem *ptr,
					u32 mask, u32 done, int usec)
{
	u32 result;

	do {
		result = readl(ptr);
		if (result == ~(u32)0)		/* card removed */
			return -ENODEV;
		result &= mask;
		if (result == done)
			return 0;
		udelay(1);
		usec--;
	} while (usec > 0);
	return -ETIMEDOUT;
}
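/* Typical use, as in ehci_halt() below: wait up to 2ms (16 * 125us) for the
 * controller to report the halted state:
 *
 *	handshake(oxu, &oxu->regs->status, STS_HALT, STS_HALT, 16 * 125);
 *
 * A -ENODEV return means the register file itself has gone away.
 */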
/* Force HC to halt state from unknown (EHCI spec section 2.3) */
static int ehci_halt(struct oxu_hcd *oxu)
{
	u32 temp = readl(&oxu->regs->status);

	/* disable any irqs left enabled by previous code */
	writel(0, &oxu->regs->intr_enable);

	if ((temp & STS_HALT) != 0)
		return 0;

	temp = readl(&oxu->regs->command);
	temp &= ~CMD_RUN;
	writel(temp, &oxu->regs->command);
	return handshake(oxu, &oxu->regs->status,
			  STS_HALT, STS_HALT, 16 * 125);
}
/* Put TDI/ARC silicon into EHCI mode */
static void tdi_reset(struct oxu_hcd *oxu)
{
	u32 __iomem *reg_ptr;
	u32 tmp;

	reg_ptr = (u32 __iomem *)(((u8 __iomem *)oxu->regs) + 0x68);
	tmp = readl(reg_ptr);
	tmp |= 0x3;	/* USBMODE: host mode */
	writel(tmp, reg_ptr);
}
/* Reset a non-running (STS_HALT == 1) controller */
static int ehci_reset(struct oxu_hcd *oxu)
{
	int retval;
	u32 command = readl(&oxu->regs->command);

	command |= CMD_RESET;
	dbg_cmd(oxu, "reset", command);
	writel(command, &oxu->regs->command);
	oxu_to_hcd(oxu)->state = HC_STATE_HALT;
	oxu->next_statechange = jiffies;
	retval = handshake(oxu, &oxu->regs->command,
			    CMD_RESET, 0, 250 * 1000);

	if (retval)
		return retval;

	tdi_reset(oxu);

	return retval;
}
/* Idle the controller (from running) */
static void ehci_quiesce(struct oxu_hcd *oxu)
{
	u32 temp;

#ifdef DEBUG
	if (!HC_IS_RUNNING(oxu_to_hcd(oxu)->state))
		BUG();
#endif

	/* wait for any schedule enables/disables to take effect */
	temp = readl(&oxu->regs->command) << 10;
	temp &= STS_ASS | STS_PSS;
	if (handshake(oxu, &oxu->regs->status, STS_ASS | STS_PSS,
				temp, 16 * 125) != 0) {
		oxu_to_hcd(oxu)->state = HC_STATE_HALT;
		return;
	}

	/* then disable anything that's still active */
	temp = readl(&oxu->regs->command);
	temp &= ~(CMD_ASE | CMD_IAAD | CMD_PSE);
	writel(temp, &oxu->regs->command);

	/* hardware can take 16 microframes to turn off ... */
	if (handshake(oxu, &oxu->regs->status, STS_ASS | STS_PSS,
				0, 16 * 125) != 0) {
		oxu_to_hcd(oxu)->state = HC_STATE_HALT;
		return;
	}
}
static int check_reset_complete(struct oxu_hcd *oxu, int index,
				u32 __iomem *status_reg, int port_status)
{
	if (!(port_status & PORT_CONNECT)) {
		oxu->reset_done[index] = 0;
		return port_status;
	}

	/* if reset finished and it's still not enabled -- handoff */
	if (!(port_status & PORT_PE)) {
		oxu_dbg(oxu, "Failed to enable port %d on root hub TT\n",
				index + 1);
		return port_status;
	} else
		oxu_dbg(oxu, "port %d high speed\n", index + 1);

	return port_status;
}
static void ehci_hub_descriptor(struct oxu_hcd *oxu,
				struct usb_hub_descriptor *desc)
{
	int ports = HCS_N_PORTS(oxu->hcs_params);
	u16 temp;

	desc->bDescriptorType = 0x29;
	desc->bPwrOn2PwrGood = 10;	/* oxu 1.0, 2.3.9 says 20ms max */
	desc->bHubContrCurrent = 0;

	desc->bNbrPorts = ports;
	temp = 1 + (ports / 8);
	desc->bDescLength = 7 + 2 * temp;

	/* two bitmaps:  ports removable, and usb 1.0 legacy PortPwrCtrlMask */
	memset(&desc->bitmap[0], 0, temp);
	memset(&desc->bitmap[temp], 0xff, temp);

	temp = 0x0008;			/* per-port overcurrent reporting */
	if (HCS_PPC(oxu->hcs_params))
		temp |= 0x0001;		/* per-port power control */
	else
		temp |= 0x0002;		/* no power switching */
	desc->wHubCharacteristics = (__force __u16)cpu_to_le16(temp);
}
static int oxu_buf_alloc(struct oxu_hcd *oxu, struct ehci_qtd *qtd, int len)
{
	int n_blocks;	/* minimum blocks needed to hold len */
	int a_blocks;	/* blocks allocated */
	int i, j;

	/* Don't allocate bigger than supported */
	if (len > BUFFER_SIZE * BUFFER_NUM) {
		oxu_err(oxu, "buffer too big (%d)\n", len);
		return -ENOMEM;
	}

	spin_lock(&oxu->mem_lock);

	/* Number of blocks needed to hold len */
	n_blocks = (len + BUFFER_SIZE - 1) / BUFFER_SIZE;

	/* Round the number of blocks up to the power of 2 */
	for (a_blocks = 1; a_blocks < n_blocks; a_blocks <<= 1)
		;

	/* Find a suitable available data buffer */
	for (i = 0; i < BUFFER_NUM;
			i += max(a_blocks, (int)oxu->db_used[i])) {

		/* Check all the required blocks are available */
		for (j = 0; j < a_blocks; j++)
			if (oxu->db_used[i + j])
				break;

		if (j != a_blocks)
			continue;

		/* Allocate blocks found! */
		qtd->buffer = (void *) &oxu->mem->db_pool[i];
		qtd->buffer_dma = virt_to_phys(qtd->buffer);

		qtd->qtd_buffer_len = BUFFER_SIZE * a_blocks;
		oxu->db_used[i] = a_blocks;

		spin_unlock(&oxu->mem_lock);

		return 0;
	}

	/* Failed */

	spin_unlock(&oxu->mem_lock);

	return -ENOMEM;
}
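/* Worked example of the allocator above: with a hypothetical BUFFER_SIZE of
 * 512, a request for len = 1500 needs n_blocks = 3, rounded up to
 * a_blocks = 4; the outer scan steps by max(a_blocks, db_used[i]), so it
 * hops over allocated regions in one step instead of block by block.
 */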
static void oxu_buf_free(struct oxu_hcd *oxu, struct ehci_qtd *qtd)
{
	int index;

	spin_lock(&oxu->mem_lock);

	index = (qtd->buffer - (void *) &oxu->mem->db_pool[0])
							/ BUFFER_SIZE;
	oxu->db_used[index] = 0;
	qtd->qtd_buffer_len = 0;
	qtd->buffer_dma = 0;
	qtd->buffer = NULL;

	spin_unlock(&oxu->mem_lock);

	return;
}
static inline void ehci_qtd_init(struct ehci_qtd *qtd, dma_addr_t dma)
{
	memset(qtd, 0, sizeof *qtd);
	qtd->qtd_dma = dma;
	qtd->hw_token = cpu_to_le32(QTD_STS_HALT);
	qtd->hw_next = EHCI_LIST_END;
	qtd->hw_alt_next = EHCI_LIST_END;
	INIT_LIST_HEAD(&qtd->qtd_list);
}
static inline void oxu_qtd_free(struct oxu_hcd *oxu, struct ehci_qtd *qtd)
{
	int index;

	if (qtd->buffer)
		oxu_buf_free(oxu, qtd);

	spin_lock(&oxu->mem_lock);

	index = qtd - &oxu->mem->qtd_pool[0];
	oxu->qtd_used[index] = 0;

	spin_unlock(&oxu->mem_lock);

	return;
}
static struct ehci_qtd *ehci_qtd_alloc(struct oxu_hcd *oxu)
{
	int i;
	struct ehci_qtd *qtd = NULL;

	spin_lock(&oxu->mem_lock);

	for (i = 0; i < QTD_NUM; i++)
		if (!oxu->qtd_used[i])
			break;

	if (i < QTD_NUM) {
		qtd = (struct ehci_qtd *) &oxu->mem->qtd_pool[i];
		memset(qtd, 0, sizeof *qtd);

		qtd->hw_token = cpu_to_le32(QTD_STS_HALT);
		qtd->hw_next = EHCI_LIST_END;
		qtd->hw_alt_next = EHCI_LIST_END;
		INIT_LIST_HEAD(&qtd->qtd_list);

		qtd->qtd_dma = virt_to_phys(qtd);

		oxu->qtd_used[i] = 1;
	}

	spin_unlock(&oxu->mem_lock);

	return qtd;
}
static void oxu_qh_free(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
	int index;

	spin_lock(&oxu->mem_lock);

	index = qh - &oxu->mem->qh_pool[0];
	oxu->qh_used[index] = 0;

	spin_unlock(&oxu->mem_lock);

	return;
}
static void qh_destroy(struct kref *kref)
{
	struct ehci_qh *qh = container_of(kref, struct ehci_qh, kref);
	struct oxu_hcd *oxu = qh->oxu;

	/* clean qtds first, and know this is not linked */
	if (!list_empty(&qh->qtd_list) || qh->qh_next.ptr) {
		oxu_dbg(oxu, "unused qh not empty!\n");
		BUG();
	}
	if (qh->dummy)
		oxu_qtd_free(oxu, qh->dummy);
	oxu_qh_free(oxu, qh);
}
static struct ehci_qh *oxu_qh_alloc(struct oxu_hcd *oxu)
{
	int i;
	struct ehci_qh *qh = NULL;

	spin_lock(&oxu->mem_lock);

	for (i = 0; i < QHEAD_NUM; i++)
		if (!oxu->qh_used[i])
			break;

	if (i < QHEAD_NUM) {
		qh = (struct ehci_qh *) &oxu->mem->qh_pool[i];
		memset(qh, 0, sizeof *qh);

		kref_init(&qh->kref);
		qh->oxu = oxu;
		qh->qh_dma = virt_to_phys(qh);
		INIT_LIST_HEAD(&qh->qtd_list);

		/* dummy td enables safe urb queuing */
		qh->dummy = ehci_qtd_alloc(oxu);
		if (qh->dummy == NULL) {
			oxu_dbg(oxu, "no dummy td\n");
			oxu->qh_used[i] = 0;
			qh = NULL;
			goto unlock;
		}

		oxu->qh_used[i] = 1;
	}
unlock:
	spin_unlock(&oxu->mem_lock);

	return qh;
}
/* to share a qh (cpu threads, or hc) */
static inline struct ehci_qh *qh_get(struct ehci_qh *qh)
{
	kref_get(&qh->kref);
	return qh;
}

static inline void qh_put(struct ehci_qh *qh)
{
	kref_put(&qh->kref, qh_destroy);
}
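/* Lifecycle note: oxu_qh_alloc() hands back a qh with refcount 1 (kref_init);
 * each additional user -- a schedule link or an urb's hcpriv -- pairs
 * qh_get() with qh_put(), and the final qh_put() frees the qh and its dummy
 * qtd via qh_destroy().
 */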
static void oxu_murb_free(struct oxu_hcd *oxu, struct oxu_murb *murb)
{
	int index;

	spin_lock(&oxu->mem_lock);

	index = murb - &oxu->murb_pool[0];
	oxu->murb_used[index] = 0;

	spin_unlock(&oxu->mem_lock);

	return;
}
static struct oxu_murb *oxu_murb_alloc(struct oxu_hcd *oxu)
{
	int i;
	struct oxu_murb *murb = NULL;

	spin_lock(&oxu->mem_lock);

	for (i = 0; i < MURB_NUM; i++)
		if (!oxu->murb_used[i])
			break;

	if (i < MURB_NUM) {
		murb = &(oxu->murb_pool)[i];

		oxu->murb_used[i] = 1;
	}

	spin_unlock(&oxu->mem_lock);

	return murb;
}
/* The queue heads and transfer descriptors are managed from pools tied
 * to each of the "per device" structures.
 * This is the initialisation and cleanup code.
 */
static void ehci_mem_cleanup(struct oxu_hcd *oxu)
{
	kfree(oxu->murb_pool);
	oxu->murb_pool = NULL;

	if (oxu->async)
		qh_put(oxu->async);
	oxu->async = NULL;

	del_timer(&oxu->urb_timer);

	oxu->periodic = NULL;

	/* shadow periodic table */
	kfree(oxu->pshadow);
	oxu->pshadow = NULL;
}
/* Remember to add cleanup code (above) if you add anything here.
 */
static int ehci_mem_init(struct oxu_hcd *oxu, gfp_t flags)
{
	int i;

	for (i = 0; i < oxu->periodic_size; i++)
		oxu->mem->frame_list[i] = EHCI_LIST_END;
	for (i = 0; i < QHEAD_NUM; i++)
		oxu->qh_used[i] = 0;
	for (i = 0; i < QTD_NUM; i++)
		oxu->qtd_used[i] = 0;

	oxu->murb_pool = kcalloc(MURB_NUM, sizeof(struct oxu_murb), flags);
	if (!oxu->murb_pool)
		goto fail;

	for (i = 0; i < MURB_NUM; i++)
		oxu->murb_used[i] = 0;

	oxu->async = oxu_qh_alloc(oxu);
	if (!oxu->async)
		goto fail;

	oxu->periodic = (__le32 *) &oxu->mem->frame_list;
	oxu->periodic_dma = virt_to_phys(oxu->periodic);

	for (i = 0; i < oxu->periodic_size; i++)
		oxu->periodic[i] = EHCI_LIST_END;

	/* software shadow of hardware table */
	oxu->pshadow = kcalloc(oxu->periodic_size, sizeof(void *), flags);
	if (oxu->pshadow != NULL)
		return 0;

fail:
	oxu_dbg(oxu, "couldn't init memory\n");
	ehci_mem_cleanup(oxu);
	return -ENOMEM;
}
/* Fill a qtd, returning how much of the buffer we were able to queue up.
 */
static int qtd_fill(struct ehci_qtd *qtd, dma_addr_t buf, size_t len,
				int token, int maxpacket)
{
	int i, count;
	u64 addr = buf;

	/* one buffer entry per 4K ... first might be short or unaligned */
	qtd->hw_buf[0] = cpu_to_le32((u32)addr);
	qtd->hw_buf_hi[0] = cpu_to_le32((u32)(addr >> 32));
	count = 0x1000 - (buf & 0x0fff);	/* rest of that page */
	if (likely(len < count))		/* ... iff needed */
		count = len;
	else {
		buf +=  0x1000;
		buf &= ~0x0fff;

		/* per-qtd limit: from 16K to 20K (best alignment) */
		for (i = 1; count < len && i < 5; i++) {
			addr = buf;
			qtd->hw_buf[i] = cpu_to_le32((u32)addr);
			qtd->hw_buf_hi[i] = cpu_to_le32((u32)(addr >> 32));
			buf += 0x1000;
			if ((count + 0x1000) < len)
				count += 0x1000;
			else
				count = len;
		}

		/* short packets may only terminate transfers */
		if (count != len)
			count -= (count % maxpacket);
	}
	qtd->hw_token = cpu_to_le32((count << 16) | token);
	qtd->length = count;

	return count;
}
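/* Example of the buffer math above: a transfer of len = 20000 starting at
 * page offset 0x800 gets 0x800 bytes from the first page plus four full 4K
 * pages, so count = 0x800 + 4 * 0x1000 = 18432.  Since count != len it is
 * trimmed to a maxpacket multiple (18432 already is, for 512-byte packets),
 * and the remaining bytes go into the next qtd.
 */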
static inline void qh_update(struct oxu_hcd *oxu,
				struct ehci_qh *qh, struct ehci_qtd *qtd)
{
	/* writes to an active overlay are unsafe */
	BUG_ON(qh->qh_state != QH_STATE_IDLE);

	qh->hw_qtd_next = QTD_NEXT(qtd->qtd_dma);
	qh->hw_alt_next = EHCI_LIST_END;

	/* Except for control endpoints, we make hardware maintain data
	 * toggle (like OHCI) ... here (re)initialize the toggle in the QH,
	 * and set the pseudo-toggle in udev.  Only usb_clear_halt() will
	 * ever clear it.
	 */
	if (!(qh->hw_info1 & cpu_to_le32(1 << 14))) {
		unsigned is_out, epnum;

		is_out = !(qtd->hw_token & cpu_to_le32(1 << 8));
		epnum = (le32_to_cpup(&qh->hw_info1) >> 8) & 0x0f;
		if (unlikely(!usb_gettoggle(qh->dev, epnum, is_out))) {
			qh->hw_token &= ~cpu_to_le32(QTD_TOGGLE);
			usb_settoggle(qh->dev, epnum, is_out, 1);
		}
	}

	/* HC must see latest qtd and qh data before we clear ACTIVE+HALT */
	wmb();
	qh->hw_token &= cpu_to_le32(QTD_TOGGLE | QTD_STS_PING);
}
/* If it weren't for a common silicon quirk (writing the dummy into the qh
 * overlay, so qh->hw_token wrongly becomes inactive/halted), only fault
 * recovery (including urb dequeue) would need software changes to a QH...
 */
static void qh_refresh(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
	struct ehci_qtd *qtd;

	if (list_empty(&qh->qtd_list))
		qtd = qh->dummy;
	else {
		qtd = list_entry(qh->qtd_list.next,
				struct ehci_qtd, qtd_list);
		/* first qtd may already be partially processed */
		if (cpu_to_le32(qtd->qtd_dma) == qh->hw_current)
			qtd = NULL;
	}

	if (qtd)
		qh_update(oxu, qh, qtd);
}
static void qtd_copy_status(struct oxu_hcd *oxu, struct urb *urb,
				size_t length, u32 token)
{
	/* count IN/OUT bytes, not SETUP (even short packets) */
	if (likely(QTD_PID(token) != 2))
		urb->actual_length += length - QTD_LENGTH(token);

	/* don't modify error codes */
	if (unlikely(urb->status != -EINPROGRESS))
		return;

	/* force cleanup after short read; not always an error */
	if (unlikely(IS_SHORT_READ(token)))
		urb->status = -EREMOTEIO;

	/* serious "can't proceed" faults reported by the hardware */
	if (token & QTD_STS_HALT) {
		if (token & QTD_STS_BABBLE) {
			urb->status = -EOVERFLOW;
		} else if (token & QTD_STS_MMF) {
			/* fs/ls interrupt xfer missed the complete-split */
			urb->status = -EPROTO;
		} else if (token & QTD_STS_DBE) {
			urb->status = (QTD_PID(token) == 1) /* IN ? */
				? -ENOSR  /* hc couldn't read data */
				: -ECOMM; /* hc couldn't write data */
		} else if (token & QTD_STS_XACT) {
			/* timeout, bad crc, wrong PID, etc; retried */
			if (QTD_CERR(token))
				urb->status = -EPIPE;
			else {
				oxu_dbg(oxu, "devpath %s ep%d%s 3strikes\n",
					urb->dev->devpath,
					usb_pipeendpoint(urb->pipe),
					usb_pipein(urb->pipe) ? "in" : "out");
				urb->status = -EPROTO;
			}
		/* CERR nonzero + no errors + halt --> stall */
		} else if (QTD_CERR(token))
			urb->status = -EPIPE;
		else	/* unknown */
			urb->status = -EPROTO;

		oxu_vdbg(oxu, "dev%d ep%d%s qtd token %08x --> status %d\n",
			usb_pipedevice(urb->pipe),
			usb_pipeendpoint(urb->pipe),
			usb_pipein(urb->pipe) ? "in" : "out",
			token, urb->status);
	}
}
static void ehci_urb_done(struct oxu_hcd *oxu, struct urb *urb)
__releases(oxu->lock)
__acquires(oxu->lock)
{
	if (likely(urb->hcpriv != NULL)) {
		struct ehci_qh *qh = (struct ehci_qh *) urb->hcpriv;

		/* S-mask in a QH means it's an interrupt urb */
		if ((qh->hw_info2 & cpu_to_le32(QH_SMASK)) != 0) {

			/* ... update hc-wide periodic stats (for usbfs) */
			oxu_to_hcd(oxu)->self.bandwidth_int_reqs--;
		}
		qh_put(qh);
	}

	urb->hcpriv = NULL;
	switch (urb->status) {
	case -EINPROGRESS:		/* success */
		urb->status = 0;
	default:			/* fault */
		break;
	case -EREMOTEIO:		/* fault or normal */
		if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
			urb->status = 0;
		break;
	case -ECONNRESET:		/* canceled */
	case -ENOENT:
		break;
	}

#ifdef OXU_URB_TRACE
	oxu_dbg(oxu, "%s %s urb %p ep%d%s status %d len %d/%d\n",
		__func__, urb->dev->devpath, urb,
		usb_pipeendpoint(urb->pipe),
		usb_pipein(urb->pipe) ? "in" : "out",
		urb->status,
		urb->actual_length, urb->transfer_buffer_length);
#endif

	/* complete() can reenter this HCD */
	spin_unlock(&oxu->lock);
	usb_hcd_giveback_urb(oxu_to_hcd(oxu), urb, urb->status);
	spin_lock(&oxu->lock);
}
static void start_unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh);
static void unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh);

static void intr_deschedule(struct oxu_hcd *oxu, struct ehci_qh *qh);
static int qh_schedule(struct oxu_hcd *oxu, struct ehci_qh *qh);

#define HALT_BIT cpu_to_le32(QTD_STS_HALT)
/* Process and free completed qtds for a qh, returning URBs to drivers.
 * Chases up to qh->hw_current.  Returns number of completions called,
 * indicating how much "real" work we did.
 */
static unsigned qh_completions(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
	struct ehci_qtd *last = NULL, *end = qh->dummy;
	struct list_head *entry, *tmp;
	int stopped;
	unsigned count = 0;
	int do_status = 0;
	u8 state;
	struct oxu_murb *murb = NULL;

	if (unlikely(list_empty(&qh->qtd_list)))
		return count;

	/* completions (or tasks on other cpus) must never clobber HALT
	 * till we've gone through and cleaned everything up, even when
	 * they add urbs to this qh's queue or mark them for unlinking.
	 *
	 * NOTE:  unlinking expects to be done in queue order.
	 */
	state = qh->qh_state;
	qh->qh_state = QH_STATE_COMPLETING;
	stopped = (state == QH_STATE_IDLE);

	/* remove de-activated QTDs from front of queue.
	 * after faults (including short reads), cleanup this urb
	 * then let the queue advance.
	 * if queue is stopped, handles unlinks.
	 */
	list_for_each_safe(entry, tmp, &qh->qtd_list) {
		struct ehci_qtd *qtd;
		struct urb *urb;
		u32 token = 0;

		qtd = list_entry(entry, struct ehci_qtd, qtd_list);
		urb = qtd->urb;

		/* Clean up any state from previous QTD ...*/
		if (last) {
			if (likely(last->urb != urb)) {
				if (last->urb->complete == NULL) {
					murb = (struct oxu_murb *) last->urb;
					last->urb = murb->main;
					if (murb->last) {
						ehci_urb_done(oxu, last->urb);
						count++;
					}
					oxu_murb_free(oxu, murb);
				} else {
					ehci_urb_done(oxu, last->urb);
					count++;
				}
			}
			oxu_qtd_free(oxu, last);
			last = NULL;
		}

		/* ignore urbs submitted during completions we reported */
		if (qtd == end)
			break;

		/* hardware copies qtd out of qh overlay */
		rmb();
		token = le32_to_cpu(qtd->hw_token);

		/* always clean up qtds the hc de-activated */
		if ((token & QTD_STS_ACTIVE) == 0) {

			if ((token & QTD_STS_HALT) != 0) {
				stopped = 1;

			/* magic dummy for some short reads; qh won't advance.
			 * that silicon quirk can kick in with this dummy too.
			 */
			} else if (IS_SHORT_READ(token) &&
					!(qtd->hw_alt_next & EHCI_LIST_END)) {
				stopped = 1;
				goto halt;
			}

		/* stop scanning when we reach qtds the hc is using */
		} else if (likely(!stopped &&
				HC_IS_RUNNING(oxu_to_hcd(oxu)->state))) {
			break;

		} else {
			stopped = 1;

			if (unlikely(!HC_IS_RUNNING(oxu_to_hcd(oxu)->state)))
				urb->status = -ESHUTDOWN;

			/* ignore active urbs unless some previous qtd
			 * for the urb faulted (including short read) or
			 * its urb was canceled.  we may patch qh or qtds.
			 */
			if (likely(urb->status == -EINPROGRESS))
				continue;

			/* issue status after short control reads */
			if (unlikely(do_status != 0)
					&& QTD_PID(token) == 0 /* OUT */) {
				do_status = 0;
				continue;
			}

			/* token in overlay may be most current */
			if (state == QH_STATE_IDLE
					&& cpu_to_le32(qtd->qtd_dma)
						== qh->hw_current)
				token = le32_to_cpu(qh->hw_token);

			/* force halt for unlinked or blocked qh, so we'll
			 * patch the qh later and so that completions can't
			 * activate it while we "know" it's stopped.
			 */
			if ((HALT_BIT & qh->hw_token) == 0) {
halt:
				qh->hw_token |= HALT_BIT;
				wmb();
			}
		}

		/* Remove it from the queue */
		qtd_copy_status(oxu, urb->complete ?
					urb : ((struct oxu_murb *) urb)->main,
				qtd->length, token);
		if ((usb_pipein(qtd->urb->pipe)) &&
				(NULL != qtd->transfer_buffer))
			memcpy(qtd->transfer_buffer, qtd->buffer, qtd->length);
		do_status = (urb->status == -EREMOTEIO)
				&& usb_pipecontrol(urb->pipe);

		if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
			last = list_entry(qtd->qtd_list.prev,
					struct ehci_qtd, qtd_list);
			last->hw_next = qtd->hw_next;
		}
		list_del(&qtd->qtd_list);
		last = qtd;
	}

	/* last urb's completion might still need calling */
	if (likely(last != NULL)) {
		if (last->urb->complete == NULL) {
			murb = (struct oxu_murb *) last->urb;
			last->urb = murb->main;
			if (murb->last) {
				ehci_urb_done(oxu, last->urb);
				count++;
			}
			oxu_murb_free(oxu, murb);
		} else {
			ehci_urb_done(oxu, last->urb);
			count++;
		}
		oxu_qtd_free(oxu, last);
	}

	/* restore original state; caller must unlink or relink */
	qh->qh_state = state;

	/* be sure the hardware's done with the qh before refreshing
	 * it after fault cleanup, or recovering from silicon wrongly
	 * overlaying the dummy qtd (which reduces DMA chatter).
	 */
	if (stopped != 0 || qh->hw_qtd_next == EHCI_LIST_END) {
		switch (state) {
		case QH_STATE_IDLE:
			qh_refresh(oxu, qh);
			break;
		case QH_STATE_LINKED:
			/* should be rare for periodic transfers,
			 * except maybe high bandwidth ...
			 */
			if ((cpu_to_le32(QH_SMASK)
					& qh->hw_info2) != 0) {
				intr_deschedule(oxu, qh);
				(void) qh_schedule(oxu, qh);
			} else
				unlink_async(oxu, qh);
			break;
		/* otherwise, unlink already started */
		}
	}

	return count;
}
/* High bandwidth multiplier, as encoded in highspeed endpoint descriptors */
#define hb_mult(wMaxPacketSize)		(1 + (((wMaxPacketSize) >> 11) & 0x03))

/* ... and packet size, for any kind of endpoint descriptor */
#define max_packet(wMaxPacketSize)	((wMaxPacketSize) & 0x07ff)
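/* For example, a highspeed wMaxPacketSize of 0x0c00 decodes as
 * max_packet(0x0c00) = 0x400 = 1024 bytes per packet, with
 * hb_mult(0x0c00) = 2 transactions per microframe (high bandwidth).
 */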
/* Reverse of qh_urb_transaction: free a list of TDs.
 * used for cleanup after errors, before HC sees an URB's TDs.
 */
static void qtd_list_free(struct oxu_hcd *oxu,
			struct urb *urb, struct list_head *qtd_list)
{
	struct list_head *entry, *temp;

	list_for_each_safe(entry, temp, qtd_list) {
		struct ehci_qtd	*qtd;

		qtd = list_entry(entry, struct ehci_qtd, qtd_list);
		list_del(&qtd->qtd_list);
		oxu_qtd_free(oxu, qtd);
	}
}
/* Create a list of filled qtds for this URB; won't link into qh.
 */
static struct list_head *qh_urb_transaction(struct oxu_hcd *oxu,
						struct urb *urb,
						struct list_head *head,
						gfp_t flags)
{
	struct ehci_qtd *qtd, *qtd_prev;
	dma_addr_t buf;
	int len, maxpacket;
	int is_input;
	u32 token;
	void *transfer_buf = NULL;
	int ret;

	/*
	 * URBs map to sequences of QTDs: one logical transaction
	 */
	qtd = ehci_qtd_alloc(oxu);
	if (unlikely(!qtd))
		return NULL;
	list_add_tail(&qtd->qtd_list, head);
	qtd->urb = urb;

	token = QTD_STS_ACTIVE;
	token |= (EHCI_TUNE_CERR << 10);
	/* for split transactions, SplitXState initialized to zero */

	len = urb->transfer_buffer_length;
	is_input = usb_pipein(urb->pipe);
	if (!urb->transfer_buffer && urb->transfer_buffer_length && is_input)
		urb->transfer_buffer = phys_to_virt(urb->transfer_dma);

	if (usb_pipecontrol(urb->pipe)) {
		/* SETUP pid */
		ret = oxu_buf_alloc(oxu, qtd, sizeof(struct usb_ctrlrequest));
		if (ret)
			goto cleanup;

		qtd_fill(qtd, qtd->buffer_dma, sizeof(struct usb_ctrlrequest),
				token | (2 /* "setup" */ << 8), 8);
		memcpy(qtd->buffer, qtd->urb->setup_packet,
				sizeof(struct usb_ctrlrequest));

		/* ... and always at least one more pid */
		token ^= QTD_TOGGLE;
		qtd_prev = qtd;
		qtd = ehci_qtd_alloc(oxu);
		if (unlikely(!qtd))
			goto cleanup;
		qtd->urb = urb;
		qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma);
		list_add_tail(&qtd->qtd_list, head);

		/* for zero length DATA stages, STATUS is always IN */
		if (len == 0)
			token |= (1 /* "in" */ << 8);
	}

	/*
	 * Data transfer stage: buffer setup
	 */

	ret = oxu_buf_alloc(oxu, qtd, len);
	if (ret)
		goto cleanup;

	buf = qtd->buffer_dma;
	transfer_buf = urb->transfer_buffer;

	if (!is_input)
		memcpy(qtd->buffer, qtd->urb->transfer_buffer, len);

	if (is_input)
		token |= (1 /* "in" */ << 8);
	/* else it's already initted to "out" pid (0 << 8) */

	maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input));

	/*
	 * buffer gets wrapped in one or more qtds;
	 * last one may be "short" (including zero len)
	 * and may serve as a control status ack
	 */
	for (;;) {
		int this_qtd_len;

		this_qtd_len = qtd_fill(qtd, buf, len, token, maxpacket);
		qtd->transfer_buffer = transfer_buf;
		len -= this_qtd_len;
		buf += this_qtd_len;
		transfer_buf += this_qtd_len;
		if (is_input)
			qtd->hw_alt_next = oxu->async->hw_alt_next;

		/* qh makes control packets use qtd toggle; maybe switch it */
		if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
			token ^= QTD_TOGGLE;

		if (likely(len <= 0))
			break;

		qtd_prev = qtd;
		qtd = ehci_qtd_alloc(oxu);
		if (unlikely(!qtd))
			goto cleanup;
		if (likely(len > 0)) {
			ret = oxu_buf_alloc(oxu, qtd, len);
			if (ret)
				goto cleanup;
		}
		qtd->urb = urb;
		qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma);
		list_add_tail(&qtd->qtd_list, head);
	}

	/* unless the bulk/interrupt caller wants a chance to clean
	 * up after short reads, hc should advance qh past this urb
	 */
	if (likely((urb->transfer_flags & URB_SHORT_NOT_OK) == 0
				|| usb_pipecontrol(urb->pipe)))
		qtd->hw_alt_next = EHCI_LIST_END;

	/*
	 * control requests may need a terminating data "status" ack;
	 * bulk ones may need a terminating short packet (zero length).
	 */
	if (likely(urb->transfer_buffer_length != 0)) {
		int one_more = 0;

		if (usb_pipecontrol(urb->pipe)) {
			one_more = 1;
			token ^= 0x0100;	/* "in" <--> "out" */
			token |= QTD_TOGGLE;	/* force DATA1 */
		} else if (usb_pipebulk(urb->pipe)
				&& (urb->transfer_flags & URB_ZERO_PACKET)
				&& !(urb->transfer_buffer_length % maxpacket)) {
			one_more = 1;
		}
		if (one_more) {
			qtd_prev = qtd;
			qtd = ehci_qtd_alloc(oxu);
			if (unlikely(!qtd))
				goto cleanup;
			qtd->urb = urb;
			qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma);
			list_add_tail(&qtd->qtd_list, head);

			/* never any data in such packets */
			qtd_fill(qtd, 0, 0, token, 0);
		}
	}

	/* by default, enable interrupt on urb completion */
	qtd->hw_token |= cpu_to_le32(QTD_IOC);
	return head;

cleanup:
	qtd_list_free(oxu, urb, head);
	return NULL;
}
/* Each QH holds a qtd list; a QH is used for everything except iso.
 *
 * For interrupt urbs, the scheduler must set the microframe scheduling
 * mask(s) each time the QH gets scheduled.  For highspeed, that's
 * just one microframe in the s-mask.  For split interrupt transactions
 * there are additional complications: c-mask, maybe FSTNs.
 */
static struct ehci_qh *qh_make(struct oxu_hcd *oxu,
				struct urb *urb, gfp_t flags)
{
	struct ehci_qh *qh = oxu_qh_alloc(oxu);
	u32 info1 = 0, info2 = 0;
	int is_input, type;
	int maxp = 0;

	if (!qh)
		return qh;

	/*
	 * init endpoint/device data for this QH
	 */
	info1 |= usb_pipeendpoint(urb->pipe) << 8;
	info1 |= usb_pipedevice(urb->pipe) << 0;

	is_input = usb_pipein(urb->pipe);
	type = usb_pipetype(urb->pipe);
	maxp = usb_maxpacket(urb->dev, urb->pipe, !is_input);

	/* Compute interrupt scheduling parameters just once, and save.
	 * - allowing for high bandwidth, how many nsec/uframe are used?
	 * - split transactions need a second CSPLIT uframe; same question
	 * - splits also need a schedule gap (for full/low speed I/O)
	 * - qh has a polling interval
	 *
	 * For control/bulk requests, the HC or TT handles these.
	 */
	if (type == PIPE_INTERRUPT) {
		qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
								is_input, 0,
				hb_mult(maxp) * max_packet(maxp)));
		qh->start = NO_FRAME;

		if (urb->dev->speed == USB_SPEED_HIGH) {
			qh->c_usecs = 0;
			qh->gap_uf = 0;

			qh->period = urb->interval >> 3;
			if (qh->period == 0 && urb->interval != 1) {
				/* NOTE interval 2 or 4 uframes could work.
				 * But interval 1 scheduling is simpler, and
				 * includes high bandwidth.
				 */
				dbg("intr period %d uframes, NYET!",
						urb->interval);
				goto done;
			}
		} else {
			struct usb_tt *tt = urb->dev->tt;
			int think_time;

			/* gap is f(FS/LS transfer times) */
			qh->gap_uf = 1 + usb_calc_bus_time(urb->dev->speed,
					is_input, 0, maxp) / (125 * 1000);

			if (is_input) {		/* SPLIT, gap, CSPLIT+DATA */
				qh->c_usecs = qh->usecs + HS_USECS(0);
				qh->usecs = HS_USECS(1);
			} else {		/* SPLIT+DATA, gap, CSPLIT */
				qh->usecs += HS_USECS(1);
				qh->c_usecs = HS_USECS(0);
			}

			think_time = tt ? tt->think_time : 0;
			qh->tt_usecs = NS_TO_US(think_time +
					usb_calc_bus_time(urb->dev->speed,
					is_input, 0, max_packet(maxp)));
			qh->period = urb->interval;
		}
	}

	/* support for tt scheduling, and access to toggles */
	qh->dev = urb->dev;

	/* using TT? */
	switch (urb->dev->speed) {
	case USB_SPEED_LOW:
		info1 |= (1 << 12);	/* EPS "low" */
		/* FALL THROUGH */

	case USB_SPEED_FULL:
		/* EPS 0 means "full" */
		if (type != PIPE_INTERRUPT)
			info1 |= (EHCI_TUNE_RL_TT << 28);
		if (type == PIPE_CONTROL) {
			info1 |= (1 << 27);	/* for TT */
			info1 |= 1 << 14;	/* toggle from qtd */
		}
		info1 |= maxp << 16;

		info2 |= (EHCI_TUNE_MULT_TT << 30);
		info2 |= urb->dev->ttport << 23;

		/* NOTE: if (PIPE_INTERRUPT) { scheduler sets c-mask } */

		break;

	case USB_SPEED_HIGH:		/* no TT involved */
		info1 |= (2 << 12);	/* EPS "high" */
		if (type == PIPE_CONTROL) {
			info1 |= (EHCI_TUNE_RL_HS << 28);
			info1 |= 64 << 16;	/* usb2 fixed maxpacket */
			info1 |= 1 << 14;	/* toggle from qtd */
			info2 |= (EHCI_TUNE_MULT_HS << 30);
		} else if (type == PIPE_BULK) {
			info1 |= (EHCI_TUNE_RL_HS << 28);
			info1 |= 512 << 16;	/* usb2 fixed maxpacket */
			info2 |= (EHCI_TUNE_MULT_HS << 30);
		} else {		/* PIPE_INTERRUPT */
			info1 |= max_packet(maxp) << 16;
			info2 |= hb_mult(maxp) << 30;
		}
		break;
	default:
		dbg("bogus dev %p speed %d", urb->dev, urb->dev->speed);
done:
		qh_put(qh);
		return NULL;
	}

	/* NOTE: if (PIPE_INTERRUPT) { scheduler sets s-mask } */

	/* init as live, toggle clear, advance to dummy */
	qh->qh_state = QH_STATE_IDLE;
	qh->hw_info1 = cpu_to_le32(info1);
	qh->hw_info2 = cpu_to_le32(info2);
	usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), !is_input, 1);
	qh_refresh(oxu, qh);
	return qh;
}
/* Move qh (and its qtds) onto async queue; maybe enable queue.
 */
static void qh_link_async(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
	__le32 dma = QH_NEXT(qh->qh_dma);
	struct ehci_qh *head;

	/* (re)start the async schedule? */
	head = oxu->async;
	timer_action_done(oxu, TIMER_ASYNC_OFF);
	if (!head->qh_next.qh) {
		u32 cmd = readl(&oxu->regs->command);

		if (!(cmd & CMD_ASE)) {
			/* in case a clear of CMD_ASE didn't take yet */
			(void)handshake(oxu, &oxu->regs->status,
					STS_ASS, 0, 150);
			cmd |= CMD_ASE | CMD_RUN;
			writel(cmd, &oxu->regs->command);
			oxu_to_hcd(oxu)->state = HC_STATE_RUNNING;
			/* posted write need not be known to HC yet ... */
		}
	}

	/* clear halt and/or toggle; and maybe recover from silicon quirk */
	if (qh->qh_state == QH_STATE_IDLE)
		qh_refresh(oxu, qh);

	/* splice right after start */
	qh->qh_next = head->qh_next;
	qh->hw_next = head->hw_next;
	wmb();

	head->qh_next.qh = qh;
	head->hw_next = dma;

	qh->qh_state = QH_STATE_LINKED;
	/* qtd completions reported later by interrupt */
}
#define	QH_ADDR_MASK	cpu_to_le32(0x7f)

/*
 * For control/bulk/interrupt, return QH with these TDs appended.
 * Allocates and initializes the QH if necessary.
 * Returns null if it can't allocate a QH it needs to.
 * If the QH has TDs (urbs) already, that's great.
 */
static struct ehci_qh *qh_append_tds(struct oxu_hcd *oxu,
				struct urb *urb, struct list_head *qtd_list,
				int epnum, void **ptr)
{
	struct ehci_qh *qh = NULL;

	qh = (struct ehci_qh *) *ptr;
	if (unlikely(qh == NULL)) {
		/* can't sleep here, we have oxu->lock... */
		qh = qh_make(oxu, urb, GFP_ATOMIC);
		*ptr = qh;
	}
	if (likely(qh != NULL)) {
		struct ehci_qtd	*qtd;

		if (unlikely(list_empty(qtd_list)))
			qtd = NULL;
		else
			qtd = list_entry(qtd_list->next, struct ehci_qtd,
					qtd_list);

		/* control qh may need patching ... */
		if (unlikely(epnum == 0)) {

			/* usb_reset_device() briefly reverts to address 0 */
			if (usb_pipedevice(urb->pipe) == 0)
				qh->hw_info1 &= ~QH_ADDR_MASK;
		}

		/* just one way to queue requests: swap with the dummy qtd.
		 * only hc or qh_refresh() ever modify the overlay.
		 */
		if (likely(qtd != NULL)) {
			struct ehci_qtd	*dummy;
			dma_addr_t dma;
			__le32 token;

			/* to avoid racing the HC, use the dummy td instead of
			 * the first td of our list (becomes new dummy).  both
			 * tds stay deactivated until we're done, when the
			 * HC is allowed to fetch the old dummy (4.10.2).
			 */
			token = qtd->hw_token;
			qtd->hw_token = HALT_BIT;
			wmb();
			dummy = qh->dummy;

			dma = dummy->qtd_dma;
			*dummy = *qtd;
			dummy->qtd_dma = dma;

			list_del(&qtd->qtd_list);
			list_add(&dummy->qtd_list, qtd_list);
			list_splice(qtd_list, qh->qtd_list.prev);

			ehci_qtd_init(qtd, qtd->qtd_dma);
			qh->dummy = qtd;

			/* hc must see the new dummy at list end */
			dma = qtd->qtd_dma;
			qtd = list_entry(qh->qtd_list.prev,
					struct ehci_qtd, qtd_list);
			qtd->hw_next = QTD_NEXT(dma);

			/* let the hc process these next qtds */
			dummy->hw_token = (token & ~(0x80));
			wmb();
			dummy->hw_token = token;

			urb->hcpriv = qh_get(qh);
		}
	}
	return qh;
}
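/* The dummy-swap above is the standard EHCI trick for appending work without
 * stopping the controller: the old dummy qtd (already visible to the HC at
 * the end of the queue) takes over the contents of the first new qtd, the
 * first new qtd becomes the fresh dummy, and only the final hw_token write
 * restoring the ACTIVE bit lets the controller start on the new work.
 */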
static int submit_async(struct oxu_hcd *oxu, struct urb *urb,
			struct list_head *qtd_list, gfp_t mem_flags)
{
	struct ehci_qtd	*qtd;
	int epnum;
	unsigned long flags;
	struct ehci_qh *qh = NULL;
	int rc = 0;

	qtd = list_entry(qtd_list->next, struct ehci_qtd, qtd_list);
	epnum = urb->ep->desc.bEndpointAddress;

#ifdef OXU_URB_TRACE
	oxu_dbg(oxu, "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
		__func__, urb->dev->devpath, urb,
		epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
		urb->transfer_buffer_length,
		qtd, urb->ep->hcpriv);
#endif

	spin_lock_irqsave(&oxu->lock, flags);
	if (unlikely(!HCD_HW_ACCESSIBLE(oxu_to_hcd(oxu)))) {
		rc = -ESHUTDOWN;
		goto done;
	}

	qh = qh_append_tds(oxu, urb, qtd_list, epnum, &urb->ep->hcpriv);
	if (unlikely(qh == NULL)) {
		rc = -ENOMEM;
		goto done;
	}

	/* Control/bulk operations through TTs don't need scheduling,
	 * the HC and TT handle it when the TT has a buffer ready.
	 */
	if (likely(qh->qh_state == QH_STATE_IDLE))
		qh_link_async(oxu, qh_get(qh));
done:
	spin_unlock_irqrestore(&oxu->lock, flags);
	if (unlikely(qh == NULL))
		qtd_list_free(oxu, urb, qtd_list);
	return rc;
}
/* The async qh for the qtds being reclaimed are now unlinked from the HC */
static void end_unlink_async(struct oxu_hcd *oxu)
{
	struct ehci_qh *qh = oxu->reclaim;
	struct ehci_qh *next;

	timer_action_done(oxu, TIMER_IAA_WATCHDOG);

	qh->qh_state = QH_STATE_IDLE;
	qh->qh_next.qh = NULL;
	qh_put(qh);			/* refcount from reclaim */

	/* other unlink(s) may be pending (in QH_STATE_UNLINK_WAIT) */
	next = qh->reclaim;
	oxu->reclaim = next;
	oxu->reclaim_ready = 0;
	qh->reclaim = NULL;

	qh_completions(oxu, qh);

	if (!list_empty(&qh->qtd_list)
			&& HC_IS_RUNNING(oxu_to_hcd(oxu)->state))
		qh_link_async(oxu, qh);
	else {
		qh_put(qh);		/* refcount from async list */

		/* it's not free to turn the async schedule on/off; leave it
		 * active but idle for a while once it empties.
		 */
		if (HC_IS_RUNNING(oxu_to_hcd(oxu)->state)
				&& oxu->async->qh_next.qh == NULL)
			timer_action(oxu, TIMER_ASYNC_OFF);
	}

	if (next) {
		oxu->reclaim = NULL;
		start_unlink_async(oxu, next);
	}
}
/* makes sure the async qh will become idle */
/* caller must own oxu->lock */
static void start_unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
	int cmd = readl(&oxu->regs->command);
	struct ehci_qh *prev;

#ifdef DEBUG
	assert_spin_locked(&oxu->lock);
	if (oxu->reclaim || (qh->qh_state != QH_STATE_LINKED
				&& qh->qh_state != QH_STATE_UNLINK_WAIT))
		BUG();
#endif

	/* stop async schedule right now? */
	if (unlikely(qh == oxu->async)) {
		/* can't get here without STS_ASS set */
		if (oxu_to_hcd(oxu)->state != HC_STATE_HALT
				&& !oxu->reclaim) {
			/* ... and CMD_IAAD clear */
			writel(cmd & ~CMD_ASE, &oxu->regs->command);
			wmb();
			/* handshake later, if we need to */
			timer_action_done(oxu, TIMER_ASYNC_OFF);
		}
		return;
	}

	qh->qh_state = QH_STATE_UNLINK;
	oxu->reclaim = qh = qh_get(qh);

	prev = oxu->async;
	while (prev->qh_next.qh != qh)
		prev = prev->qh_next.qh;

	prev->hw_next = qh->hw_next;
	prev->qh_next = qh->qh_next;
	wmb();

	if (unlikely(oxu_to_hcd(oxu)->state == HC_STATE_HALT)) {
		/* if (unlikely(qh->reclaim != 0))
		 *	this will recurse, probably not much
		 */
		end_unlink_async(oxu);
		return;
	}

	oxu->reclaim_ready = 0;
	cmd |= CMD_IAAD;
	writel(cmd, &oxu->regs->command);
	(void) readl(&oxu->regs->command);
	timer_action(oxu, TIMER_IAA_WATCHDOG);
}
static void scan_async(struct oxu_hcd *oxu)
{
	struct ehci_qh *qh;
	enum ehci_timer_action action = TIMER_IO_WATCHDOG;

	if (!++(oxu->stamp))
		oxu->stamp++;
	timer_action_done(oxu, TIMER_ASYNC_SHRINK);
rescan:
	qh = oxu->async->qh_next.qh;
	if (likely(qh != NULL)) {
		do {
			/* clean any finished work for this qh */
			if (!list_empty(&qh->qtd_list)
					&& qh->stamp != oxu->stamp) {
				int temp;

				/* unlinks could happen here; completion
				 * reporting drops the lock.  rescan using
				 * the latest schedule, but don't rescan
				 * qhs we already finished (no looping).
				 */
				qh = qh_get(qh);
				qh->stamp = oxu->stamp;
				temp = qh_completions(oxu, qh);
				qh_put(qh);
				if (temp != 0)
					goto rescan;
			}

			/* unlink idle entries, reducing HC PCI usage as well
			 * as HCD schedule-scanning costs.  delay for any qh
			 * we just scanned, there's a not-unusual case that it
			 * doesn't stay idle for long.
			 * (plus, avoids some kind of re-activation race.)
			 */
			if (list_empty(&qh->qtd_list)) {
				if (qh->stamp == oxu->stamp)
					action = TIMER_ASYNC_SHRINK;
				else if (!oxu->reclaim
					    && qh->qh_state == QH_STATE_LINKED)
					start_unlink_async(oxu, qh);
			}

			qh = qh->qh_next.qh;
		} while (qh);
	}
	if (action == TIMER_ASYNC_SHRINK)
		timer_action(oxu, TIMER_ASYNC_SHRINK);
}
/*
 * periodic_next_shadow - return "next" pointer on shadow list
 * @periodic: host pointer to qh/itd/sitd
 * @tag: hardware tag for type of this record
 */
static union ehci_shadow *periodic_next_shadow(union ehci_shadow *periodic,
						__le32 tag)
{
	switch (tag) {
	default:
	case Q_TYPE_QH:
		return &periodic->qh->qh_next;
	}
}
/* caller must hold oxu->lock */
static void periodic_unlink(struct oxu_hcd *oxu, unsigned frame, void *ptr)
{
	union ehci_shadow *prev_p = &oxu->pshadow[frame];
	__le32 *hw_p = &oxu->periodic[frame];
	union ehci_shadow here = *prev_p;

	/* find predecessor of "ptr"; hw and shadow lists are in sync */
	while (here.ptr && here.ptr != ptr) {
		prev_p = periodic_next_shadow(prev_p, Q_NEXT_TYPE(*hw_p));
		hw_p = here.hw_next;
		here = *prev_p;
	}
	/* an interrupt entry (at list end) could have been shared */
	if (!here.ptr)
		return;

	/* update shadow and hardware lists ... the old "next" pointers
	 * from ptr may still be in use, the caller updates them.
	 */
	*prev_p = *periodic_next_shadow(&here, Q_NEXT_TYPE(*hw_p));
	*hw_p = *here.hw_next;
}
/* how many of the uframe's 125 usecs are allocated? */
static unsigned short periodic_usecs(struct oxu_hcd *oxu,
					unsigned frame, unsigned uframe)
{
	__le32 *hw_p = &oxu->periodic[frame];
	union ehci_shadow *q = &oxu->pshadow[frame];
	unsigned usecs = 0;

	while (q->ptr) {
		switch (Q_NEXT_TYPE(*hw_p)) {
		case Q_TYPE_QH:
		default:
			/* is it in the S-mask? */
			if (q->qh->hw_info2 & cpu_to_le32(1 << uframe))
				usecs += q->qh->usecs;
			/* ... or C-mask? */
			if (q->qh->hw_info2 & cpu_to_le32(1 << (8 + uframe)))
				usecs += q->qh->c_usecs;
			hw_p = &q->qh->hw_next;
			q = &q->qh->qh_next;
			break;
		}
	}
#ifdef DEBUG
	if (usecs > 100)
		oxu_err(oxu, "uframe %d sched overrun: %d usecs\n",
			frame * 8 + uframe, usecs);
#endif
	return usecs;
}
static int enable_periodic(struct oxu_hcd *oxu)
{
	u32 cmd;
	int status;

	/* did clearing PSE take effect yet?
	 * takes effect only at frame boundaries...
	 */
	status = handshake(oxu, &oxu->regs->status, STS_PSS, 0, 9 * 125);
	if (status != 0) {
		oxu_to_hcd(oxu)->state = HC_STATE_HALT;
		return status;
	}

	cmd = readl(&oxu->regs->command) | CMD_PSE;
	writel(cmd, &oxu->regs->command);
	/* posted write ... PSS happens later */
	oxu_to_hcd(oxu)->state = HC_STATE_RUNNING;

	/* make sure ehci_work scans these */
	oxu->next_uframe = readl(&oxu->regs->frame_index)
				% (oxu->periodic_size << 3);
	return 0;
}
static int disable_periodic(struct oxu_hcd *oxu)
{
	u32 cmd;
	int status;

	/* did setting PSE not take effect yet?
	 * takes effect only at frame boundaries...
	 */
	status = handshake(oxu, &oxu->regs->status, STS_PSS, STS_PSS, 9 * 125);
	if (status != 0) {
		oxu_to_hcd(oxu)->state = HC_STATE_HALT;
		return status;
	}

	cmd = readl(&oxu->regs->command) & ~CMD_PSE;
	writel(cmd, &oxu->regs->command);
	/* posted write ... */

	oxu->next_uframe = -1;
	return 0;
}
/* periodic schedule slots have iso tds (normal or split) first, then a
 * sparse tree for active interrupt transfers.
 *
 * this just links in a qh; caller guarantees uframe masks are set right.
 * no FSTN support (yet; oxu 0.96+)
 */
static int qh_link_periodic(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
	unsigned i;
	unsigned period = qh->period;

	dev_dbg(&qh->dev->dev,
		"link qh%d-%04x/%p start %d [%d/%d us]\n",
		period, le32_to_cpup(&qh->hw_info2) & (QH_CMASK | QH_SMASK),
		qh, qh->start, qh->usecs, qh->c_usecs);

	/* high bandwidth, or otherwise every microframe */
	if (period == 0)
		period = 1;

	for (i = qh->start; i < oxu->periodic_size; i += period) {
		union ehci_shadow	*prev = &oxu->pshadow[i];
		__le32			*hw_p = &oxu->periodic[i];
		union ehci_shadow	here = *prev;
		__le32			type = 0;

		/* skip the iso nodes at list head */
		while (here.ptr) {
			type = Q_NEXT_TYPE(*hw_p);
			if (type == Q_TYPE_QH)
				break;
			prev = periodic_next_shadow(prev, type);
			hw_p = &here.qh->hw_next;
			here = *prev;
		}

		/* sorting each branch by period (slow-->fast)
		 * enables sharing interior tree nodes
		 */
		while (here.ptr && qh != here.qh) {
			if (qh->period > here.qh->period)
				break;
			prev = &here.qh->qh_next;
			hw_p = &here.qh->hw_next;
			here = *prev;
		}
		/* link in this qh, unless some earlier pass did that */
		if (qh != here.qh) {
			qh->qh_next = here;
			if (here.qh)
				qh->hw_next = *hw_p;
			wmb();
			prev->qh = qh;
			*hw_p = QH_NEXT(qh->qh_dma);
		}
	}
	qh->qh_state = QH_STATE_LINKED;
	qh_get(qh);

	/* update per-qh bandwidth for usbfs */
	oxu_to_hcd(oxu)->self.bandwidth_allocated += qh->period
		? ((qh->usecs + qh->c_usecs) / qh->period)
		: (qh->usecs * 8);

	/* maybe enable periodic schedule processing */
	if (!oxu->periodic_sched++)
		return enable_periodic(oxu);

	return 0;
}
static void qh_unlink_periodic(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
	unsigned i;
	unsigned period;

	/* high bandwidth, or otherwise part of every microframe */
	period = qh->period;
	if (period == 0)
		period = 1;

	for (i = qh->start; i < oxu->periodic_size; i += period)
		periodic_unlink(oxu, i, qh);

	/* update per-qh bandwidth for usbfs */
	oxu_to_hcd(oxu)->self.bandwidth_allocated -= qh->period
		? ((qh->usecs + qh->c_usecs) / qh->period)
		: (qh->usecs * 8);

	dev_dbg(&qh->dev->dev,
		"unlink qh%d-%04x/%p start %d [%d/%d us]\n",
		qh->period,
		le32_to_cpup(&qh->hw_info2) & (QH_CMASK | QH_SMASK),
		qh, qh->start, qh->usecs, qh->c_usecs);

	/* qh->qh_next still "live" to HC */
	qh->qh_state = QH_STATE_UNLINK;
	qh->qh_next.ptr = NULL;
	qh_put(qh);

	/* maybe turn off periodic schedule */
	oxu->periodic_sched--;
	if (!oxu->periodic_sched)
		(void) disable_periodic(oxu);
}
static void intr_deschedule(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
	unsigned wait;

	qh_unlink_periodic(oxu, qh);

	/* simple/paranoid: always delay, expecting the HC needs to read
	 * qh->hw_next or finish a writeback after SPLIT/CSPLIT ... and
	 * expect khubd to clean up after any CSPLITs we won't issue.
	 * active high speed queues may need bigger delays...
	 */
	if (list_empty(&qh->qtd_list)
		|| (cpu_to_le32(QH_CMASK) & qh->hw_info2) != 0)
		wait = 2;
	else
		wait = 55;	/* worst case: 3 * 1024 */

	udelay(wait);
	qh->qh_state = QH_STATE_IDLE;
	qh->hw_next = EHCI_LIST_END;
	wmb();
}
static int check_period(struct oxu_hcd *oxu,
			unsigned frame, unsigned uframe,
			unsigned period, unsigned usecs)
{
	int claimed;

	/* complete split running into next frame?
	 * given FSTN support, we could sometimes check...
	 */
	if (uframe >= 8)
		return 0;

	/*
	 * 80% periodic == 100 usec/uframe available
	 * convert "usecs we need" to "max already claimed"
	 */
	usecs = 100 - usecs;

	/* we "know" 2 and 4 uframe intervals were rejected; so
	 * for period 0, check _every_ microframe in the schedule.
	 */
	if (unlikely(period == 0)) {
		do {
			for (uframe = 0; uframe < 7; uframe++) {
				claimed = periodic_usecs(oxu, frame, uframe);
				if (claimed > usecs)
					return 0;
			}
		} while ((frame += 1) < oxu->periodic_size);

	/* just check the specified uframe, at that period */
	} else {
		do {
			claimed = periodic_usecs(oxu, frame, uframe);
			if (claimed > usecs)
				return 0;
		} while ((frame += period) < oxu->periodic_size);
	}

	return 1;
}
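/* Budget example for the check above: the EHCI 80% rule leaves 100 of each
 * 125 us microframe for periodic transfers, so a qh needing 30 us fits in a
 * given uframe slot only while periodic_usecs() reports at most 70 us
 * already claimed at every frame the qh would occupy.
 */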
static int check_intr_schedule(struct oxu_hcd *oxu,
				unsigned frame, unsigned uframe,
				const struct ehci_qh *qh, __le32 *c_maskp)
{
	int retval = -ENOSPC;

	if (qh->c_usecs && uframe >= 6)		/* FSTN territory? */
		goto done;

	if (!check_period(oxu, frame, uframe, qh->period, qh->usecs))
		goto done;
	if (!qh->c_usecs) {
		retval = 0;
		*c_maskp = 0;
		goto done;
	}

done:
	return retval;
}
2108 /* "first fit" scheduling policy used the first time through,
2109 * or when the previous schedule slot can't be re-used.
2111 static int qh_schedule(struct oxu_hcd
*oxu
, struct ehci_qh
*qh
)
2116 unsigned frame
; /* 0..(qh->period - 1), or NO_FRAME */
2118 qh_refresh(oxu
, qh
);
2119 qh
->hw_next
= EHCI_LIST_END
;
2122 /* reuse the previous schedule slots, if we can */
2123 if (frame
< qh
->period
) {
2124 uframe
= ffs(le32_to_cpup(&qh
->hw_info2
) & QH_SMASK
);
2125 status
= check_intr_schedule(oxu
, frame
, --uframe
,
2133 /* else scan the schedule to find a group of slots such that all
2134 * uframes have enough periodic bandwidth available.
2137 /* "normal" case, uframing flexible except with splits */
2139 frame
= qh
->period
- 1;
2141 for (uframe
= 0; uframe
< 8; uframe
++) {
2142 status
= check_intr_schedule(oxu
,
2148 } while (status
&& frame
--);
2150 /* qh->period == 0 means every uframe */
2153 status
= check_intr_schedule(oxu
, 0, 0, qh
, &c_mask
);
2159 /* reset S-frame and (maybe) C-frame masks */
2160 qh
->hw_info2
&= cpu_to_le32(~(QH_CMASK
| QH_SMASK
));
2161 qh
->hw_info2
|= qh
->period
2162 ? cpu_to_le32(1 << uframe
)
2163 : cpu_to_le32(QH_SMASK
);
2164 qh
->hw_info2
|= c_mask
;
2166 oxu_dbg(oxu
, "reused qh %p schedule\n", qh
);
2168 /* stuff into the periodic schedule */
2169 status
= qh_link_periodic(oxu
, qh
);
static int intr_submit(struct oxu_hcd *oxu, struct urb *urb,
			struct list_head *qtd_list, gfp_t mem_flags)
{
	unsigned epnum;
	unsigned long flags;
	struct ehci_qh *qh;
	int status = 0;
	struct list_head empty;

	/* get endpoint and transfer/schedule data */
	epnum = urb->ep->desc.bEndpointAddress;

	spin_lock_irqsave(&oxu->lock, flags);

	if (unlikely(!HCD_HW_ACCESSIBLE(oxu_to_hcd(oxu)))) {
		status = -ESHUTDOWN;
		goto done;
	}

	/* get qh and force any scheduling errors */
	INIT_LIST_HEAD(&empty);
	qh = qh_append_tds(oxu, urb, &empty, epnum, &urb->ep->hcpriv);
	if (qh == NULL) {
		status = -ENOMEM;
		goto done;
	}
	if (qh->qh_state == QH_STATE_IDLE) {
		status = qh_schedule(oxu, qh);
		if (status != 0)
			goto done;
	}

	/* then queue the urb's tds to the qh */
	qh = qh_append_tds(oxu, urb, qtd_list, epnum, &urb->ep->hcpriv);
	BUG_ON(qh == NULL);

	/* ... update usbfs periodic stats */
	oxu_to_hcd(oxu)->self.bandwidth_int_reqs++;

done:
	spin_unlock_irqrestore(&oxu->lock, flags);
	if (status)
		qtd_list_free(oxu, urb, qtd_list);

	return status;
}
static inline int itd_submit(struct oxu_hcd *oxu, struct urb *urb,
						gfp_t mem_flags)
{
	oxu_dbg(oxu, "iso support is missing!\n");
	return -ENOSYS;
}

static inline int sitd_submit(struct oxu_hcd *oxu, struct urb *urb,
						gfp_t mem_flags)
{
	oxu_dbg(oxu, "split iso support is missing!\n");
	return -ENOSYS;
}
static void scan_periodic(struct oxu_hcd *oxu)
{
	unsigned frame, clock, now_uframe, mod;
	unsigned modified;

	mod = oxu->periodic_size << 3;

	/*
	 * When running, scan from last scan point up to "now"
	 * else clean up by scanning everything that's left.
	 * Touches as few pages as possible: cache-friendly.
	 */
	now_uframe = oxu->next_uframe;
	if (HC_IS_RUNNING(oxu_to_hcd(oxu)->state))
		clock = readl(&oxu->regs->frame_index);
	else
		clock = now_uframe + mod - 1;
	clock %= mod;

	for (;;) {
		union ehci_shadow	q, *q_p;
		__le32			type, *hw_p;
		unsigned		uframes;

		/* don't scan past the live uframe */
		frame = now_uframe >> 3;
		if (frame == (clock >> 3))
			uframes = now_uframe & 0x07;
		else {
			/* safe to scan the whole frame at once */
			now_uframe |= 0x07;
			uframes = 8;
		}

restart:
		/* scan each element in frame's queue for completions */
		q_p = &oxu->pshadow[frame];
		hw_p = &oxu->periodic[frame];
		q.ptr = q_p->ptr;
		type = Q_NEXT_TYPE(*hw_p);
		modified = 0;

		while (q.ptr != NULL) {
			union ehci_shadow temp;
			int live;

			live = HC_IS_RUNNING(oxu_to_hcd(oxu)->state);
			switch (type) {
			case Q_TYPE_QH:
				/* handle any completions */
				temp.qh = qh_get(q.qh);
				type = Q_NEXT_TYPE(q.qh->hw_next);
				q = q.qh->qh_next;
				modified = qh_completions(oxu, temp.qh);
				if (unlikely(list_empty(&temp.qh->qtd_list)))
					intr_deschedule(oxu, temp.qh);
				qh_put(temp.qh);
				break;
			default:
				dbg("corrupt type %d frame %d shadow %p",
					type, frame, q.ptr);
				q.ptr = NULL;
			}

			/* assume completion callbacks modify the queue */
			if (unlikely(modified))
				goto restart;
		}

		/* Stop when we catch up to the HC */
		if (now_uframe == clock) {
			unsigned now;

			if (!HC_IS_RUNNING(oxu_to_hcd(oxu)->state))
				break;
			oxu->next_uframe = now_uframe;
			now = readl(&oxu->regs->frame_index) % mod;
			if (now_uframe == now)
				break;

			/* rescan the rest of this frame, then ... */
			clock = now;
		} else {
			now_uframe++;
			now_uframe %= mod;
		}
	}
}
/* On some systems, leaving remote wakeup enabled prevents system shutdown.
 * The firmware seems to think that powering off is a wakeup event!
 * This routine turns off remote wakeup and everything else, on all ports.
 */
static void ehci_turn_off_all_ports(struct oxu_hcd *oxu)
{
	int port = HCS_N_PORTS(oxu->hcs_params);

	while (port--)
		writel(PORT_RWC_BITS, &oxu->regs->port_status[port]);
}
static void ehci_port_power(struct oxu_hcd *oxu, int is_on)
{
	unsigned port;

	if (!HCS_PPC(oxu->hcs_params))
		return;

	oxu_dbg(oxu, "...power%s ports...\n", is_on ? "up" : "down");
	for (port = HCS_N_PORTS(oxu->hcs_params); port > 0; )
		(void) oxu_hub_control(oxu_to_hcd(oxu),
				is_on ? SetPortFeature : ClearPortFeature,
				USB_PORT_FEAT_POWER,
				port--, NULL, 0);
	msleep(20);
}
/* Called from some interrupts, timers, and so on.
 * It calls driver completion functions, after dropping oxu->lock.
 */
static void ehci_work(struct oxu_hcd *oxu)
{
	timer_action_done(oxu, TIMER_IO_WATCHDOG);
	if (oxu->reclaim_ready)
		end_unlink_async(oxu);

	/* another CPU may drop oxu->lock during a schedule scan while
	 * it reports urb completions.  this flag guards against bogus
	 * attempts at re-entrant schedule scanning.
	 */
	if (oxu->scanning)
		return;
	oxu->scanning = 1;
	scan_async(oxu);
	if (oxu->next_uframe != -1)
		scan_periodic(oxu);
	oxu->scanning = 0;

	/* the IO watchdog guards against hardware or driver bugs that
	 * misplace IRQs, and should let us run completely without IRQs.
	 * such lossage has been observed on both VT6202 and VT8235.
	 */
	if (HC_IS_RUNNING(oxu_to_hcd(oxu)->state) &&
			(oxu->async->qh_next.ptr != NULL ||
			 oxu->periodic_sched != 0))
		timer_action(oxu, TIMER_IO_WATCHDOG);
}
static void unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
	/* if we need to use IAA and it's busy, defer */
	if (qh->qh_state == QH_STATE_LINKED
			&& oxu->reclaim
			&& HC_IS_RUNNING(oxu_to_hcd(oxu)->state)) {
		struct ehci_qh *last;

		for (last = oxu->reclaim;
				last->reclaim;
				last = last->reclaim)
			continue;
		qh->qh_state = QH_STATE_UNLINK_WAIT;
		last->reclaim = qh;

	/* bypass IAA if the hc can't care */
	} else if (!HC_IS_RUNNING(oxu_to_hcd(oxu)->state) && oxu->reclaim)
		end_unlink_async(oxu);

	/* something else might have unlinked the qh by now */
	if (qh->qh_state == QH_STATE_LINKED)
		start_unlink_async(oxu, qh);
}
/*
 * USB host controller methods
 */

static irqreturn_t oxu210_hcd_irq(struct usb_hcd *hcd)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
	u32 status, pcd_status = 0;
	int bh;

	spin_lock(&oxu->lock);

	status = readl(&oxu->regs->status);

	/* e.g. cardbus physical eject */
	if (status == ~(u32) 0) {
		oxu_dbg(oxu, "device removed\n");
		goto dead;
	}

	status &= INTR_MASK;
	if (!status) {			/* irq sharing? */
		spin_unlock(&oxu->lock);
		return IRQ_NONE;
	}

	/* clear (just) interrupts */
	writel(status, &oxu->regs->status);
	readl(&oxu->regs->command);	/* unblock posted write */
	bh = 0;

#ifdef OXU_VERBOSE_DEBUG
	/* unrequested/ignored: Frame List Rollover */
	dbg_status(oxu, "irq", status);
#endif

	/* INT, ERR, and IAA interrupt rates can be throttled */

	/* normal [4.15.1.2] or error [4.15.1.1] completion */
	if (likely((status & (STS_INT|STS_ERR)) != 0))
		bh = 1;

	/* complete the unlinking of some qh [4.15.2.3] */
	if (status & STS_IAA) {
		oxu->reclaim_ready = 1;
		bh = 1;
	}

	/* remote wakeup [4.3.1] */
	if (status & STS_PCD) {
		unsigned i = HCS_N_PORTS(oxu->hcs_params);
		pcd_status = status;

		/* resume root hub? */
		if (!(readl(&oxu->regs->command) & CMD_RUN))
			usb_hcd_resume_root_hub(hcd);

		while (i--) {
			int pstatus = readl(&oxu->regs->port_status[i]);

			if (pstatus & PORT_OWNER)
				continue;
			if (!(pstatus & PORT_RESUME)
					|| oxu->reset_done[i] != 0)
				continue;

			/* start 20 msec resume signaling from this port,
			 * and make khubd collect PORT_STAT_C_SUSPEND to
			 * stop that signaling.
			 */
			oxu->reset_done[i] = jiffies + msecs_to_jiffies(20);
			oxu_dbg(oxu, "port %d remote wakeup\n", i + 1);
			mod_timer(&hcd->rh_timer, oxu->reset_done[i]);
		}
	}

	/* PCI errors [4.15.2.4] */
	if (unlikely((status & STS_FATAL) != 0)) {
		/* bogus "fatal" IRQs appear on some chips... why? */
		status = readl(&oxu->regs->status);
		dbg_cmd(oxu, "fatal", readl(&oxu->regs->command));
		dbg_status(oxu, "fatal", status);
		if (status & STS_HALT) {
			oxu_err(oxu, "fatal error\n");
dead:
			ehci_reset(oxu);
			writel(0, &oxu->regs->configured_flag);
			/* generic layer kills/unlinks all urbs, then
			 * uses oxu_stop to clean up the rest
			 */
			bh = 1;
		}
	}

	if (bh)
		ehci_work(oxu);
	spin_unlock(&oxu->lock);
	if (pcd_status & STS_PCD)
		usb_hcd_poll_rh_status(hcd);
	return IRQ_HANDLED;
}
static irqreturn_t oxu_irq(struct usb_hcd *hcd)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
	int ret = IRQ_HANDLED;

	u32 status = oxu_readl(hcd->regs, OXU_CHIPIRQSTATUS);
	u32 enable = oxu_readl(hcd->regs, OXU_CHIPIRQEN_SET);

	/* Disable all interrupts */
	oxu_writel(hcd->regs, OXU_CHIPIRQEN_CLR, enable);

	if ((oxu->is_otg && (status & OXU_USBOTGI)) ||
		(!oxu->is_otg && (status & OXU_USBSPHI)))
		oxu210_hcd_irq(hcd);
	else
		ret = IRQ_NONE;

	/* Re-enable all interrupts */
	oxu_writel(hcd->regs, OXU_CHIPIRQEN_SET, enable);

	return ret;
}
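/* oxu_irq() is the chip-level demultiplexer: the OXU210HP carries two
 * EHCI-like cores (OTG and SPH) behind one interrupt line.  It masks all
 * top-level sources, runs the quasi-EHCI handler only when the status bit
 * for this hcd's core is set, then restores the previous enable mask.
 */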
static void oxu_watchdog(unsigned long param)
{
	struct oxu_hcd *oxu = (struct oxu_hcd *) param;
	unsigned long flags;

	spin_lock_irqsave(&oxu->lock, flags);

	/* lost IAA irqs wedge things badly; seen with a vt8235 */
	if (oxu->reclaim) {
		u32 status = readl(&oxu->regs->status);
		if (status & STS_IAA) {
			oxu_vdbg(oxu, "lost IAA\n");
			writel(STS_IAA, &oxu->regs->status);
			oxu->reclaim_ready = 1;
		}
	}

	/* stop async processing after it's idled a bit */
	if (test_bit(TIMER_ASYNC_OFF, &oxu->actions))
		start_unlink_async(oxu, oxu->async);

	/* oxu could run by timer, without IRQs ... */
	ehci_work(oxu);

	spin_unlock_irqrestore(&oxu->lock, flags);
}
/* One-time init, only for memory state.
 */
static int oxu_hcd_init(struct usb_hcd *hcd)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
	u32 temp;
	int retval;
	u32 hcc_params;

	spin_lock_init(&oxu->lock);

	init_timer(&oxu->watchdog);
	oxu->watchdog.function = oxu_watchdog;
	oxu->watchdog.data = (unsigned long) oxu;

	/*
	 * hw default: 1K periodic list heads, one per frame.
	 * periodic_size can shrink by USBCMD update if hcc_params allows.
	 */
	oxu->periodic_size = DEFAULT_I_TDPS;
	retval = ehci_mem_init(oxu, GFP_KERNEL);
	if (retval < 0)
		return retval;

	/* controllers may cache some of the periodic schedule ... */
	hcc_params = readl(&oxu->caps->hcc_params);
	if (HCC_ISOC_CACHE(hcc_params))		/* full frame cache */
		oxu->i_thresh = 8;
	else					/* N microframes cached */
		oxu->i_thresh = 2 + HCC_ISOC_THRES(hcc_params);

	oxu->reclaim = NULL;
	oxu->reclaim_ready = 0;
	oxu->next_uframe = -1;

	/*
	 * dedicate a qh for the async ring head, since we couldn't unlink
	 * a 'real' qh without stopping the async schedule [4.8].  use it
	 * as the 'reclamation list head' too.
	 * its dummy is used in hw_alt_next of many tds, to prevent the qh
	 * from automatically advancing to the next td after short reads.
	 */
	oxu->async->qh_next.qh = NULL;
	oxu->async->hw_next = QH_NEXT(oxu->async->qh_dma);
	oxu->async->hw_info1 = cpu_to_le32(QH_HEAD);
	oxu->async->hw_token = cpu_to_le32(QTD_STS_HALT);
	oxu->async->hw_qtd_next = EHCI_LIST_END;
	oxu->async->qh_state = QH_STATE_LINKED;
	oxu->async->hw_alt_next = QTD_NEXT(oxu->async->dummy->qtd_dma);

	/* clear interrupt enables, set irq latency */
	if (log2_irq_thresh < 0 || log2_irq_thresh > 6)
		log2_irq_thresh = 0;
	temp = 1 << (16 + log2_irq_thresh);
	if (HCC_CANPARK(hcc_params)) {
		/* HW default park == 3, on hardware that supports it (like
		 * NVidia and ALI silicon), maximizes throughput on the async
		 * schedule by avoiding QH fetches between transfers.
		 *
		 * With fast usb storage devices and NForce2, "park" seems to
		 * make problems: throughput reduction (!), data errors...
		 */
		if (park) {
			park = min(park, (unsigned) 3);
			temp |= CMD_PARK;
			temp |= park << 8;
		}
		oxu_dbg(oxu, "park %d\n", park);
	}
	if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
		/* periodic schedule size can be smaller than default */
		temp &= ~(3 << 2);
		temp |= (EHCI_TUNE_FLS << 2);
	}
	oxu->command = temp;

	return 0;
}
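/* Worked example for the irq latency setup above: USBCMD bits 23:16 hold
 * the Interrupt Threshold Control in micro-frames, so temp = 1 << (16 +
 * log2_irq_thresh) yields 1, 2, 4, ... 64 micro-frames.  E.g.
 * log2_irq_thresh = 3 writes 8 into ITC, i.e. at most one interrupt per
 * 8 micro-frames (1 ms).
 */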
/* Called during probe() after chip reset completes.
 */
static int oxu_reset(struct usb_hcd *hcd)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
	int ret;

	spin_lock_init(&oxu->mem_lock);
	INIT_LIST_HEAD(&oxu->urb_list);
	oxu->urb_len = 0;

	hcd->self.controller->dma_mask = NULL;

	if (oxu->is_otg) {
		oxu->caps = hcd->regs + OXU_OTG_CAP_OFFSET;
		oxu->regs = hcd->regs + OXU_OTG_CAP_OFFSET + \
			HC_LENGTH(readl(&oxu->caps->hc_capbase));

		oxu->mem = hcd->regs + OXU_SPH_MEM;
	} else {
		oxu->caps = hcd->regs + OXU_SPH_CAP_OFFSET;
		oxu->regs = hcd->regs + OXU_SPH_CAP_OFFSET + \
			HC_LENGTH(readl(&oxu->caps->hc_capbase));

		oxu->mem = hcd->regs + OXU_OTG_MEM;
	}

	oxu->hcs_params = readl(&oxu->caps->hcs_params);
	oxu->sbrn = 0x20;

	ret = oxu_hcd_init(hcd);
	if (ret)
		return ret;

	return 0;
}
static int oxu_run(struct usb_hcd *hcd)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
	int retval;
	u32 temp, hcc_params;

	hcd->uses_new_polling = 1;

	/* EHCI spec section 4.1 */
	retval = ehci_reset(oxu);
	if (retval != 0) {
		ehci_mem_cleanup(oxu);
		return retval;
	}
	writel(oxu->periodic_dma, &oxu->regs->frame_list);
	writel((u32) oxu->async->qh_dma, &oxu->regs->async_next);

	/* hcc_params controls whether oxu->regs->segment must (!!!)
	 * be used; it constrains QH/ITD/SITD and QTD locations.
	 * pci_pool consistent memory always uses segment zero.
	 * streaming mappings for I/O buffers, like pci_map_single(),
	 * can return segments above 4GB, if the device allows.
	 *
	 * NOTE: the dma mask is visible through dma_supported(), so
	 * drivers can pass this info along ... like NETIF_F_HIGHDMA,
	 * Scsi_Host.highmem_io, and so forth.  It's readonly to all
	 * host side drivers though.
	 */
	hcc_params = readl(&oxu->caps->hcc_params);
	if (HCC_64BIT_ADDR(hcc_params))
		writel(0, &oxu->regs->segment);

	oxu->command &= ~(CMD_LRESET | CMD_IAAD | CMD_PSE |
				CMD_ASE | CMD_RESET);
	oxu->command |= CMD_RUN;
	writel(oxu->command, &oxu->regs->command);
	dbg_cmd(oxu, "init", oxu->command);

	/*
	 * Start, enabling full USB 2.0 functionality ... usb 1.1 devices
	 * are explicitly handed to companion controller(s), so no TT is
	 * involved with the root hub.  (Except where one is integrated,
	 * and there's no companion controller unless maybe for USB OTG.)
	 */
	hcd->state = HC_STATE_RUNNING;
	writel(FLAG_CF, &oxu->regs->configured_flag);
	readl(&oxu->regs->command);	/* unblock posted writes */

	temp = HC_VERSION(readl(&oxu->caps->hc_capbase));
	oxu_info(oxu, "USB %x.%x started, quasi-EHCI %x.%02x, driver %s%s\n",
		((oxu->sbrn & 0xf0)>>4), (oxu->sbrn & 0x0f),
		temp >> 8, temp & 0xff, DRIVER_VERSION,
		ignore_oc ? ", overcurrent ignored" : "");

	writel(INTR_MASK, &oxu->regs->intr_enable); /* Turn On Interrupts */

	return 0;
}
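/* Setting FLAG_CF above is what routes the root-hub ports to this
 * controller; while the configured flag is clear (as in oxu_stop() and
 * oxu_shutdown()), the ports fall back to any companion controller.
 */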
static void oxu_stop(struct usb_hcd *hcd)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);

	/* Turn off port power on all root hub ports. */
	ehci_port_power(oxu, 0);

	/* no more interrupts ... */
	del_timer_sync(&oxu->watchdog);

	spin_lock_irq(&oxu->lock);
	if (HC_IS_RUNNING(hcd->state))
		ehci_quiesce(oxu);

	ehci_reset(oxu);
	writel(0, &oxu->regs->intr_enable);
	spin_unlock_irq(&oxu->lock);

	/* let companion controllers work when we aren't */
	writel(0, &oxu->regs->configured_flag);

	/* root hub is shut down separately (first, when possible) */
	spin_lock_irq(&oxu->lock);
	if (oxu->async)
		ehci_work(oxu);
	spin_unlock_irq(&oxu->lock);
	ehci_mem_cleanup(oxu);

	dbg_status(oxu, "oxu_stop completed", readl(&oxu->regs->status));
}
/* Kick in for silicon on any bus (not just pci, etc).
 * This forcibly disables dma and IRQs, helping kexec and other cases
 * where the next system software may expect clean state.
 */
static void oxu_shutdown(struct usb_hcd *hcd)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);

	(void) ehci_halt(oxu);
	ehci_turn_off_all_ports(oxu);

	/* make BIOS/etc use companion controller during reboot */
	writel(0, &oxu->regs->configured_flag);

	/* unblock posted writes */
	readl(&oxu->regs->configured_flag);
}
/* Non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 *
 * urb + dev is in hcd.self.controller.urb_list
 * we're queueing TDs onto software and hardware lists
 *
 * hcd-specific init for hcpriv hasn't been done yet
 *
 * NOTE: control, bulk, and interrupt share the same code to append TDs
 * to a (possibly active) QH, and the same QH scanning code.
 */
static int __oxu_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
				gfp_t mem_flags)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
	struct list_head qtd_list;

	INIT_LIST_HEAD(&qtd_list);

	switch (usb_pipetype(urb->pipe)) {
	default:
		if (!qh_urb_transaction(oxu, urb, &qtd_list, mem_flags))
			return -ENOMEM;
		return submit_async(oxu, urb, &qtd_list, mem_flags);

	case PIPE_INTERRUPT:
		if (!qh_urb_transaction(oxu, urb, &qtd_list, mem_flags))
			return -ENOMEM;
		return intr_submit(oxu, urb, &qtd_list, mem_flags);

	case PIPE_ISOCHRONOUS:
		if (urb->dev->speed == USB_SPEED_HIGH)
			return itd_submit(oxu, urb, mem_flags);
		else
			return sitd_submit(oxu, urb, mem_flags);
	}
}
/* This function is responsible for breaking URBs with big data size
 * into smaller size and processing small urbs in sequence.
 */
static int oxu_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
				gfp_t mem_flags)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
	int num, rem;
	int transfer_buffer_length;
	void *transfer_buffer;
	struct urb *murb;
	int i, ret;

	/* If not bulk pipe just enqueue the URB */
	if (!usb_pipebulk(urb->pipe))
		return __oxu_urb_enqueue(hcd, urb, mem_flags);

	/* Otherwise we should verify the USB transfer buffer size! */
	transfer_buffer = urb->transfer_buffer;
	transfer_buffer_length = urb->transfer_buffer_length;

	num = urb->transfer_buffer_length / 4096;
	rem = urb->transfer_buffer_length % 4096;
	if (rem != 0)
		num++;

	/* If URB is smaller than 4096 bytes just enqueue it! */
	if (num == 1)
		return __oxu_urb_enqueue(hcd, urb, mem_flags);

	/* Ok, we have more job to do! :) */

	for (i = 0; i < num - 1; i++) {
		/* Get a free micro URB: poll till one is received */
		do {
			murb = (struct urb *) oxu_murb_alloc(oxu);
			if (!murb)
				schedule();
		} while (!murb);

		/* Copying the urb */
		memcpy(murb, urb, sizeof(struct urb));

		murb->transfer_buffer_length = 4096;
		murb->transfer_buffer = transfer_buffer + i * 4096;

		/* A NULL complete pointer encodes that this is a micro urb */
		murb->complete = NULL;

		((struct oxu_murb *) murb)->main = urb;
		((struct oxu_murb *) murb)->last = 0;

		/* This loop is to guarantee urb to be processed when there's
		 * not enough resources at a particular time by retrying.
		 */
		do {
			ret = __oxu_urb_enqueue(hcd, murb, mem_flags);
			if (ret)
				schedule();
		} while (ret);
	}

	/* The last urb requires special handling */

	/* Get a free micro URB: poll till one is received */
	do {
		murb = (struct urb *) oxu_murb_alloc(oxu);
		if (!murb)
			schedule();
	} while (!murb);

	/* Copying the urb */
	memcpy(murb, urb, sizeof(struct urb));

	murb->transfer_buffer_length = rem > 0 ? rem : 4096;
	murb->transfer_buffer = transfer_buffer + (num - 1) * 4096;

	/* A NULL complete pointer encodes that this is a micro urb */
	murb->complete = NULL;

	((struct oxu_murb *) murb)->main = urb;
	((struct oxu_murb *) murb)->last = 1;

	do {
		ret = __oxu_urb_enqueue(hcd, murb, mem_flags);
		if (ret)
			schedule();
	} while (ret);

	return ret;
}
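/* Worked example for the splitting above: a 10000-byte bulk URB gives
 * num = 2, rem = 1808, so num becomes 3.  The loop queues two 4096-byte
 * micro urbs at offsets 0 and 4096, and the tail code queues a 1808-byte
 * one at offset 8192 with ->last set, whose completion also completes the
 * main urb.
 */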
/* Remove from hardware lists.
 * Completions normally happen asynchronously
 */
static int oxu_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
	struct ehci_qh *qh;
	unsigned long flags;

	spin_lock_irqsave(&oxu->lock, flags);
	switch (usb_pipetype(urb->pipe)) {
	default:
		qh = (struct ehci_qh *) urb->hcpriv;
		if (!qh)
			break;
		unlink_async(oxu, qh);
		break;

	case PIPE_INTERRUPT:
		qh = (struct ehci_qh *) urb->hcpriv;
		if (!qh)
			break;
		switch (qh->qh_state) {
		case QH_STATE_LINKED:
			intr_deschedule(oxu, qh);
			/* FALL THROUGH */
		case QH_STATE_IDLE:
			qh_completions(oxu, qh);
			break;
		default:
			oxu_dbg(oxu, "bogus qh %p state %d\n",
					qh, qh->qh_state);
			goto done;
		}

		/* reschedule QH iff another request is queued */
		if (!list_empty(&qh->qtd_list)
				&& HC_IS_RUNNING(hcd->state)) {
			status = qh_schedule(oxu, qh);
			spin_unlock_irqrestore(&oxu->lock, flags);

			if (status != 0) {
				/* shouldn't happen often, but ...
				 * FIXME kill those tds' urbs
				 */
				err("can't reschedule qh %p, err %d",
					qh, status);
			}
			return status;
		}
		break;
	}
done:
	spin_unlock_irqrestore(&oxu->lock, flags);
	return 0;
}
/* Bulk qh holds the data toggle */
static void oxu_endpoint_disable(struct usb_hcd *hcd,
					struct usb_host_endpoint *ep)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
	unsigned long flags;
	struct ehci_qh *qh, *tmp;

	/* ASSERT: any requests/urbs are being unlinked */
	/* ASSERT: nobody can be submitting urbs for this any more */

rescan:
	spin_lock_irqsave(&oxu->lock, flags);
	qh = ep->hcpriv;
	if (!qh)
		goto done;

	/* endpoints can be iso streams.  for now, we don't
	 * accelerate iso completions ... so spin a while.
	 */
	if (qh->hw_info1 == 0) {
		oxu_vdbg(oxu, "iso delay\n");
		goto idle_timeout;
	}

	if (!HC_IS_RUNNING(hcd->state))
		qh->qh_state = QH_STATE_IDLE;
	switch (qh->qh_state) {
	case QH_STATE_LINKED:
		for (tmp = oxu->async->qh_next.qh;
				tmp && tmp != qh;
				tmp = tmp->qh_next.qh)
			continue;
		/* periodic qh self-unlinks on empty */
		if (!tmp)
			goto nogood;
		unlink_async(oxu, qh);
		/* FALL THROUGH */
	case QH_STATE_UNLINK:		/* wait for hw to finish? */
idle_timeout:
		spin_unlock_irqrestore(&oxu->lock, flags);
		schedule_timeout_uninterruptible(1);
		goto rescan;
	case QH_STATE_IDLE:		/* fully unlinked */
		if (list_empty(&qh->qtd_list)) {
			qh_put(qh);
			break;
		}
		/* else FALL THROUGH */
	default:
nogood:
		/* caller was supposed to have unlinked any requests;
		 * that's not our job.  just leak this memory.
		 */
		oxu_err(oxu, "qh %p (#%02x) state %d%s\n",
			qh, ep->desc.bEndpointAddress, qh->qh_state,
			list_empty(&qh->qtd_list) ? "" : "(has tds)");
		break;
	}
	ep->hcpriv = NULL;
done:
	spin_unlock_irqrestore(&oxu->lock, flags);
	return;
}
static int oxu_get_frame(struct usb_hcd *hcd)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);

	return (readl(&oxu->regs->frame_index) >> 3) %
		oxu->periodic_size;
}
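/* The hardware frame_index counts micro-frames (8 per 1-ms frame), so the
 * shift by 3 converts it to frames, and the modulo keeps the result inside
 * the periodic schedule, which oxu_hcd_init() sizes to DEFAULT_I_TDPS
 * entries.
 */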
/* Build "status change" packet (one or two bytes) from HC registers */
static int oxu_hub_status_data(struct usb_hcd *hcd, char *buf)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
	u32 temp, mask, status = 0;
	int ports, i, retval = 1;
	unsigned long flags;

	/* if !USB_SUSPEND, root hub timers won't get shut down ... */
	if (!HC_IS_RUNNING(hcd->state))
		return 0;

	/* init status to no-changes */
	buf[0] = 0;
	ports = HCS_N_PORTS(oxu->hcs_params);
	if (ports > 7) {
		buf[1] = 0;
		retval++;
	}

	/* Some boards (mostly VIA?) report bogus overcurrent indications,
	 * causing massive log spam unless we completely ignore them.  It
	 * may be relevant that VIA VT8235 controllers, where PORT_POWER is
	 * always set, seem to clear PORT_OCC and PORT_CSC when writing to
	 * PORT_POWER; that's surprising, but maybe within-spec.
	 */
	if (!ignore_oc)
		mask = PORT_CSC | PORT_PEC | PORT_OCC;
	else
		mask = PORT_CSC | PORT_PEC;

	/* no hub change reports (bit 0) for now (power, ...) */

	/* port N changes (bit N)? */
	spin_lock_irqsave(&oxu->lock, flags);
	for (i = 0; i < ports; i++) {
		temp = readl(&oxu->regs->port_status[i]);

		/*
		 * Return status information even for ports with OWNER set.
		 * Otherwise khubd wouldn't see the disconnect event when a
		 * high-speed device is switched over to the companion
		 * controller by the user.
		 */
		if (!(temp & PORT_CONNECT))
			oxu->reset_done[i] = 0;
		if ((temp & mask) != 0 || ((temp & PORT_RESUME) != 0 &&
				time_after_eq(jiffies, oxu->reset_done[i]))) {
			if (i < 7)
				buf[0] |= 1 << (i + 1);
			else
				buf[1] |= 1 << (i - 7);
			status = STS_PCD;
		}
	}
	spin_unlock_irqrestore(&oxu->lock, flags);
	return status ? retval : 0;
}
/* Returns the speed of a device attached to a port on the root hub. */
static inline unsigned int oxu_port_speed(struct oxu_hcd *oxu,
						unsigned int portsc)
{
	switch ((portsc >> 26) & 3) {
	case 0:
		return 0;
	case 1:
		return USB_PORT_STAT_LOW_SPEED;
	case 2:
	default:
		return USB_PORT_STAT_HIGH_SPEED;
	}
}
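/* Per the switch above, this chip reports the attached device's speed in
 * portsc bits 27:26: 0 means full speed (no extra flag to set), 1 low
 * speed, 2 high speed; the unexpected value 3 is treated as high speed.
 */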
#define PORT_WAKE_BITS	(PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E)

static int oxu_hub_control(struct usb_hcd *hcd, u16 typeReq,
				u16 wValue, u16 wIndex, char *buf, u16 wLength)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
	int ports = HCS_N_PORTS(oxu->hcs_params);
	u32 __iomem *status_reg = &oxu->regs->port_status[wIndex - 1];
	u32 temp, status = 0;
	unsigned long flags;
	int retval = 0;
	unsigned selector;

	spin_lock_irqsave(&oxu->lock, flags);
	switch (typeReq) {
	case ClearHubFeature:
		switch (wValue) {
		case C_HUB_LOCAL_POWER:
		case C_HUB_OVER_CURRENT:
			/* no hub-wide feature/status flags */
			break;
		default:
			goto error;
		}
		break;
	case ClearPortFeature:
		if (!wIndex || wIndex > ports)
			goto error;
		wIndex--;
		temp = readl(status_reg);

		/*
		 * Even if OWNER is set, so the port is owned by the
		 * companion controller, khubd needs to be able to clear
		 * the port-change status bits (especially
		 * USB_PORT_STAT_C_CONNECTION).
		 */

		switch (wValue) {
		case USB_PORT_FEAT_ENABLE:
			writel(temp & ~PORT_PE, status_reg);
			break;
		case USB_PORT_FEAT_C_ENABLE:
			writel((temp & ~PORT_RWC_BITS) | PORT_PEC, status_reg);
			break;
		case USB_PORT_FEAT_SUSPEND:
			if (temp & PORT_RESET)
				goto error;
			if (temp & PORT_SUSPEND) {
				if ((temp & PORT_PE) == 0)
					goto error;
				/* resume signaling for 20 msec */
				temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
				writel(temp | PORT_RESUME, status_reg);
				oxu->reset_done[wIndex] = jiffies
						+ msecs_to_jiffies(20);
			}
			break;
		case USB_PORT_FEAT_C_SUSPEND:
			/* we auto-clear this feature */
			break;
		case USB_PORT_FEAT_POWER:
			if (HCS_PPC(oxu->hcs_params))
				writel(temp & ~(PORT_RWC_BITS | PORT_POWER),
					status_reg);
			break;
		case USB_PORT_FEAT_C_CONNECTION:
			writel((temp & ~PORT_RWC_BITS) | PORT_CSC, status_reg);
			break;
		case USB_PORT_FEAT_C_OVER_CURRENT:
			writel((temp & ~PORT_RWC_BITS) | PORT_OCC, status_reg);
			break;
		case USB_PORT_FEAT_C_RESET:
			/* GetPortStatus clears reset */
			break;
		default:
			goto error;
		}
		readl(&oxu->regs->command);	/* unblock posted write */
		break;
	case GetHubDescriptor:
		ehci_hub_descriptor(oxu, (struct usb_hub_descriptor *)
			buf);
		break;
	case GetHubStatus:
		/* no hub-wide feature/status flags */
		memset(buf, 0, 4);
		break;
	case GetPortStatus:
		if (!wIndex || wIndex > ports)
			goto error;
		wIndex--;
		temp = readl(status_reg);

		/* wPortChange bits */
		if (temp & PORT_CSC)
			status |= USB_PORT_STAT_C_CONNECTION << 16;
		if (temp & PORT_PEC)
			status |= USB_PORT_STAT_C_ENABLE << 16;
		if ((temp & PORT_OCC) && !ignore_oc)
			status |= USB_PORT_STAT_C_OVERCURRENT << 16;

		/* whoever resumes must GetPortStatus to complete it!! */
		if (temp & PORT_RESUME) {

			/* Remote Wakeup received? */
			if (!oxu->reset_done[wIndex]) {
				/* resume signaling for 20 msec */
				oxu->reset_done[wIndex] = jiffies
						+ msecs_to_jiffies(20);
				/* check the port again */
				mod_timer(&oxu_to_hcd(oxu)->rh_timer,
						oxu->reset_done[wIndex]);
			}

			/* resume completed? */
			else if (time_after_eq(jiffies,
					oxu->reset_done[wIndex])) {
				status |= USB_PORT_STAT_C_SUSPEND << 16;
				oxu->reset_done[wIndex] = 0;

				/* stop resume signaling */
				temp = readl(status_reg);
				writel(temp & ~(PORT_RWC_BITS | PORT_RESUME),
					status_reg);
				retval = handshake(oxu, status_reg,
					PORT_RESUME, 0, 2000 /* 2msec */);
				if (retval != 0) {
					oxu_err(oxu,
						"port %d resume error %d\n",
						wIndex + 1, retval);
					goto error;
				}
				temp &= ~(PORT_SUSPEND|PORT_RESUME|(3<<10));
			}
		}

		/* whoever resets must GetPortStatus to complete it!! */
		if ((temp & PORT_RESET)
				&& time_after_eq(jiffies,
					oxu->reset_done[wIndex])) {
			status |= USB_PORT_STAT_C_RESET << 16;
			oxu->reset_done[wIndex] = 0;

			/* force reset to complete */
			writel(temp & ~(PORT_RWC_BITS | PORT_RESET),
					status_reg);
			/* REVISIT: some hardware needs 550+ usec to clear
			 * this bit; seems too long to spin routinely...
			 */
			retval = handshake(oxu, status_reg,
					PORT_RESET, 0, 750);
			if (retval != 0) {
				oxu_err(oxu, "port %d reset error %d\n",
					wIndex + 1, retval);
				goto error;
			}

			/* see what we found out */
			temp = check_reset_complete(oxu, wIndex, status_reg,
					readl(status_reg));
		}

		/* transfer dedicated ports to the companion hc */
		if ((temp & PORT_CONNECT) &&
				test_bit(wIndex, &oxu->companion_ports)) {
			temp &= ~PORT_RWC_BITS;
			temp |= PORT_OWNER;
			writel(temp, status_reg);
			oxu_dbg(oxu, "port %d --> companion\n", wIndex + 1);
			temp = readl(status_reg);
		}

		/*
		 * Even if OWNER is set, there's no harm letting khubd
		 * see the wPortStatus values (they should all be 0 except
		 * for PORT_POWER anyway).
		 */

		if (temp & PORT_CONNECT) {
			status |= USB_PORT_STAT_CONNECTION;
			/* status may be from integrated TT */
			status |= oxu_port_speed(oxu, temp);
		}
		if (temp & PORT_PE)
			status |= USB_PORT_STAT_ENABLE;
		if (temp & (PORT_SUSPEND|PORT_RESUME))
			status |= USB_PORT_STAT_SUSPEND;
		if (temp & PORT_OC)
			status |= USB_PORT_STAT_OVERCURRENT;
		if (temp & PORT_RESET)
			status |= USB_PORT_STAT_RESET;
		if (temp & PORT_POWER)
			status |= USB_PORT_STAT_POWER;

#ifndef OXU_VERBOSE_DEBUG
	if (status & ~0xffff)	/* only if wPortChange is interesting */
#endif
		dbg_port(oxu, "GetStatus", wIndex + 1, temp);
		put_unaligned(cpu_to_le32(status), (__le32 *) buf);
		break;
	case SetHubFeature:
		switch (wValue) {
		case C_HUB_LOCAL_POWER:
		case C_HUB_OVER_CURRENT:
			/* no hub-wide feature/status flags */
			break;
		default:
			goto error;
		}
		break;
	case SetPortFeature:
		selector = wIndex >> 8;
		wIndex &= 0xff;
		if (!wIndex || wIndex > ports)
			goto error;
		wIndex--;
		temp = readl(status_reg);
		if (temp & PORT_OWNER)
			break;

		temp &= ~PORT_RWC_BITS;
		switch (wValue) {
		case USB_PORT_FEAT_SUSPEND:
			if ((temp & PORT_PE) == 0
					|| (temp & PORT_RESET) != 0)
				goto error;
			if (device_may_wakeup(&hcd->self.root_hub->dev))
				temp |= PORT_WAKE_BITS;
			writel(temp | PORT_SUSPEND, status_reg);
			break;
		case USB_PORT_FEAT_POWER:
			if (HCS_PPC(oxu->hcs_params))
				writel(temp | PORT_POWER, status_reg);
			break;
		case USB_PORT_FEAT_RESET:
			if (temp & PORT_RESUME)
				goto error;
			/* line status bits may report this as low speed,
			 * which can be fine if this root hub has a
			 * transaction translator built in.
			 */
			oxu_vdbg(oxu, "port %d reset\n", wIndex + 1);
			temp |= PORT_RESET;
			temp &= ~PORT_PE;

			/*
			 * caller must wait, then call GetPortStatus
			 * usb 2.0 spec says 50 ms resets on root
			 */
			oxu->reset_done[wIndex] = jiffies
					+ msecs_to_jiffies(50);
			writel(temp, status_reg);
			break;

		/* For downstream facing ports (these): one hub port is put
		 * into test mode according to USB2 11.24.2.13, then the hub
		 * must be reset (which for root hub now means rmmod+modprobe,
		 * or else system reboot).  See EHCI 2.3.9 and 4.14 for info
		 * about the EHCI-specific stuff.
		 */
		case USB_PORT_FEAT_TEST:
			if (!selector || selector > 5)
				goto error;
			ehci_quiesce(oxu);
			ehci_halt(oxu);
			temp |= selector << 16;
			writel(temp, status_reg);
			break;

		default:
			goto error;
		}
		readl(&oxu->regs->command);	/* unblock posted writes */
		break;

	default:
error:
		/* "stall" on error */
		retval = -EPIPE;
	}
	spin_unlock_irqrestore(&oxu->lock, flags);
	return retval;
}
#ifdef CONFIG_PM

static int oxu_bus_suspend(struct usb_hcd *hcd)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
	int port;
	int mask;

	oxu_dbg(oxu, "suspend root hub\n");

	if (time_before(jiffies, oxu->next_statechange))
		msleep(5);

	port = HCS_N_PORTS(oxu->hcs_params);
	spin_lock_irq(&oxu->lock);

	/* stop schedules, clean any completed work */
	if (HC_IS_RUNNING(hcd->state)) {
		ehci_quiesce(oxu);
		hcd->state = HC_STATE_QUIESCING;
	}
	oxu->command = readl(&oxu->regs->command);
	if (oxu->reclaim)
		oxu->reclaim_ready = 1;
	ehci_work(oxu);

	/* Unlike other USB host controller types, EHCI doesn't have
	 * any notion of "global" or bus-wide suspend.  The driver has
	 * to manually suspend all the active unsuspended ports, and
	 * then manually resume them in the bus_resume() routine.
	 */
	oxu->bus_suspended = 0;
	while (port--) {
		u32 __iomem *reg = &oxu->regs->port_status[port];
		u32 t1 = readl(reg) & ~PORT_RWC_BITS;
		u32 t2 = t1;

		/* keep track of which ports we suspend */
		if ((t1 & PORT_PE) && !(t1 & PORT_OWNER) &&
				!(t1 & PORT_SUSPEND)) {
			t2 |= PORT_SUSPEND;
			set_bit(port, &oxu->bus_suspended);
		}

		/* enable remote wakeup on all ports */
		if (device_may_wakeup(&hcd->self.root_hub->dev))
			t2 |= PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E;
		else
			t2 &= ~(PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E);

		if (t1 != t2) {
			oxu_vdbg(oxu, "port %d, %08x -> %08x\n",
				port + 1, t1, t2);
			writel(t2, reg);
		}
	}

	/* turn off now-idle HC */
	del_timer_sync(&oxu->watchdog);
	ehci_halt(oxu);
	hcd->state = HC_STATE_SUSPENDED;

	/* allow remote wakeup */
	mask = INTR_MASK;
	if (!device_may_wakeup(&hcd->self.root_hub->dev))
		mask &= ~STS_PCD;
	writel(mask, &oxu->regs->intr_enable);
	readl(&oxu->regs->intr_enable);

	oxu->next_statechange = jiffies + msecs_to_jiffies(10);
	spin_unlock_irq(&oxu->lock);
	return 0;
}
/* Caller has locked the root hub, and should reset/reinit on error */
static int oxu_bus_resume(struct usb_hcd *hcd)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
	u32 temp;
	int i;

	if (time_before(jiffies, oxu->next_statechange))
		msleep(5);
	spin_lock_irq(&oxu->lock);

	/* Ideally we've got a real resume here, and no port's power
	 * was lost.  (For PCI, that means Vaux was maintained.)  But we
	 * could instead be restoring a swsusp snapshot -- so that BIOS was
	 * the last user of the controller, not reset/pm hardware keeping
	 * state we gave to it.
	 */
	temp = readl(&oxu->regs->intr_enable);
	oxu_dbg(oxu, "resume root hub%s\n", temp ? "" : " after power loss");

	/* at least some APM implementations will try to deliver
	 * IRQs right away, so delay them until we're ready.
	 */
	writel(0, &oxu->regs->intr_enable);

	/* re-init operational registers */
	writel(0, &oxu->regs->segment);
	writel(oxu->periodic_dma, &oxu->regs->frame_list);
	writel((u32) oxu->async->qh_dma, &oxu->regs->async_next);

	/* restore CMD_RUN, framelist size, and irq threshold */
	writel(oxu->command, &oxu->regs->command);

	/* Some controller/firmware combinations need a delay during which
	 * they set up the port statuses.  See Bugzilla #8190. */
	mdelay(8);

	/* manually resume the ports we suspended during bus_suspend() */
	i = HCS_N_PORTS(oxu->hcs_params);
	while (i--) {
		temp = readl(&oxu->regs->port_status[i]);
		temp &= ~(PORT_RWC_BITS
			| PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E);
		if (test_bit(i, &oxu->bus_suspended) && (temp & PORT_SUSPEND)) {
			oxu->reset_done[i] = jiffies + msecs_to_jiffies(20);
			temp |= PORT_RESUME;
		}
		writel(temp, &oxu->regs->port_status[i]);
	}
	i = HCS_N_PORTS(oxu->hcs_params);
	mdelay(20);
	while (i--) {
		temp = readl(&oxu->regs->port_status[i]);
		if (test_bit(i, &oxu->bus_suspended) && (temp & PORT_SUSPEND)) {
			temp &= ~(PORT_RWC_BITS | PORT_RESUME);
			writel(temp, &oxu->regs->port_status[i]);
			oxu_vdbg(oxu, "resumed port %d\n", i + 1);
		}
	}
	(void) readl(&oxu->regs->command);

	/* maybe re-activate the schedule(s) */
	temp = 0;
	if (oxu->async->qh_next.qh)
		temp |= CMD_ASE;
	if (oxu->periodic_sched)
		temp |= CMD_PSE;
	if (temp) {
		oxu->command |= temp;
		writel(oxu->command, &oxu->regs->command);
	}

	oxu->next_statechange = jiffies + msecs_to_jiffies(5);
	hcd->state = HC_STATE_RUNNING;

	/* Now we can safely re-enable irqs */
	writel(INTR_MASK, &oxu->regs->intr_enable);

	spin_unlock_irq(&oxu->lock);
	return 0;
}
#else

static int oxu_bus_suspend(struct usb_hcd *hcd)
{
	return 0;
}

static int oxu_bus_resume(struct usb_hcd *hcd)
{
	return 0;
}

#endif	/* CONFIG_PM */
static const struct hc_driver oxu_hc_driver = {
	.description =		"oxu210hp_hcd",
	.product_desc =		"oxu210hp HCD",
	.hcd_priv_size =	sizeof(struct oxu_hcd),

	/*
	 * Generic hardware linkage
	 */
	.irq =			oxu_irq,
	.flags =		HCD_MEMORY | HCD_USB2,

	/*
	 * Basic lifecycle operations
	 */
	.reset =		oxu_reset,
	.start =		oxu_run,
	.stop =			oxu_stop,
	.shutdown =		oxu_shutdown,

	/*
	 * Managing i/o requests and associated device resources
	 */
	.urb_enqueue =		oxu_urb_enqueue,
	.urb_dequeue =		oxu_urb_dequeue,
	.endpoint_disable =	oxu_endpoint_disable,

	/*
	 * Scheduling support
	 */
	.get_frame_number =	oxu_get_frame,

	/*
	 * Root hub support
	 */
	.hub_status_data =	oxu_hub_status_data,
	.hub_control =		oxu_hub_control,
	.bus_suspend =		oxu_bus_suspend,
	.bus_resume =		oxu_bus_resume,
};
static void oxu_configuration(struct platform_device *pdev, void *base)
{
	u32 tmp;

	/* Initialize top level registers.
	 * First write ever
	 */
	oxu_writel(base, OXU_HOSTIFCONFIG, 0x0000037D);
	oxu_writel(base, OXU_SOFTRESET, OXU_SRESET);
	oxu_writel(base, OXU_HOSTIFCONFIG, 0x0000037D);

	tmp = oxu_readl(base, OXU_PIOBURSTREADCTRL);
	oxu_writel(base, OXU_PIOBURSTREADCTRL, tmp | 0x0040);

	oxu_writel(base, OXU_ASO, OXU_SPHPOEN | OXU_OVRCCURPUPDEN |
					OXU_COMPARATOR | OXU_ASO_OP);

	tmp = oxu_readl(base, OXU_CLKCTRL_SET);
	oxu_writel(base, OXU_CLKCTRL_SET, tmp | OXU_SYSCLKEN | OXU_USBOTGCLKEN);

	/* Clear all top interrupt enable */
	oxu_writel(base, OXU_CHIPIRQEN_CLR, 0xff);

	/* Clear all top interrupt status */
	oxu_writel(base, OXU_CHIPIRQSTATUS, 0xff);

	/* Enable all needed top interrupt except OTG SPH core */
	oxu_writel(base, OXU_CHIPIRQEN_SET, OXU_USBSPHLPWUI | OXU_USBOTGLPWUI);
}
static int oxu_verify_id(struct platform_device *pdev, void *base)
{
	u32 id;
	static const char * const bo[] = {
		"reserved",
		"128-pin LQFP",
		"84-pin TFBGA",
		"reserved",
	};

	/* Read controller signature register to find a match */
	id = oxu_readl(base, OXU_DEVICEID);
	dev_info(&pdev->dev, "device ID %x\n", id);
	if ((id & OXU_REV_MASK) != (OXU_REV_2100 << OXU_REV_SHIFT))
		return -1;

	dev_info(&pdev->dev, "found device %x %s (%04x:%04x)\n",
		id >> OXU_REV_SHIFT,
		bo[(id & OXU_BO_MASK) >> OXU_BO_SHIFT],
		(id & OXU_MAJ_REV_MASK) >> OXU_MAJ_REV_SHIFT,
		(id & OXU_MIN_REV_MASK) >> OXU_MIN_REV_SHIFT);

	return 0;
}
static const struct hc_driver oxu_hc_driver;

static struct usb_hcd *oxu_create(struct platform_device *pdev,
				unsigned long memstart, unsigned long memlen,
				void *base, int irq, int otg)
{
	struct device *dev = &pdev->dev;

	struct usb_hcd *hcd;
	struct oxu_hcd *oxu;
	int ret;

	/* Set endian mode and host mode */
	oxu_writel(base + (otg ? OXU_OTG_CORE_OFFSET : OXU_SPH_CORE_OFFSET),
				OXU_USBMODE,
				OXU_CM_HOST_ONLY | OXU_ES_LITTLE | OXU_VBPS);

	hcd = usb_create_hcd(&oxu_hc_driver, dev,
				otg ? "oxu210hp_otg" : "oxu210hp_sph");
	if (!hcd)
		return ERR_PTR(-ENOMEM);

	hcd->rsrc_start = memstart;
	hcd->rsrc_len = memlen;
	hcd->regs = base;
	hcd->irq = irq;
	hcd->state = HC_STATE_HALT;

	oxu = hcd_to_oxu(hcd);
	oxu->is_otg = otg;

	ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
	if (ret < 0)
		return ERR_PTR(ret);

	return hcd;
}
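/* oxu_create() is invoked twice by oxu_init(), once per on-chip core: the
 * otg flag selects the register window, the "oxu210hp_otg"/"oxu210hp_sph"
 * bus name, and oxu->is_otg, which oxu_reset() and oxu_irq() use to tell
 * the two hcds apart.
 */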
static int oxu_init(struct platform_device *pdev,
				unsigned long memstart, unsigned long memlen,
				void *base, int irq)
{
	struct oxu_info *info = platform_get_drvdata(pdev);
	struct usb_hcd *hcd;
	int ret;

	/* First time configuration at start up */
	oxu_configuration(pdev, base);

	ret = oxu_verify_id(pdev, base);
	if (ret) {
		dev_err(&pdev->dev, "no devices found!\n");
		return -ENODEV;
	}

	/* Create the OTG controller */
	hcd = oxu_create(pdev, memstart, memlen, base, irq, 1);
	if (IS_ERR(hcd)) {
		dev_err(&pdev->dev, "cannot create OTG controller!\n");
		ret = PTR_ERR(hcd);
		goto error_create_otg;
	}
	info->hcd[0] = hcd;

	/* Create the SPH host controller */
	hcd = oxu_create(pdev, memstart, memlen, base, irq, 0);
	if (IS_ERR(hcd)) {
		dev_err(&pdev->dev, "cannot create SPH controller!\n");
		ret = PTR_ERR(hcd);
		goto error_create_sph;
	}
	info->hcd[1] = hcd;

	oxu_writel(base, OXU_CHIPIRQEN_SET,
		oxu_readl(base, OXU_CHIPIRQEN_SET) | 3);

	return 0;

error_create_sph:
	usb_remove_hcd(info->hcd[0]);
	usb_put_hcd(info->hcd[0]);

error_create_otg:
	return ret;
}
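/* The final OXU_CHIPIRQEN_SET write above ors in 3; presumably the low two
 * bits are the top-level interrupt enables for the two USB cores brought
 * up just before, though that is an assumption based on this init sequence
 * rather than on the datasheet.
 */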
static int oxu_drv_probe(struct platform_device *pdev)
{
	struct resource *res;
	void *base;
	unsigned long memstart, memlen;
	int irq, ret;
	struct oxu_info *info;

	/*
	 * Get the platform resources
	 */
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		dev_err(&pdev->dev,
			"no IRQ! Check %s setup!\n", dev_name(&pdev->dev));
		return -ENODEV;
	}
	irq = res->start;
	dev_dbg(&pdev->dev, "IRQ resource %d\n", irq);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "no registers address! Check %s setup!\n",
			dev_name(&pdev->dev));
		return -ENODEV;
	}
	memstart = res->start;
	memlen = res->end - res->start + 1;
	dev_dbg(&pdev->dev, "MEM resource %lx-%lx\n", memstart, memlen);
	if (!request_mem_region(memstart, memlen,
				oxu_hc_driver.description)) {
		dev_dbg(&pdev->dev, "memory area already in use\n");
		return -EBUSY;
	}

	ret = set_irq_type(irq, IRQF_TRIGGER_FALLING);
	if (ret) {
		dev_err(&pdev->dev, "error setting irq type\n");
		ret = -EFAULT;
		goto error_set_irq_type;
	}

	base = ioremap(memstart, memlen);
	if (!base) {
		dev_dbg(&pdev->dev, "error mapping memory\n");
		ret = -EFAULT;
		goto error_ioremap;
	}

	/* Allocate a driver data struct to hold useful info for both
	 * SPH & OTG devices
	 */
	info = kzalloc(sizeof(struct oxu_info), GFP_KERNEL);
	if (!info) {
		dev_dbg(&pdev->dev, "error allocating memory\n");
		ret = -EFAULT;
		goto error_alloc;
	}
	platform_set_drvdata(pdev, info);

	ret = oxu_init(pdev, memstart, memlen, base, irq);
	if (ret < 0) {
		dev_dbg(&pdev->dev, "cannot init USB devices\n");
		goto error_init;
	}

	dev_info(&pdev->dev, "devices enabled and running\n");
	platform_set_drvdata(pdev, info);

	return 0;

error_init:
	kfree(info);
	platform_set_drvdata(pdev, NULL);

error_alloc:
	iounmap(base);

error_ioremap:
error_set_irq_type:
	release_mem_region(memstart, memlen);

	dev_err(&pdev->dev, "init %s fail, %d\n", dev_name(&pdev->dev), ret);

	return ret;
}
static void oxu_remove(struct platform_device *pdev, struct usb_hcd *hcd)
{
	usb_remove_hcd(hcd);
	usb_put_hcd(hcd);
}

static int oxu_drv_remove(struct platform_device *pdev)
{
	struct oxu_info *info = platform_get_drvdata(pdev);
	unsigned long memstart = info->hcd[0]->rsrc_start,
			memlen = info->hcd[0]->rsrc_len;
	void *base = info->hcd[0]->regs;

	oxu_remove(pdev, info->hcd[0]);
	oxu_remove(pdev, info->hcd[1]);

	iounmap(base);
	release_mem_region(memstart, memlen);

	kfree(info);
	platform_set_drvdata(pdev, NULL);

	return 0;
}

static void oxu_drv_shutdown(struct platform_device *pdev)
{
	oxu_drv_remove(pdev);
}
#define oxu_drv_suspend	NULL
#define oxu_drv_resume	NULL

static struct platform_driver oxu_driver = {
	.probe		= oxu_drv_probe,
	.remove		= oxu_drv_remove,
	.shutdown	= oxu_drv_shutdown,
	.suspend	= oxu_drv_suspend,
	.resume		= oxu_drv_resume,
	.driver = {
		.name = "oxu210hp-hcd",
		.bus = &platform_bus_type
	}
};

static int __init oxu_module_init(void)
{
	int retval = 0;

	retval = platform_driver_register(&oxu_driver);
	if (retval < 0)
		return retval;

	return retval;
}

static void __exit oxu_module_cleanup(void)
{
	platform_driver_unregister(&oxu_driver);
}

module_init(oxu_module_init);
module_exit(oxu_module_cleanup);

MODULE_DESCRIPTION("Oxford OXU210HP HCD driver - ver. " DRIVER_VERSION);
MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>");
MODULE_LICENSE("GPL");
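/* A minimal sketch of the board-support side this driver expects: a
 * platform device named "oxu210hp-hcd" with one memory resource covering
 * the chip's register/memory window and one (falling-edge) IRQ.  The base
 * address, window size and IRQ number below are board-specific
 * placeholders, not values taken from this file:
 *
 *	static struct resource oxu_resources[] = {
 *		{
 *			.start	= 0x20000000,		// placeholder
 *			.end	= 0x20000000 + SZ_1M - 1,
 *			.flags	= IORESOURCE_MEM,
 *		}, {
 *			.start	= 42,			// placeholder IRQ
 *			.end	= 42,
 *			.flags	= IORESOURCE_IRQ,
 *		},
 *	};
 *
 *	static struct platform_device oxu_device = {
 *		.name		= "oxu210hp-hcd",
 *		.id		= -1,
 *		.resource	= oxu_resources,
 *		.num_resources	= ARRAY_SIZE(oxu_resources),
 *	};
 *
 *	// in board init code: platform_device_register(&oxu_device);
 */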