/*
 * USB Host Controller Driver for IMX21
 *
 * Copyright (C) 2006 Loping Dog Embedded Systems
 * Copyright (C) 2009 Martin Fuzzey
 * Originally written by Jay Monkman <jtm@lopingdog.com>
 * Ported to 2.6.30, debugged and enhanced by Martin Fuzzey
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
/*
 * The i.MX21 USB hardware contains
 * * 32 transfer descriptors (called ETDs)
 * * 4Kb of Data memory
 *
 * The data memory is shared between the host and function controllers
 * (but this driver only supports the host controller)
 *
 * So setting up a transfer involves:
 * * Allocating an ETD
 * * Filling in the ETD with appropriate information
 * * Allocating data memory (and putting the offset in the ETD)
 * * Activating the ETD
 * * Getting an interrupt when done.
 *
 * An ETD is assigned to each active endpoint.
 *
 * Low resource (ETD and Data memory) situations are handled differently for
 * isochronous and non-isochronous transactions :
 *
 * Non ISOC transfers are queued if either ETDs or Data memory are unavailable
 *
 * ISOC transfers use 2 ETDs per endpoint to achieve double buffering.
 * They allocate both ETDs and Data memory during URB submission
 * (and fail if unavailable).
 */
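/*
 * The steps above map onto the helpers below: alloc_etd(),
 * setup_etd_dword0(), alloc_dmem() and activate_etd(), with the done
 * interrupt dispatched in process_etds().
 */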
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/dma-mapping.h>

#include "imx21-hcd.h"
#ifdef DEBUG
#define DEBUG_LOG_FRAME(imx21, etd, event) \
	(etd)->event##_frame = readl((imx21)->regs + USBH_FRMNUB)
#else
#define DEBUG_LOG_FRAME(imx21, etd, event) do { } while (0)
#endif
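/* When built with DEBUG, this records the current frame number in the ETD
 * at each named lifecycle event (activated, disactivated, last_int, ...). */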
static const char hcd_name[] = "imx21-hcd";

static inline struct imx21 *hcd_to_imx21(struct usb_hcd *hcd)
{
	return (struct imx21 *)hcd->hcd_priv;
}
/* =========================================== */
/* Hardware access helpers			*/
/* =========================================== */

static inline void set_register_bits(struct imx21 *imx21, u32 offset, u32 mask)
{
	void __iomem *reg = imx21->regs + offset;
	writel(readl(reg) | mask, reg);
}

static inline void clear_register_bits(struct imx21 *imx21,
	u32 offset, u32 mask)
{
	void __iomem *reg = imx21->regs + offset;
	writel(readl(reg) & ~mask, reg);
}
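/*
 * The status registers handled below toggle a bit when its mask is written,
 * so these helpers read the register first and only write the mask when the
 * bit actually needs to change state.
 */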
static inline void clear_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask)
{
	void __iomem *reg = imx21->regs + offset;

	if (readl(reg) & mask)
		writel(mask, reg);
}

static inline void set_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask)
{
	void __iomem *reg = imx21->regs + offset;

	if (!(readl(reg) & mask))
		writel(mask, reg);
}
static void etd_writel(struct imx21 *imx21, int etd_num, int dword, u32 value)
{
	writel(value, imx21->regs + USB_ETD_DWORD(etd_num, dword));
}

static u32 etd_readl(struct imx21 *imx21, int etd_num, int dword)
{
	return readl(imx21->regs + USB_ETD_DWORD(etd_num, dword));
}
static inline int wrap_frame(int counter)
{
	return counter & 0xFFFF;
}

static inline int frame_after(int frame, int after)
{
	/* handle wrapping like jiffies time_after */
	return (s16)((s16)after - (s16)frame) < 0;
}
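/*
 * Worked example: frame_after(0x0001, 0xFFFE) is true, since the signed
 * 16 bit difference 0xFFFE - 0x0001 is negative; frame 1 therefore counts
 * as "after" frame 0xFFFE across the counter wrap.
 */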
static int imx21_hc_get_frame(struct usb_hcd *hcd)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);

	return wrap_frame(readl(imx21->regs + USBH_FRMNUB));
}

static inline bool unsuitable_for_dma(dma_addr_t addr)
{
	return (addr & 3) != 0;
}
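/* Buffers that are not 32 bit aligned cannot be handed to the DMA engine;
 * activate_etd() falls back to PIO or a bounce buffer for them. */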
#include "imx21-dbg.c"

static void nonisoc_urb_completed_for_etd(
	struct imx21 *imx21, struct etd_priv *etd, int status);
static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb);
static void free_dmem(struct imx21 *imx21, struct etd_priv *etd);
/* =========================================== */
/* ETD management				*/
/* =========================================== */

static int alloc_etd(struct imx21 *imx21)
{
	int i;
	struct etd_priv *etd = imx21->etd;

	for (i = 0; i < USB_NUM_ETD; i++, etd++) {
		if (etd->alloc == 0) {
			memset(etd, 0, sizeof(imx21->etd[0]));
			etd->alloc = 1;
			debug_etd_allocated(imx21);
			return i;
		}
	}
	return -1;
}
static void disactivate_etd(struct imx21 *imx21, int num)
{
	int etd_mask = (1 << num);
	struct etd_priv *etd = &imx21->etd[num];

	writel(etd_mask, imx21->regs + USBH_ETDENCLR);
	clear_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
	writel(etd_mask, imx21->regs + USB_ETDDMACHANLCLR);
	clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);

	etd->active_count = 0;

	DEBUG_LOG_FRAME(imx21, etd, disactivated);
}
static void reset_etd(struct imx21 *imx21, int num)
{
	struct etd_priv *etd = imx21->etd + num;
	int i;

	disactivate_etd(imx21, num);

	for (i = 0; i < 4; i++)
		etd_writel(imx21, num, i, 0);
	etd->urb = NULL;
	etd->ep = NULL;
	etd->td = NULL;
	etd->bounce_buffer = NULL;
}
static void free_etd(struct imx21 *imx21, int num)
{
	if (num < 0)
		return;

	if (num >= USB_NUM_ETD) {
		dev_err(imx21->dev, "BAD etd=%d!\n", num);
		return;
	}
	if (imx21->etd[num].alloc == 0) {
		dev_err(imx21->dev, "ETD %d already free!\n", num);
		return;
	}

	debug_etd_freed(imx21);
	reset_etd(imx21, num);
	memset(&imx21->etd[num], 0, sizeof(imx21->etd[0]));
}
static void setup_etd_dword0(struct imx21 *imx21,
	int etd_num, struct urb *urb, u8 dir, u16 maxpacket)
{
	etd_writel(imx21, etd_num, 0,
		((u32) usb_pipedevice(urb->pipe)) << DW0_ADDRESS |
		((u32) usb_pipeendpoint(urb->pipe) << DW0_ENDPNT) |
		((u32) dir << DW0_DIRECT) |
		((u32) ((urb->dev->speed == USB_SPEED_LOW) ?
			1 : 0) << DW0_SPEED) |
		((u32) fmt_urb_to_etd[usb_pipetype(urb->pipe)] << DW0_FORMAT) |
		((u32) maxpacket << DW0_MAXPKTSIZ));
}
/*
 * Copy buffer to data controller data memory.
 * We cannot use memcpy_toio() because the hardware requires 32bit writes
 */
static void copy_to_dmem(
	struct imx21 *imx21, int dmem_offset, void *src, int count)
{
	void __iomem *dmem = imx21->regs + USBOTG_DMEM + dmem_offset;
	u32 word = 0;
	u8 *p = src;
	int byte = 0;
	int i;

	for (i = 0; i < count; i++) {
		byte = i % 4;
		word += (*p++ << (byte * 8));
		if (byte == 3) {
			writel(word, dmem);
			dmem += 4;
			word = 0;
		}
	}

	if (count && byte != 3)
		writel(word, dmem);
}
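/*
 * Example: count = 6 does one full writel() inside the loop for bytes 0-3,
 * then the trailing writel() flushes bytes 4-5 from the low half of the
 * partially assembled word.
 */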
static void activate_etd(struct imx21 *imx21, int etd_num, u8 dir)
{
	u32 etd_mask = 1 << etd_num;
	struct etd_priv *etd = &imx21->etd[etd_num];

	if (etd->dma_handle && unsuitable_for_dma(etd->dma_handle)) {
		/* For non aligned isoc the condition below is always true */
		if (etd->len <= etd->dmem_size) {
			/* Fits into data memory, use PIO */
			if (dir != TD_DIR_IN) {
				copy_to_dmem(imx21,
						etd->dmem_offset,
						etd->cpu_buffer, etd->len);
			}
			etd->dma_handle = 0;

		} else {
			/* Too big for data memory, use bounce buffer */
			enum dma_data_direction dmadir;

			if (dir == TD_DIR_IN) {
				dmadir = DMA_FROM_DEVICE;
				etd->bounce_buffer = kmalloc(etd->len,
								GFP_ATOMIC);
			} else {
				dmadir = DMA_TO_DEVICE;
				etd->bounce_buffer = kmemdup(etd->cpu_buffer,
								etd->len,
								GFP_ATOMIC);
			}
			if (!etd->bounce_buffer) {
				dev_err(imx21->dev, "failed bounce alloc\n");
				goto err_bounce_alloc;
			}

			etd->dma_handle =
				dma_map_single(imx21->dev,
						etd->bounce_buffer,
						etd->len,
						dmadir);
			if (dma_mapping_error(imx21->dev, etd->dma_handle)) {
				dev_err(imx21->dev, "failed bounce map\n");
				goto err_bounce_map;
			}
		}
	}

	clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);
	set_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
	clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
	clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);

	if (etd->dma_handle) {
		set_register_bits(imx21, USB_ETDDMACHANLCLR, etd_mask);
		clear_toggle_bit(imx21, USBH_XBUFSTAT, etd_mask);
		clear_toggle_bit(imx21, USBH_YBUFSTAT, etd_mask);
		writel(etd->dma_handle, imx21->regs + USB_ETDSMSA(etd_num));
		set_register_bits(imx21, USB_ETDDMAEN, etd_mask);
	} else {
		if (dir != TD_DIR_IN) {
			/* need to set for ZLP and PIO */
			set_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
			set_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
		}
	}

	DEBUG_LOG_FRAME(imx21, etd, activated);

#ifdef DEBUG
	if (!etd->active_count) {
		int i;
		etd->activated_frame = readl(imx21->regs + USBH_FRMNUB);
		etd->disactivated_frame = -1;
		etd->last_int_frame = -1;
		etd->last_req_frame = -1;

		for (i = 0; i < 4; i++)
			etd->submitted_dwords[i] = etd_readl(imx21, etd_num, i);
	}
#endif

	etd->active_count = 1;
	writel(etd_mask, imx21->regs + USBH_ETDENSET);
	return;

err_bounce_map:
	kfree(etd->bounce_buffer);

err_bounce_alloc:
	free_dmem(imx21, etd);
	nonisoc_urb_completed_for_etd(imx21, etd, -ENOMEM);
}
/* =========================================== */
/* Data memory management			*/
/* =========================================== */

static int alloc_dmem(struct imx21 *imx21, unsigned int size,
		      struct usb_host_endpoint *ep)
{
	unsigned int offset = 0;
	struct imx21_dmem_area *area;
	struct imx21_dmem_area *tmp;

	size += (~size + 1) & 0x3; /* Round to 4 byte multiple */

	if (size > DMEM_SIZE) {
		dev_err(imx21->dev, "size=%d > DMEM_SIZE(%d)\n",
			size, DMEM_SIZE);
		return -EINVAL;
	}

	list_for_each_entry(tmp, &imx21->dmem_list, list) {
		if ((size + offset) < offset)
			goto fail;
		if ((size + offset) <= tmp->offset)
			break;
		offset = tmp->size + tmp->offset;
		if ((offset + size) > DMEM_SIZE)
			goto fail;
	}

	area = kmalloc(sizeof(struct imx21_dmem_area), GFP_ATOMIC);
	if (area == NULL)
		return -ENOMEM;

	area->ep = ep;
	area->offset = offset;
	area->size = size;
	list_add_tail(&area->list, &tmp->list);
	debug_dmem_allocated(imx21, size);
	return offset;

fail:
	return -EMSGSIZE;
}
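/*
 * The rounding above turns e.g. a 13 byte request into 16 bytes, since
 * (~13 + 1) & 0x3 == 3. Allocation is first fit: the walk stops at the
 * first gap in the offset-ordered area list that is large enough.
 */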
/* Memory now available for a queued ETD - activate it */
static void activate_queued_etd(struct imx21 *imx21,
	struct etd_priv *etd, u32 dmem_offset)
{
	struct urb_priv *urb_priv = etd->urb->hcpriv;
	int etd_num = etd - &imx21->etd[0];
	u32 maxpacket = etd_readl(imx21, etd_num, 1) >> DW1_YBUFSRTAD;
	u8 dir = (etd_readl(imx21, etd_num, 2) >> DW2_DIRPID) & 0x03;

	dev_dbg(imx21->dev, "activating queued ETD %d now DMEM available\n",
		etd_num);
	etd_writel(imx21, etd_num, 1,
	    ((dmem_offset + maxpacket) << DW1_YBUFSRTAD) | dmem_offset);

	etd->dmem_offset = dmem_offset;
	urb_priv->active = 1;
	activate_etd(imx21, etd_num, dir);
}
static void free_dmem(struct imx21 *imx21, struct etd_priv *etd)
{
	struct imx21_dmem_area *area;
	struct etd_priv *tmp;
	int found = 0;
	int offset;

	if (!etd->dmem_size)
		return;
	etd->dmem_size = 0;

	offset = etd->dmem_offset;
	list_for_each_entry(area, &imx21->dmem_list, list) {
		if (area->offset == offset) {
			debug_dmem_freed(imx21, area->size);
			list_del(&area->list);
			kfree(area);
			found = 1;
			break;
		}
	}

	if (!found) {
		dev_err(imx21->dev,
			"Trying to free unallocated DMEM %d\n", offset);
		return;
	}

	/* Try again to allocate memory for anything we've queued */
	list_for_each_entry_safe(etd, tmp, &imx21->queue_for_dmem, queue) {
		offset = alloc_dmem(imx21, etd->dmem_size, etd->ep);
		if (offset >= 0) {
			list_del(&etd->queue);
			activate_queued_etd(imx21, etd, (u32)offset);
		}
	}
}
static void free_epdmem(struct imx21 *imx21, struct usb_host_endpoint *ep)
{
	struct imx21_dmem_area *area, *tmp;

	list_for_each_entry_safe(area, tmp, &imx21->dmem_list, list) {
		if (area->ep == ep) {
			dev_err(imx21->dev,
				"Active DMEM %d for disabled ep=%p\n",
				area->offset, ep);
			list_del(&area->list);
			kfree(area);
		}
	}
}
/* =========================================== */
/* End handling					*/
/* =========================================== */

/* Endpoint now idle - release its ETD(s) or assign to queued request */
static void ep_idle(struct imx21 *imx21, struct ep_priv *ep_priv)
{
	int i;

	for (i = 0; i < NUM_ISO_ETDS; i++) {
		int etd_num = ep_priv->etd[i];
		struct etd_priv *etd;
		if (etd_num < 0)
			continue;

		etd = &imx21->etd[etd_num];
		ep_priv->etd[i] = -1;

		free_dmem(imx21, etd); /* for isoc */

		if (list_empty(&imx21->queue_for_etd)) {
			free_etd(imx21, etd_num);
			continue;
		}

		dev_dbg(imx21->dev,
			"assigning idle etd %d for queued request\n", etd_num);
		ep_priv = list_first_entry(&imx21->queue_for_etd,
			struct ep_priv, queue);
		list_del(&ep_priv->queue);
		reset_etd(imx21, etd_num);
		ep_priv->waiting_etd = 0;
		ep_priv->etd[i] = etd_num;

		if (list_empty(&ep_priv->ep->urb_list)) {
			dev_err(imx21->dev, "No urb for queued ep!\n");
			continue;
		}
		schedule_nonisoc_etd(imx21, list_first_entry(
			&ep_priv->ep->urb_list, struct urb, urb_list));
	}
}
static void urb_done(struct usb_hcd *hcd, struct urb *urb, int status)
__releases(imx21->lock)
__acquires(imx21->lock)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	struct ep_priv *ep_priv = urb->ep->hcpriv;
	struct urb_priv *urb_priv = urb->hcpriv;

	debug_urb_completed(imx21, urb, status);
	dev_vdbg(imx21->dev, "urb %p done %d\n", urb, status);

	kfree(urb_priv->isoc_td);
	kfree(urb->hcpriv);
	urb->hcpriv = NULL;
	usb_hcd_unlink_urb_from_ep(hcd, urb);
	spin_unlock(&imx21->lock);
	usb_hcd_giveback_urb(hcd, urb, status);
	spin_lock(&imx21->lock);
	if (list_empty(&ep_priv->ep->urb_list))
		ep_idle(imx21, ep_priv);
}
static void nonisoc_urb_completed_for_etd(
	struct imx21 *imx21, struct etd_priv *etd, int status)
{
	struct usb_host_endpoint *ep = etd->ep;

	urb_done(imx21->hcd, etd->urb, status);
	etd->urb = NULL;

	if (!list_empty(&ep->urb_list)) {
		struct urb *urb = list_first_entry(
					&ep->urb_list, struct urb, urb_list);

		dev_vdbg(imx21->dev, "next URB %p\n", urb);
		schedule_nonisoc_etd(imx21, urb);
	}
}
/* =========================================== */
/* ISOC Handling ...				*/
/* =========================================== */

static void schedule_isoc_etds(struct usb_hcd *hcd,
	struct usb_host_endpoint *ep)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	struct ep_priv *ep_priv = ep->hcpriv;
	struct etd_priv *etd;
	struct urb_priv *urb_priv;
	struct td *td;
	int etd_num;
	int i;
	int cur_frame;
	u8 dir;

	for (i = 0; i < NUM_ISO_ETDS; i++) {
too_late:
		if (list_empty(&ep_priv->td_list))
			break;

		etd_num = ep_priv->etd[i];
		if (etd_num < 0)
			continue;

		etd = &imx21->etd[etd_num];
		if (etd->urb)
			continue;

		td = list_entry(ep_priv->td_list.next, struct td, list);
		list_del(&td->list);
		urb_priv = td->urb->hcpriv;

		cur_frame = imx21_hc_get_frame(hcd);
		if (frame_after(cur_frame, td->frame)) {
			dev_dbg(imx21->dev, "isoc too late frame %d > %d\n",
				 cur_frame, td->frame);
			urb_priv->isoc_status = -EXDEV;
			td->urb->iso_frame_desc[
				td->isoc_index].actual_length = 0;
			td->urb->iso_frame_desc[td->isoc_index].status = -EXDEV;
			if (--urb_priv->isoc_remaining == 0)
				urb_done(hcd, td->urb, urb_priv->isoc_status);
			goto too_late;
		}

		urb_priv->active = 1;
		etd->td = td;
		etd->ep = td->ep;
		etd->urb = td->urb;
		etd->len = td->len;
		etd->dma_handle = td->dma_handle;
		etd->cpu_buffer = td->cpu_buffer;

		debug_isoc_submitted(imx21, cur_frame, td);

		dir = usb_pipeout(td->urb->pipe) ? TD_DIR_OUT : TD_DIR_IN;
		setup_etd_dword0(imx21, etd_num, td->urb, dir, etd->dmem_size);
		etd_writel(imx21, etd_num, 1, etd->dmem_offset);
		etd_writel(imx21, etd_num, 2,
			(TD_NOTACCESSED << DW2_COMPCODE) |
			((td->frame & 0xFFFF) << DW2_STARTFRM));
		etd_writel(imx21, etd_num, 3,
			(TD_NOTACCESSED << DW3_COMPCODE0) |
			(td->len << DW3_PKTLEN0));

		activate_etd(imx21, etd_num, dir);
	}
}
static void isoc_etd_done(struct usb_hcd *hcd, int etd_num)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	int etd_mask = 1 << etd_num;
	struct etd_priv *etd = imx21->etd + etd_num;
	struct urb *urb = etd->urb;
	struct urb_priv *urb_priv = urb->hcpriv;
	struct td *td = etd->td;
	struct usb_host_endpoint *ep = etd->ep;
	int isoc_index = td->isoc_index;
	unsigned int pipe = urb->pipe;
	int dir_in = usb_pipein(pipe);
	int cc;
	int bytes_xfrd;

	disactivate_etd(imx21, etd_num);

	cc = (etd_readl(imx21, etd_num, 3) >> DW3_COMPCODE0) & 0xf;
	bytes_xfrd = etd_readl(imx21, etd_num, 3) & 0x3ff;

	/* Input doesn't always fill the buffer, don't generate an error
	 * when this happens.
	 */
	if (dir_in && (cc == TD_DATAUNDERRUN))
		cc = TD_CC_NOERROR;

	if (cc == TD_NOTACCESSED)
		bytes_xfrd = 0;

	debug_isoc_completed(imx21,
		imx21_hc_get_frame(hcd), td, cc, bytes_xfrd);
	if (cc) {
		urb_priv->isoc_status = -EXDEV;
		dev_dbg(imx21->dev,
			"bad iso cc=0x%X frame=%d sched frame=%d "
			"cnt=%d len=%d urb=%p etd=%d index=%d\n",
			cc, imx21_hc_get_frame(hcd), td->frame,
			bytes_xfrd, td->len, urb, etd_num, isoc_index);
	}

	if (dir_in) {
		clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
		if (!etd->dma_handle)
			memcpy_fromio(etd->cpu_buffer,
				imx21->regs + USBOTG_DMEM + etd->dmem_offset,
				bytes_xfrd);
	}

	urb->actual_length += bytes_xfrd;
	urb->iso_frame_desc[isoc_index].actual_length = bytes_xfrd;
	urb->iso_frame_desc[isoc_index].status = cc_to_error[cc];

	etd->td = NULL;
	etd->urb = NULL;
	etd->ep = NULL;

	if (--urb_priv->isoc_remaining == 0)
		urb_done(hcd, urb, urb_priv->isoc_status);

	schedule_isoc_etds(hcd, ep);
}
static struct ep_priv *alloc_isoc_ep(
	struct imx21 *imx21, struct usb_host_endpoint *ep)
{
	struct ep_priv *ep_priv;
	int i;

	ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC);
	if (ep_priv == NULL)
		return NULL;

	for (i = 0; i < NUM_ISO_ETDS; i++)
		ep_priv->etd[i] = -1;

	INIT_LIST_HEAD(&ep_priv->td_list);

	ep_priv->ep = ep;
	ep->hcpriv = ep_priv;
	return ep_priv;
}
static int alloc_isoc_etds(struct imx21 *imx21, struct ep_priv *ep_priv)
{
	int i, j;
	int etd_num;

	/* Allocate the ETDs if required */
	for (i = 0; i < NUM_ISO_ETDS; i++) {
		if (ep_priv->etd[i] < 0) {
			etd_num = alloc_etd(imx21);
			if (etd_num < 0)
				goto alloc_etd_failed;

			ep_priv->etd[i] = etd_num;
			imx21->etd[etd_num].ep = ep_priv->ep;
		}
	}
	return 0;

alloc_etd_failed:
	dev_err(imx21->dev, "isoc: Couldn't allocate etd\n");
	for (j = 0; j < i; j++) {
		free_etd(imx21, ep_priv->etd[j]);
		ep_priv->etd[j] = -1;
	}
	return -ENOMEM;
}
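/*
 * Each isochronous endpoint is given NUM_ISO_ETDS ETDs so that transfers
 * can be double buffered, as described in the comment at the top of this
 * file.
 */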
static int imx21_hc_urb_enqueue_isoc(struct usb_hcd *hcd,
				     struct usb_host_endpoint *ep,
				     struct urb *urb, gfp_t mem_flags)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	struct urb_priv *urb_priv;
	unsigned long flags;
	struct ep_priv *ep_priv;
	struct td *td = NULL;
	int i;
	int ret;
	int cur_frame;
	u16 maxpacket;

	urb_priv = kzalloc(sizeof(struct urb_priv), mem_flags);
	if (urb_priv == NULL)
		return -ENOMEM;

	urb_priv->isoc_td = kzalloc(
		sizeof(struct td) * urb->number_of_packets, mem_flags);
	if (urb_priv->isoc_td == NULL) {
		ret = -ENOMEM;
		goto alloc_td_failed;
	}

	spin_lock_irqsave(&imx21->lock, flags);

	if (ep->hcpriv == NULL) {
		ep_priv = alloc_isoc_ep(imx21, ep);
		if (ep_priv == NULL) {
			ret = -ENOMEM;
			goto alloc_ep_failed;
		}
	} else {
		ep_priv = ep->hcpriv;
	}

	ret = alloc_isoc_etds(imx21, ep_priv);
	if (ret)
		goto alloc_etd_failed;

	ret = usb_hcd_link_urb_to_ep(hcd, urb);
	if (ret)
		goto link_failed;

	urb->status = -EINPROGRESS;
	urb->actual_length = 0;
	urb->error_count = 0;
	urb->hcpriv = urb_priv;
	urb_priv->ep = ep;

	/* allocate data memory for largest packets if not already done */
	maxpacket = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
	for (i = 0; i < NUM_ISO_ETDS; i++) {
		struct etd_priv *etd = &imx21->etd[ep_priv->etd[i]];

		if (etd->dmem_size > 0 && etd->dmem_size < maxpacket) {
			/* not sure if this can really occur.... */
			dev_err(imx21->dev, "increasing isoc buffer %d->%d\n",
				etd->dmem_size, maxpacket);
			ret = -EMSGSIZE;
			goto alloc_dmem_failed;
		}

		if (etd->dmem_size == 0) {
			etd->dmem_offset = alloc_dmem(imx21, maxpacket, ep);
			if (etd->dmem_offset < 0) {
				dev_dbg(imx21->dev, "failed alloc isoc dmem\n");
				ret = -EAGAIN;
				goto alloc_dmem_failed;
			}
			etd->dmem_size = maxpacket;
		}
	}

	/* calculate frame */
	cur_frame = imx21_hc_get_frame(hcd);
	if (urb->transfer_flags & URB_ISO_ASAP) {
		if (list_empty(&ep_priv->td_list))
			urb->start_frame = cur_frame + 5;
		else
			urb->start_frame = list_entry(
				ep_priv->td_list.prev,
				struct td, list)->frame + urb->interval;
	}
	urb->start_frame = wrap_frame(urb->start_frame);
	if (frame_after(cur_frame, urb->start_frame)) {
		dev_dbg(imx21->dev,
			"enqueue: adjusting iso start %d (cur=%d) asap=%d\n",
			urb->start_frame, cur_frame,
			(urb->transfer_flags & URB_ISO_ASAP) != 0);
		urb->start_frame = wrap_frame(cur_frame + 1);
	}

	/* set up transfers */
	td = urb_priv->isoc_td;
	for (i = 0; i < urb->number_of_packets; i++, td++) {
		unsigned int offset = urb->iso_frame_desc[i].offset;
		td->ep = ep;
		td->urb = urb;
		td->len = urb->iso_frame_desc[i].length;
		td->isoc_index = i;
		td->frame = wrap_frame(urb->start_frame + urb->interval * i);
		td->dma_handle = urb->transfer_dma + offset;
		td->cpu_buffer = urb->transfer_buffer + offset;
		list_add_tail(&td->list, &ep_priv->td_list);
	}

	urb_priv->isoc_remaining = urb->number_of_packets;
	dev_vdbg(imx21->dev, "setup %d packets for iso frame %d->%d\n",
		urb->number_of_packets, urb->start_frame, td->frame);

	debug_urb_submitted(imx21, urb);
	schedule_isoc_etds(hcd, ep);

	spin_unlock_irqrestore(&imx21->lock, flags);
	return 0;

alloc_dmem_failed:
	usb_hcd_unlink_urb_from_ep(hcd, urb);

link_failed:
alloc_etd_failed:
alloc_ep_failed:
	spin_unlock_irqrestore(&imx21->lock, flags);
	kfree(urb_priv->isoc_td);

alloc_td_failed:
	kfree(urb_priv);
	return ret;
}
static void dequeue_isoc_urb(struct imx21 *imx21,
	struct urb *urb, struct ep_priv *ep_priv)
{
	struct urb_priv *urb_priv = urb->hcpriv;
	struct td *td, *tmp;
	int i;

	if (urb_priv->active) {
		for (i = 0; i < NUM_ISO_ETDS; i++) {
			int etd_num = ep_priv->etd[i];
			if (etd_num != -1 && imx21->etd[etd_num].urb == urb) {
				struct etd_priv *etd = imx21->etd + etd_num;

				reset_etd(imx21, etd_num);
				free_dmem(imx21, etd);
			}
		}
	}

	list_for_each_entry_safe(td, tmp, &ep_priv->td_list, list) {
		if (td->urb == urb) {
			dev_vdbg(imx21->dev, "removing td %p\n", td);
			list_del(&td->list);
		}
	}
}
/* =========================================== */
/* NON ISOC Handling ...			*/
/* =========================================== */

static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb)
{
	unsigned int pipe = urb->pipe;
	struct urb_priv *urb_priv = urb->hcpriv;
	struct ep_priv *ep_priv = urb_priv->ep->hcpriv;
	int state = urb_priv->state;
	int etd_num = ep_priv->etd[0];
	struct etd_priv *etd;
	u32 count;
	u16 etd_buf_size;
	u16 maxpacket;
	u8 dir;
	u8 bufround;
	u8 datatoggle;
	u8 interval = 0;
	u8 relpolpos = 0;

	if (etd_num < 0) {
		dev_err(imx21->dev, "No valid ETD\n");
		return;
	}
	if (readl(imx21->regs + USBH_ETDENSET) & (1 << etd_num))
		dev_err(imx21->dev, "submitting to active ETD %d\n", etd_num);

	etd = &imx21->etd[etd_num];
	maxpacket = usb_maxpacket(urb->dev, pipe, usb_pipeout(pipe));
	if (!maxpacket)
		maxpacket = 8;

	if (usb_pipecontrol(pipe) && (state != US_CTRL_DATA)) {
		if (state == US_CTRL_SETUP) {
			dir = TD_DIR_SETUP;
			if (unsuitable_for_dma(urb->setup_dma))
				unmap_urb_setup_for_dma(imx21->hcd, urb);
			etd->dma_handle = urb->setup_dma;
			etd->cpu_buffer = urb->setup_packet;
			bufround = 0;
			count = 8;
			datatoggle = TD_TOGGLE_DATA0;
		} else {	/* US_CTRL_ACK */
			dir = usb_pipeout(pipe) ? TD_DIR_IN : TD_DIR_OUT;
			bufround = 0;
			count = 0;
			datatoggle = TD_TOGGLE_DATA1;
		}
	} else {
		dir = usb_pipeout(pipe) ? TD_DIR_OUT : TD_DIR_IN;
		bufround = (dir == TD_DIR_IN) ? 1 : 0;
		if (unsuitable_for_dma(urb->transfer_dma))
			unmap_urb_for_dma(imx21->hcd, urb);

		etd->dma_handle = urb->transfer_dma;
		etd->cpu_buffer = urb->transfer_buffer;
		if (usb_pipebulk(pipe) && (state == US_BULK0))
			count = 0;
		else
			count = urb->transfer_buffer_length;

		if (usb_pipecontrol(pipe)) {
			datatoggle = TD_TOGGLE_DATA1;
		} else {
			if (usb_gettoggle(
					urb->dev,
					usb_pipeendpoint(urb->pipe),
					usb_pipeout(urb->pipe)))
				datatoggle = TD_TOGGLE_DATA1;
			else
				datatoggle = TD_TOGGLE_DATA0;
		}
	}

	etd->urb = urb;
	etd->ep = urb_priv->ep;
	etd->len = count;

	if (usb_pipeint(pipe)) {
		interval = urb->interval;
		relpolpos = (readl(imx21->regs + USBH_FRMNUB) + 1) & 0xff;
	}

	/* Write ETD to device memory */
	setup_etd_dword0(imx21, etd_num, urb, dir, maxpacket);

	etd_writel(imx21, etd_num, 2,
		(u32) interval << DW2_POLINTERV |
		((u32) relpolpos << DW2_RELPOLPOS) |
		((u32) dir << DW2_DIRPID) |
		((u32) bufround << DW2_BUFROUND) |
		((u32) datatoggle << DW2_DATATOG) |
		((u32) TD_NOTACCESSED << DW2_COMPCODE));

	/* DMA will always transfer buffer size even if TOBYCNT in DWORD3
	   is smaller. Make sure we don't overrun the buffer!
	 */
	if (count && count < maxpacket)
		etd_buf_size = count;
	else
		etd_buf_size = maxpacket;

	etd_writel(imx21, etd_num, 3,
		((u32) (etd_buf_size - 1) << DW3_BUFSIZE) | (u32) count);

	if (!count)
		etd->dma_handle = 0;

	/* allocate x and y buffer space at once */
	etd->dmem_size = (count > maxpacket) ? maxpacket * 2 : maxpacket;
	etd->dmem_offset = alloc_dmem(imx21, etd->dmem_size, urb_priv->ep);
	if (etd->dmem_offset < 0) {
		/* Setup everything we can in HW and update when we get DMEM */
		etd_writel(imx21, etd_num, 1, (u32)maxpacket << 16);

		dev_dbg(imx21->dev, "Queuing etd %d for DMEM\n", etd_num);
		debug_urb_queued_for_dmem(imx21, urb);
		list_add_tail(&etd->queue, &imx21->queue_for_dmem);
		return;
	}

	etd_writel(imx21, etd_num, 1,
		(((u32) etd->dmem_offset + (u32) maxpacket) << DW1_YBUFSRTAD) |
		(u32) etd->dmem_offset);

	urb_priv->active = 1;

	/* enable the ETD to kick off transfer */
	dev_vdbg(imx21->dev, "Activating etd %d for %d bytes %s\n",
		etd_num, count, dir != TD_DIR_IN ? "out" : "in");
	activate_etd(imx21, etd_num, dir);
}
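/*
 * Control transfers step through urb_priv->state
 * (US_CTRL_SETUP -> [US_CTRL_DATA ->] US_CTRL_ACK); schedule_nonisoc_etd()
 * is re-run for each stage until the completion handler below marks the
 * ETD done.
 */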
static void nonisoc_etd_done(struct usb_hcd *hcd, int etd_num)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	struct etd_priv *etd = &imx21->etd[etd_num];
	struct urb *urb = etd->urb;
	u32 etd_mask = 1 << etd_num;
	struct urb_priv *urb_priv = urb->hcpriv;
	int dir;
	int cc;
	int bytes_xfrd;
	int etd_done;

	disactivate_etd(imx21, etd_num);

	dir = (etd_readl(imx21, etd_num, 0) >> DW0_DIRECT) & 0x3;
	cc = (etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE) & 0xf;
	bytes_xfrd = etd->len - (etd_readl(imx21, etd_num, 3) & 0x1fffff);

	/* save toggle carry */
	usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
		      usb_pipeout(urb->pipe),
		      (etd_readl(imx21, etd_num, 0) >> DW0_TOGCRY) & 0x1);

	if (dir == TD_DIR_IN) {
		clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
		clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);

		if (etd->bounce_buffer) {
			memcpy(etd->cpu_buffer, etd->bounce_buffer, bytes_xfrd);
			dma_unmap_single(imx21->dev,
				etd->dma_handle, etd->len, DMA_FROM_DEVICE);
		} else if (!etd->dma_handle && bytes_xfrd) {/* PIO */
			memcpy_fromio(etd->cpu_buffer,
				imx21->regs + USBOTG_DMEM + etd->dmem_offset,
				bytes_xfrd);
		}
	}

	kfree(etd->bounce_buffer);
	etd->bounce_buffer = NULL;
	free_dmem(imx21, etd);

	urb->error_count = 0;
	if (!(urb->transfer_flags & URB_SHORT_NOT_OK)
			&& (cc == TD_DATAUNDERRUN))
		cc = TD_CC_NOERROR;

	if (cc != 0)
		dev_vdbg(imx21->dev, "cc is 0x%x\n", cc);

	etd_done = (cc_to_error[cc] != 0);	/* stop if error */

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		switch (urb_priv->state) {
		case US_CTRL_SETUP:
			if (urb->transfer_buffer_length > 0)
				urb_priv->state = US_CTRL_DATA;
			else
				urb_priv->state = US_CTRL_ACK;
			break;
		case US_CTRL_DATA:
			urb->actual_length += bytes_xfrd;
			urb_priv->state = US_CTRL_ACK;
			break;
		case US_CTRL_ACK:
			etd_done = 1;
			break;
		default:
			dev_err(imx21->dev,
				"Invalid pipe state %d\n", urb_priv->state);
			etd_done = 1;
			break;
		}
		break;

	case PIPE_BULK:
		urb->actual_length += bytes_xfrd;
		if ((urb_priv->state == US_BULK)
		    && (urb->transfer_flags & URB_ZERO_PACKET)
		    && urb->transfer_buffer_length > 0
		    && ((urb->transfer_buffer_length %
			 usb_maxpacket(urb->dev, urb->pipe,
				       usb_pipeout(urb->pipe))) == 0)) {
			/* need a 0-packet */
			urb_priv->state = US_BULK0;
			etd_done = 0;
		} else {
			etd_done = 1;
		}
		break;

	case PIPE_INTERRUPT:
		urb->actual_length += bytes_xfrd;
		etd_done = 1;
		break;
	}

	if (etd_done)
		nonisoc_urb_completed_for_etd(imx21, etd, cc_to_error[cc]);
	else {
		dev_vdbg(imx21->dev, "next state=%d\n", urb_priv->state);
		schedule_nonisoc_etd(imx21, urb);
	}
}
static struct ep_priv *alloc_ep(void)
{
	int i;
	struct ep_priv *ep_priv;

	ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC);
	if (!ep_priv)
		return NULL;

	for (i = 0; i < NUM_ISO_ETDS; ++i)
		ep_priv->etd[i] = -1;

	return ep_priv;
}
static int imx21_hc_urb_enqueue(struct usb_hcd *hcd,
				struct urb *urb, gfp_t mem_flags)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	struct usb_host_endpoint *ep = urb->ep;
	struct urb_priv *urb_priv;
	struct ep_priv *ep_priv;
	struct etd_priv *etd;
	int ret;
	unsigned long flags;

	dev_vdbg(imx21->dev,
		"enqueue urb=%p ep=%p len=%d "
		"buffer=%p dma=%08X setupBuf=%p setupDma=%08X\n",
		urb, ep,
		urb->transfer_buffer_length,
		urb->transfer_buffer, urb->transfer_dma,
		urb->setup_packet, urb->setup_dma);

	if (usb_pipeisoc(urb->pipe))
		return imx21_hc_urb_enqueue_isoc(hcd, ep, urb, mem_flags);

	urb_priv = kzalloc(sizeof(struct urb_priv), mem_flags);
	if (!urb_priv)
		return -ENOMEM;

	spin_lock_irqsave(&imx21->lock, flags);

	ep_priv = ep->hcpriv;
	if (ep_priv == NULL) {
		ep_priv = alloc_ep();
		if (!ep_priv) {
			ret = -ENOMEM;
			goto failed_alloc_ep;
		}
		ep->hcpriv = ep_priv;
		ep_priv->ep = ep;
	}

	ret = usb_hcd_link_urb_to_ep(hcd, urb);
	if (ret)
		goto failed_link;

	urb->status = -EINPROGRESS;
	urb->actual_length = 0;
	urb->error_count = 0;
	urb->hcpriv = urb_priv;
	urb_priv->ep = ep;

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		urb_priv->state = US_CTRL_SETUP;
		break;
	case PIPE_BULK:
		urb_priv->state = US_BULK;
		break;
	}

	debug_urb_submitted(imx21, urb);
	if (ep_priv->etd[0] < 0) {
		if (ep_priv->waiting_etd) {
			dev_dbg(imx21->dev,
				"no ETD available already queued %p\n",
				ep_priv);
			debug_urb_queued_for_etd(imx21, urb);
			goto out;
		}
		ep_priv->etd[0] = alloc_etd(imx21);
		if (ep_priv->etd[0] < 0) {
			dev_dbg(imx21->dev,
				"no ETD available queueing %p\n", ep_priv);
			debug_urb_queued_for_etd(imx21, urb);
			list_add_tail(&ep_priv->queue, &imx21->queue_for_etd);
			ep_priv->waiting_etd = 1;
			goto out;
		}
	}

	/* Schedule if no URB already active for this endpoint */
	etd = &imx21->etd[ep_priv->etd[0]];
	if (etd->urb == NULL) {
		DEBUG_LOG_FRAME(imx21, etd, last_req);
		schedule_nonisoc_etd(imx21, urb);
	}

out:
	spin_unlock_irqrestore(&imx21->lock, flags);
	return 0;

failed_link:
failed_alloc_ep:
	spin_unlock_irqrestore(&imx21->lock, flags);
	kfree(urb_priv);
	return ret;
}
static int imx21_hc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
				int status)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	unsigned long flags;
	struct usb_host_endpoint *ep;
	struct ep_priv *ep_priv;
	struct urb_priv *urb_priv = urb->hcpriv;
	int ret = -EINVAL;

	dev_vdbg(imx21->dev, "dequeue urb=%p iso=%d status=%d\n",
		urb, usb_pipeisoc(urb->pipe), status);

	spin_lock_irqsave(&imx21->lock, flags);

	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret)
		goto fail;
	ep = urb_priv->ep;
	ep_priv = ep->hcpriv;

	debug_urb_unlinked(imx21, urb);

	if (usb_pipeisoc(urb->pipe)) {
		dequeue_isoc_urb(imx21, urb, ep_priv);
		schedule_isoc_etds(hcd, ep);
	} else if (urb_priv->active) {
		int etd_num = ep_priv->etd[0];
		if (etd_num != -1) {
			struct etd_priv *etd = &imx21->etd[etd_num];

			disactivate_etd(imx21, etd_num);
			free_dmem(imx21, etd);
			etd->urb = NULL;
			kfree(etd->bounce_buffer);
			etd->bounce_buffer = NULL;
		}
	}

	urb_done(hcd, urb, status);

	spin_unlock_irqrestore(&imx21->lock, flags);
	return 0;

fail:
	spin_unlock_irqrestore(&imx21->lock, flags);
	return ret;
}
/* =========================================== */
/* Interrupt dispatch				*/
/* =========================================== */

static void process_etds(struct usb_hcd *hcd, struct imx21 *imx21, int sof)
{
	int etd_num;
	int enable_sof_int = 0;
	unsigned long flags;

	spin_lock_irqsave(&imx21->lock, flags);

	for (etd_num = 0; etd_num < USB_NUM_ETD; etd_num++) {
		u32 etd_mask = 1 << etd_num;
		u32 enabled = readl(imx21->regs + USBH_ETDENSET) & etd_mask;
		u32 done = readl(imx21->regs + USBH_ETDDONESTAT) & etd_mask;
		struct etd_priv *etd = &imx21->etd[etd_num];

		if (done) {
			DEBUG_LOG_FRAME(imx21, etd, last_int);
		} else {
/*
 * Kludge warning!
 *
 * When multiple transfers are using the bus we sometimes get into a state
 * where the transfer has completed (the CC field of the ETD is != 0x0F),
 * the ETD has self disabled but the ETDDONESTAT flag is not set
 * (and hence no interrupt occurs).
 * This causes the transfer in question to hang.
 * The kludge below checks for this condition at each SOF and processes any
 * blocked ETDs (after an arbitrary 10 frame wait)
 *
 * With a single active transfer the usbtest test suite will run for days
 * without the kludge.
 * With other bus activity (eg mass storage) even just test1 will hang without
 * the kludge.
 */
			u32 dword0;
			int cc;

			if (etd->active_count && !enabled) /* suspicious... */
				enable_sof_int = 1;

			if (!sof || enabled || !etd->active_count)
				continue;

			cc = etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE;
			if (cc == TD_NOTACCESSED)
				continue;

			if (++etd->active_count < 10)
				continue;

			dword0 = etd_readl(imx21, etd_num, 0);
			dev_dbg(imx21->dev,
				"unblock ETD %d dev=0x%X ep=0x%X cc=0x%02X!\n",
				etd_num, dword0 & 0x7F,
				(dword0 >> DW0_ENDPNT) & 0x0F,
				cc);

#ifdef DEBUG
			dev_dbg(imx21->dev,
				"frame: act=%d disact=%d"
				" int=%d req=%d cur=%d\n",
				etd->activated_frame,
				etd->disactivated_frame,
				etd->last_int_frame,
				etd->last_req_frame,
				readl(imx21->regs + USBH_FRMNUB));
			imx21->debug_unblocks++;
#endif
			etd->active_count = 0;
/* End of kludge */
		}

		if (etd->ep == NULL || etd->urb == NULL) {
			dev_dbg(imx21->dev,
				"Interrupt for unexpected etd %d"
				" ep=%p urb=%p\n",
				etd_num, etd->ep, etd->urb);
			disactivate_etd(imx21, etd_num);
			continue;
		}

		if (usb_pipeisoc(etd->urb->pipe))
			isoc_etd_done(hcd, etd_num);
		else
			nonisoc_etd_done(hcd, etd_num);
	}

	/* only enable SOF interrupt if it may be needed for the kludge */
	if (enable_sof_int)
		set_register_bits(imx21, USBH_SYSIEN, USBH_SYSIEN_SOFINT);
	else
		clear_register_bits(imx21, USBH_SYSIEN, USBH_SYSIEN_SOFINT);

	spin_unlock_irqrestore(&imx21->lock, flags);
}
static irqreturn_t imx21_irq(struct usb_hcd *hcd)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	u32 ints = readl(imx21->regs + USBH_SYSISR);

	if (ints & USBH_SYSIEN_HERRINT)
		dev_dbg(imx21->dev, "Scheduling error\n");

	if (ints & USBH_SYSIEN_SORINT)
		dev_dbg(imx21->dev, "Scheduling overrun\n");

	if (ints & (USBH_SYSISR_DONEINT | USBH_SYSISR_SOFINT))
		process_etds(hcd, imx21, ints & USBH_SYSISR_SOFINT);

	writel(ints, imx21->regs + USBH_SYSISR);
	return IRQ_HANDLED;
}
static void imx21_hc_endpoint_disable(struct usb_hcd *hcd,
				      struct usb_host_endpoint *ep)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	unsigned long flags;
	struct ep_priv *ep_priv;
	int i;

	if (ep == NULL)
		return;

	spin_lock_irqsave(&imx21->lock, flags);
	ep_priv = ep->hcpriv;
	dev_vdbg(imx21->dev, "disable ep=%p, ep->hcpriv=%p\n", ep, ep_priv);

	if (!list_empty(&ep->urb_list))
		dev_dbg(imx21->dev, "ep's URB list is not empty\n");

	if (ep_priv != NULL) {
		for (i = 0; i < NUM_ISO_ETDS; i++) {
			if (ep_priv->etd[i] > -1)
				dev_dbg(imx21->dev, "free etd %d for disable\n",
					ep_priv->etd[i]);

			free_etd(imx21, ep_priv->etd[i]);
		}
		kfree(ep_priv);
		ep->hcpriv = NULL;
	}

	for (i = 0; i < USB_NUM_ETD; i++) {
		if (imx21->etd[i].alloc && imx21->etd[i].ep == ep) {
			dev_err(imx21->dev,
				"Active etd %d for disabled ep=%p!\n", i, ep);
			free_etd(imx21, i);
		}
	}
	free_epdmem(imx21, ep);
	spin_unlock_irqrestore(&imx21->lock, flags);
}
/* =========================================== */
/* Hub handling					*/
/* =========================================== */

static int get_hub_descriptor(struct usb_hcd *hcd,
			      struct usb_hub_descriptor *desc)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	desc->bDescriptorType = 0x29;	/* HUB descriptor */
	desc->bHubContrCurrent = 0;

	desc->bNbrPorts = readl(imx21->regs + USBH_ROOTHUBA)
		& USBH_ROOTHUBA_NDNSTMPRT_MASK;
	desc->bDescLength = 9;
	desc->bPwrOn2PwrGood = 0;
	desc->wHubCharacteristics = (__force __u16) cpu_to_le16(
		0x0002 |	/* No power switching */
		0x0010 |	/* No over current protection */
		0);

	desc->bitmap[0] = 1 << 1;
	desc->bitmap[1] = ~0;
	return 0;
}
static int imx21_hc_hub_status_data(struct usb_hcd *hcd, char *buf)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	int ports;
	int changed = 0;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&imx21->lock, flags);
	ports = readl(imx21->regs + USBH_ROOTHUBA)
		& USBH_ROOTHUBA_NDNSTMPRT_MASK;
	if (ports > 7) {
		ports = 7;
		dev_err(imx21->dev, "ports %d > 7\n", ports);
	}
	for (i = 0; i < ports; i++) {
		if (readl(imx21->regs + USBH_PORTSTAT(i)) &
			(USBH_PORTSTAT_CONNECTSC |
			USBH_PORTSTAT_PRTENBLSC |
			USBH_PORTSTAT_PRTSTATSC |
			USBH_PORTSTAT_OVRCURIC |
			USBH_PORTSTAT_PRTRSTSC)) {

			changed = 1;
			buf[0] |= 1 << (i + 1);
		}
	}
	spin_unlock_irqrestore(&imx21->lock, flags);

	if (changed)
		dev_info(imx21->dev, "Hub status changed\n");
	return changed;
}
static int imx21_hc_hub_control(struct usb_hcd *hcd,
				u16 typeReq,
				u16 wValue, u16 wIndex, char *buf, u16 wLength)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	int rc = 0;
	u32 status_write = 0;

	switch (typeReq) {
	case ClearHubFeature:
		dev_dbg(imx21->dev, "ClearHubFeature\n");
		switch (wValue) {
		case C_HUB_OVER_CURRENT:
			dev_dbg(imx21->dev, "    OVER_CURRENT\n");
			break;
		case C_HUB_LOCAL_POWER:
			dev_dbg(imx21->dev, "    LOCAL_POWER\n");
			break;
		default:
			dev_dbg(imx21->dev, "    unknown\n");
			rc = -EINVAL;
			break;
		}
		break;

	case ClearPortFeature:
		dev_dbg(imx21->dev, "ClearPortFeature\n");
		switch (wValue) {
		case USB_PORT_FEAT_ENABLE:
			dev_dbg(imx21->dev, "    ENABLE\n");
			status_write = USBH_PORTSTAT_CURCONST;
			break;
		case USB_PORT_FEAT_SUSPEND:
			dev_dbg(imx21->dev, "    SUSPEND\n");
			status_write = USBH_PORTSTAT_PRTOVRCURI;
			break;
		case USB_PORT_FEAT_POWER:
			dev_dbg(imx21->dev, "    POWER\n");
			status_write = USBH_PORTSTAT_LSDEVCON;
			break;
		case USB_PORT_FEAT_C_ENABLE:
			dev_dbg(imx21->dev, "    C_ENABLE\n");
			status_write = USBH_PORTSTAT_PRTENBLSC;
			break;
		case USB_PORT_FEAT_C_SUSPEND:
			dev_dbg(imx21->dev, "    C_SUSPEND\n");
			status_write = USBH_PORTSTAT_PRTSTATSC;
			break;
		case USB_PORT_FEAT_C_CONNECTION:
			dev_dbg(imx21->dev, "    C_CONNECTION\n");
			status_write = USBH_PORTSTAT_CONNECTSC;
			break;
		case USB_PORT_FEAT_C_OVER_CURRENT:
			dev_dbg(imx21->dev, "    C_OVER_CURRENT\n");
			status_write = USBH_PORTSTAT_OVRCURIC;
			break;
		case USB_PORT_FEAT_C_RESET:
			dev_dbg(imx21->dev, "    C_RESET\n");
			status_write = USBH_PORTSTAT_PRTRSTSC;
			break;
		default:
			dev_dbg(imx21->dev, "    unknown\n");
			rc = -EINVAL;
			break;
		}
		break;

	case GetHubDescriptor:
		dev_dbg(imx21->dev, "GetHubDescriptor\n");
		rc = get_hub_descriptor(hcd, (void *)buf);
		break;

	case GetHubStatus:
		dev_dbg(imx21->dev, "  GetHubStatus\n");
		*(__le32 *) buf = 0;
		break;

	case GetPortStatus:
		dev_dbg(imx21->dev, "GetPortStatus: port: %d, 0x%x\n",
		    wIndex, USBH_PORTSTAT(wIndex - 1));
		*(__le32 *) buf = readl(imx21->regs +
			USBH_PORTSTAT(wIndex - 1));
		break;

	case SetHubFeature:
		dev_dbg(imx21->dev, "SetHubFeature\n");
		switch (wValue) {
		case C_HUB_OVER_CURRENT:
			dev_dbg(imx21->dev, "    OVER_CURRENT\n");
			break;

		case C_HUB_LOCAL_POWER:
			dev_dbg(imx21->dev, "    LOCAL_POWER\n");
			break;
		default:
			dev_dbg(imx21->dev, "    unknown\n");
			rc = -EINVAL;
			break;
		}
		break;

	case SetPortFeature:
		dev_dbg(imx21->dev, "SetPortFeature\n");
		switch (wValue) {
		case USB_PORT_FEAT_SUSPEND:
			dev_dbg(imx21->dev, "    SUSPEND\n");
			status_write = USBH_PORTSTAT_PRTSUSPST;
			break;
		case USB_PORT_FEAT_POWER:
			dev_dbg(imx21->dev, "    POWER\n");
			status_write = USBH_PORTSTAT_PRTPWRST;
			break;
		case USB_PORT_FEAT_RESET:
			dev_dbg(imx21->dev, "    RESET\n");
			status_write = USBH_PORTSTAT_PRTRSTST;
			break;
		default:
			dev_dbg(imx21->dev, "    unknown\n");
			rc = -EINVAL;
			break;
		}
		break;

	default:
		dev_dbg(imx21->dev, "  unknown\n");
		rc = -EINVAL;
		break;
	}

	if (status_write)
		writel(status_write, imx21->regs + USBH_PORTSTAT(wIndex - 1));
	return rc;
}
/* =========================================== */
/* Host controller management			*/
/* =========================================== */

static int imx21_hc_reset(struct usb_hcd *hcd)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	unsigned long timeout;
	unsigned long flags;

	spin_lock_irqsave(&imx21->lock, flags);

	/* Reset the Host controller modules */
	writel(USBOTG_RST_RSTCTRL | USBOTG_RST_RSTRH |
		USBOTG_RST_RSTHSIE | USBOTG_RST_RSTHC,
		imx21->regs + USBOTG_RST_CTRL);

	/* Wait for reset to finish */
	timeout = jiffies + HZ;
	while (readl(imx21->regs + USBOTG_RST_CTRL) != 0) {
		if (time_after(jiffies, timeout)) {
			spin_unlock_irqrestore(&imx21->lock, flags);
			dev_err(imx21->dev, "timeout waiting for reset\n");
			return -ETIMEDOUT;
		}
		spin_unlock_irq(&imx21->lock);
		schedule_timeout_uninterruptible(1);
		spin_lock_irq(&imx21->lock);
	}
	spin_unlock_irqrestore(&imx21->lock, flags);
	return 0;
}
static int __devinit imx21_hc_start(struct usb_hcd *hcd)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	unsigned long flags;
	int i, j;
	u32 hw_mode = USBOTG_HWMODE_CRECFG_HOST;
	u32 usb_control = 0;

	hw_mode |= ((imx21->pdata->host_xcvr << USBOTG_HWMODE_HOSTXCVR_SHIFT) &
			USBOTG_HWMODE_HOSTXCVR_MASK);
	hw_mode |= ((imx21->pdata->otg_xcvr << USBOTG_HWMODE_OTGXCVR_SHIFT) &
			USBOTG_HWMODE_OTGXCVR_MASK);

	if (imx21->pdata->host1_txenoe)
		usb_control |= USBCTRL_HOST1_TXEN_OE;

	if (!imx21->pdata->host1_xcverless)
		usb_control |= USBCTRL_HOST1_BYP_TLL;

	if (imx21->pdata->otg_ext_xcvr)
		usb_control |= USBCTRL_OTC_RCV_RXDP;


	spin_lock_irqsave(&imx21->lock, flags);

	writel((USBOTG_CLK_CTRL_HST | USBOTG_CLK_CTRL_MAIN),
		imx21->regs + USBOTG_CLK_CTRL);
	writel(hw_mode, imx21->regs + USBOTG_HWMODE);
	writel(usb_control, imx21->regs + USBCTRL);
	writel(USB_MISCCONTROL_SKPRTRY | USB_MISCCONTROL_ARBMODE,
		imx21->regs + USB_MISCCONTROL);

	/* Clear the ETDs */
	for (i = 0; i < USB_NUM_ETD; i++)
		for (j = 0; j < 4; j++)
			etd_writel(imx21, i, j, 0);

	/* Take the HC out of reset */
	writel(USBH_HOST_CTRL_HCUSBSTE_OPERATIONAL | USBH_HOST_CTRL_CTLBLKSR_1,
		imx21->regs + USBH_HOST_CTRL);

	/* Enable ports */
	if (imx21->pdata->enable_otg_host)
		writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
			imx21->regs + USBH_PORTSTAT(0));

	if (imx21->pdata->enable_host1)
		writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
			imx21->regs + USBH_PORTSTAT(1));

	if (imx21->pdata->enable_host2)
		writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
			imx21->regs + USBH_PORTSTAT(2));


	hcd->state = HC_STATE_RUNNING;

	/* Enable host controller interrupts */
	set_register_bits(imx21, USBH_SYSIEN,
		USBH_SYSIEN_HERRINT |
		USBH_SYSIEN_DONEINT | USBH_SYSIEN_SORINT);
	set_register_bits(imx21, USBOTG_CINT_STEN, USBOTG_HCINT);

	spin_unlock_irqrestore(&imx21->lock, flags);

	return 0;
}
static void imx21_hc_stop(struct usb_hcd *hcd)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	unsigned long flags;

	spin_lock_irqsave(&imx21->lock, flags);

	writel(0, imx21->regs + USBH_SYSIEN);
	clear_register_bits(imx21, USBOTG_CINT_STEN, USBOTG_HCINT);
	/* args are (imx21, offset, mask): gate the HST and MAIN clocks off */
	clear_register_bits(imx21, USBOTG_CLK_CTRL,
		USBOTG_CLK_CTRL_HST | USBOTG_CLK_CTRL_MAIN);
	spin_unlock_irqrestore(&imx21->lock, flags);
}
/* =========================================== */
/* Driver glue					*/
/* =========================================== */

static struct hc_driver imx21_hc_driver = {
	.description = hcd_name,
	.product_desc = "IMX21 USB Host Controller",
	.hcd_priv_size = sizeof(struct imx21),

	.flags = HCD_USB11,
	.irq = imx21_irq,

	.reset = imx21_hc_reset,
	.start = imx21_hc_start,
	.stop = imx21_hc_stop,

	/* I/O requests */
	.urb_enqueue = imx21_hc_urb_enqueue,
	.urb_dequeue = imx21_hc_urb_dequeue,
	.endpoint_disable = imx21_hc_endpoint_disable,

	/* scheduling support */
	.get_frame_number = imx21_hc_get_frame,

	/* Root hub support */
	.hub_status_data = imx21_hc_hub_status_data,
	.hub_control = imx21_hc_hub_control,
};

static struct mx21_usbh_platform_data default_pdata = {
	.host_xcvr = MX21_USBXCVR_TXDIF_RXDIF,
	.otg_xcvr = MX21_USBXCVR_TXDIF_RXDIF,
	.enable_host1 = 1,
	.enable_host2 = 1,
	.enable_otg_host = 1,
};
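/*
 * default_pdata is only a fallback: a board file normally supplies its own
 * mx21_usbh_platform_data through pdev->dev.platform_data before this
 * driver probes. A minimal, purely illustrative sketch (the struct name and
 * field values are assumptions for the example, not recommendations):
 *
 *	static struct mx21_usbh_platform_data mach_usbh_pdata = {
 *		.host_xcvr = MX21_USBXCVR_TXDIF_RXDIF,
 *		.otg_xcvr = MX21_USBXCVR_TXDIF_RXDIF,
 *		.enable_otg_host = 1,
 *	};
 *
 * attached to the board's "imx21-hcd" platform device.
 */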
static int imx21_remove(struct platform_device *pdev)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	remove_debug_files(imx21);
	usb_remove_hcd(hcd);

	if (res != NULL) {
		clk_disable(imx21->clk);
		clk_put(imx21->clk);
		iounmap(imx21->regs);
		release_mem_region(res->start, resource_size(res));
	}

	usb_put_hcd(hcd);
	return 0;
}
static int imx21_probe(struct platform_device *pdev)
{
	struct usb_hcd *hcd;
	struct imx21 *imx21;
	struct resource *res;
	int ret;
	int irq;

	printk(KERN_INFO "%s\n", imx21_hc_driver.product_desc);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENXIO;

	hcd = usb_create_hcd(&imx21_hc_driver,
		&pdev->dev, dev_name(&pdev->dev));
	if (hcd == NULL) {
		dev_err(&pdev->dev, "Cannot create hcd (%s)\n",
		    dev_name(&pdev->dev));
		return -ENOMEM;
	}

	imx21 = hcd_to_imx21(hcd);
	imx21->hcd = hcd;
	imx21->dev = &pdev->dev;
	imx21->pdata = pdev->dev.platform_data;
	if (!imx21->pdata)
		imx21->pdata = &default_pdata;

	spin_lock_init(&imx21->lock);
	INIT_LIST_HEAD(&imx21->dmem_list);
	INIT_LIST_HEAD(&imx21->queue_for_etd);
	INIT_LIST_HEAD(&imx21->queue_for_dmem);
	create_debug_files(imx21);

	res = request_mem_region(res->start, resource_size(res), hcd_name);
	if (!res) {
		ret = -EBUSY;
		goto failed_request_mem;
	}

	imx21->regs = ioremap(res->start, resource_size(res));
	if (imx21->regs == NULL) {
		dev_err(imx21->dev, "Cannot map registers\n");
		ret = -ENOMEM;
		goto failed_ioremap;
	}

	/* Enable clocks source */
	imx21->clk = clk_get(imx21->dev, NULL);
	if (IS_ERR(imx21->clk)) {
		dev_err(imx21->dev, "no clock found\n");
		ret = PTR_ERR(imx21->clk);
		goto failed_clock_get;
	}

	ret = clk_set_rate(imx21->clk, clk_round_rate(imx21->clk, 48000000));
	if (ret)
		goto failed_clock_set;
	ret = clk_enable(imx21->clk);
	if (ret)
		goto failed_clock_enable;

	dev_info(imx21->dev, "Hardware HC revision: 0x%02X\n",
		(readl(imx21->regs + USBOTG_HWMODE) >> 16) & 0xFF);

	ret = usb_add_hcd(hcd, irq, IRQF_DISABLED);
	if (ret != 0) {
		dev_err(imx21->dev, "usb_add_hcd() returned %d\n", ret);
		goto failed_add_hcd;
	}

	return 0;

failed_add_hcd:
	clk_disable(imx21->clk);
failed_clock_enable:
failed_clock_set:
	clk_put(imx21->clk);
failed_clock_get:
	iounmap(imx21->regs);
failed_ioremap:
	release_mem_region(res->start, resource_size(res));
failed_request_mem:
	remove_debug_files(imx21);
	usb_put_hcd(hcd);
	return ret;
}
static struct platform_driver imx21_hcd_driver = {
	.driver = {
		   .name = (char *)hcd_name,
		   },
	.probe = imx21_probe,
	.remove = imx21_remove,
	.suspend = NULL,
	.resume = NULL,
};

static int __init imx21_hcd_init(void)
{
	return platform_driver_register(&imx21_hcd_driver);
}

static void __exit imx21_hcd_cleanup(void)
{
	platform_driver_unregister(&imx21_hcd_driver);
}

module_init(imx21_hcd_init);
module_exit(imx21_hcd_cleanup);
1939 MODULE_DESCRIPTION("i.MX21 USB Host controller");
1940 MODULE_AUTHOR("Martin Fuzzey");
1941 MODULE_LICENSE("GPL");
1942 MODULE_ALIAS("platform:imx21-hcd");