[PATCH] usb gadget: allow drivers to support speeds higher than full speed
[linux-2.6/mini2440.git] / drivers / usb / gadget / pxa2xx_udc.c
blob 0a609e3dfbaef5b36ad812c8e9c25be097e7b815
1 /*
2 * linux/drivers/usb/gadget/pxa2xx_udc.c
3 * Intel PXA25x and IXP4xx on-chip full speed USB device controllers
5 * Copyright (C) 2002 Intrinsyc, Inc. (Frank Becker)
6 * Copyright (C) 2003 Robert Schwebel, Pengutronix
7 * Copyright (C) 2003 Benedikt Spranger, Pengutronix
8 * Copyright (C) 2003 David Brownell
9 * Copyright (C) 2003 Joshua Wise
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
27 #undef DEBUG
28 // #define VERBOSE DBG_VERBOSE
30 #include <linux/config.h>
31 #include <linux/module.h>
32 #include <linux/kernel.h>
33 #include <linux/ioport.h>
34 #include <linux/types.h>
35 #include <linux/errno.h>
36 #include <linux/delay.h>
37 #include <linux/sched.h>
38 #include <linux/slab.h>
39 #include <linux/init.h>
40 #include <linux/timer.h>
41 #include <linux/list.h>
42 #include <linux/interrupt.h>
43 #include <linux/proc_fs.h>
44 #include <linux/mm.h>
45 #include <linux/platform_device.h>
46 #include <linux/dma-mapping.h>
48 #include <asm/byteorder.h>
49 #include <asm/dma.h>
50 #include <asm/io.h>
51 #include <asm/irq.h>
52 #include <asm/system.h>
53 #include <asm/mach-types.h>
54 #include <asm/unaligned.h>
55 #include <asm/hardware.h>
56 #include <asm/arch/pxa-regs.h>
58 #include <linux/usb_ch9.h>
59 #include <linux/usb_gadget.h>
61 #include <asm/arch/udc.h>
65 * This driver handles the USB Device Controller (UDC) in Intel's PXA 25x
66 * series processors. The UDC for the IXP 4xx series is very similar.
67 * There are fifteen endpoints, in addition to ep0.
69 * Such controller drivers work with a gadget driver. The gadget driver
70 * returns descriptors, implements configuration and data protocols used
71 * by the host to interact with this device, and allocates endpoints to
72 * the different protocol interfaces. The controller driver virtualizes
73 * usb hardware so that the gadget drivers will be more portable.
75 * This UDC hardware wants to implement a bit too much USB protocol, so
76 * it constrains the sorts of USB configuration change events that work.
77 * The errata for these chips are misleading; some "fixed" bugs from
78 * pxa250 a0/a1 b0/b1/b2 sure act like they're still there.
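/* A rough usage sketch (illustrative only, not part of this driver): a gadget
 * driver bound on top of this UDC claims one of the fixed endpoints declared
 * near the end of this file and moves data through the usb_ep_* API, e.g.:
 *
 *	struct usb_request *req;
 *
 *	usb_ep_enable(ep, &bulk_in_desc);
 *	req = usb_ep_alloc_request(ep, GFP_KERNEL);
 *	req->buf = data;
 *	req->length = len;
 *	req->complete = tx_complete;
 *	usb_ep_queue(ep, req, GFP_ATOMIC);
 *
 * Here "bulk_in_desc", "data", "len" and "tx_complete" belong to the gadget
 * driver; this file only implements the controller side of that API.
 */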
81 #define DRIVER_VERSION "4-May-2005"
82 #define DRIVER_DESC "PXA 25x USB Device Controller driver"
85 static const char driver_name [] = "pxa2xx_udc";
87 static const char ep0name [] = "ep0";
90 // #define USE_DMA
91 // #define USE_OUT_DMA
92 // #define DISABLE_TEST_MODE
94 #ifdef CONFIG_ARCH_IXP4XX
95 #undef USE_DMA
97 /* cpu-specific register addresses are compiled into this code */
98 #ifdef CONFIG_ARCH_PXA
99 #error "Can't configure both IXP and PXA"
100 #endif
102 #endif
104 #include "pxa2xx_udc.h"
107 #ifdef USE_DMA
108 static int use_dma = 1;
109 module_param(use_dma, bool, 0);
110 MODULE_PARM_DESC (use_dma, "true to use dma");
112 static void dma_nodesc_handler (int dmach, void *_ep, struct pt_regs *r);
113 static void kick_dma(struct pxa2xx_ep *ep, struct pxa2xx_request *req);
115 #ifdef USE_OUT_DMA
116 #define DMASTR " (dma support)"
117 #else
118 #define DMASTR " (dma in)"
119 #endif
121 #else /* !USE_DMA */
122 #define DMASTR " (pio only)"
123 #undef USE_OUT_DMA
124 #endif
126 #ifdef CONFIG_USB_PXA2XX_SMALL
127 #define SIZE_STR " (small)"
128 #else
129 #define SIZE_STR ""
130 #endif
132 #ifdef DISABLE_TEST_MODE
133 /* (mode == 0) == no undocumented chip tweaks
134 * (mode & 1) == double buffer bulk IN
135 * (mode & 2) == double buffer bulk OUT
136 * ... so mode = 3 (or 7, 15, etc) does it for both
138 static ushort fifo_mode = 0;
139 module_param(fifo_mode, ushort, 0);
140 MODULE_PARM_DESC (fifo_mode, "pxa2xx udc fifo mode");
141 #endif
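/* For example (assuming DISABLE_TEST_MODE is defined at build time), loading
 * the module with "fifo_mode=3" asks udc_enable() below to try double
 * buffering on both bulk IN and bulk OUT endpoints; the default of 0 leaves
 * the chip untweaked.
 */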
143 /* ---------------------------------------------------------------------------
144 * endpoint related parts of the api to the usb controller hardware,
145 * used by gadget driver; and the inner talker-to-hardware core.
146 * ---------------------------------------------------------------------------
149 static void pxa2xx_ep_fifo_flush (struct usb_ep *ep);
150 static void nuke (struct pxa2xx_ep *, int status);
152 static void pio_irq_enable(int bEndpointAddress)
154 bEndpointAddress &= 0xf;
155 if (bEndpointAddress < 8)
156 UICR0 &= ~(1 << bEndpointAddress);
157 else {
158 bEndpointAddress -= 8;
159 UICR1 &= ~(1 << bEndpointAddress);
163 static void pio_irq_disable(int bEndpointAddress)
165 bEndpointAddress &= 0xf;
166 if (bEndpointAddress < 8)
167 UICR0 |= 1 << bEndpointAddress;
168 else {
169 bEndpointAddress -= 8;
170 UICR1 |= 1 << bEndpointAddress;
174 /* The UDCCR reg contains mask and interrupt status bits,
175 * so using '|=' isn't safe as it may ack an interrupt.
177 #define UDCCR_MASK_BITS (UDCCR_REM | UDCCR_SRM | UDCCR_UDE)
179 static inline void udc_set_mask_UDCCR(int mask)
181 UDCCR = (UDCCR & UDCCR_MASK_BITS) | (mask & UDCCR_MASK_BITS);
184 static inline void udc_clear_mask_UDCCR(int mask)
186 UDCCR = (UDCCR & UDCCR_MASK_BITS) & ~(mask & UDCCR_MASK_BITS);
189 static inline void udc_ack_int_UDCCR(int mask)
191 /* udccr contains the bits we don't want to change */
192 __u32 udccr = UDCCR & UDCCR_MASK_BITS;
194 UDCCR = udccr | (mask & ~UDCCR_MASK_BITS);
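/* Example (sketch): the interrupt-request bits in UDCCR are cleared by
 * writing a 1 back, so a plain read-modify-write could silently ack a
 * pending event.  Use the helpers above instead:
 *
 *	udc_set_mask_UDCCR(UDCCR_UDE);	// safe: only REM/SRM/UDE written back
 *	// not:  UDCCR |= UDCCR_UDE;	// may clear pending RSTIR/SUSIR/RESIR
 *	udc_ack_int_UDCCR(UDCCR_SUSIR);	// ack exactly one interrupt bit
 */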
198 * endpoint enable/disable
200 * we need to verify the descriptors used to enable endpoints. since pxa2xx
201 * endpoint configurations are fixed, and are pretty much always enabled,
202 * there's not a lot to manage here.
204 * because pxa2xx can't selectively initialize bulk (or interrupt) endpoints,
205 * (resetting endpoint halt and toggle), SET_INTERFACE is unusable except
206 * for a single interface (with only the default altsetting) and for gadget
207 * drivers that don't halt endpoints (not reset by set_interface). that also
208 * means that if you use ISO, you must violate the USB spec rule that all
209 * iso endpoints must be in non-default altsettings.
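/* Example (sketch only): the kind of descriptor a gadget driver might pass to
 * pxa2xx_ep_enable() for the fixed "ep1in-bulk" endpoint declared near the end
 * of this file; the address, transfer type, and BULK_FIFO_SIZE maxpacket must
 * match that fixed configuration or the checks below reject it:
 *
 *	static struct usb_endpoint_descriptor bulk_in_desc = {
 *		.bLength		= USB_DT_ENDPOINT_SIZE,
 *		.bDescriptorType	= USB_DT_ENDPOINT,
 *		.bEndpointAddress	= USB_DIR_IN | 1,
 *		.bmAttributes		= USB_ENDPOINT_XFER_BULK,
 *		.wMaxPacketSize		= __constant_cpu_to_le16(BULK_FIFO_SIZE),
 *	};
 */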
211 static int pxa2xx_ep_enable (struct usb_ep *_ep,
212 const struct usb_endpoint_descriptor *desc)
214 struct pxa2xx_ep *ep;
215 struct pxa2xx_udc *dev;
217 ep = container_of (_ep, struct pxa2xx_ep, ep);
218 if (!_ep || !desc || ep->desc || _ep->name == ep0name
219 || desc->bDescriptorType != USB_DT_ENDPOINT
220 || ep->bEndpointAddress != desc->bEndpointAddress
221 || ep->fifo_size < le16_to_cpu
222 (desc->wMaxPacketSize)) {
223 DMSG("%s, bad ep or descriptor\n", __FUNCTION__);
224 return -EINVAL;
227 /* xfer types must match, except that interrupt ~= bulk */
228 if (ep->bmAttributes != desc->bmAttributes
229 && ep->bmAttributes != USB_ENDPOINT_XFER_BULK
230 && desc->bmAttributes != USB_ENDPOINT_XFER_INT) {
231 DMSG("%s, %s type mismatch\n", __FUNCTION__, _ep->name);
232 return -EINVAL;
235 /* hardware _could_ do smaller, but driver doesn't */
236 if ((desc->bmAttributes == USB_ENDPOINT_XFER_BULK
237 && le16_to_cpu (desc->wMaxPacketSize)
238 != BULK_FIFO_SIZE)
239 || !desc->wMaxPacketSize) {
240 DMSG("%s, bad %s maxpacket\n", __FUNCTION__, _ep->name);
241 return -ERANGE;
244 dev = ep->dev;
245 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) {
246 DMSG("%s, bogus device state\n", __FUNCTION__);
247 return -ESHUTDOWN;
250 ep->desc = desc;
251 ep->dma = -1;
252 ep->stopped = 0;
253 ep->pio_irqs = ep->dma_irqs = 0;
254 ep->ep.maxpacket = le16_to_cpu (desc->wMaxPacketSize);
256 /* flush fifo (mostly for OUT buffers) */
257 pxa2xx_ep_fifo_flush (_ep);
259 /* ... reset halt state too, if we could ... */
261 #ifdef USE_DMA
262 /* for (some) bulk and ISO endpoints, try to get a DMA channel and
263 * bind it to the endpoint. otherwise use PIO.
265 switch (ep->bmAttributes) {
266 case USB_ENDPOINT_XFER_ISOC:
267 if (le16_to_cpu(desc->wMaxPacketSize) % 32)
268 break;
269 // fall through
270 case USB_ENDPOINT_XFER_BULK:
271 if (!use_dma || !ep->reg_drcmr)
272 break;
273 ep->dma = pxa_request_dma ((char *)_ep->name,
274 (le16_to_cpu (desc->wMaxPacketSize) > 64)
275 ? DMA_PRIO_MEDIUM /* some iso */
276 : DMA_PRIO_LOW,
277 dma_nodesc_handler, ep);
278 if (ep->dma >= 0) {
279 *ep->reg_drcmr = DRCMR_MAPVLD | ep->dma;
280 DMSG("%s using dma%d\n", _ep->name, ep->dma);
283 #endif
285 DBG(DBG_VERBOSE, "enabled %s\n", _ep->name);
286 return 0;
289 static int pxa2xx_ep_disable (struct usb_ep *_ep)
291 struct pxa2xx_ep *ep;
292 unsigned long flags;
294 ep = container_of (_ep, struct pxa2xx_ep, ep);
295 if (!_ep || !ep->desc) {
296 DMSG("%s, %s not enabled\n", __FUNCTION__,
297 _ep ? ep->ep.name : NULL);
298 return -EINVAL;
300 local_irq_save(flags);
302 nuke (ep, -ESHUTDOWN);
304 #ifdef USE_DMA
305 if (ep->dma >= 0) {
306 *ep->reg_drcmr = 0;
307 pxa_free_dma (ep->dma);
308 ep->dma = -1;
310 #endif
312 /* flush fifo (mostly for IN buffers) */
313 pxa2xx_ep_fifo_flush (_ep);
315 ep->desc = NULL;
316 ep->stopped = 1;
318 local_irq_restore(flags);
319 DBG(DBG_VERBOSE, "%s disabled\n", _ep->name);
320 return 0;
323 /*-------------------------------------------------------------------------*/
325 /* for the pxa2xx, these can just wrap kmalloc/kfree. gadget drivers
326 * must still pass correctly initialized endpoints, since other controller
327 * drivers may care about how it's currently set up (dma issues etc).
331 * pxa2xx_ep_alloc_request - allocate a request data structure
333 static struct usb_request *
334 pxa2xx_ep_alloc_request (struct usb_ep *_ep, gfp_t gfp_flags)
336 struct pxa2xx_request *req;
338 req = kzalloc(sizeof(*req), gfp_flags);
339 if (!req)
340 return NULL;
342 INIT_LIST_HEAD (&req->queue);
343 return &req->req;
348 * pxa2xx_ep_free_request - deallocate a request data structure
350 static void
351 pxa2xx_ep_free_request (struct usb_ep *_ep, struct usb_request *_req)
353 struct pxa2xx_request *req;
355 req = container_of (_req, struct pxa2xx_request, req);
356 WARN_ON (!list_empty (&req->queue));
357 kfree(req);
361 /* PXA cache needs flushing with DMA I/O (it's dma-incoherent), but there's
362 * no device-affinity and the heap works perfectly well for i/o buffers.
363 * It wastes much less memory than dma_alloc_coherent() would, and even
364 * prevents cacheline (32 bytes wide) sharing problems.
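/* Example (sketch): a gadget driver allocating an i/o buffer through the ops
 * below rather than with dma_alloc_coherent():
 *
 *	dma_addr_t dma;
 *	void *buf = usb_ep_alloc_buffer(ep, 512, &dma, GFP_KERNEL);
 *
 * With USE_DMA the returned "dma" is the bus address of the kmalloc'd memory;
 * otherwise it is left as an invalid marker and PIO is used.
 */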
366 static void *
367 pxa2xx_ep_alloc_buffer(struct usb_ep *_ep, unsigned bytes,
368 dma_addr_t *dma, gfp_t gfp_flags)
370 char *retval;
372 retval = kmalloc (bytes, gfp_flags & ~(__GFP_DMA|__GFP_HIGHMEM));
373 if (retval)
374 #ifdef USE_DMA
375 *dma = virt_to_bus (retval);
376 #else
377 *dma = (dma_addr_t)~0;
378 #endif
379 return retval;
382 static void
383 pxa2xx_ep_free_buffer(struct usb_ep *_ep, void *buf, dma_addr_t dma,
384 unsigned bytes)
386 kfree (buf);
389 /*-------------------------------------------------------------------------*/
392 * done - retire a request; caller blocked irqs
394 static void done(struct pxa2xx_ep *ep, struct pxa2xx_request *req, int status)
396 unsigned stopped = ep->stopped;
398 list_del_init(&req->queue);
400 if (likely (req->req.status == -EINPROGRESS))
401 req->req.status = status;
402 else
403 status = req->req.status;
405 if (status && status != -ESHUTDOWN)
406 DBG(DBG_VERBOSE, "complete %s req %p stat %d len %u/%u\n",
407 ep->ep.name, &req->req, status,
408 req->req.actual, req->req.length);
410 /* don't modify queue heads during completion callback */
411 ep->stopped = 1;
412 req->req.complete(&ep->ep, &req->req);
413 ep->stopped = stopped;
417 static inline void ep0_idle (struct pxa2xx_udc *dev)
419 dev->ep0state = EP0_IDLE;
422 static int
423 write_packet(volatile u32 *uddr, struct pxa2xx_request *req, unsigned max)
425 u8 *buf;
426 unsigned length, count;
428 buf = req->req.buf + req->req.actual;
429 prefetch(buf);
431 /* how big will this packet be? */
432 length = min(req->req.length - req->req.actual, max);
433 req->req.actual += length;
435 count = length;
436 while (likely(count--))
437 *uddr = *buf++;
439 return length;
443 * write to an IN endpoint fifo, as many packets as possible.
444 * irqs will use this to write the rest later.
445 * caller guarantees at least one packet buffer is ready (or a zlp).
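/* Worked example: with a 64-byte bulk maxpacket, a 100-byte request goes out
 * as one full 64-byte packet followed by a 36-byte packet; the second write
 * returns count != max, so it is flagged both short and last and the request
 * is retired via done().  A request whose length is an exact multiple of
 * maxpacket ends right after its last full packet, unless req.zero is set, in
 * which case an extra zero-length packet is written to terminate the transfer.
 */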
447 static int
448 write_fifo (struct pxa2xx_ep *ep, struct pxa2xx_request *req)
450 unsigned max;
452 max = le16_to_cpu(ep->desc->wMaxPacketSize);
453 do {
454 unsigned count;
455 int is_last, is_short;
457 count = write_packet(ep->reg_uddr, req, max);
459 /* last packet is usually short (or a zlp) */
460 if (unlikely (count != max))
461 is_last = is_short = 1;
462 else {
463 if (likely(req->req.length != req->req.actual)
464 || req->req.zero)
465 is_last = 0;
466 else
467 is_last = 1;
468 /* interrupt/iso maxpacket may not fill the fifo */
469 is_short = unlikely (max < ep->fifo_size);
472 DBG(DBG_VERY_NOISY, "wrote %s %d bytes%s%s %d left %p\n",
473 ep->ep.name, count,
474 is_last ? "/L" : "", is_short ? "/S" : "",
475 req->req.length - req->req.actual, req);
477 /* let loose that packet. maybe try writing another one,
478 * double buffering might work. TSP, TPC, and TFS
479 * bit values are the same for all normal IN endpoints.
481 *ep->reg_udccs = UDCCS_BI_TPC;
482 if (is_short)
483 *ep->reg_udccs = UDCCS_BI_TSP;
485 /* requests complete when all IN data is in the FIFO */
486 if (is_last) {
487 done (ep, req, 0);
488 if (list_empty(&ep->queue) || unlikely(ep->dma >= 0)) {
489 pio_irq_disable (ep->bEndpointAddress);
490 #ifdef USE_DMA
491 /* unaligned data and zlps couldn't use dma */
492 if (unlikely(!list_empty(&ep->queue))) {
493 req = list_entry(ep->queue.next,
494 struct pxa2xx_request, queue);
495 kick_dma(ep,req);
496 return 0;
498 #endif
500 return 1;
503 // TODO experiment: how robust can fifo mode tweaking be?
504 // double buffering is off in the default fifo mode, which
505 // prevents TFS from being set here.
507 } while (*ep->reg_udccs & UDCCS_BI_TFS);
508 return 0;
511 /* caller asserts req->pending (ep0 irq status nyet cleared); starts
512 * ep0 data stage. these chips want very simple state transitions.
514 static inline
515 void ep0start(struct pxa2xx_udc *dev, u32 flags, const char *tag)
517 UDCCS0 = flags|UDCCS0_SA|UDCCS0_OPR;
518 USIR0 = USIR0_IR0;
519 dev->req_pending = 0;
520 DBG(DBG_VERY_NOISY, "%s %s, %02x/%02x\n",
521 __FUNCTION__, tag, UDCCS0, flags);
524 static int
525 write_ep0_fifo (struct pxa2xx_ep *ep, struct pxa2xx_request *req)
527 unsigned count;
528 int is_short;
530 count = write_packet(&UDDR0, req, EP0_FIFO_SIZE);
531 ep->dev->stats.write.bytes += count;
533 /* last packet "must be" short (or a zlp) */
534 is_short = (count != EP0_FIFO_SIZE);
536 DBG(DBG_VERY_NOISY, "ep0in %d bytes %d left %p\n", count,
537 req->req.length - req->req.actual, req);
539 if (unlikely (is_short)) {
540 if (ep->dev->req_pending)
541 ep0start(ep->dev, UDCCS0_IPR, "short IN");
542 else
543 UDCCS0 = UDCCS0_IPR;
545 count = req->req.length;
546 done (ep, req, 0);
547 ep0_idle(ep->dev);
548 #if 1
549 /* This seems to get rid of lost status irqs in some cases:
550 * host responds quickly, or next request involves config
551 * change automagic, or should have been hidden, or ...
553 * FIXME get rid of all udelays possible...
555 if (count >= EP0_FIFO_SIZE) {
556 count = 100;
557 do {
558 if ((UDCCS0 & UDCCS0_OPR) != 0) {
559 /* clear OPR, generate ack */
560 UDCCS0 = UDCCS0_OPR;
561 break;
563 count--;
564 udelay(1);
565 } while (count);
567 #endif
568 } else if (ep->dev->req_pending)
569 ep0start(ep->dev, 0, "IN");
570 return is_short;
575 * read_fifo - unload packet(s) from the fifo we use for usb OUT
576 * transfers and put them into the request. caller should have made
577 * sure there's at least one packet ready.
579 * returns true if the request completed because of short packet or the
580 * request buffer having filled (and maybe overran till end-of-packet).
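/* Worked example: a host sending 100 bytes to a 64-byte bulk OUT endpoint
 * delivers a full 64-byte packet and then a 36-byte packet; the short packet
 * (count < maxpacket) completes the request even if the gadget driver's
 * buffer is larger.  If the buffer is smaller than what arrives, the excess
 * is discarded and req.status becomes -EOVERFLOW (see below).
 */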
582 static int
583 read_fifo (struct pxa2xx_ep *ep, struct pxa2xx_request *req)
585 for (;;) {
586 u32 udccs;
587 u8 *buf;
588 unsigned bufferspace, count, is_short;
590 /* make sure there's a packet in the FIFO.
591 * UDCCS_{BO,IO}_RPC are all the same bit value.
592 * UDCCS_{BO,IO}_RNE are all the same bit value.
594 udccs = *ep->reg_udccs;
595 if (unlikely ((udccs & UDCCS_BO_RPC) == 0))
596 break;
597 buf = req->req.buf + req->req.actual;
598 prefetchw(buf);
599 bufferspace = req->req.length - req->req.actual;
601 /* read all bytes from this packet */
602 if (likely (udccs & UDCCS_BO_RNE)) {
603 count = 1 + (0x0ff & *ep->reg_ubcr);
604 req->req.actual += min (count, bufferspace);
605 } else /* zlp */
606 count = 0;
607 is_short = (count < ep->ep.maxpacket);
608 DBG(DBG_VERY_NOISY, "read %s %02x, %d bytes%s req %p %d/%d\n",
609 ep->ep.name, udccs, count,
610 is_short ? "/S" : "",
611 req, req->req.actual, req->req.length);
612 while (likely (count-- != 0)) {
613 u8 byte = (u8) *ep->reg_uddr;
615 if (unlikely (bufferspace == 0)) {
616 /* this happens when the driver's buffer
617 * is smaller than what the host sent.
618 * discard the extra data.
620 if (req->req.status != -EOVERFLOW)
621 DMSG("%s overflow %d\n",
622 ep->ep.name, count);
623 req->req.status = -EOVERFLOW;
624 } else {
625 *buf++ = byte;
626 bufferspace--;
629 *ep->reg_udccs = UDCCS_BO_RPC;
630 /* RPC/RSP/RNE could now reflect the other packet buffer */
632 /* iso is one request per packet */
633 if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
634 if (udccs & UDCCS_IO_ROF)
635 req->req.status = -EHOSTUNREACH;
636 /* more like "is_done" */
637 is_short = 1;
640 /* completion */
641 if (is_short || req->req.actual == req->req.length) {
642 done (ep, req, 0);
643 if (list_empty(&ep->queue))
644 pio_irq_disable (ep->bEndpointAddress);
645 return 1;
648 /* finished that packet. the next one may be waiting... */
650 return 0;
654 * special ep0 version of the above. no UBCR0 or double buffering; status
655 * handshaking is magic. most device protocols don't need control-OUT.
656 * CDC vendor commands (and RNDIS), mass storage CB/CBI, and some other
657 * protocols do use them.
659 static int
660 read_ep0_fifo (struct pxa2xx_ep *ep, struct pxa2xx_request *req)
662 u8 *buf, byte;
663 unsigned bufferspace;
665 buf = req->req.buf + req->req.actual;
666 bufferspace = req->req.length - req->req.actual;
668 while (UDCCS0 & UDCCS0_RNE) {
669 byte = (u8) UDDR0;
671 if (unlikely (bufferspace == 0)) {
672 /* this happens when the driver's buffer
673 * is smaller than what the host sent.
674 * discard the extra data.
676 if (req->req.status != -EOVERFLOW)
677 DMSG("%s overflow\n", ep->ep.name);
678 req->req.status = -EOVERFLOW;
679 } else {
680 *buf++ = byte;
681 req->req.actual++;
682 bufferspace--;
686 UDCCS0 = UDCCS0_OPR | UDCCS0_IPR;
688 /* completion */
689 if (req->req.actual >= req->req.length)
690 return 1;
692 /* finished that packet. the next one may be waiting... */
693 return 0;
696 #ifdef USE_DMA
698 #define MAX_IN_DMA ((DCMD_LENGTH + 1) - BULK_FIFO_SIZE)
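/* MAX_IN_DMA arithmetic (assuming the usual 8191-byte DCMD_LENGTH field):
 * (DCMD_LENGTH + 1) is 8192, so one no-descriptor DMA transfer is capped at
 * 8192 - 64 = 8128 bytes -- a multiple of the 64-byte bulk maxpacket, which
 * keeps an intermediate DMA chunk from ending in an unintended short packet.
 */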
700 static void
701 start_dma_nodesc(struct pxa2xx_ep *ep, struct pxa2xx_request *req, int is_in)
703 u32 dcmd = req->req.length;
704 u32 buf = req->req.dma;
705 u32 fifo = io_v2p ((u32)ep->reg_uddr);
707 /* caller guarantees there's a packet or more remaining
708 * - IN may end with a short packet (TSP set separately),
709 * - OUT is always full length
711 buf += req->req.actual;
712 dcmd -= req->req.actual;
713 ep->dma_fixup = 0;
715 /* no-descriptor mode can be simple for bulk-in, iso-in, iso-out */
716 DCSR(ep->dma) = DCSR_NODESC;
717 if (is_in) {
718 DSADR(ep->dma) = buf;
719 DTADR(ep->dma) = fifo;
720 if (dcmd > MAX_IN_DMA)
721 dcmd = MAX_IN_DMA;
722 else
723 ep->dma_fixup = (dcmd % ep->ep.maxpacket) != 0;
724 dcmd |= DCMD_BURST32 | DCMD_WIDTH1
725 | DCMD_FLOWTRG | DCMD_INCSRCADDR;
726 } else {
727 #ifdef USE_OUT_DMA
728 DSADR(ep->dma) = fifo;
729 DTADR(ep->dma) = buf;
730 if (ep->bmAttributes != USB_ENDPOINT_XFER_ISOC)
731 dcmd = ep->ep.maxpacket;
732 dcmd |= DCMD_BURST32 | DCMD_WIDTH1
733 | DCMD_FLOWSRC | DCMD_INCTRGADDR;
734 #endif
736 DCMD(ep->dma) = dcmd;
737 DCSR(ep->dma) = DCSR_RUN | DCSR_NODESC
738 | (unlikely(is_in)
739 ? DCSR_STOPIRQEN /* use dma_nodesc_handler() */
740 : 0); /* use handle_ep() */
743 static void kick_dma(struct pxa2xx_ep *ep, struct pxa2xx_request *req)
745 int is_in = ep->bEndpointAddress & USB_DIR_IN;
747 if (is_in) {
748 /* unaligned tx buffers and zlps only work with PIO */
749 if ((req->req.dma & 0x0f) != 0
750 || unlikely((req->req.length - req->req.actual)
751 == 0)) {
752 pio_irq_enable(ep->bEndpointAddress);
753 if ((*ep->reg_udccs & UDCCS_BI_TFS) != 0)
754 (void) write_fifo(ep, req);
755 } else {
756 start_dma_nodesc(ep, req, USB_DIR_IN);
758 } else {
759 if ((req->req.length - req->req.actual) < ep->ep.maxpacket) {
760 DMSG("%s short dma read...\n", ep->ep.name);
761 /* we're always set up for pio out */
762 read_fifo (ep, req);
763 } else {
764 *ep->reg_udccs = UDCCS_BO_DME
765 | (*ep->reg_udccs & UDCCS_BO_FST);
766 start_dma_nodesc(ep, req, USB_DIR_OUT);
771 static void cancel_dma(struct pxa2xx_ep *ep)
773 struct pxa2xx_request *req;
774 u32 tmp;
776 if (DCSR(ep->dma) == 0 || list_empty(&ep->queue))
777 return;
779 DCSR(ep->dma) = 0;
780 while ((DCSR(ep->dma) & DCSR_STOPSTATE) == 0)
781 cpu_relax();
783 req = list_entry(ep->queue.next, struct pxa2xx_request, queue);
784 tmp = DCMD(ep->dma) & DCMD_LENGTH;
785 req->req.actual = req->req.length - (tmp & DCMD_LENGTH);
787 /* the last tx packet may be incomplete, so flush the fifo.
788 * FIXME correct req.actual if we can
790 if (ep->bEndpointAddress & USB_DIR_IN)
791 *ep->reg_udccs = UDCCS_BI_FTF;
794 /* dma channel stopped ... normal tx end (IN), or on error (IN/OUT) */
795 static void dma_nodesc_handler(int dmach, void *_ep, struct pt_regs *r)
797 struct pxa2xx_ep *ep = _ep;
798 struct pxa2xx_request *req;
799 u32 tmp, completed;
801 local_irq_disable();
803 req = list_entry(ep->queue.next, struct pxa2xx_request, queue);
805 ep->dma_irqs++;
806 ep->dev->stats.irqs++;
807 HEX_DISPLAY(ep->dev->stats.irqs);
809 /* ack/clear */
810 tmp = DCSR(ep->dma);
811 DCSR(ep->dma) = tmp;
812 if ((tmp & DCSR_STOPSTATE) == 0
813 || (DDADR(ep->dma) & DDADR_STOP) != 0) {
814 DBG(DBG_VERBOSE, "%s, dcsr %08x ddadr %08x\n",
815 ep->ep.name, DCSR(ep->dma), DDADR(ep->dma));
816 goto done;
818 DCSR(ep->dma) = 0; /* clear DCSR_STOPSTATE */
820 /* update transfer status */
821 completed = tmp & DCSR_BUSERR;
822 if (ep->bEndpointAddress & USB_DIR_IN)
823 tmp = DSADR(ep->dma);
824 else
825 tmp = DTADR(ep->dma);
826 req->req.actual = tmp - req->req.dma;
828 /* FIXME seems we sometimes see partial transfers... */
830 if (unlikely(completed != 0))
831 req->req.status = -EIO;
832 else if (req->req.actual) {
833 /* these registers have zeroes in low bits; they miscount
834 * some (end-of-transfer) short packets: tx 14 as tx 12
836 if (ep->dma_fixup)
837 req->req.actual = min(req->req.actual + 3,
838 req->req.length);
840 tmp = (req->req.length - req->req.actual);
841 completed = (tmp == 0);
842 if (completed && (ep->bEndpointAddress & USB_DIR_IN)) {
844 /* maybe validate final short packet ... */
845 if ((req->req.actual % ep->ep.maxpacket) != 0)
846 *ep->reg_udccs = UDCCS_BI_TSP/*|UDCCS_BI_TPC*/;
848 /* ... or zlp, using pio fallback */
849 else if (ep->bmAttributes == USB_ENDPOINT_XFER_BULK
850 && req->req.zero) {
851 DMSG("%s zlp terminate ...\n", ep->ep.name);
852 completed = 0;
857 if (likely(completed)) {
858 done(ep, req, 0);
860 /* maybe re-activate after completion */
861 if (ep->stopped || list_empty(&ep->queue))
862 goto done;
863 req = list_entry(ep->queue.next, struct pxa2xx_request, queue);
865 kick_dma(ep, req);
866 done:
867 local_irq_enable();
870 #endif
872 /*-------------------------------------------------------------------------*/
874 static int
875 pxa2xx_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
877 struct pxa2xx_request *req;
878 struct pxa2xx_ep *ep;
879 struct pxa2xx_udc *dev;
880 unsigned long flags;
882 req = container_of(_req, struct pxa2xx_request, req);
883 if (unlikely (!_req || !_req->complete || !_req->buf
884 || !list_empty(&req->queue))) {
885 DMSG("%s, bad params\n", __FUNCTION__);
886 return -EINVAL;
889 ep = container_of(_ep, struct pxa2xx_ep, ep);
890 if (unlikely (!_ep || (!ep->desc && ep->ep.name != ep0name))) {
891 DMSG("%s, bad ep\n", __FUNCTION__);
892 return -EINVAL;
895 dev = ep->dev;
896 if (unlikely (!dev->driver
897 || dev->gadget.speed == USB_SPEED_UNKNOWN)) {
898 DMSG("%s, bogus device state\n", __FUNCTION__);
899 return -ESHUTDOWN;
902 /* iso is always one packet per request, that's the only way
903 * we can report per-packet status. that also helps with dma.
905 if (unlikely (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC
906 && req->req.length > le16_to_cpu
907 (ep->desc->wMaxPacketSize)))
908 return -EMSGSIZE;
910 #ifdef USE_DMA
911 // FIXME caller may already have done the dma mapping
912 if (ep->dma >= 0) {
913 _req->dma = dma_map_single(dev->dev,
914 _req->buf, _req->length,
915 ((ep->bEndpointAddress & USB_DIR_IN) != 0)
916 ? DMA_TO_DEVICE
917 : DMA_FROM_DEVICE);
919 #endif
921 DBG(DBG_NOISY, "%s queue req %p, len %d buf %p\n",
922 _ep->name, _req, _req->length, _req->buf);
924 local_irq_save(flags);
926 _req->status = -EINPROGRESS;
927 _req->actual = 0;
929 /* kickstart this i/o queue? */
930 if (list_empty(&ep->queue) && !ep->stopped) {
931 if (ep->desc == 0 /* ep0 */) {
932 unsigned length = _req->length;
934 switch (dev->ep0state) {
935 case EP0_IN_DATA_PHASE:
936 dev->stats.write.ops++;
937 if (write_ep0_fifo(ep, req))
938 req = NULL;
939 break;
941 case EP0_OUT_DATA_PHASE:
942 dev->stats.read.ops++;
943 /* messy ... */
944 if (dev->req_config) {
945 DBG(DBG_VERBOSE, "ep0 config ack%s\n",
946 dev->has_cfr ? "" : " raced");
947 if (dev->has_cfr)
948 UDCCFR = UDCCFR_AREN|UDCCFR_ACM
949 |UDCCFR_MB1;
950 done(ep, req, 0);
951 dev->ep0state = EP0_END_XFER;
952 local_irq_restore (flags);
953 return 0;
955 if (dev->req_pending)
956 ep0start(dev, UDCCS0_IPR, "OUT");
957 if (length == 0 || ((UDCCS0 & UDCCS0_RNE) != 0
958 && read_ep0_fifo(ep, req))) {
959 ep0_idle(dev);
960 done(ep, req, 0);
961 req = NULL;
963 break;
965 default:
966 DMSG("ep0 i/o, odd state %d\n", dev->ep0state);
967 local_irq_restore (flags);
968 return -EL2HLT;
970 #ifdef USE_DMA
971 /* either start dma or prime pio pump */
972 } else if (ep->dma >= 0) {
973 kick_dma(ep, req);
974 #endif
975 /* can the FIFO satisfy the request immediately? */
976 } else if ((ep->bEndpointAddress & USB_DIR_IN) != 0) {
977 if ((*ep->reg_udccs & UDCCS_BI_TFS) != 0
978 && write_fifo(ep, req))
979 req = NULL;
980 } else if ((*ep->reg_udccs & UDCCS_BO_RFS) != 0
981 && read_fifo(ep, req)) {
982 req = NULL;
985 if (likely (req && ep->desc) && ep->dma < 0)
986 pio_irq_enable(ep->bEndpointAddress);
989 /* pio or dma irq handler advances the queue. */
990 if (likely (req != 0))
991 list_add_tail(&req->queue, &ep->queue);
992 local_irq_restore(flags);
994 return 0;
999 * nuke - dequeue ALL requests
1001 static void nuke(struct pxa2xx_ep *ep, int status)
1003 struct pxa2xx_request *req;
1005 /* called with irqs blocked */
1006 #ifdef USE_DMA
1007 if (ep->dma >= 0 && !ep->stopped)
1008 cancel_dma(ep);
1009 #endif
1010 while (!list_empty(&ep->queue)) {
1011 req = list_entry(ep->queue.next,
1012 struct pxa2xx_request,
1013 queue);
1014 done(ep, req, status);
1016 if (ep->desc)
1017 pio_irq_disable (ep->bEndpointAddress);
1021 /* dequeue JUST ONE request */
1022 static int pxa2xx_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
1024 struct pxa2xx_ep *ep;
1025 struct pxa2xx_request *req;
1026 unsigned long flags;
1028 ep = container_of(_ep, struct pxa2xx_ep, ep);
1029 if (!_ep || ep->ep.name == ep0name)
1030 return -EINVAL;
1032 local_irq_save(flags);
1034 /* make sure it's actually queued on this endpoint */
1035 list_for_each_entry (req, &ep->queue, queue) {
1036 if (&req->req == _req)
1037 break;
1039 if (&req->req != _req) {
1040 local_irq_restore(flags);
1041 return -EINVAL;
1044 #ifdef USE_DMA
1045 if (ep->dma >= 0 && ep->queue.next == &req->queue && !ep->stopped) {
1046 cancel_dma(ep);
1047 done(ep, req, -ECONNRESET);
1048 /* restart i/o */
1049 if (!list_empty(&ep->queue)) {
1050 req = list_entry(ep->queue.next,
1051 struct pxa2xx_request, queue);
1052 kick_dma(ep, req);
1054 } else
1055 #endif
1056 done(ep, req, -ECONNRESET);
1058 local_irq_restore(flags);
1059 return 0;
1062 /*-------------------------------------------------------------------------*/
1064 static int pxa2xx_ep_set_halt(struct usb_ep *_ep, int value)
1066 struct pxa2xx_ep *ep;
1067 unsigned long flags;
1069 ep = container_of(_ep, struct pxa2xx_ep, ep);
1070 if (unlikely (!_ep
1071 || (!ep->desc && ep->ep.name != ep0name))
1072 || ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
1073 DMSG("%s, bad ep\n", __FUNCTION__);
1074 return -EINVAL;
1076 if (value == 0) {
1077 /* this path (reset toggle+halt) is needed to implement
1078 * SET_INTERFACE on normal hardware. but it can't be
1079 * done from software on the PXA UDC, and the hardware
1080 * forgets to do it as part of SET_INTERFACE automagic.
1082 DMSG("only host can clear %s halt\n", _ep->name);
1083 return -EROFS;
1086 local_irq_save(flags);
1088 if ((ep->bEndpointAddress & USB_DIR_IN) != 0
1089 && ((*ep->reg_udccs & UDCCS_BI_TFS) == 0
1090 || !list_empty(&ep->queue))) {
1091 local_irq_restore(flags);
1092 return -EAGAIN;
1095 /* FST bit is the same for control, bulk in, bulk out, interrupt in */
1096 *ep->reg_udccs = UDCCS_BI_FST|UDCCS_BI_FTF;
1098 /* ep0 needs special care */
1099 if (!ep->desc) {
1100 start_watchdog(ep->dev);
1101 ep->dev->req_pending = 0;
1102 ep->dev->ep0state = EP0_STALL;
1104 /* and bulk/intr endpoints like dropping stalls too */
1105 } else {
1106 unsigned i;
1107 for (i = 0; i < 1000; i += 20) {
1108 if (*ep->reg_udccs & UDCCS_BI_SST)
1109 break;
1110 udelay(20);
1113 local_irq_restore(flags);
1115 DBG(DBG_VERBOSE, "%s halt\n", _ep->name);
1116 return 0;
1119 static int pxa2xx_ep_fifo_status(struct usb_ep *_ep)
1121 struct pxa2xx_ep *ep;
1123 ep = container_of(_ep, struct pxa2xx_ep, ep);
1124 if (!_ep) {
1125 DMSG("%s, bad ep\n", __FUNCTION__);
1126 return -ENODEV;
1128 /* pxa can't report unclaimed bytes from IN fifos */
1129 if ((ep->bEndpointAddress & USB_DIR_IN) != 0)
1130 return -EOPNOTSUPP;
1131 if (ep->dev->gadget.speed == USB_SPEED_UNKNOWN
1132 || (*ep->reg_udccs & UDCCS_BO_RFS) == 0)
1133 return 0;
1134 else
1135 return (*ep->reg_ubcr & 0xfff) + 1;
1138 static void pxa2xx_ep_fifo_flush(struct usb_ep *_ep)
1140 struct pxa2xx_ep *ep;
1142 ep = container_of(_ep, struct pxa2xx_ep, ep);
1143 if (!_ep || ep->ep.name == ep0name || !list_empty(&ep->queue)) {
1144 DMSG("%s, bad ep\n", __FUNCTION__);
1145 return;
1148 /* toggle and halt bits stay unchanged */
1150 /* for OUT, just read and discard the FIFO contents. */
1151 if ((ep->bEndpointAddress & USB_DIR_IN) == 0) {
1152 while (((*ep->reg_udccs) & UDCCS_BO_RNE) != 0)
1153 (void) *ep->reg_uddr;
1154 return;
1157 /* most IN status is the same, but ISO can't stall */
1158 *ep->reg_udccs = UDCCS_BI_TPC|UDCCS_BI_FTF|UDCCS_BI_TUR
1159 | ((ep->bmAttributes == USB_ENDPOINT_XFER_ISOC)
1160 ? 0 : UDCCS_BI_SST);
1164 static struct usb_ep_ops pxa2xx_ep_ops = {
1165 .enable = pxa2xx_ep_enable,
1166 .disable = pxa2xx_ep_disable,
1168 .alloc_request = pxa2xx_ep_alloc_request,
1169 .free_request = pxa2xx_ep_free_request,
1171 .alloc_buffer = pxa2xx_ep_alloc_buffer,
1172 .free_buffer = pxa2xx_ep_free_buffer,
1174 .queue = pxa2xx_ep_queue,
1175 .dequeue = pxa2xx_ep_dequeue,
1177 .set_halt = pxa2xx_ep_set_halt,
1178 .fifo_status = pxa2xx_ep_fifo_status,
1179 .fifo_flush = pxa2xx_ep_fifo_flush,
1183 /* ---------------------------------------------------------------------------
1184 * device-scoped parts of the api to the usb controller hardware
1185 * ---------------------------------------------------------------------------
1188 static int pxa2xx_udc_get_frame(struct usb_gadget *_gadget)
1190 return ((UFNRH & 0x07) << 8) | (UFNRL & 0xff);
1193 static int pxa2xx_udc_wakeup(struct usb_gadget *_gadget)
1195 /* host may not have enabled remote wakeup */
1196 if ((UDCCS0 & UDCCS0_DRWF) == 0)
1197 return -EHOSTUNREACH;
1198 udc_set_mask_UDCCR(UDCCR_RSM);
1199 return 0;
1202 static void stop_activity(struct pxa2xx_udc *, struct usb_gadget_driver *);
1203 static void udc_enable (struct pxa2xx_udc *);
1204 static void udc_disable(struct pxa2xx_udc *);
1206 /* We disable the UDC -- and its 48 MHz clock -- whenever it's not
1207 * in active use.
1209 static int pullup(struct pxa2xx_udc *udc, int is_active)
1211 is_active = is_active && udc->vbus && udc->pullup;
1212 DMSG("%s\n", is_active ? "active" : "inactive");
1213 if (is_active)
1214 udc_enable(udc);
1215 else {
1216 if (udc->gadget.speed != USB_SPEED_UNKNOWN) {
1217 DMSG("disconnect %s\n", udc->driver
1218 ? udc->driver->driver.name
1219 : "(no driver)");
1220 stop_activity(udc, udc->driver);
1222 udc_disable(udc);
1224 return 0;
1227 /* VBUS reporting logically comes from a transceiver */
1228 static int pxa2xx_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
1230 struct pxa2xx_udc *udc;
1232 udc = container_of(_gadget, struct pxa2xx_udc, gadget);
1233 udc->vbus = is_active = (is_active != 0);
1234 DMSG("vbus %s\n", is_active ? "supplied" : "inactive");
1235 pullup(udc, is_active);
1236 return 0;
1239 /* drivers may have software control over D+ pullup */
1240 static int pxa2xx_udc_pullup(struct usb_gadget *_gadget, int is_active)
1242 struct pxa2xx_udc *udc;
1244 udc = container_of(_gadget, struct pxa2xx_udc, gadget);
1246 /* not all boards support pullup control */
1247 if (!udc->mach->udc_command)
1248 return -EOPNOTSUPP;
1250 is_active = (is_active != 0);
1251 udc->pullup = is_active;
1252 pullup(udc, is_active);
1253 return 0;
1256 static const struct usb_gadget_ops pxa2xx_udc_ops = {
1257 .get_frame = pxa2xx_udc_get_frame,
1258 .wakeup = pxa2xx_udc_wakeup,
1259 .vbus_session = pxa2xx_udc_vbus_session,
1260 .pullup = pxa2xx_udc_pullup,
1262 // .vbus_draw ... boards may consume current from VBUS, up to
1263 // 100-500mA based on config. the 500uA suspend ceiling means
1264 // that exclusively vbus-powered PXA designs violate USB specs.
1267 /*-------------------------------------------------------------------------*/
1269 #ifdef CONFIG_USB_GADGET_DEBUG_FILES
1271 static const char proc_node_name [] = "driver/udc";
1273 static int
1274 udc_proc_read(char *page, char **start, off_t off, int count,
1275 int *eof, void *_dev)
1277 char *buf = page;
1278 struct pxa2xx_udc *dev = _dev;
1279 char *next = buf;
1280 unsigned size = count;
1281 unsigned long flags;
1282 int i, t;
1283 u32 tmp;
1285 if (off != 0)
1286 return 0;
1288 local_irq_save(flags);
1290 /* basic device status */
1291 t = scnprintf(next, size, DRIVER_DESC "\n"
1292 "%s version: %s\nGadget driver: %s\nHost %s\n\n",
1293 driver_name, DRIVER_VERSION SIZE_STR DMASTR,
1294 dev->driver ? dev->driver->driver.name : "(none)",
1295 is_vbus_present() ? "full speed" : "disconnected");
1296 size -= t;
1297 next += t;
1299 /* registers for device and ep0 */
1300 t = scnprintf(next, size,
1301 "uicr %02X.%02X, usir %02X.%02x, ufnr %02X.%02X\n",
1302 UICR1, UICR0, USIR1, USIR0, UFNRH, UFNRL);
1303 size -= t;
1304 next += t;
1306 tmp = UDCCR;
1307 t = scnprintf(next, size,
1308 "udccr %02X =%s%s%s%s%s%s%s%s\n", tmp,
1309 (tmp & UDCCR_REM) ? " rem" : "",
1310 (tmp & UDCCR_RSTIR) ? " rstir" : "",
1311 (tmp & UDCCR_SRM) ? " srm" : "",
1312 (tmp & UDCCR_SUSIR) ? " susir" : "",
1313 (tmp & UDCCR_RESIR) ? " resir" : "",
1314 (tmp & UDCCR_RSM) ? " rsm" : "",
1315 (tmp & UDCCR_UDA) ? " uda" : "",
1316 (tmp & UDCCR_UDE) ? " ude" : "");
1317 size -= t;
1318 next += t;
1320 tmp = UDCCS0;
1321 t = scnprintf(next, size,
1322 "udccs0 %02X =%s%s%s%s%s%s%s%s\n", tmp,
1323 (tmp & UDCCS0_SA) ? " sa" : "",
1324 (tmp & UDCCS0_RNE) ? " rne" : "",
1325 (tmp & UDCCS0_FST) ? " fst" : "",
1326 (tmp & UDCCS0_SST) ? " sst" : "",
1327 (tmp & UDCCS0_DRWF) ? " drwf" : "",
1328 (tmp & UDCCS0_FTF) ? " ftf" : "",
1329 (tmp & UDCCS0_IPR) ? " ipr" : "",
1330 (tmp & UDCCS0_OPR) ? " opr" : "");
1331 size -= t;
1332 next += t;
1334 if (dev->has_cfr) {
1335 tmp = UDCCFR;
1336 t = scnprintf(next, size,
1337 "udccfr %02X =%s%s\n", tmp,
1338 (tmp & UDCCFR_AREN) ? " aren" : "",
1339 (tmp & UDCCFR_ACM) ? " acm" : "");
1340 size -= t;
1341 next += t;
1344 if (!is_vbus_present() || !dev->driver)
1345 goto done;
1347 t = scnprintf(next, size, "ep0 IN %lu/%lu, OUT %lu/%lu\nirqs %lu\n\n",
1348 dev->stats.write.bytes, dev->stats.write.ops,
1349 dev->stats.read.bytes, dev->stats.read.ops,
1350 dev->stats.irqs);
1351 size -= t;
1352 next += t;
1354 /* dump endpoint queues */
1355 for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) {
1356 struct pxa2xx_ep *ep = &dev->ep [i];
1357 struct pxa2xx_request *req;
1358 int t;
1360 if (i != 0) {
1361 const struct usb_endpoint_descriptor *d;
1363 d = ep->desc;
1364 if (!d)
1365 continue;
1366 tmp = *dev->ep [i].reg_udccs;
1367 t = scnprintf(next, size,
1368 "%s max %d %s udccs %02x irqs %lu/%lu\n",
1369 ep->ep.name, le16_to_cpu (d->wMaxPacketSize),
1370 (ep->dma >= 0) ? "dma" : "pio", tmp,
1371 ep->pio_irqs, ep->dma_irqs);
1372 /* TODO translate all five groups of udccs bits! */
1374 } else /* ep0 should only have one transfer queued */
1375 t = scnprintf(next, size, "ep0 max 16 pio irqs %lu\n",
1376 ep->pio_irqs);
1377 if (t <= 0 || t > size)
1378 goto done;
1379 size -= t;
1380 next += t;
1382 if (list_empty(&ep->queue)) {
1383 t = scnprintf(next, size, "\t(nothing queued)\n");
1384 if (t <= 0 || t > size)
1385 goto done;
1386 size -= t;
1387 next += t;
1388 continue;
1390 list_for_each_entry(req, &ep->queue, queue) {
1391 #ifdef USE_DMA
1392 if (ep->dma >= 0 && req->queue.prev == &ep->queue)
1393 t = scnprintf(next, size,
1394 "\treq %p len %d/%d "
1395 "buf %p (dma%d dcmd %08x)\n",
1396 &req->req, req->req.actual,
1397 req->req.length, req->req.buf,
1398 ep->dma, DCMD(ep->dma)
1399 // low 13 bits == bytes-to-go
1401 else
1402 #endif
1403 t = scnprintf(next, size,
1404 "\treq %p len %d/%d buf %p\n",
1405 &req->req, req->req.actual,
1406 req->req.length, req->req.buf);
1407 if (t <= 0 || t > size)
1408 goto done;
1409 size -= t;
1410 next += t;
1414 done:
1415 local_irq_restore(flags);
1416 *eof = 1;
1417 return count - size;
1420 #define create_proc_files() \
1421 create_proc_read_entry(proc_node_name, 0, NULL, udc_proc_read, dev)
1422 #define remove_proc_files() \
1423 remove_proc_entry(proc_node_name, NULL)
1425 #else /* !CONFIG_USB_GADGET_DEBUG_FILES */
1427 #define create_proc_files() do {} while (0)
1428 #define remove_proc_files() do {} while (0)
1430 #endif /* CONFIG_USB_GADGET_DEBUG_FILES */
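/* With CONFIG_USB_GADGET_DEBUG_FILES enabled, udc_proc_read() above exposes
 * the UDC state for debugging, e.g.:
 *
 *	# cat /proc/driver/udc
 *
 * which dumps the driver banner, the UDCCR/UDCCS0 bit decode, ep0 statistics,
 * and each enabled endpoint's queued requests.
 */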
1432 /* "function" sysfs attribute */
1433 static ssize_t
1434 show_function (struct device *_dev, struct device_attribute *attr, char *buf)
1436 struct pxa2xx_udc *dev = dev_get_drvdata (_dev);
1438 if (!dev->driver
1439 || !dev->driver->function
1440 || strlen (dev->driver->function) > PAGE_SIZE)
1441 return 0;
1442 return scnprintf (buf, PAGE_SIZE, "%s\n", dev->driver->function);
1444 static DEVICE_ATTR (function, S_IRUGO, show_function, NULL);
1446 /*-------------------------------------------------------------------------*/
1449 * udc_disable - disable USB device controller
1451 static void udc_disable(struct pxa2xx_udc *dev)
1453 /* block all irqs */
1454 udc_set_mask_UDCCR(UDCCR_SRM|UDCCR_REM);
1455 UICR0 = UICR1 = 0xff;
1456 UFNRH = UFNRH_SIM;
1458 /* if hardware supports it, disconnect from usb */
1459 pullup_off();
1461 udc_clear_mask_UDCCR(UDCCR_UDE);
1463 #ifdef CONFIG_ARCH_PXA
1464 /* Disable clock for USB device */
1465 pxa_set_cken(CKEN11_USB, 0);
1466 #endif
1468 ep0_idle (dev);
1469 dev->gadget.speed = USB_SPEED_UNKNOWN;
1470 LED_CONNECTED_OFF;
1475 * udc_reinit - initialize software state
1477 static void udc_reinit(struct pxa2xx_udc *dev)
1479 u32 i;
1481 /* device/ep0 records init */
1482 INIT_LIST_HEAD (&dev->gadget.ep_list);
1483 INIT_LIST_HEAD (&dev->gadget.ep0->ep_list);
1484 dev->ep0state = EP0_IDLE;
1486 /* basic endpoint records init */
1487 for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) {
1488 struct pxa2xx_ep *ep = &dev->ep[i];
1490 if (i != 0)
1491 list_add_tail (&ep->ep.ep_list, &dev->gadget.ep_list);
1493 ep->desc = NULL;
1494 ep->stopped = 0;
1495 INIT_LIST_HEAD (&ep->queue);
1496 ep->pio_irqs = ep->dma_irqs = 0;
1499 /* the rest was statically initialized, and is read-only */
1502 /* until it's enabled, this UDC should be completely invisible
1503 * to any USB host.
1505 static void udc_enable (struct pxa2xx_udc *dev)
1507 udc_clear_mask_UDCCR(UDCCR_UDE);
1509 #ifdef CONFIG_ARCH_PXA
1510 /* Enable clock for USB device */
1511 pxa_set_cken(CKEN11_USB, 1);
1512 udelay(5);
1513 #endif
1515 /* try to clear these bits before we enable the udc */
1516 udc_ack_int_UDCCR(UDCCR_SUSIR|/*UDCCR_RSTIR|*/UDCCR_RESIR);
1518 ep0_idle(dev);
1519 dev->gadget.speed = USB_SPEED_UNKNOWN;
1520 dev->stats.irqs = 0;
1523 * sequence taken from chapter 12.5.10, PXA250 AppProcDevManual:
1524 * - enable UDC
1525 * - if RESET is already in progress, ack interrupt
1526 * - unmask reset interrupt
1528 udc_set_mask_UDCCR(UDCCR_UDE);
1529 if (!(UDCCR & UDCCR_UDA))
1530 udc_ack_int_UDCCR(UDCCR_RSTIR);
1532 if (dev->has_cfr /* UDC_RES2 is defined */) {
1533 /* pxa255 (a0+) can avoid a set_config race that could
1534 * prevent gadget drivers from configuring correctly
1536 UDCCFR = UDCCFR_ACM | UDCCFR_MB1;
1537 } else {
1538 /* "USB test mode" for pxa250 errata 40-42 (stepping a0, a1)
1539 * which could result in missing packets and interrupts.
1540 * supposedly one bit per endpoint, controlling whether it
1541 * double buffers or not; ACM/AREN bits fit into the holes.
1542 * zero bits (like USIR0_IRx) disable double buffering.
1544 UDC_RES1 = 0x00;
1545 UDC_RES2 = 0x00;
1548 #ifdef DISABLE_TEST_MODE
1549 /* "test mode" seems to have become the default in later chip
1550 * revs, preventing double buffering (and invalidating docs).
1551 * this EXPERIMENT enables it for bulk endpoints by tweaking
1552 * undefined/reserved register bits (that other drivers clear).
1553 * Belcarra code comments noted this usage.
1555 if (fifo_mode & 1) { /* IN endpoints */
1556 UDC_RES1 |= USIR0_IR1|USIR0_IR6;
1557 UDC_RES2 |= USIR1_IR11;
1559 if (fifo_mode & 2) { /* OUT endpoints */
1560 UDC_RES1 |= USIR0_IR2|USIR0_IR7;
1561 UDC_RES2 |= USIR1_IR12;
1563 #endif
1565 /* enable suspend/resume and reset irqs */
1566 udc_clear_mask_UDCCR(UDCCR_SRM | UDCCR_REM);
1568 /* enable ep0 irqs */
1569 UICR0 &= ~UICR0_IM0;
1571 /* if hardware supports it, pullup D+ and wait for reset */
1572 pullup_on();
1576 /* when a driver is successfully registered, it will receive
1577 * control requests including set_configuration(), which enables
1578 * non-control requests. then usb traffic follows until a
1579 * disconnect is reported. then a host may connect again, or
1580 * the driver might get unbound.
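/* Example (sketch, not a real driver): the minimum a gadget driver must
 * provide to pass the checks in usb_gadget_register_driver() below:
 *
 *	static struct usb_gadget_driver my_gadget_driver = {
 *		.speed		= USB_SPEED_FULL,
 *		.function	= "my gadget",
 *		.bind		= my_bind,
 *		.unbind		= my_unbind,
 *		.setup		= my_setup,
 *		.disconnect	= my_disconnect,
 *		.driver		= { .name = "my_gadget" },
 *	};
 *	...
 *	retval = usb_gadget_register_driver(&my_gadget_driver);
 *
 * where my_bind(), my_unbind(), my_setup(), and my_disconnect() are the
 * gadget driver's own callbacks.
 */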
1582 int usb_gadget_register_driver(struct usb_gadget_driver *driver)
1584 struct pxa2xx_udc *dev = the_controller;
1585 int retval;
1587 if (!driver
1588 || driver->speed < USB_SPEED_FULL
1589 || !driver->bind
1590 || !driver->unbind
1591 || !driver->disconnect
1592 || !driver->setup)
1593 return -EINVAL;
1594 if (!dev)
1595 return -ENODEV;
1596 if (dev->driver)
1597 return -EBUSY;
1599 /* first hook up the driver ... */
1600 dev->driver = driver;
1601 dev->gadget.dev.driver = &driver->driver;
1602 dev->pullup = 1;
1604 device_add (&dev->gadget.dev);
1605 retval = driver->bind(&dev->gadget);
1606 if (retval) {
1607 DMSG("bind to driver %s --> error %d\n",
1608 driver->driver.name, retval);
1609 device_del (&dev->gadget.dev);
1611 dev->driver = NULL;
1612 dev->gadget.dev.driver = NULL;
1613 return retval;
1615 device_create_file(dev->dev, &dev_attr_function);
1617 /* ... then enable host detection and ep0; and we're ready
1618 * for set_configuration as well as eventual disconnect.
1620 DMSG("registered gadget driver '%s'\n", driver->driver.name);
1621 pullup(dev, 1);
1622 dump_state(dev);
1623 return 0;
1625 EXPORT_SYMBOL(usb_gadget_register_driver);
1627 static void
1628 stop_activity(struct pxa2xx_udc *dev, struct usb_gadget_driver *driver)
1630 int i;
1632 /* don't disconnect drivers more than once */
1633 if (dev->gadget.speed == USB_SPEED_UNKNOWN)
1634 driver = NULL;
1635 dev->gadget.speed = USB_SPEED_UNKNOWN;
1637 /* prevent new request submissions, kill any outstanding requests */
1638 for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) {
1639 struct pxa2xx_ep *ep = &dev->ep[i];
1641 ep->stopped = 1;
1642 nuke(ep, -ESHUTDOWN);
1644 del_timer_sync(&dev->timer);
1646 /* report disconnect; the driver is already quiesced */
1647 LED_CONNECTED_OFF;
1648 if (driver)
1649 driver->disconnect(&dev->gadget);
1651 /* re-init driver-visible data structures */
1652 udc_reinit(dev);
1655 int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
1657 struct pxa2xx_udc *dev = the_controller;
1659 if (!dev)
1660 return -ENODEV;
1661 if (!driver || driver != dev->driver)
1662 return -EINVAL;
1664 local_irq_disable();
1665 pullup(dev, 0);
1666 stop_activity(dev, driver);
1667 local_irq_enable();
1669 driver->unbind(&dev->gadget);
1670 dev->driver = NULL;
1672 device_del (&dev->gadget.dev);
1673 device_remove_file(dev->dev, &dev_attr_function);
1675 DMSG("unregistered gadget driver '%s'\n", driver->driver.name);
1676 dump_state(dev);
1677 return 0;
1679 EXPORT_SYMBOL(usb_gadget_unregister_driver);
1682 /*-------------------------------------------------------------------------*/
1684 #ifdef CONFIG_ARCH_LUBBOCK
1686 /* Lubbock has separate connect and disconnect irqs. More typical designs
1687 * use one GPIO as the VBUS IRQ, and another to control the D+ pullup.
1690 static irqreturn_t
1691 lubbock_vbus_irq(int irq, void *_dev, struct pt_regs *r)
1693 struct pxa2xx_udc *dev = _dev;
1694 int vbus;
1696 dev->stats.irqs++;
1697 HEX_DISPLAY(dev->stats.irqs);
1698 switch (irq) {
1699 case LUBBOCK_USB_IRQ:
1700 LED_CONNECTED_ON;
1701 vbus = 1;
1702 disable_irq(LUBBOCK_USB_IRQ);
1703 enable_irq(LUBBOCK_USB_DISC_IRQ);
1704 break;
1705 case LUBBOCK_USB_DISC_IRQ:
1706 LED_CONNECTED_OFF;
1707 vbus = 0;
1708 disable_irq(LUBBOCK_USB_DISC_IRQ);
1709 enable_irq(LUBBOCK_USB_IRQ);
1710 break;
1711 default:
1712 return IRQ_NONE;
1715 pxa2xx_udc_vbus_session(&dev->gadget, vbus);
1716 return IRQ_HANDLED;
1719 #endif
1722 /*-------------------------------------------------------------------------*/
1724 static inline void clear_ep_state (struct pxa2xx_udc *dev)
1726 unsigned i;
1728 /* hardware SET_{CONFIGURATION,INTERFACE} automagic resets endpoint
1729 * fifos, and pending transactions mustn't be continued in any case.
1731 for (i = 1; i < PXA_UDC_NUM_ENDPOINTS; i++)
1732 nuke(&dev->ep[i], -ECONNABORTED);
1735 static void udc_watchdog(unsigned long _dev)
1737 struct pxa2xx_udc *dev = (void *)_dev;
1739 local_irq_disable();
1740 if (dev->ep0state == EP0_STALL
1741 && (UDCCS0 & UDCCS0_FST) == 0
1742 && (UDCCS0 & UDCCS0_SST) == 0) {
1743 UDCCS0 = UDCCS0_FST|UDCCS0_FTF;
1744 DBG(DBG_VERBOSE, "ep0 re-stall\n");
1745 start_watchdog(dev);
1747 local_irq_enable();
1750 static void handle_ep0 (struct pxa2xx_udc *dev)
1752 u32 udccs0 = UDCCS0;
1753 struct pxa2xx_ep *ep = &dev->ep [0];
1754 struct pxa2xx_request *req;
1755 union {
1756 struct usb_ctrlrequest r;
1757 u8 raw [8];
1758 u32 word [2];
1759 } u;
1761 if (list_empty(&ep->queue))
1762 req = NULL;
1763 else
1764 req = list_entry(ep->queue.next, struct pxa2xx_request, queue);
1766 /* clear stall status */
1767 if (udccs0 & UDCCS0_SST) {
1768 nuke(ep, -EPIPE);
1769 UDCCS0 = UDCCS0_SST;
1770 del_timer(&dev->timer);
1771 ep0_idle(dev);
1774 /* previous request unfinished? non-error iff back-to-back ... */
1775 if ((udccs0 & UDCCS0_SA) != 0 && dev->ep0state != EP0_IDLE) {
1776 nuke(ep, 0);
1777 del_timer(&dev->timer);
1778 ep0_idle(dev);
1781 switch (dev->ep0state) {
1782 case EP0_IDLE:
1783 /* late-breaking status? */
1784 udccs0 = UDCCS0;
1786 /* start control request? */
1787 if (likely((udccs0 & (UDCCS0_OPR|UDCCS0_SA|UDCCS0_RNE))
1788 == (UDCCS0_OPR|UDCCS0_SA|UDCCS0_RNE))) {
1789 int i;
1791 nuke (ep, -EPROTO);
1793 /* read SETUP packet */
1794 for (i = 0; i < 8; i++) {
1795 if (unlikely(!(UDCCS0 & UDCCS0_RNE))) {
1796 bad_setup:
1797 DMSG("SETUP %d!\n", i);
1798 goto stall;
1800 u.raw [i] = (u8) UDDR0;
1802 if (unlikely((UDCCS0 & UDCCS0_RNE) != 0))
1803 goto bad_setup;
1805 got_setup:
1806 DBG(DBG_VERBOSE, "SETUP %02x.%02x v%04x i%04x l%04x\n",
1807 u.r.bRequestType, u.r.bRequest,
1808 le16_to_cpu(u.r.wValue),
1809 le16_to_cpu(u.r.wIndex),
1810 le16_to_cpu(u.r.wLength));
1812 /* cope with automagic for some standard requests. */
1813 dev->req_std = (u.r.bRequestType & USB_TYPE_MASK)
1814 == USB_TYPE_STANDARD;
1815 dev->req_config = 0;
1816 dev->req_pending = 1;
1817 switch (u.r.bRequest) {
1818 /* hardware restricts gadget drivers here! */
1819 case USB_REQ_SET_CONFIGURATION:
1820 if (u.r.bRequestType == USB_RECIP_DEVICE) {
1821 /* reflect hardware's automagic
1822 * up to the gadget driver.
1824 config_change:
1825 dev->req_config = 1;
1826 clear_ep_state(dev);
1827 /* if !has_cfr, there's no synch
1828 * else use AREN (later) not SA|OPR
1829 * USIR0_IR0 acts edge sensitive
1832 break;
1833 /* ... and here, even more ... */
1834 case USB_REQ_SET_INTERFACE:
1835 if (u.r.bRequestType == USB_RECIP_INTERFACE) {
1836 /* udc hardware is broken by design:
1837 * - altsetting may only be zero;
1838 * - hw resets all interfaces' eps;
1839 * - ep reset doesn't include halt(?).
1841 DMSG("broken set_interface (%d/%d)\n",
1842 le16_to_cpu(u.r.wIndex),
1843 le16_to_cpu(u.r.wValue));
1844 goto config_change;
1846 break;
1847 /* hardware was supposed to hide this */
1848 case USB_REQ_SET_ADDRESS:
1849 if (u.r.bRequestType == USB_RECIP_DEVICE) {
1850 ep0start(dev, 0, "address");
1851 return;
1853 break;
1856 if (u.r.bRequestType & USB_DIR_IN)
1857 dev->ep0state = EP0_IN_DATA_PHASE;
1858 else
1859 dev->ep0state = EP0_OUT_DATA_PHASE;
1861 i = dev->driver->setup(&dev->gadget, &u.r);
1862 if (i < 0) {
1863 /* hardware automagic preventing STALL... */
1864 if (dev->req_config) {
1865 /* hardware sometimes neglects to tell
1866 * us about config change events,
1867 * so later ones may fail...
1869 WARN("config change %02x fail %d?\n",
1870 u.r.bRequest, i);
1871 return;
1872 /* TODO experiment: if has_cfr,
1873 * hardware didn't ACK; maybe we
1874 * could actually STALL!
1877 DBG(DBG_VERBOSE, "protocol STALL, "
1878 "%02x err %d\n", UDCCS0, i);
1879 stall:
1880 /* the watchdog timer helps deal with cases
1881 * where udc seems to clear FST wrongly, and
1882 * then NAKs instead of STALLing.
1884 ep0start(dev, UDCCS0_FST|UDCCS0_FTF, "stall");
1885 start_watchdog(dev);
1886 dev->ep0state = EP0_STALL;
1888 /* deferred i/o == no response yet */
1889 } else if (dev->req_pending) {
1890 if (likely(dev->ep0state == EP0_IN_DATA_PHASE
1891 || dev->req_std || u.r.wLength))
1892 ep0start(dev, 0, "defer");
1893 else
1894 ep0start(dev, UDCCS0_IPR, "defer/IPR");
1897 /* expect at least one data or status stage irq */
1898 return;
1900 } else if (likely((udccs0 & (UDCCS0_OPR|UDCCS0_SA))
1901 == (UDCCS0_OPR|UDCCS0_SA))) {
1902 unsigned i;
1904 /* pxa210/250 erratum 131 for B0/B1 says RNE lies.
1905 * still observed on a pxa255 a0.
1907 DBG(DBG_VERBOSE, "e131\n");
1908 nuke(ep, -EPROTO);
1910 /* read SETUP data, but don't trust it too much */
1911 for (i = 0; i < 8; i++)
1912 u.raw [i] = (u8) UDDR0;
1913 if ((u.r.bRequestType & USB_RECIP_MASK)
1914 > USB_RECIP_OTHER)
1915 goto stall;
1916 if (u.word [0] == 0 && u.word [1] == 0)
1917 goto stall;
1918 goto got_setup;
1919 } else {
1920 /* some random early IRQ:
1921 * - we acked FST
1922 * - IPR cleared
1923 * - OPR got set, without SA (likely status stage)
1925 UDCCS0 = udccs0 & (UDCCS0_SA|UDCCS0_OPR);
1927 break;
1928 case EP0_IN_DATA_PHASE: /* GET_DESCRIPTOR etc */
1929 if (udccs0 & UDCCS0_OPR) {
1930 UDCCS0 = UDCCS0_OPR|UDCCS0_FTF;
1931 DBG(DBG_VERBOSE, "ep0in premature status\n");
1932 if (req)
1933 done(ep, req, 0);
1934 ep0_idle(dev);
1935 } else /* irq was IPR clearing */ {
1936 if (req) {
1937 /* this IN packet might finish the request */
1938 (void) write_ep0_fifo(ep, req);
1939 } /* else IN token before response was written */
1941 break;
1942 case EP0_OUT_DATA_PHASE: /* SET_DESCRIPTOR etc */
1943 if (udccs0 & UDCCS0_OPR) {
1944 if (req) {
1945 /* this OUT packet might finish the request */
1946 if (read_ep0_fifo(ep, req))
1947 done(ep, req, 0);
1948 /* else more OUT packets expected */
1949 } /* else OUT token before read was issued */
1950 } else /* irq was IPR clearing */ {
1951 DBG(DBG_VERBOSE, "ep0out premature status\n");
1952 if (req)
1953 done(ep, req, 0);
1954 ep0_idle(dev);
1956 break;
1957 case EP0_END_XFER:
1958 if (req)
1959 done(ep, req, 0);
1960 /* ack control-IN status (maybe in-zlp was skipped)
1961 * also appears after some config change events.
1963 if (udccs0 & UDCCS0_OPR)
1964 UDCCS0 = UDCCS0_OPR;
1965 ep0_idle(dev);
1966 break;
1967 case EP0_STALL:
1968 UDCCS0 = UDCCS0_FST;
1969 break;
1971 USIR0 = USIR0_IR0;
1974 static void handle_ep(struct pxa2xx_ep *ep)
1976 struct pxa2xx_request *req;
1977 int is_in = ep->bEndpointAddress & USB_DIR_IN;
1978 int completed;
1979 u32 udccs, tmp;
1981 do {
1982 completed = 0;
1983 if (likely (!list_empty(&ep->queue)))
1984 req = list_entry(ep->queue.next,
1985 struct pxa2xx_request, queue);
1986 else
1987 req = NULL;
1989 // TODO check FST handling
1991 udccs = *ep->reg_udccs;
1992 if (unlikely(is_in)) { /* irq from TPC, SST, or (ISO) TUR */
1993 tmp = UDCCS_BI_TUR;
1994 if (likely(ep->bmAttributes == USB_ENDPOINT_XFER_BULK))
1995 tmp |= UDCCS_BI_SST;
1996 tmp &= udccs;
1997 if (likely (tmp))
1998 *ep->reg_udccs = tmp;
1999 if (req && likely ((udccs & UDCCS_BI_TFS) != 0))
2000 completed = write_fifo(ep, req);
2002 } else { /* irq from RPC (or for ISO, ROF) */
2003 if (likely(ep->bmAttributes == USB_ENDPOINT_XFER_BULK))
2004 tmp = UDCCS_BO_SST | UDCCS_BO_DME;
2005 else
2006 tmp = UDCCS_IO_ROF | UDCCS_IO_DME;
2007 tmp &= udccs;
2008 if (likely(tmp))
2009 *ep->reg_udccs = tmp;
2011 /* fifos can hold packets, ready for reading... */
2012 if (likely(req)) {
2013 #ifdef USE_OUT_DMA
2014 // TODO didn't yet debug out-dma. this approach assumes
2015 // the worst about short packets and RPC; it might be better.
2017 if (likely(ep->dma >= 0)) {
2018 if (!(udccs & UDCCS_BO_RSP)) {
2019 *ep->reg_udccs = UDCCS_BO_RPC;
2020 ep->dma_irqs++;
2021 return;
2024 #endif
2025 completed = read_fifo(ep, req);
2026 } else
2027 pio_irq_disable (ep->bEndpointAddress);
2029 ep->pio_irqs++;
2030 } while (completed);
2034 * pxa2xx_udc_irq - interrupt handler
2036 * avoid delays in ep0 processing. the control handshaking isn't always
2037 * under software control (pxa250c0 and the pxa255 are better), and delays
2038 * could cause usb protocol errors.
2040 static irqreturn_t
2041 pxa2xx_udc_irq(int irq, void *_dev, struct pt_regs *r)
2043 struct pxa2xx_udc *dev = _dev;
2044 int handled;
2046 dev->stats.irqs++;
2047 HEX_DISPLAY(dev->stats.irqs);
2048 do {
2049 u32 udccr = UDCCR;
2051 handled = 0;
2053 /* SUSpend Interrupt Request */
2054 if (unlikely(udccr & UDCCR_SUSIR)) {
2055 udc_ack_int_UDCCR(UDCCR_SUSIR);
2056 handled = 1;
2057 DBG(DBG_VERBOSE, "USB suspend%s\n", is_vbus_present()
2058 ? "" : "+disconnect");
2060 if (!is_vbus_present())
2061 stop_activity(dev, dev->driver);
2062 else if (dev->gadget.speed != USB_SPEED_UNKNOWN
2063 && dev->driver
2064 && dev->driver->suspend)
2065 dev->driver->suspend(&dev->gadget);
2066 ep0_idle (dev);
2069 /* RESume Interrupt Request */
2070 if (unlikely(udccr & UDCCR_RESIR)) {
2071 udc_ack_int_UDCCR(UDCCR_RESIR);
2072 handled = 1;
2073 DBG(DBG_VERBOSE, "USB resume\n");
2075 if (dev->gadget.speed != USB_SPEED_UNKNOWN
2076 && dev->driver
2077 && dev->driver->resume
2078 && is_vbus_present())
2079 dev->driver->resume(&dev->gadget);
2082 /* ReSeT Interrupt Request - USB reset */
2083 if (unlikely(udccr & UDCCR_RSTIR)) {
2084 udc_ack_int_UDCCR(UDCCR_RSTIR);
2085 handled = 1;
2087 if ((UDCCR & UDCCR_UDA) == 0) {
2088 DBG(DBG_VERBOSE, "USB reset start\n");
2090 /* reset driver and endpoints,
2091 * in case that's not yet done
2093 stop_activity (dev, dev->driver);
2095 } else {
2096 DBG(DBG_VERBOSE, "USB reset end\n");
2097 dev->gadget.speed = USB_SPEED_FULL;
2098 LED_CONNECTED_ON;
2099 memset(&dev->stats, 0, sizeof dev->stats);
2100 /* driver and endpoints are still reset */
2103 } else {
2104 u32 usir0 = USIR0 & ~UICR0;
2105 u32 usir1 = USIR1 & ~UICR1;
2106 int i;
2108 if (unlikely (!usir0 && !usir1))
2109 continue;
2111 DBG(DBG_VERY_NOISY, "irq %02x.%02x\n", usir1, usir0);
2113 /* control traffic */
2114 if (usir0 & USIR0_IR0) {
2115 dev->ep[0].pio_irqs++;
2116 handle_ep0(dev);
2117 handled = 1;
2120 /* endpoint data transfers */
2121 for (i = 0; i < 8; i++) {
2122 u32 tmp = 1 << i;
2124 if (i && (usir0 & tmp)) {
2125 handle_ep(&dev->ep[i]);
2126 USIR0 |= tmp;
2127 handled = 1;
2129 if (usir1 & tmp) {
2130 handle_ep(&dev->ep[i+8]);
2131 USIR1 |= tmp;
2132 handled = 1;
2137 /* we could also ask for 1 msec SOF (SIR) interrupts */
2139 } while (handled);
2140 return IRQ_HANDLED;
2141 }
2143 /*-------------------------------------------------------------------------*/
2145 static void nop_release (struct device *dev)
2146 {
2147 DMSG("%s %s\n", __FUNCTION__, dev->bus_id);
2148 }
2150 /* this uses load-time allocation and initialization (instead of
2151 * doing it at run-time) to save code, eliminate fault paths, and
2152 * be more obviously correct.
2153 */
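/* A few notes on the table below: endpoint names encode number,
 * direction and transfer type ("ep1in-bulk"), which is what gadget
 * drivers match when they claim endpoints.  Only OUT endpoints have a
 * byte count register (reg_ubcr), and the bulk/iso endpoints that can
 * use DMA carry a drcmr() entry pointing at their DMA request map
 * register.
 */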
2154 static struct pxa2xx_udc memory = {
2155 .gadget = {
2156 .ops = &pxa2xx_udc_ops,
2157 .ep0 = &memory.ep[0].ep,
2158 .name = driver_name,
2159 .dev = {
2160 .bus_id = "gadget",
2161 .release = nop_release,
2165 /* control endpoint */
2166 .ep[0] = {
2167 .ep = {
2168 .name = ep0name,
2169 .ops = &pxa2xx_ep_ops,
2170 .maxpacket = EP0_FIFO_SIZE,
2172 .dev = &memory,
2173 .reg_udccs = &UDCCS0,
2174 .reg_uddr = &UDDR0,
2177 /* first group of endpoints */
2178 .ep[1] = {
2179 .ep = {
2180 .name = "ep1in-bulk",
2181 .ops = &pxa2xx_ep_ops,
2182 .maxpacket = BULK_FIFO_SIZE,
2184 .dev = &memory,
2185 .fifo_size = BULK_FIFO_SIZE,
2186 .bEndpointAddress = USB_DIR_IN | 1,
2187 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2188 .reg_udccs = &UDCCS1,
2189 .reg_uddr = &UDDR1,
2190 drcmr (25)
2192 .ep[2] = {
2193 .ep = {
2194 .name = "ep2out-bulk",
2195 .ops = &pxa2xx_ep_ops,
2196 .maxpacket = BULK_FIFO_SIZE,
2198 .dev = &memory,
2199 .fifo_size = BULK_FIFO_SIZE,
2200 .bEndpointAddress = 2,
2201 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2202 .reg_udccs = &UDCCS2,
2203 .reg_ubcr = &UBCR2,
2204 .reg_uddr = &UDDR2,
2205 drcmr (26)
2207 #ifndef CONFIG_USB_PXA2XX_SMALL
2208 .ep[3] = {
2209 .ep = {
2210 .name = "ep3in-iso",
2211 .ops = &pxa2xx_ep_ops,
2212 .maxpacket = ISO_FIFO_SIZE,
2214 .dev = &memory,
2215 .fifo_size = ISO_FIFO_SIZE,
2216 .bEndpointAddress = USB_DIR_IN | 3,
2217 .bmAttributes = USB_ENDPOINT_XFER_ISOC,
2218 .reg_udccs = &UDCCS3,
2219 .reg_uddr = &UDDR3,
2220 drcmr (27)
2222 .ep[4] = {
2223 .ep = {
2224 .name = "ep4out-iso",
2225 .ops = &pxa2xx_ep_ops,
2226 .maxpacket = ISO_FIFO_SIZE,
2228 .dev = &memory,
2229 .fifo_size = ISO_FIFO_SIZE,
2230 .bEndpointAddress = 4,
2231 .bmAttributes = USB_ENDPOINT_XFER_ISOC,
2232 .reg_udccs = &UDCCS4,
2233 .reg_ubcr = &UBCR4,
2234 .reg_uddr = &UDDR4,
2235 drcmr (28)
2237 .ep[5] = {
2238 .ep = {
2239 .name = "ep5in-int",
2240 .ops = &pxa2xx_ep_ops,
2241 .maxpacket = INT_FIFO_SIZE,
2243 .dev = &memory,
2244 .fifo_size = INT_FIFO_SIZE,
2245 .bEndpointAddress = USB_DIR_IN | 5,
2246 .bmAttributes = USB_ENDPOINT_XFER_INT,
2247 .reg_udccs = &UDCCS5,
2248 .reg_uddr = &UDDR5,
2251 /* second group of endpoints */
2252 .ep[6] = {
2253 .ep = {
2254 .name = "ep6in-bulk",
2255 .ops = &pxa2xx_ep_ops,
2256 .maxpacket = BULK_FIFO_SIZE,
2258 .dev = &memory,
2259 .fifo_size = BULK_FIFO_SIZE,
2260 .bEndpointAddress = USB_DIR_IN | 6,
2261 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2262 .reg_udccs = &UDCCS6,
2263 .reg_uddr = &UDDR6,
2264 drcmr (30)
2266 .ep[7] = {
2267 .ep = {
2268 .name = "ep7out-bulk",
2269 .ops = &pxa2xx_ep_ops,
2270 .maxpacket = BULK_FIFO_SIZE,
2272 .dev = &memory,
2273 .fifo_size = BULK_FIFO_SIZE,
2274 .bEndpointAddress = 7,
2275 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2276 .reg_udccs = &UDCCS7,
2277 .reg_ubcr = &UBCR7,
2278 .reg_uddr = &UDDR7,
2279 drcmr (31)
2281 .ep[8] = {
2282 .ep = {
2283 .name = "ep8in-iso",
2284 .ops = &pxa2xx_ep_ops,
2285 .maxpacket = ISO_FIFO_SIZE,
2287 .dev = &memory,
2288 .fifo_size = ISO_FIFO_SIZE,
2289 .bEndpointAddress = USB_DIR_IN | 8,
2290 .bmAttributes = USB_ENDPOINT_XFER_ISOC,
2291 .reg_udccs = &UDCCS8,
2292 .reg_uddr = &UDDR8,
2293 drcmr (32)
2295 .ep[9] = {
2296 .ep = {
2297 .name = "ep9out-iso",
2298 .ops = &pxa2xx_ep_ops,
2299 .maxpacket = ISO_FIFO_SIZE,
2301 .dev = &memory,
2302 .fifo_size = ISO_FIFO_SIZE,
2303 .bEndpointAddress = 9,
2304 .bmAttributes = USB_ENDPOINT_XFER_ISOC,
2305 .reg_udccs = &UDCCS9,
2306 .reg_ubcr = &UBCR9,
2307 .reg_uddr = &UDDR9,
2308 drcmr (33)
2310 .ep[10] = {
2311 .ep = {
2312 .name = "ep10in-int",
2313 .ops = &pxa2xx_ep_ops,
2314 .maxpacket = INT_FIFO_SIZE,
2316 .dev = &memory,
2317 .fifo_size = INT_FIFO_SIZE,
2318 .bEndpointAddress = USB_DIR_IN | 10,
2319 .bmAttributes = USB_ENDPOINT_XFER_INT,
2320 .reg_udccs = &UDCCS10,
2321 .reg_uddr = &UDDR10,
2324 /* third group of endpoints */
2325 .ep[11] = {
2326 .ep = {
2327 .name = "ep11in-bulk",
2328 .ops = &pxa2xx_ep_ops,
2329 .maxpacket = BULK_FIFO_SIZE,
2331 .dev = &memory,
2332 .fifo_size = BULK_FIFO_SIZE,
2333 .bEndpointAddress = USB_DIR_IN | 11,
2334 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2335 .reg_udccs = &UDCCS11,
2336 .reg_uddr = &UDDR11,
2337 drcmr (35)
2339 .ep[12] = {
2340 .ep = {
2341 .name = "ep12out-bulk",
2342 .ops = &pxa2xx_ep_ops,
2343 .maxpacket = BULK_FIFO_SIZE,
2345 .dev = &memory,
2346 .fifo_size = BULK_FIFO_SIZE,
2347 .bEndpointAddress = 12,
2348 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2349 .reg_udccs = &UDCCS12,
2350 .reg_ubcr = &UBCR12,
2351 .reg_uddr = &UDDR12,
2352 drcmr (36)
2354 .ep[13] = {
2355 .ep = {
2356 .name = "ep13in-iso",
2357 .ops = &pxa2xx_ep_ops,
2358 .maxpacket = ISO_FIFO_SIZE,
2360 .dev = &memory,
2361 .fifo_size = ISO_FIFO_SIZE,
2362 .bEndpointAddress = USB_DIR_IN | 13,
2363 .bmAttributes = USB_ENDPOINT_XFER_ISOC,
2364 .reg_udccs = &UDCCS13,
2365 .reg_uddr = &UDDR13,
2366 drcmr (37)
2368 .ep[14] = {
2369 .ep = {
2370 .name = "ep14out-iso",
2371 .ops = &pxa2xx_ep_ops,
2372 .maxpacket = ISO_FIFO_SIZE,
2374 .dev = &memory,
2375 .fifo_size = ISO_FIFO_SIZE,
2376 .bEndpointAddress = 14,
2377 .bmAttributes = USB_ENDPOINT_XFER_ISOC,
2378 .reg_udccs = &UDCCS14,
2379 .reg_ubcr = &UBCR14,
2380 .reg_uddr = &UDDR14,
2381 drcmr (38)
2383 .ep[15] = {
2384 .ep = {
2385 .name = "ep15in-int",
2386 .ops = &pxa2xx_ep_ops,
2387 .maxpacket = INT_FIFO_SIZE,
2389 .dev = &memory,
2390 .fifo_size = INT_FIFO_SIZE,
2391 .bEndpointAddress = USB_DIR_IN | 15,
2392 .bmAttributes = USB_ENDPOINT_XFER_INT,
2393 .reg_udccs = &UDCCS15,
2394 .reg_uddr = &UDDR15,
2396 #endif /* !CONFIG_USB_PXA2XX_SMALL */
2397 };
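/* With CONFIG_USB_PXA2XX_SMALL only ep0 and the first bulk pair
 * (ep1in/ep2out) are present in the table above, keeping the static
 * allocation small for memory-constrained configurations.
 */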
2399 #define CP15R0_VENDOR_MASK 0xffffe000
2401 #if defined(CONFIG_ARCH_PXA)
2402 #define CP15R0_XSCALE_VALUE 0x69052000 /* intel/arm/xscale */
2404 #elif defined(CONFIG_ARCH_IXP4XX)
2405 #define CP15R0_XSCALE_VALUE 0x69054000 /* intel/arm/ixp4xx */
2407 #endif
2409 #define CP15R0_PROD_MASK 0x000003f0
2410 #define PXA25x 0x00000100 /* and PXA26x */
2411 #define PXA210 0x00000120
2413 #define CP15R0_REV_MASK 0x0000000f
2415 #define CP15R0_PRODREV_MASK (CP15R0_PROD_MASK | CP15R0_REV_MASK)
2417 #define PXA255_A0 0x00000106 /* or PXA260_B1 */
2418 #define PXA250_C0 0x00000105 /* or PXA26x_B0 */
2419 #define PXA250_B2 0x00000104
2420 #define PXA250_B1 0x00000103 /* or PXA260_A0 */
2421 #define PXA250_B0 0x00000102
2422 #define PXA250_A1 0x00000101
2423 #define PXA250_A0 0x00000100
2425 #define PXA210_C0 0x00000125
2426 #define PXA210_B2 0x00000124
2427 #define PXA210_B1 0x00000123
2428 #define PXA210_B0 0x00000122
2429 #define IXP425_A0 0x000001c1
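/* For illustration (derived from the masks above): an ID register value
 * whose vendor bits equal 0x69052000 and whose product/revision bits
 * (CP15R0_PRODREV_MASK) come out to 0x106 -- product field 0x10,
 * revision 6 -- is treated as PXA255_A0 by the switch in
 * pxa2xx_udc_probe() below.
 */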
2431 /*
2432 * probe - binds to the platform device
2433 */
2434 static int __init pxa2xx_udc_probe(struct platform_device *pdev)
2435 {
2436 struct pxa2xx_udc *dev = &memory;
2437 int retval, out_dma = 1;
2438 u32 chiprev;
2440 /* insist on Intel/ARM/XScale */
2441 asm("mrc%? p15, 0, %0, c0, c0" : "=r" (chiprev));
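	/* (the mrc above reads CP15 register 0, the ARM Main ID register,
	 * into chiprev so vendor and stepping can be checked before any
	 * UDC registers are touched)
	 */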
2442 if ((chiprev & CP15R0_VENDOR_MASK) != CP15R0_XSCALE_VALUE) {
2443 printk(KERN_ERR "%s: not XScale!\n", driver_name);
2444 return -ENODEV;
2447 /* trigger chiprev-specific logic */
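	/* has_cfr marks parts that provide the UDC control function
	 * register; out_dma is cleared on steppings whose OUT-path DMA is
	 * unreliable (see the erratum note in the USE_DMA block below).
	 */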
2448 switch (chiprev & CP15R0_PRODREV_MASK) {
2449 #if defined(CONFIG_ARCH_PXA)
2450 case PXA255_A0:
2451 dev->has_cfr = 1;
2452 break;
2453 case PXA250_A0:
2454 case PXA250_A1:
2455 /* A0/A1 "not released"; ep 13, 15 unusable */
2456 /* fall through */
2457 case PXA250_B2: case PXA210_B2:
2458 case PXA250_B1: case PXA210_B1:
2459 case PXA250_B0: case PXA210_B0:
2460 out_dma = 0;
2461 /* fall through */
2462 case PXA250_C0: case PXA210_C0:
2463 break;
2464 #elif defined(CONFIG_ARCH_IXP4XX)
2465 case IXP425_A0:
2466 out_dma = 0;
2467 break;
2468 #endif
2469 default:
2470 out_dma = 0;
2471 printk(KERN_ERR "%s: unrecognized processor: %08x\n",
2472 driver_name, chiprev);
2473 /* iop3xx, ixp4xx, ... */
2474 return -ENODEV;
2477 pr_debug("%s: IRQ %d%s%s%s\n", driver_name, IRQ_USB,
2478 dev->has_cfr ? "" : " (!cfr)",
2479 out_dma ? "" : " (broken dma-out)",
2480 SIZE_STR DMASTR
2481 );
2483 #ifdef USE_DMA
2484 #ifndef USE_OUT_DMA
2485 out_dma = 0;
2486 #endif
2487 /* pxa 250 erratum 130 prevents using OUT dma (fixed C0) */
2488 if (!out_dma) {
2489 DMSG("disabled OUT dma\n");
2490 dev->ep[ 2].reg_drcmr = dev->ep[ 4].reg_drcmr = 0;
2491 dev->ep[ 7].reg_drcmr = dev->ep[ 9].reg_drcmr = 0;
2492 dev->ep[12].reg_drcmr = dev->ep[14].reg_drcmr = 0;
2494 #endif
2496 /* other non-static parts of init */
2497 dev->dev = &pdev->dev;
2498 dev->mach = pdev->dev.platform_data;
2500 init_timer(&dev->timer);
2501 dev->timer.function = udc_watchdog;
2502 dev->timer.data = (unsigned long) dev;
2504 device_initialize(&dev->gadget.dev);
2505 dev->gadget.dev.parent = &pdev->dev;
2506 dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
2508 the_controller = dev;
2509 platform_set_drvdata(pdev, dev);
2511 udc_disable(dev);
2512 udc_reinit(dev);
2514 dev->vbus = is_vbus_present();
2516 /* irq setup after old hardware state is cleaned up */
2517 retval = request_irq(IRQ_USB, pxa2xx_udc_irq,
2518 SA_INTERRUPT, driver_name, dev);
2519 if (retval != 0) {
2520 printk(KERN_ERR "%s: can't get irq %i, err %d\n",
2521 driver_name, IRQ_USB, retval);
2522 return -EBUSY;
2524 dev->got_irq = 1;
2526 #ifdef CONFIG_ARCH_LUBBOCK
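	/* On Lubbock, cable connect and disconnect are signalled by two
	 * dedicated board IRQs rather than by the UDC itself, so both are
	 * routed to lubbock_vbus_irq() to track VBUS.
	 */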
2527 if (machine_is_lubbock()) {
2528 retval = request_irq(LUBBOCK_USB_DISC_IRQ,
2529 lubbock_vbus_irq,
2530 SA_INTERRUPT | SA_SAMPLE_RANDOM,
2531 driver_name, dev);
2532 if (retval != 0) {
2533 printk(KERN_ERR "%s: can't get irq %i, err %d\n",
2534 driver_name, LUBBOCK_USB_DISC_IRQ, retval);
2535 lubbock_fail0:
2536 free_irq(IRQ_USB, dev);
2537 return -EBUSY;
2539 retval = request_irq(LUBBOCK_USB_IRQ,
2540 lubbock_vbus_irq,
2541 SA_INTERRUPT | SA_SAMPLE_RANDOM,
2542 driver_name, dev);
2543 if (retval != 0) {
2544 printk(KERN_ERR "%s: can't get irq %i, err %d\n",
2545 driver_name, LUBBOCK_USB_IRQ, retval);
2546 free_irq(LUBBOCK_USB_DISC_IRQ, dev);
2547 goto lubbock_fail0;
2549 #ifdef DEBUG
2550 /* with U-Boot (but not BLOB), hex is off by default */
2551 HEX_DISPLAY(dev->stats.irqs);
2552 LUB_DISC_BLNK_LED &= 0xff;
2553 #endif
2555 #endif
2556 create_proc_files();
2558 return 0;
2559 }
2561 static void pxa2xx_udc_shutdown(struct platform_device *_dev)
2563 pullup_off();
2566 static int __exit pxa2xx_udc_remove(struct platform_device *pdev)
2568 struct pxa2xx_udc *dev = platform_get_drvdata(pdev);
2570 udc_disable(dev);
2571 remove_proc_files();
2572 usb_gadget_unregister_driver(dev->driver);
2574 if (dev->got_irq) {
2575 free_irq(IRQ_USB, dev);
2576 dev->got_irq = 0;
2578 if (machine_is_lubbock()) {
2579 free_irq(LUBBOCK_USB_DISC_IRQ, dev);
2580 free_irq(LUBBOCK_USB_IRQ, dev);
2582 platform_set_drvdata(pdev, NULL);
2583 the_controller = NULL;
2584 return 0;
2587 /*-------------------------------------------------------------------------*/
2589 #ifdef CONFIG_PM
2591 /* USB suspend (controlled by the host) and system suspend (controlled
2592 * by the PXA) don't necessarily work well together. If USB is active,
2593 * the 48 MHz clock is required; so the system can't enter 33 MHz idle
2594 * mode, or any deeper PM saving state.
2596 * For now, we punt and forcibly disconnect from the USB host when PXA
2597 * enters any suspend state. While we're disconnected, we always disable
2598 * the 48MHz USB clock ... allowing PXA sleep and/or 33 MHz idle states.
2599 * Boards without software pullup control shouldn't use those states.
2600 * VBUS IRQs should probably be ignored so that the PXA device just acts
2601 * "dead" to USB hosts until system resume.
2603 static int pxa2xx_udc_suspend(struct platform_device *dev, pm_message_t state)
2605 struct pxa2xx_udc *udc = platform_get_drvdata(dev);
2607 if (!udc->mach->udc_command)
2608 WARN("USB host won't detect disconnect!\n");
2609 pullup(udc, 0);
2611 return 0;
2614 static int pxa2xx_udc_resume(struct platform_device *dev)
2616 struct pxa2xx_udc *udc = platform_get_drvdata(dev);
2618 pullup(udc, 1);
2620 return 0;
2623 #else
2624 #define pxa2xx_udc_suspend NULL
2625 #define pxa2xx_udc_resume NULL
2626 #endif
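/* without CONFIG_PM the hooks above compile away to NULL, so nothing is
 * invoked across system sleep transitions.
 */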
2628 /*-------------------------------------------------------------------------*/
2630 static struct platform_driver udc_driver = {
2631 .probe = pxa2xx_udc_probe,
2632 .shutdown = pxa2xx_udc_shutdown,
2633 .remove = __exit_p(pxa2xx_udc_remove),
2634 .suspend = pxa2xx_udc_suspend,
2635 .resume = pxa2xx_udc_resume,
2636 .driver = {
2637 .owner = THIS_MODULE,
2638 .name = "pxa2xx-udc",
2639 },
2640 };
2642 static int __init udc_init(void)
2644 printk(KERN_INFO "%s: version %s\n", driver_name, DRIVER_VERSION);
2645 return platform_driver_register(&udc_driver);
2647 module_init(udc_init);
2649 static void __exit udc_exit(void)
2651 platform_driver_unregister(&udc_driver);
2653 module_exit(udc_exit);
2655 MODULE_DESCRIPTION(DRIVER_DESC);
2656 MODULE_AUTHOR("Frank Becker, Robert Schwebel, David Brownell");
2657 MODULE_LICENSE("GPL");