usb: dwc3: gadget: return early in dwc3_cleanup_done_reqs()
drivers/usb/dwc3/gadget.c
1 /**
2 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
4 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
6 * Authors: Felipe Balbi <balbi@ti.com>,
7 * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions, and the following disclaimer,
14 * without modification.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. The names of the above-listed copyright holders may not be used
19 * to endorse or promote products derived from this software without
20 * specific prior written permission.
22 * ALTERNATIVELY, this software may be distributed under the terms of the
23 * GNU General Public License ("GPL") version 2, as published by the Free
24 * Software Foundation.
26 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
27 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
28 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
30 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
31 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
32 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
33 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
34 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
35 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
36 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 #include <linux/kernel.h>
40 #include <linux/delay.h>
41 #include <linux/slab.h>
42 #include <linux/spinlock.h>
43 #include <linux/platform_device.h>
44 #include <linux/pm_runtime.h>
45 #include <linux/interrupt.h>
46 #include <linux/io.h>
47 #include <linux/list.h>
48 #include <linux/dma-mapping.h>
50 #include <linux/usb/ch9.h>
51 #include <linux/usb/gadget.h>
53 #include "core.h"
54 #include "gadget.h"
55 #include "io.h"
57 #define DMA_ADDR_INVALID (~(dma_addr_t)0)
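/*
 * Requests are queued with req->request.dma == DMA_ADDR_INVALID when the
 * gadget driver did not map the buffer itself; in that case the helpers
 * below map it and set req->mapped so that the buffer is unmapped again
 * in dwc3_unmap_buffer_from_dma() when the request is given back.
 */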
59 void dwc3_map_buffer_to_dma(struct dwc3_request *req)
61 struct dwc3 *dwc = req->dep->dwc;
63 if (req->request.length == 0) {
64 /* req->request.dma = dwc->setup_buf_addr; */
65 return;
68 if (req->request.dma == DMA_ADDR_INVALID) {
69 req->request.dma = dma_map_single(dwc->dev, req->request.buf,
70 req->request.length, req->direction
71 ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
72 req->mapped = true;
76 void dwc3_unmap_buffer_from_dma(struct dwc3_request *req)
78 struct dwc3 *dwc = req->dep->dwc;
80 if (req->request.length == 0) {
81 req->request.dma = DMA_ADDR_INVALID;
82 return;
85 if (req->mapped) {
86 dma_unmap_single(dwc->dev, req->request.dma,
87 req->request.length, req->direction
88 ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
89 req->mapped = 0;
90 req->request.dma = DMA_ADDR_INVALID;
94 void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
95 int status)
97 struct dwc3 *dwc = dep->dwc;
99 if (req->queued) {
100 dep->busy_slot++;
102 * Skip the LINK TRB. We can't use req->trb and check for
103 * DWC3_TRBCTL_LINK_TRB because it points to the TRB we just
104 * completed (not the LINK TRB).
106 if (((dep->busy_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
107 usb_endpoint_xfer_isoc(dep->desc))
108 dep->busy_slot++;
110 list_del(&req->list);
112 if (req->request.status == -EINPROGRESS)
113 req->request.status = status;
115 dwc3_unmap_buffer_from_dma(req);
117 dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n",
118 req, dep->name, req->request.actual,
119 req->request.length, status);
121 spin_unlock(&dwc->lock);
122 req->request.complete(&req->dep->endpoint, &req->request);
123 spin_lock(&dwc->lock);
126 static const char *dwc3_gadget_ep_cmd_string(u8 cmd)
128 switch (cmd) {
129 case DWC3_DEPCMD_DEPSTARTCFG:
130 return "Start New Configuration";
131 case DWC3_DEPCMD_ENDTRANSFER:
132 return "End Transfer";
133 case DWC3_DEPCMD_UPDATETRANSFER:
134 return "Update Transfer";
135 case DWC3_DEPCMD_STARTTRANSFER:
136 return "Start Transfer";
137 case DWC3_DEPCMD_CLEARSTALL:
138 return "Clear Stall";
139 case DWC3_DEPCMD_SETSTALL:
140 return "Set Stall";
141 case DWC3_DEPCMD_GETSEQNUMBER:
142 return "Get Data Sequence Number";
143 case DWC3_DEPCMD_SETTRANSFRESOURCE:
144 return "Set Endpoint Transfer Resource";
145 case DWC3_DEPCMD_SETEPCONFIG:
146 return "Set Endpoint Configuration";
147 default:
148 return "UNKNOWN command";
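/*
 * Issue an endpoint command: write the three parameter registers, then
 * write the command with CMDACT set and poll DEPCMD (roughly 500 * 1us)
 * until the controller clears CMDACT or we time out.
 */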
152 int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
153 unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
155 struct dwc3_ep *dep = dwc->eps[ep];
156 u32 timeout = 500;
157 u32 reg;
159 dev_vdbg(dwc->dev, "%s: cmd '%s' params %08x %08x %08x\n",
160 dep->name,
161 dwc3_gadget_ep_cmd_string(cmd), params->param0,
162 params->param1, params->param2);
164 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0);
165 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1);
166 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2);
168 dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT);
169 do {
170 reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep));
171 if (!(reg & DWC3_DEPCMD_CMDACT)) {
172 dev_vdbg(dwc->dev, "Command Complete --> %d\n",
173 DWC3_DEPCMD_STATUS(reg));
174 return 0;
178 * We can't sleep here, because it is also called from
179 * interrupt context.
181 timeout--;
182 if (!timeout)
183 return -ETIMEDOUT;
185 udelay(1);
186 } while (1);
189 static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
190 struct dwc3_trb_hw *trb)
192 u32 offset = (char *) trb - (char *) dep->trb_pool;
194 return dep->trb_pool_dma + offset;
197 static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
199 struct dwc3 *dwc = dep->dwc;
201 if (dep->trb_pool)
202 return 0;
204 if (dep->number == 0 || dep->number == 1)
205 return 0;
207 dep->trb_pool = dma_alloc_coherent(dwc->dev,
208 sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
209 &dep->trb_pool_dma, GFP_KERNEL);
210 if (!dep->trb_pool) {
211 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
212 dep->name);
213 return -ENOMEM;
216 return 0;
219 static void dwc3_free_trb_pool(struct dwc3_ep *dep)
221 struct dwc3 *dwc = dep->dwc;
223 dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
224 dep->trb_pool, dep->trb_pool_dma);
226 dep->trb_pool = NULL;
227 dep->trb_pool_dma = 0;
230 static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
232 struct dwc3_gadget_ep_cmd_params params;
233 u32 cmd;
235 memset(&params, 0x00, sizeof(params));
237 if (dep->number != 1) {
238 cmd = DWC3_DEPCMD_DEPSTARTCFG;
239 /* XferRscIdx == 0 for ep0 and 2 for the remaining */
240 if (dep->number > 1) {
241 if (dwc->start_config_issued)
242 return 0;
243 dwc->start_config_issued = true;
244 cmd |= DWC3_DEPCMD_PARAM(2);
247 return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
250 return 0;
253 static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
254 const struct usb_endpoint_descriptor *desc)
256 struct dwc3_gadget_ep_cmd_params params;
258 memset(&params, 0x00, sizeof(params));
260 params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
261 | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc))
262 | DWC3_DEPCFG_BURST_SIZE(dep->endpoint.maxburst);
264 params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
265 | DWC3_DEPCFG_XFER_NOT_READY_EN;
267 if (usb_endpoint_xfer_bulk(desc) && dep->endpoint.max_streams) {
268 params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
269 | DWC3_DEPCFG_STREAM_EVENT_EN;
270 dep->stream_capable = true;
273 if (usb_endpoint_xfer_isoc(desc))
274 params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
277 * We are doing 1:1 mapping for endpoints, meaning
278 * Physical Endpoint 2 maps to Logical Endpoint 2 and
279 * so on. We consider the direction bit as part of the physical
280 * endpoint number. So USB endpoint 0x81 is 0x03.
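 * (Illustration: bEndpointAddress 0x81, i.e. IN endpoint 1, becomes
 * physical endpoint (1 << 1) | 1 = 3, while 0x02, i.e. OUT endpoint 2,
 * becomes (2 << 1) | 0 = 4.)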
282 params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
285 * We must use the lower 16 TX FIFOs even though
286 * HW might have more
288 if (dep->direction)
289 params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
291 if (desc->bInterval) {
292 params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
293 dep->interval = 1 << (desc->bInterval - 1);
296 return dwc3_send_gadget_ep_cmd(dwc, dep->number,
297 DWC3_DEPCMD_SETEPCONFIG, &params);
300 static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
302 struct dwc3_gadget_ep_cmd_params params;
304 memset(&params, 0x00, sizeof(params));
306 params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
308 return dwc3_send_gadget_ep_cmd(dwc, dep->number,
309 DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
313 * __dwc3_gadget_ep_enable - Initializes a HW endpoint
314 * @dep: endpoint to be initialized
315 * @desc: USB Endpoint Descriptor
317 * Caller should take care of locking
319 static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
320 const struct usb_endpoint_descriptor *desc)
322 struct dwc3 *dwc = dep->dwc;
323 u32 reg;
324 int ret = -ENOMEM;
326 if (!(dep->flags & DWC3_EP_ENABLED)) {
327 ret = dwc3_gadget_start_config(dwc, dep);
328 if (ret)
329 return ret;
332 ret = dwc3_gadget_set_ep_config(dwc, dep, desc);
333 if (ret)
334 return ret;
336 if (!(dep->flags & DWC3_EP_ENABLED)) {
337 struct dwc3_trb_hw *trb_st_hw;
338 struct dwc3_trb_hw *trb_link_hw;
339 struct dwc3_trb trb_link;
341 ret = dwc3_gadget_set_xfer_resource(dwc, dep);
342 if (ret)
343 return ret;
345 dep->desc = desc;
346 dep->type = usb_endpoint_type(desc);
347 dep->flags |= DWC3_EP_ENABLED;
349 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
350 reg |= DWC3_DALEPENA_EP(dep->number);
351 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
353 if (!usb_endpoint_xfer_isoc(desc))
354 return 0;
356 memset(&trb_link, 0, sizeof(trb_link));
358 /* Link TRB for ISOC. The HWO bit is never reset */
359 trb_st_hw = &dep->trb_pool[0];
361 trb_link.bplh = dwc3_trb_dma_offset(dep, trb_st_hw);
362 trb_link.trbctl = DWC3_TRBCTL_LINK_TRB;
363 trb_link.hwo = true;
365 trb_link_hw = &dep->trb_pool[DWC3_TRB_NUM - 1];
366 dwc3_trb_to_hw(&trb_link, trb_link_hw);
369 return 0;
372 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum);
373 static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
375 struct dwc3_request *req;
377 if (!list_empty(&dep->req_queued))
378 dwc3_stop_active_transfer(dwc, dep->number);
380 while (!list_empty(&dep->request_list)) {
381 req = next_request(&dep->request_list);
383 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
388 * __dwc3_gadget_ep_disable - Disables a HW endpoint
389 * @dep: the endpoint to disable
391 * This function also removes requests which are currently processed by the
392 * hardware and those which are not yet scheduled.
393 * Caller should take care of locking.
395 static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
397 struct dwc3 *dwc = dep->dwc;
398 u32 reg;
400 dwc3_remove_requests(dwc, dep);
402 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
403 reg &= ~DWC3_DALEPENA_EP(dep->number);
404 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
406 dep->stream_capable = false;
407 dep->desc = NULL;
408 dep->type = 0;
409 dep->flags = 0;
411 return 0;
414 /* -------------------------------------------------------------------------- */
416 static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
417 const struct usb_endpoint_descriptor *desc)
419 return -EINVAL;
422 static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
424 return -EINVAL;
427 /* -------------------------------------------------------------------------- */
429 static int dwc3_gadget_ep_enable(struct usb_ep *ep,
430 const struct usb_endpoint_descriptor *desc)
432 struct dwc3_ep *dep;
433 struct dwc3 *dwc;
434 unsigned long flags;
435 int ret;
437 if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
438 pr_debug("dwc3: invalid parameters\n");
439 return -EINVAL;
442 if (!desc->wMaxPacketSize) {
443 pr_debug("dwc3: missing wMaxPacketSize\n");
444 return -EINVAL;
447 dep = to_dwc3_ep(ep);
448 dwc = dep->dwc;
450 switch (usb_endpoint_type(desc)) {
451 case USB_ENDPOINT_XFER_CONTROL:
452 strncat(dep->name, "-control", sizeof(dep->name));
453 break;
454 case USB_ENDPOINT_XFER_ISOC:
455 strncat(dep->name, "-isoc", sizeof(dep->name));
456 break;
457 case USB_ENDPOINT_XFER_BULK:
458 strncat(dep->name, "-bulk", sizeof(dep->name));
459 break;
460 case USB_ENDPOINT_XFER_INT:
461 strncat(dep->name, "-int", sizeof(dep->name));
462 break;
463 default:
464 dev_err(dwc->dev, "invalid endpoint transfer type\n");
467 if (dep->flags & DWC3_EP_ENABLED) {
468 dev_WARN_ONCE(dwc->dev, true, "%s is already enabled\n",
469 dep->name);
470 return 0;
473 dev_vdbg(dwc->dev, "Enabling %s\n", dep->name);
475 spin_lock_irqsave(&dwc->lock, flags);
476 ret = __dwc3_gadget_ep_enable(dep, desc);
477 spin_unlock_irqrestore(&dwc->lock, flags);
479 return ret;
482 static int dwc3_gadget_ep_disable(struct usb_ep *ep)
484 struct dwc3_ep *dep;
485 struct dwc3 *dwc;
486 unsigned long flags;
487 int ret;
489 if (!ep) {
490 pr_debug("dwc3: invalid parameters\n");
491 return -EINVAL;
494 dep = to_dwc3_ep(ep);
495 dwc = dep->dwc;
497 if (!(dep->flags & DWC3_EP_ENABLED)) {
498 dev_WARN_ONCE(dwc->dev, true, "%s is already disabled\n",
499 dep->name);
500 return 0;
503 snprintf(dep->name, sizeof(dep->name), "ep%d%s",
504 dep->number >> 1,
505 (dep->number & 1) ? "in" : "out");
507 spin_lock_irqsave(&dwc->lock, flags);
508 ret = __dwc3_gadget_ep_disable(dep);
509 spin_unlock_irqrestore(&dwc->lock, flags);
511 return ret;
514 static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
515 gfp_t gfp_flags)
517 struct dwc3_request *req;
518 struct dwc3_ep *dep = to_dwc3_ep(ep);
519 struct dwc3 *dwc = dep->dwc;
521 req = kzalloc(sizeof(*req), gfp_flags);
522 if (!req) {
523 dev_err(dwc->dev, "not enough memory\n");
524 return NULL;
527 req->epnum = dep->number;
528 req->dep = dep;
529 req->request.dma = DMA_ADDR_INVALID;
531 return &req->request;
534 static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
535 struct usb_request *request)
537 struct dwc3_request *req = to_dwc3_request(request);
539 kfree(req);
543 * dwc3_prepare_trbs - setup TRBs from requests
544 * @dep: endpoint for which requests are being prepared
545 * @starting: true if the endpoint is idle and no requests are queued.
547 * The function goes through the request list and sets up TRBs for the
548 * transfers. The function returns once there are no more TRBs available or
549 * it runs out of requests.
551 static struct dwc3_request *dwc3_prepare_trbs(struct dwc3_ep *dep,
552 bool starting)
554 struct dwc3_request *req, *n, *ret = NULL;
555 struct dwc3_trb_hw *trb_hw;
556 struct dwc3_trb trb;
557 u32 trbs_left;
559 BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);
561 /* the first request must not be queued */
562 trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK;
564 * If busy and free slot are equal, then the ring is either full or empty.
565 * If we are starting to process requests then it is empty. Otherwise it is
566 * full and we don't do anything.
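 * (Worked example, assuming DWC3_TRB_NUM == 32: busy_slot == 5 and
 * free_slot == 9 give trbs_left == (5 - 9) & DWC3_TRB_MASK == 28;
 * equal values give 0, which is why the "starting" flag is needed to
 * tell a full ring from an empty one.)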
568 if (!trbs_left) {
569 if (!starting)
570 return NULL;
571 trbs_left = DWC3_TRB_NUM;
573 * In case we start from scratch, we queue the ISOC requests
574 * starting from slot 1. This is done because we use a ring
575 * buffer and have no LST bit to stop us. Instead, we place
576 * the IOC bit every TRB_NUM/4 TRBs. We try to avoid having an
577 * interrupt after the first request so we start at slot 1 and
578 * have 7 requests processed before we hit the first IOC.
579 * Other transfer types don't use the ring buffer and are
580 * processed from the first TRB until the last one. Since we
581 * don't wrap around we have to start at the beginning.
583 if (usb_endpoint_xfer_isoc(dep->desc)) {
584 dep->busy_slot = 1;
585 dep->free_slot = 1;
586 } else {
587 dep->busy_slot = 0;
588 dep->free_slot = 0;
592 /* The last TRB is a link TRB, not used for xfer */
593 if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->desc))
594 return NULL;
596 list_for_each_entry_safe(req, n, &dep->request_list, list) {
597 unsigned int last_one = 0;
598 unsigned int cur_slot;
600 trb_hw = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
601 cur_slot = dep->free_slot;
602 dep->free_slot++;
604 /* Skip the LINK-TRB on ISOC */
605 if (((cur_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
606 usb_endpoint_xfer_isoc(dep->desc))
607 continue;
609 dwc3_gadget_move_request_queued(req);
610 memset(&trb, 0, sizeof(trb));
611 trbs_left--;
613 /* Is our TRB pool empty? */
614 if (!trbs_left)
615 last_one = 1;
616 /* Is this the last request? */
617 if (list_empty(&dep->request_list))
618 last_one = 1;
621 * FIXME we shouldn't need to always set the LST bit but we are
622 * facing some weird problem with the Hardware where it doesn't
623 * complete even though it has been previously started.
625 * While we're debugging the problem, as a workaround for
626 * multiple TRB handling, use only one TRB at a time.
628 last_one = 1;
630 req->trb = trb_hw;
631 if (!ret)
632 ret = req;
634 trb.bplh = req->request.dma;
636 if (usb_endpoint_xfer_isoc(dep->desc)) {
637 trb.isp_imi = true;
638 trb.csp = true;
639 } else {
640 trb.lst = last_one;
643 if (usb_endpoint_xfer_bulk(dep->desc) && dep->stream_capable)
644 trb.sid_sofn = req->request.stream_id;
646 switch (usb_endpoint_type(dep->desc)) {
647 case USB_ENDPOINT_XFER_CONTROL:
648 trb.trbctl = DWC3_TRBCTL_CONTROL_SETUP;
649 break;
651 case USB_ENDPOINT_XFER_ISOC:
652 trb.trbctl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
654 /* IOC every DWC3_TRB_NUM / 4 so we can refill */
655 if (!(cur_slot % (DWC3_TRB_NUM / 4)))
656 trb.ioc = last_one;
657 break;
659 case USB_ENDPOINT_XFER_BULK:
660 case USB_ENDPOINT_XFER_INT:
661 trb.trbctl = DWC3_TRBCTL_NORMAL;
662 break;
663 default:
665 * This is only possible with faulty memory because we
666 * checked it already :)
668 BUG();
671 trb.length = req->request.length;
672 trb.hwo = true;
674 dwc3_trb_to_hw(&trb, trb_hw);
675 req->trb_dma = dwc3_trb_dma_offset(dep, trb_hw);
677 if (last_one)
678 break;
681 return ret;
684 static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
685 int start_new)
687 struct dwc3_gadget_ep_cmd_params params;
688 struct dwc3_request *req;
689 struct dwc3 *dwc = dep->dwc;
690 int ret;
691 u32 cmd;
693 if (start_new && (dep->flags & DWC3_EP_BUSY)) {
694 dev_vdbg(dwc->dev, "%s: endpoint busy\n", dep->name);
695 return -EBUSY;
697 dep->flags &= ~DWC3_EP_PENDING_REQUEST;
700 * If we are getting here after a short-out-packet we don't enqueue any
701 * new requests as we try to set the IOC bit only on the last request.
703 if (start_new) {
704 if (list_empty(&dep->req_queued))
705 dwc3_prepare_trbs(dep, start_new);
707 /* req points to the first request which will be sent */
708 req = next_request(&dep->req_queued);
709 } else {
711 * req points to the first request where HWO changed
712 * from 0 to 1
714 req = dwc3_prepare_trbs(dep, start_new);
716 if (!req) {
717 dep->flags |= DWC3_EP_PENDING_REQUEST;
718 return 0;
721 memset(&params, 0, sizeof(params));
722 params.param0 = upper_32_bits(req->trb_dma);
723 params.param1 = lower_32_bits(req->trb_dma);
725 if (start_new)
726 cmd = DWC3_DEPCMD_STARTTRANSFER;
727 else
728 cmd = DWC3_DEPCMD_UPDATETRANSFER;
730 cmd |= DWC3_DEPCMD_PARAM(cmd_param);
731 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
732 if (ret < 0) {
733 dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n");
736 * FIXME we need to iterate over the list of requests
737 * here and stop, unmap, free and del each of the linked
738 * requests instead of what we do now.
740 dwc3_unmap_buffer_from_dma(req);
741 list_del(&req->list);
742 return ret;
745 dep->flags |= DWC3_EP_BUSY;
746 dep->res_trans_idx = dwc3_gadget_ep_get_transfer_index(dwc,
747 dep->number);
749 WARN_ON_ONCE(!dep->res_trans_idx);
751 return 0;
754 static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
756 req->request.actual = 0;
757 req->request.status = -EINPROGRESS;
758 req->direction = dep->direction;
759 req->epnum = dep->number;
762 * We only add to our list of requests now and
763 * start consuming the list once we get XferNotReady
764 * IRQ.
766 * That way, we avoid doing anything that we don't need
767 * to do now and defer it until the point we receive a
768 * particular token from the Host side.
770 * This will also avoid the Host cancelling URBs due to too
771 * many NAKs.
773 dwc3_map_buffer_to_dma(req);
774 list_add_tail(&req->list, &dep->request_list);
777 * There is one special case: XferNotReady with an
778 * empty list of requests. We need to kick the
779 * transfer here in that situation, otherwise
780 * we will be NAKing forever.
782 * If we get XferNotReady before the gadget driver
783 * has a chance to queue a request, we will ACK
784 * the IRQ but won't be able to receive the data
785 * until the next request is queued. The following
786 * code is handling exactly that.
788 if (dep->flags & DWC3_EP_PENDING_REQUEST) {
789 int ret;
790 int start_trans;
792 start_trans = 1;
793 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
794 dep->flags & DWC3_EP_BUSY)
795 start_trans = 0;
797 ret = __dwc3_gadget_kick_transfer(dep, 0, start_trans);
798 if (ret && ret != -EBUSY) {
799 struct dwc3 *dwc = dep->dwc;
801 dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
802 dep->name);
806 return 0;
809 static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
810 gfp_t gfp_flags)
812 struct dwc3_request *req = to_dwc3_request(request);
813 struct dwc3_ep *dep = to_dwc3_ep(ep);
814 struct dwc3 *dwc = dep->dwc;
816 unsigned long flags;
818 int ret;
820 if (!dep->desc) {
821 dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
822 request, ep->name);
823 return -ESHUTDOWN;
826 dev_vdbg(dwc->dev, "queuing request %p to %s length %d\n",
827 request, ep->name, request->length);
829 spin_lock_irqsave(&dwc->lock, flags);
830 ret = __dwc3_gadget_ep_queue(dep, req);
831 spin_unlock_irqrestore(&dwc->lock, flags);
833 return ret;
836 static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
837 struct usb_request *request)
839 struct dwc3_request *req = to_dwc3_request(request);
840 struct dwc3_request *r = NULL;
842 struct dwc3_ep *dep = to_dwc3_ep(ep);
843 struct dwc3 *dwc = dep->dwc;
845 unsigned long flags;
846 int ret = 0;
848 spin_lock_irqsave(&dwc->lock, flags);
850 list_for_each_entry(r, &dep->request_list, list) {
851 if (r == req)
852 break;
855 if (r != req) {
856 list_for_each_entry(r, &dep->req_queued, list) {
857 if (r == req)
858 break;
860 if (r == req) {
861 /* wait until it is processed */
862 dwc3_stop_active_transfer(dwc, dep->number);
863 goto out0;
865 dev_err(dwc->dev, "request %p was not queued to %s\n",
866 request, ep->name);
867 ret = -EINVAL;
868 goto out0;
871 /* giveback the request */
872 dwc3_gadget_giveback(dep, req, -ECONNRESET);
874 out0:
875 spin_unlock_irqrestore(&dwc->lock, flags);
877 return ret;
880 int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
882 struct dwc3_gadget_ep_cmd_params params;
883 struct dwc3 *dwc = dep->dwc;
884 int ret;
886 memset(&params, 0x00, sizeof(params));
888 if (value) {
889 if (dep->number == 0 || dep->number == 1) {
891 * Whenever EP0 is stalled, we will restart
892 * the state machine, thus moving back to
893 * Setup Phase
895 dwc->ep0state = EP0_SETUP_PHASE;
898 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
899 DWC3_DEPCMD_SETSTALL, &params);
900 if (ret)
901 dev_err(dwc->dev, "failed to %s STALL on %s\n",
902 value ? "set" : "clear",
903 dep->name);
904 else
905 dep->flags |= DWC3_EP_STALL;
906 } else {
907 if (dep->flags & DWC3_EP_WEDGE)
908 return 0;
910 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
911 DWC3_DEPCMD_CLEARSTALL, &params);
912 if (ret)
913 dev_err(dwc->dev, "failed to %s STALL on %s\n",
914 value ? "set" : "clear",
915 dep->name);
916 else
917 dep->flags &= ~DWC3_EP_STALL;
920 return ret;
923 static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
925 struct dwc3_ep *dep = to_dwc3_ep(ep);
926 struct dwc3 *dwc = dep->dwc;
928 unsigned long flags;
930 int ret;
932 spin_lock_irqsave(&dwc->lock, flags);
934 if (usb_endpoint_xfer_isoc(dep->desc)) {
935 dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
936 ret = -EINVAL;
937 goto out;
940 ret = __dwc3_gadget_ep_set_halt(dep, value);
941 out:
942 spin_unlock_irqrestore(&dwc->lock, flags);
944 return ret;
947 static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
949 struct dwc3_ep *dep = to_dwc3_ep(ep);
951 dep->flags |= DWC3_EP_WEDGE;
953 return dwc3_gadget_ep_set_halt(ep, 1);
956 /* -------------------------------------------------------------------------- */
958 static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
959 .bLength = USB_DT_ENDPOINT_SIZE,
960 .bDescriptorType = USB_DT_ENDPOINT,
961 .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
964 static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
965 .enable = dwc3_gadget_ep0_enable,
966 .disable = dwc3_gadget_ep0_disable,
967 .alloc_request = dwc3_gadget_ep_alloc_request,
968 .free_request = dwc3_gadget_ep_free_request,
969 .queue = dwc3_gadget_ep0_queue,
970 .dequeue = dwc3_gadget_ep_dequeue,
971 .set_halt = dwc3_gadget_ep_set_halt,
972 .set_wedge = dwc3_gadget_ep_set_wedge,
975 static const struct usb_ep_ops dwc3_gadget_ep_ops = {
976 .enable = dwc3_gadget_ep_enable,
977 .disable = dwc3_gadget_ep_disable,
978 .alloc_request = dwc3_gadget_ep_alloc_request,
979 .free_request = dwc3_gadget_ep_free_request,
980 .queue = dwc3_gadget_ep_queue,
981 .dequeue = dwc3_gadget_ep_dequeue,
982 .set_halt = dwc3_gadget_ep_set_halt,
983 .set_wedge = dwc3_gadget_ep_set_wedge,
986 /* -------------------------------------------------------------------------- */
988 static int dwc3_gadget_get_frame(struct usb_gadget *g)
990 struct dwc3 *dwc = gadget_to_dwc(g);
991 u32 reg;
993 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
994 return DWC3_DSTS_SOFFN(reg);
997 static int dwc3_gadget_wakeup(struct usb_gadget *g)
999 struct dwc3 *dwc = gadget_to_dwc(g);
1001 unsigned long timeout;
1002 unsigned long flags;
1004 u32 reg;
1006 int ret = 0;
1008 u8 link_state;
1009 u8 speed;
1011 spin_lock_irqsave(&dwc->lock, flags);
1014 * According to the Databook, the Remote Wakeup request should
1015 * be issued only when the device is in the Early Suspend state.
1017 * We can check that via the USB Link State bits in the DSTS register.
1019 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1021 speed = reg & DWC3_DSTS_CONNECTSPD;
1022 if (speed == DWC3_DSTS_SUPERSPEED) {
1023 dev_dbg(dwc->dev, "no wakeup on SuperSpeed\n");
1024 ret = -EINVAL;
1025 goto out;
1028 link_state = DWC3_DSTS_USBLNKST(reg);
1030 switch (link_state) {
1031 case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */
1032 case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */
1033 break;
1034 default:
1035 dev_dbg(dwc->dev, "can't wakeup from link state %d\n",
1036 link_state);
1037 ret = -EINVAL;
1038 goto out;
1041 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1044 * Switch link state to Recovery. In HS/FS/LS this means
1045 * RemoteWakeup Request
1047 reg |= DWC3_DCTL_ULSTCHNG_RECOVERY;
1048 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1050 /* wait for at least 2000us */
1051 usleep_range(2000, 2500);
1053 /* write zeroes to Link Change Request */
1054 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
1055 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1057 /* poll until the Link State changes to ON */
1058 timeout = jiffies + msecs_to_jiffies(100);
1060 while (!(time_after(jiffies, timeout))) {
1061 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1063 /* in HS, means ON */
1064 if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
1065 break;
1068 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
1069 dev_err(dwc->dev, "failed to send remote wakeup\n");
1070 ret = -EINVAL;
1073 out:
1074 spin_unlock_irqrestore(&dwc->lock, flags);
1076 return ret;
1079 static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
1080 int is_selfpowered)
1082 struct dwc3 *dwc = gadget_to_dwc(g);
1084 dwc->is_selfpowered = !!is_selfpowered;
1086 return 0;
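/*
 * Set or clear DCTL.RUN_STOP and then poll DSTS.DEVCTRLHLT (up to ~500us)
 * until the core reports that it has actually started or halted.
 */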
1089 static void dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
1091 u32 reg;
1092 u32 timeout = 500;
1094 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1095 if (is_on)
1096 reg |= DWC3_DCTL_RUN_STOP;
1097 else
1098 reg &= ~DWC3_DCTL_RUN_STOP;
1100 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1102 do {
1103 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1104 if (is_on) {
1105 if (!(reg & DWC3_DSTS_DEVCTRLHLT))
1106 break;
1107 } else {
1108 if (reg & DWC3_DSTS_DEVCTRLHLT)
1109 break;
1111 timeout--;
1112 if (!timeout)
1113 break;
1114 udelay(1);
1115 } while (1);
1117 dev_vdbg(dwc->dev, "gadget %s data soft-%s\n",
1118 dwc->gadget_driver
1119 ? dwc->gadget_driver->function : "no-function",
1120 is_on ? "connect" : "disconnect");
1123 static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
1125 struct dwc3 *dwc = gadget_to_dwc(g);
1126 unsigned long flags;
1128 is_on = !!is_on;
1130 spin_lock_irqsave(&dwc->lock, flags);
1131 dwc3_gadget_run_stop(dwc, is_on);
1132 spin_unlock_irqrestore(&dwc->lock, flags);
1134 return 0;
1137 static int dwc3_gadget_start(struct usb_gadget *g,
1138 struct usb_gadget_driver *driver)
1140 struct dwc3 *dwc = gadget_to_dwc(g);
1141 struct dwc3_ep *dep;
1142 unsigned long flags;
1143 int ret = 0;
1144 u32 reg;
1146 spin_lock_irqsave(&dwc->lock, flags);
1148 if (dwc->gadget_driver) {
1149 dev_err(dwc->dev, "%s is already bound to %s\n",
1150 dwc->gadget.name,
1151 dwc->gadget_driver->driver.name);
1152 ret = -EBUSY;
1153 goto err0;
1156 dwc->gadget_driver = driver;
1157 dwc->gadget.dev.driver = &driver->driver;
1159 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1160 reg &= ~(DWC3_DCFG_SPEED_MASK);
1161 reg |= dwc->maximum_speed;
1162 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1164 dwc->start_config_issued = false;
1166 /* Start with SuperSpeed Default */
1167 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
1169 dep = dwc->eps[0];
1170 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc);
1171 if (ret) {
1172 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1173 goto err0;
1176 dep = dwc->eps[1];
1177 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc);
1178 if (ret) {
1179 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1180 goto err1;
1183 /* begin to receive SETUP packets */
1184 dwc->ep0state = EP0_SETUP_PHASE;
1185 dwc3_ep0_out_start(dwc);
1187 spin_unlock_irqrestore(&dwc->lock, flags);
1189 return 0;
1191 err1:
1192 __dwc3_gadget_ep_disable(dwc->eps[0]);
1194 err0:
1195 spin_unlock_irqrestore(&dwc->lock, flags);
1197 return ret;
1200 static int dwc3_gadget_stop(struct usb_gadget *g,
1201 struct usb_gadget_driver *driver)
1203 struct dwc3 *dwc = gadget_to_dwc(g);
1204 unsigned long flags;
1206 spin_lock_irqsave(&dwc->lock, flags);
1208 __dwc3_gadget_ep_disable(dwc->eps[0]);
1209 __dwc3_gadget_ep_disable(dwc->eps[1]);
1211 dwc->gadget_driver = NULL;
1212 dwc->gadget.dev.driver = NULL;
1214 spin_unlock_irqrestore(&dwc->lock, flags);
1216 return 0;
1218 static const struct usb_gadget_ops dwc3_gadget_ops = {
1219 .get_frame = dwc3_gadget_get_frame,
1220 .wakeup = dwc3_gadget_wakeup,
1221 .set_selfpowered = dwc3_gadget_set_selfpowered,
1222 .pullup = dwc3_gadget_pullup,
1223 .udc_start = dwc3_gadget_start,
1224 .udc_stop = dwc3_gadget_stop,
1227 /* -------------------------------------------------------------------------- */
1229 static int __devinit dwc3_gadget_init_endpoints(struct dwc3 *dwc)
1231 struct dwc3_ep *dep;
1232 u8 epnum;
1234 INIT_LIST_HEAD(&dwc->gadget.ep_list);
1236 for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1237 dep = kzalloc(sizeof(*dep), GFP_KERNEL);
1238 if (!dep) {
1239 dev_err(dwc->dev, "can't allocate endpoint %d\n",
1240 epnum);
1241 return -ENOMEM;
1244 dep->dwc = dwc;
1245 dep->number = epnum;
1246 dwc->eps[epnum] = dep;
1248 snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
1249 (epnum & 1) ? "in" : "out");
1250 dep->endpoint.name = dep->name;
1251 dep->direction = (epnum & 1);
1253 if (epnum == 0 || epnum == 1) {
1254 dep->endpoint.maxpacket = 512;
1255 dep->endpoint.ops = &dwc3_gadget_ep0_ops;
1256 if (!epnum)
1257 dwc->gadget.ep0 = &dep->endpoint;
1258 } else {
1259 int ret;
1261 dep->endpoint.maxpacket = 1024;
1262 dep->endpoint.max_streams = 15;
1263 dep->endpoint.ops = &dwc3_gadget_ep_ops;
1264 list_add_tail(&dep->endpoint.ep_list,
1265 &dwc->gadget.ep_list);
1267 ret = dwc3_alloc_trb_pool(dep);
1268 if (ret)
1269 return ret;
1272 INIT_LIST_HEAD(&dep->request_list);
1273 INIT_LIST_HEAD(&dep->req_queued);
1276 return 0;
1279 static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
1281 struct dwc3_ep *dep;
1282 u8 epnum;
1284 for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1285 dep = dwc->eps[epnum];
1286 dwc3_free_trb_pool(dep);
1288 if (epnum != 0 && epnum != 1)
1289 list_del(&dep->endpoint.ep_list);
1291 kfree(dep);
1295 static void dwc3_gadget_release(struct device *dev)
1297 dev_dbg(dev, "%s\n", __func__);
1300 /* -------------------------------------------------------------------------- */
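/*
 * Give back completed requests from the req_queued list until a short
 * packet, LST or IOC TRB is seen. Returns 1 if the caller should clear
 * the endpoint's BUSY flag, 0 if an IOC TRB completed and the transfer
 * is still in progress.
 */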
1301 static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
1302 const struct dwc3_event_depevt *event, int status)
1304 struct dwc3_request *req;
1305 struct dwc3_trb trb;
1306 unsigned int count;
1307 unsigned int s_pkt = 0;
1309 do {
1310 req = next_request(&dep->req_queued);
1311 if (!req) {
1312 WARN_ON_ONCE(1);
1313 return 1;
1316 dwc3_trb_to_nat(req->trb, &trb);
1318 if (trb.hwo && status != -ESHUTDOWN)
1320 * We continue despite the error. There is not much we
1321 * can do. If we don't clean it up we loop forever. If
1322 * we skip the TRB then it gets overwritten and reused after
1323 * a while since we use them in a ring buffer. A BUG()
1324 * would help. Let's hope that if this occurs, someone
1325 * fixes the root cause instead of looking away :)
1327 dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
1328 dep->name, req->trb);
1329 count = trb.length;
1331 if (dep->direction) {
1332 if (count) {
1333 dev_err(dwc->dev, "incomplete IN transfer %s\n",
1334 dep->name);
1335 status = -ECONNRESET;
1337 } else {
1338 if (count && (event->status & DEPEVT_STATUS_SHORT))
1339 s_pkt = 1;
1343 * We assume here that we will always receive the entire data
1344 * block which we should receive. Meaning, if we program RX to
1345 * receive 4K but we receive only 2K, we assume that's all we
1346 * should receive and we simply bounce the request back to the
1347 * gadget driver for further processing.
1349 req->request.actual += req->request.length - count;
1350 dwc3_gadget_giveback(dep, req, status);
1351 if (s_pkt)
1352 break;
1353 if ((event->status & DEPEVT_STATUS_LST) && trb.lst)
1354 break;
1355 if ((event->status & DEPEVT_STATUS_IOC) && trb.ioc)
1356 break;
1357 } while (1);
1359 if ((event->status & DEPEVT_STATUS_IOC) && trb.ioc)
1360 return 0;
1361 return 1;
1364 static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
1365 struct dwc3_ep *dep, const struct dwc3_event_depevt *event,
1366 int start_new)
1368 unsigned status = 0;
1369 int clean_busy;
1371 if (event->status & DEPEVT_STATUS_BUSERR)
1372 status = -ECONNRESET;
1374 clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
1375 if (clean_busy) {
1376 dep->flags &= ~DWC3_EP_BUSY;
1377 dep->res_trans_idx = 0;
1381 static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
1382 struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
1384 u32 uf;
1386 if (list_empty(&dep->request_list)) {
1387 dev_vdbg(dwc->dev, "ISOC ep %s ran out of requests.\n",
1388 dep->name);
1389 return;
1392 if (event->parameters) {
1393 u32 mask;
1395 mask = ~(dep->interval - 1);
1396 uf = event->parameters & mask;
1397 /* 4 micro frames in the future */
1398 uf += dep->interval * 4;
1399 } else {
1400 uf = 0;
1403 __dwc3_gadget_kick_transfer(dep, uf, 1);
1406 static void dwc3_process_ep_cmd_complete(struct dwc3_ep *dep,
1407 const struct dwc3_event_depevt *event)
1409 struct dwc3 *dwc = dep->dwc;
1410 struct dwc3_event_depevt mod_ev = *event;
1413 * We were asked to remove one request. It is possible that this
1414 * request and a few others were started together and have the same
1415 * transfer index. Since we stopped the whole endpoint we don't
1416 * know how many requests were already completed (but not yet
1417 * reported) and how many could still be completed (later). We purge
1418 * them all until the end of the list.
1420 mod_ev.status = DEPEVT_STATUS_LST;
1421 dwc3_cleanup_done_reqs(dwc, dep, &mod_ev, -ESHUTDOWN);
1422 dep->flags &= ~DWC3_EP_BUSY;
1423 /* pending requests are ignored and are queued on XferNotReady */
1426 static void dwc3_ep_cmd_compl(struct dwc3_ep *dep,
1427 const struct dwc3_event_depevt *event)
1429 u32 param = event->parameters;
1430 u32 cmd_type = (param >> 8) & ((1 << 5) - 1);
1432 switch (cmd_type) {
1433 case DWC3_DEPCMD_ENDTRANSFER:
1434 dwc3_process_ep_cmd_complete(dep, event);
1435 break;
1436 case DWC3_DEPCMD_STARTTRANSFER:
1437 dep->res_trans_idx = param & 0x7f;
1438 break;
1439 default:
1440 printk(KERN_ERR "%s() unknown/unexpected type: %d\n",
1441 __func__, cmd_type);
1442 break;
1446 static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
1447 const struct dwc3_event_depevt *event)
1449 struct dwc3_ep *dep;
1450 u8 epnum = event->endpoint_number;
1452 dep = dwc->eps[epnum];
1454 dev_vdbg(dwc->dev, "%s: %s\n", dep->name,
1455 dwc3_ep_event_string(event->endpoint_event));
1457 if (epnum == 0 || epnum == 1) {
1458 dwc3_ep0_interrupt(dwc, event);
1459 return;
1462 switch (event->endpoint_event) {
1463 case DWC3_DEPEVT_XFERCOMPLETE:
1464 if (usb_endpoint_xfer_isoc(dep->desc)) {
1465 dev_dbg(dwc->dev, "%s is an Isochronous endpoint\n",
1466 dep->name);
1467 return;
1470 dwc3_endpoint_transfer_complete(dwc, dep, event, 1);
1471 break;
1472 case DWC3_DEPEVT_XFERINPROGRESS:
1473 if (!usb_endpoint_xfer_isoc(dep->desc)) {
1474 dev_dbg(dwc->dev, "%s is not an Isochronous endpoint\n",
1475 dep->name);
1476 return;
1479 dwc3_endpoint_transfer_complete(dwc, dep, event, 0);
1480 break;
1481 case DWC3_DEPEVT_XFERNOTREADY:
1482 if (usb_endpoint_xfer_isoc(dep->desc)) {
1483 dwc3_gadget_start_isoc(dwc, dep, event);
1484 } else {
1485 int ret;
1487 dev_vdbg(dwc->dev, "%s: reason %s\n",
1488 dep->name, event->status
1489 ? "Transfer Active"
1490 : "Transfer Not Active");
1492 ret = __dwc3_gadget_kick_transfer(dep, 0, 1);
1493 if (!ret || ret == -EBUSY)
1494 return;
1496 dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1497 dep->name);
1500 break;
1501 case DWC3_DEPEVT_STREAMEVT:
1502 if (!usb_endpoint_xfer_bulk(dep->desc)) {
1503 dev_err(dwc->dev, "Stream event for non-Bulk %s\n",
1504 dep->name);
1505 return;
1508 switch (event->status) {
1509 case DEPEVT_STREAMEVT_FOUND:
1510 dev_vdbg(dwc->dev, "Stream %d found and started\n",
1511 event->parameters);
1513 break;
1514 case DEPEVT_STREAMEVT_NOTFOUND:
1515 /* FALLTHROUGH */
1516 default:
1517 dev_dbg(dwc->dev, "Couldn't find suitable stream\n");
1519 break;
1520 case DWC3_DEPEVT_RXTXFIFOEVT:
1521 dev_dbg(dwc->dev, "%s FIFO Overrun\n", dep->name);
1522 break;
1523 case DWC3_DEPEVT_EPCMDCMPLT:
1524 dwc3_ep_cmd_compl(dep, event);
1525 break;
1529 static void dwc3_disconnect_gadget(struct dwc3 *dwc)
1531 if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
1532 spin_unlock(&dwc->lock);
1533 dwc->gadget_driver->disconnect(&dwc->gadget);
1534 spin_lock(&dwc->lock);
1538 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum)
1540 struct dwc3_ep *dep;
1541 struct dwc3_gadget_ep_cmd_params params;
1542 u32 cmd;
1543 int ret;
1545 dep = dwc->eps[epnum];
1547 WARN_ON(!dep->res_trans_idx);
1548 if (dep->res_trans_idx) {
1549 cmd = DWC3_DEPCMD_ENDTRANSFER;
1550 cmd |= DWC3_DEPCMD_HIPRI_FORCERM | DWC3_DEPCMD_CMDIOC;
1551 cmd |= DWC3_DEPCMD_PARAM(dep->res_trans_idx);
1552 memset(&params, 0, sizeof(params));
1553 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
1554 WARN_ON_ONCE(ret);
1555 dep->res_trans_idx = 0;
1559 static void dwc3_stop_active_transfers(struct dwc3 *dwc)
1561 u32 epnum;
1563 for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1564 struct dwc3_ep *dep;
1566 dep = dwc->eps[epnum];
1567 if (!(dep->flags & DWC3_EP_ENABLED))
1568 continue;
1570 dwc3_remove_requests(dwc, dep);
1574 static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
1576 u32 epnum;
1578 for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1579 struct dwc3_ep *dep;
1580 struct dwc3_gadget_ep_cmd_params params;
1581 int ret;
1583 dep = dwc->eps[epnum];
1585 if (!(dep->flags & DWC3_EP_STALL))
1586 continue;
1588 dep->flags &= ~DWC3_EP_STALL;
1590 memset(&params, 0, sizeof(params));
1591 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1592 DWC3_DEPCMD_CLEARSTALL, &params);
1593 WARN_ON_ONCE(ret);
1597 static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
1599 dev_vdbg(dwc->dev, "%s\n", __func__);
1600 #if 0
1602 U1/U2 is a power-save optimization. Skip it for now. Anyway we need to
1603 enable it before we can disable it.
1605 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1606 reg &= ~DWC3_DCTL_INITU1ENA;
1607 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1609 reg &= ~DWC3_DCTL_INITU2ENA;
1610 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1611 #endif
1613 dwc3_stop_active_transfers(dwc);
1614 dwc3_disconnect_gadget(dwc);
1615 dwc->start_config_issued = false;
1617 dwc->gadget.speed = USB_SPEED_UNKNOWN;
1620 static void dwc3_gadget_usb3_phy_power(struct dwc3 *dwc, int on)
1622 u32 reg;
1624 reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
1626 if (on)
1627 reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
1628 else
1629 reg |= DWC3_GUSB3PIPECTL_SUSPHY;
1631 dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
1634 static void dwc3_gadget_usb2_phy_power(struct dwc3 *dwc, int on)
1636 u32 reg;
1638 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
1640 if (on)
1641 reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
1642 else
1643 reg |= DWC3_GUSB2PHYCFG_SUSPHY;
1645 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
1648 static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
1650 u32 reg;
1652 dev_vdbg(dwc->dev, "%s\n", __func__);
1654 /* Enable PHYs */
1655 dwc3_gadget_usb2_phy_power(dwc, true);
1656 dwc3_gadget_usb3_phy_power(dwc, true);
1658 if (dwc->gadget.speed != USB_SPEED_UNKNOWN)
1659 dwc3_disconnect_gadget(dwc);
1661 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1662 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
1663 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1665 dwc3_stop_active_transfers(dwc);
1666 dwc3_clear_stall_all_ep(dwc);
1667 dwc->start_config_issued = false;
1669 /* Reset device address to zero */
1670 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1671 reg &= ~(DWC3_DCFG_DEVADDR_MASK);
1672 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1675 static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
1677 u32 reg;
1678 u32 usb30_clock = DWC3_GCTL_CLK_BUS;
1681 * We change the clock only at SS but I don't know why I would want to
1682 * do this. Maybe it becomes part of the power saving plan.
1685 if (speed != DWC3_DSTS_SUPERSPEED)
1686 return;
1689 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
1690 * each time on Connect Done.
1692 if (!usb30_clock)
1693 return;
1695 reg = dwc3_readl(dwc->regs, DWC3_GCTL);
1696 reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
1697 dwc3_writel(dwc->regs, DWC3_GCTL, reg);
1700 static void dwc3_gadget_disable_phy(struct dwc3 *dwc, u8 speed)
1702 switch (speed) {
1703 case USB_SPEED_SUPER:
1704 dwc3_gadget_usb2_phy_power(dwc, false);
1705 break;
1706 case USB_SPEED_HIGH:
1707 case USB_SPEED_FULL:
1708 case USB_SPEED_LOW:
1709 dwc3_gadget_usb3_phy_power(dwc, false);
1710 break;
1714 static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
1716 struct dwc3_gadget_ep_cmd_params params;
1717 struct dwc3_ep *dep;
1718 int ret;
1719 u32 reg;
1720 u8 speed;
1722 dev_vdbg(dwc->dev, "%s\n", __func__);
1724 memset(&params, 0x00, sizeof(params));
1726 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1727 speed = reg & DWC3_DSTS_CONNECTSPD;
1728 dwc->speed = speed;
1730 dwc3_update_ram_clk_sel(dwc, speed);
1732 switch (speed) {
1733 case DWC3_DCFG_SUPERSPEED:
1734 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
1735 dwc->gadget.ep0->maxpacket = 512;
1736 dwc->gadget.speed = USB_SPEED_SUPER;
1737 break;
1738 case DWC3_DCFG_HIGHSPEED:
1739 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
1740 dwc->gadget.ep0->maxpacket = 64;
1741 dwc->gadget.speed = USB_SPEED_HIGH;
1742 break;
1743 case DWC3_DCFG_FULLSPEED2:
1744 case DWC3_DCFG_FULLSPEED1:
1745 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
1746 dwc->gadget.ep0->maxpacket = 64;
1747 dwc->gadget.speed = USB_SPEED_FULL;
1748 break;
1749 case DWC3_DCFG_LOWSPEED:
1750 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
1751 dwc->gadget.ep0->maxpacket = 8;
1752 dwc->gadget.speed = USB_SPEED_LOW;
1753 break;
1756 /* Disable the unneeded PHY */
1757 dwc3_gadget_disable_phy(dwc, dwc->gadget.speed);
1759 dep = dwc->eps[0];
1760 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc);
1761 if (ret) {
1762 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1763 return;
1766 dep = dwc->eps[1];
1767 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc);
1768 if (ret) {
1769 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1770 return;
1774 * Configure the PHY via GUSB3PIPECTLn if required.
1776 * Update GTXFIFOSIZn.
1778 * In both cases the reset values should be sufficient.
1782 static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
1784 dev_vdbg(dwc->dev, "%s\n", __func__);
1787 * TODO take core out of low power mode when that's
1788 * implemented.
1791 dwc->gadget_driver->resume(&dwc->gadget);
1794 static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
1795 unsigned int evtinfo)
1797 /* The fifth bit says SuperSpeed yes or no. */
1798 dwc->link_state = evtinfo & DWC3_LINK_STATE_MASK;
1800 dev_vdbg(dwc->dev, "%s link %d\n", __func__, dwc->link_state);
1803 static void dwc3_gadget_interrupt(struct dwc3 *dwc,
1804 const struct dwc3_event_devt *event)
1806 switch (event->type) {
1807 case DWC3_DEVICE_EVENT_DISCONNECT:
1808 dwc3_gadget_disconnect_interrupt(dwc);
1809 break;
1810 case DWC3_DEVICE_EVENT_RESET:
1811 dwc3_gadget_reset_interrupt(dwc);
1812 break;
1813 case DWC3_DEVICE_EVENT_CONNECT_DONE:
1814 dwc3_gadget_conndone_interrupt(dwc);
1815 break;
1816 case DWC3_DEVICE_EVENT_WAKEUP:
1817 dwc3_gadget_wakeup_interrupt(dwc);
1818 break;
1819 case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
1820 dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
1821 break;
1822 case DWC3_DEVICE_EVENT_EOPF:
1823 dev_vdbg(dwc->dev, "End of Periodic Frame\n");
1824 break;
1825 case DWC3_DEVICE_EVENT_SOF:
1826 dev_vdbg(dwc->dev, "Start of Periodic Frame\n");
1827 break;
1828 case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
1829 dev_vdbg(dwc->dev, "Erratic Error\n");
1830 break;
1831 case DWC3_DEVICE_EVENT_CMD_CMPL:
1832 dev_vdbg(dwc->dev, "Command Complete\n");
1833 break;
1834 case DWC3_DEVICE_EVENT_OVERFLOW:
1835 dev_vdbg(dwc->dev, "Overflow\n");
1836 break;
1837 default:
1838 dev_dbg(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
1842 static void dwc3_process_event_entry(struct dwc3 *dwc,
1843 const union dwc3_event *event)
1845 /* Endpoint IRQ, handle it and return early */
1846 if (event->type.is_devspec == 0) {
1847 /* depevt */
1848 return dwc3_endpoint_interrupt(dwc, &event->depevt);
1851 switch (event->type.type) {
1852 case DWC3_EVENT_TYPE_DEV:
1853 dwc3_gadget_interrupt(dwc, &event->devt);
1854 break;
1855 /* REVISIT what to do with Carkit and I2C events ? */
1856 default:
1857 dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
1861 static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
1863 struct dwc3_event_buffer *evt;
1864 int left;
1865 u32 count;
1867 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf));
1868 count &= DWC3_GEVNTCOUNT_MASK;
1869 if (!count)
1870 return IRQ_NONE;
1872 evt = dwc->ev_buffs[buf];
1873 left = count;
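/*
 * Consume the event buffer 4 bytes at a time; each processed entry is
 * acknowledged by writing 4 back to GEVNTCOUNT so the controller can
 * reuse that space.
 */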
1875 while (left > 0) {
1876 union dwc3_event event;
1878 memcpy(&event.raw, (evt->buf + evt->lpos), sizeof(event.raw));
1879 dwc3_process_event_entry(dwc, &event);
1881 * XXX we wrap around correctly to the next entry as almost all
1882 * entries are 4 bytes in size. There is one entry which has 12
1883 * bytes, which is a regular entry followed by 8 bytes of data. ATM
1884 * I don't know how things are organized if we get next to a
1885 * boundary so I worry about that once we try to handle that.
1887 evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
1888 left -= 4;
1890 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4);
1893 return IRQ_HANDLED;
1896 static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
1898 struct dwc3 *dwc = _dwc;
1899 int i;
1900 irqreturn_t ret = IRQ_NONE;
1902 spin_lock(&dwc->lock);
1904 for (i = 0; i < dwc->num_event_buffers; i++) {
1905 irqreturn_t status;
1907 status = dwc3_process_event_buf(dwc, i);
1908 if (status == IRQ_HANDLED)
1909 ret = status;
1912 spin_unlock(&dwc->lock);
1914 return ret;
1918 * dwc3_gadget_init - Initializes gadget related registers
1919 * @dwc: Pointer to our controller context structure
1921 * Returns 0 on success otherwise negative errno.
1923 int __devinit dwc3_gadget_init(struct dwc3 *dwc)
1925 u32 reg;
1926 int ret;
1927 int irq;
1929 dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
1930 &dwc->ctrl_req_addr, GFP_KERNEL);
1931 if (!dwc->ctrl_req) {
1932 dev_err(dwc->dev, "failed to allocate ctrl request\n");
1933 ret = -ENOMEM;
1934 goto err0;
1937 dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
1938 &dwc->ep0_trb_addr, GFP_KERNEL);
1939 if (!dwc->ep0_trb) {
1940 dev_err(dwc->dev, "failed to allocate ep0 trb\n");
1941 ret = -ENOMEM;
1942 goto err1;
1945 dwc->setup_buf = dma_alloc_coherent(dwc->dev,
1946 sizeof(*dwc->setup_buf) * 2,
1947 &dwc->setup_buf_addr, GFP_KERNEL);
1948 if (!dwc->setup_buf) {
1949 dev_err(dwc->dev, "failed to allocate setup buffer\n");
1950 ret = -ENOMEM;
1951 goto err2;
1954 dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
1955 512, &dwc->ep0_bounce_addr, GFP_KERNEL);
1956 if (!dwc->ep0_bounce) {
1957 dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
1958 ret = -ENOMEM;
1959 goto err3;
1962 dev_set_name(&dwc->gadget.dev, "gadget");
1964 dwc->gadget.ops = &dwc3_gadget_ops;
1965 dwc->gadget.is_dualspeed = true;
1966 dwc->gadget.speed = USB_SPEED_UNKNOWN;
1967 dwc->gadget.dev.parent = dwc->dev;
1969 dma_set_coherent_mask(&dwc->gadget.dev, dwc->dev->coherent_dma_mask);
1971 dwc->gadget.dev.dma_parms = dwc->dev->dma_parms;
1972 dwc->gadget.dev.dma_mask = dwc->dev->dma_mask;
1973 dwc->gadget.dev.release = dwc3_gadget_release;
1974 dwc->gadget.name = "dwc3-gadget";
1977 * REVISIT: Here we should clear all pending IRQs to be
1978 * sure we're starting from a well known location.
1981 ret = dwc3_gadget_init_endpoints(dwc);
1982 if (ret)
1983 goto err4;
1985 irq = platform_get_irq(to_platform_device(dwc->dev), 0);
1987 ret = request_irq(irq, dwc3_interrupt, IRQF_SHARED,
1988 "dwc3", dwc);
1989 if (ret) {
1990 dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
1991 irq, ret);
1992 goto err5;
1995 /* Enable all but Start and End of Frame IRQs */
1996 reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
1997 DWC3_DEVTEN_EVNTOVERFLOWEN |
1998 DWC3_DEVTEN_CMDCMPLTEN |
1999 DWC3_DEVTEN_ERRTICERREN |
2000 DWC3_DEVTEN_WKUPEVTEN |
2001 DWC3_DEVTEN_ULSTCNGEN |
2002 DWC3_DEVTEN_CONNECTDONEEN |
2003 DWC3_DEVTEN_USBRSTEN |
2004 DWC3_DEVTEN_DISCONNEVTEN);
2005 dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
2007 ret = device_register(&dwc->gadget.dev);
2008 if (ret) {
2009 dev_err(dwc->dev, "failed to register gadget device\n");
2010 put_device(&dwc->gadget.dev);
2011 goto err6;
2014 ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
2015 if (ret) {
2016 dev_err(dwc->dev, "failed to register udc\n");
2017 goto err7;
2020 return 0;
2022 err7:
2023 device_unregister(&dwc->gadget.dev);
2025 err6:
2026 dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
2027 free_irq(irq, dwc);
2029 err5:
2030 dwc3_gadget_free_endpoints(dwc);
2032 err4:
2033 dma_free_coherent(dwc->dev, 512, dwc->ep0_bounce,
2034 dwc->ep0_bounce_addr);
2036 err3:
2037 dma_free_coherent(dwc->dev, sizeof(*dwc->setup_buf) * 2,
2038 dwc->setup_buf, dwc->setup_buf_addr);
2040 err2:
2041 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2042 dwc->ep0_trb, dwc->ep0_trb_addr);
2044 err1:
2045 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2046 dwc->ctrl_req, dwc->ctrl_req_addr);
2048 err0:
2049 return ret;
2052 void dwc3_gadget_exit(struct dwc3 *dwc)
2054 int irq;
2056 usb_del_gadget_udc(&dwc->gadget);
2057 irq = platform_get_irq(to_platform_device(dwc->dev), 0);
2059 dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
2060 free_irq(irq, dwc);
2062 dwc3_gadget_free_endpoints(dwc);
2064 dma_free_coherent(dwc->dev, 512, dwc->ep0_bounce,
2065 dwc->ep0_bounce_addr);
2067 dma_free_coherent(dwc->dev, sizeof(*dwc->setup_buf) * 2,
2068 dwc->setup_buf, dwc->setup_buf_addr);
2070 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2071 dwc->ep0_trb, dwc->ep0_trb_addr);
2073 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2074 dwc->ctrl_req, dwc->ctrl_req_addr);
2076 device_unregister(&dwc->gadget.dev);