Linux 3.9-rc4
[linux-2.6/cjktty.git] / drivers / usb / gadget / bcm63xx_udc.c
blob8cc8253f1100490fc65bf288042daaef57b62b1f
1 /*
2 * bcm63xx_udc.c -- BCM63xx UDC high/full speed USB device controller
4 * Copyright (C) 2012 Kevin Cernekee <cernekee@gmail.com>
5 * Copyright (C) 2012 Broadcom Corporation
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
13 #include <linux/bitops.h>
14 #include <linux/bug.h>
15 #include <linux/clk.h>
16 #include <linux/compiler.h>
17 #include <linux/debugfs.h>
18 #include <linux/delay.h>
19 #include <linux/device.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/errno.h>
22 #include <linux/init.h>
23 #include <linux/interrupt.h>
24 #include <linux/ioport.h>
25 #include <linux/kconfig.h>
26 #include <linux/kernel.h>
27 #include <linux/list.h>
28 #include <linux/module.h>
29 #include <linux/moduleparam.h>
30 #include <linux/platform_device.h>
31 #include <linux/sched.h>
32 #include <linux/seq_file.h>
33 #include <linux/slab.h>
34 #include <linux/timer.h>
35 #include <linux/usb/ch9.h>
36 #include <linux/usb/gadget.h>
37 #include <linux/workqueue.h>
39 #include <bcm63xx_cpu.h>
40 #include <bcm63xx_iudma.h>
41 #include <bcm63xx_dev_usb_usbd.h>
42 #include <bcm63xx_io.h>
43 #include <bcm63xx_regs.h>
45 #define DRV_MODULE_NAME "bcm63xx_udc"
47 static const char bcm63xx_ep0name[] = "ep0";
48 static const char *const bcm63xx_ep_name[] = {
49 bcm63xx_ep0name,
50 "ep1in-bulk", "ep2out-bulk", "ep3in-int", "ep4out-int",
53 static bool use_fullspeed;
54 module_param(use_fullspeed, bool, S_IRUGO);
55 MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");
58 * RX IRQ coalescing options:
60 * false (default) - one IRQ per DATAx packet. Slow but reliable. The
61 * driver is able to pass the "testusb" suite and recover from conditions like:
63 * 1) Device queues up a 2048-byte RX IUDMA transaction on an OUT bulk ep
64 * 2) Host sends 512 bytes of data
65 * 3) Host decides to reconfigure the device and sends SET_INTERFACE
66 * 4) Device shuts down the endpoint and cancels the RX transaction
68 * true - one IRQ per transfer, for transfers <= 2048B. Generates
69 * considerably fewer IRQs, but error recovery is less robust. Does not
70 * reliably pass "testusb".
72 * TX always uses coalescing, because we can cancel partially complete TX
73 * transfers by repeatedly flushing the FIFO. The hardware doesn't allow
74 * this on RX.
76 static bool irq_coalesce;
77 module_param(irq_coalesce, bool, S_IRUGO);
78 MODULE_PARM_DESC(irq_coalesce, "take one IRQ per RX transfer");
80 #define BCM63XX_NUM_EP 5
81 #define BCM63XX_NUM_IUDMA 6
82 #define BCM63XX_NUM_FIFO_PAIRS 3
84 #define IUDMA_RESET_TIMEOUT_US 10000
86 #define IUDMA_EP0_RXCHAN 0
87 #define IUDMA_EP0_TXCHAN 1
89 #define IUDMA_MAX_FRAGMENT 2048
90 #define BCM63XX_MAX_CTRL_PKT 64
92 #define BCMEP_CTRL 0x00
93 #define BCMEP_ISOC 0x01
94 #define BCMEP_BULK 0x02
95 #define BCMEP_INTR 0x03
97 #define BCMEP_OUT 0x00
98 #define BCMEP_IN 0x01
100 #define BCM63XX_SPD_FULL 1
101 #define BCM63XX_SPD_HIGH 0
103 #define IUDMA_DMAC_OFFSET 0x200
104 #define IUDMA_DMAS_OFFSET 0x400
106 enum bcm63xx_ep0_state {
107 EP0_REQUEUE,
108 EP0_IDLE,
109 EP0_IN_DATA_PHASE_SETUP,
110 EP0_IN_DATA_PHASE_COMPLETE,
111 EP0_OUT_DATA_PHASE_SETUP,
112 EP0_OUT_DATA_PHASE_COMPLETE,
113 EP0_OUT_STATUS_PHASE,
114 EP0_IN_FAKE_STATUS_PHASE,
115 EP0_SHUTDOWN,
118 static const char __maybe_unused bcm63xx_ep0_state_names[][32] = {
119 "REQUEUE",
120 "IDLE",
121 "IN_DATA_PHASE_SETUP",
122 "IN_DATA_PHASE_COMPLETE",
123 "OUT_DATA_PHASE_SETUP",
124 "OUT_DATA_PHASE_COMPLETE",
125 "OUT_STATUS_PHASE",
126 "IN_FAKE_STATUS_PHASE",
127 "SHUTDOWN",
131 * struct iudma_ch_cfg - Static configuration for an IUDMA channel.
132 * @ep_num: USB endpoint number.
133 * @n_bds: Number of buffer descriptors in the ring.
134 * @ep_type: Endpoint type (control, bulk, interrupt).
135 * @dir: Direction (in, out).
136 * @n_fifo_slots: Number of FIFO entries to allocate for this channel.
137 * @max_pkt_hs: Maximum packet size in high speed mode.
138 * @max_pkt_fs: Maximum packet size in full speed mode.
140 struct iudma_ch_cfg {
141 int ep_num;
142 int n_bds;
143 int ep_type;
144 int dir;
145 int n_fifo_slots;
146 int max_pkt_hs;
147 int max_pkt_fs;
150 static const struct iudma_ch_cfg iudma_defaults[] = {
152 /* This controller was designed to support a CDC/RNDIS application.
153 It may be possible to reconfigure some of the endpoints, but
154 the hardware limitations (FIFO sizing and number of DMA channels)
155 may significantly impact flexibility and/or stability. Change
156 these values at your own risk.
158 ep_num ep_type n_fifo_slots max_pkt_fs
159 idx | n_bds | dir | max_pkt_hs |
160 | | | | | | | | */
161 [0] = { -1, 4, BCMEP_CTRL, BCMEP_OUT, 32, 64, 64 },
162 [1] = { 0, 4, BCMEP_CTRL, BCMEP_OUT, 32, 64, 64 },
163 [2] = { 2, 16, BCMEP_BULK, BCMEP_OUT, 128, 512, 64 },
164 [3] = { 1, 16, BCMEP_BULK, BCMEP_IN, 128, 512, 64 },
165 [4] = { 4, 4, BCMEP_INTR, BCMEP_OUT, 32, 64, 64 },
166 [5] = { 3, 4, BCMEP_INTR, BCMEP_IN, 32, 64, 64 },
169 struct bcm63xx_udc;
172 * struct iudma_ch - Represents the current state of a single IUDMA channel.
173 * @ch_idx: IUDMA channel index (0 to BCM63XX_NUM_IUDMA-1).
174 * @ep_num: USB endpoint number. -1 for ep0 RX.
175 * @enabled: Whether bcm63xx_ep_enable() has been called.
176 * @max_pkt: "Chunk size" on the USB interface. Based on interface speed.
177 * @is_tx: true for TX, false for RX.
178 * @bep: Pointer to the associated endpoint. NULL for ep0 RX.
179 * @udc: Reference to the device controller.
180 * @read_bd: Next buffer descriptor to reap from the hardware.
181 * @write_bd: Next BD available for a new packet.
182 * @end_bd: Points to the final BD in the ring.
183 * @n_bds_used: Number of BD entries currently occupied.
184 * @bd_ring: Base pointer to the BD ring.
185 * @bd_ring_dma: Physical (DMA) address of bd_ring.
186 * @n_bds: Total number of BDs in the ring.
188 * ep0 has two IUDMA channels (IUDMA_EP0_RXCHAN and IUDMA_EP0_TXCHAN), as it is
189 * bidirectional. The "struct usb_ep" associated with ep0 is for TX (IN)
190 * only.
192 * Each bulk/intr endpoint has a single IUDMA channel and a single
193 * struct usb_ep.
195 struct iudma_ch {
196 unsigned int ch_idx;
197 int ep_num;
198 bool enabled;
199 int max_pkt;
200 bool is_tx;
201 struct bcm63xx_ep *bep;
202 struct bcm63xx_udc *udc;
204 struct bcm_enet_desc *read_bd;
205 struct bcm_enet_desc *write_bd;
206 struct bcm_enet_desc *end_bd;
207 int n_bds_used;
209 struct bcm_enet_desc *bd_ring;
210 dma_addr_t bd_ring_dma;
211 unsigned int n_bds;
215 * struct bcm63xx_ep - Internal (driver) state of a single endpoint.
216 * @ep_num: USB endpoint number.
217 * @iudma: Pointer to IUDMA channel state.
218 * @ep: USB gadget layer representation of the EP.
219 * @udc: Reference to the device controller.
220 * @queue: Linked list of outstanding requests for this EP.
221 * @halted: 1 if the EP is stalled; 0 otherwise.
223 struct bcm63xx_ep {
224 unsigned int ep_num;
225 struct iudma_ch *iudma;
226 struct usb_ep ep;
227 struct bcm63xx_udc *udc;
228 struct list_head queue;
229 unsigned halted:1;
233 * struct bcm63xx_req - Internal (driver) state of a single request.
234 * @queue: Links back to the EP's request list.
235 * @req: USB gadget layer representation of the request.
236 * @offset: Current byte offset into the data buffer (next byte to queue).
237 * @bd_bytes: Number of data bytes in outstanding BD entries.
238 * @iudma: IUDMA channel used for the request.
240 struct bcm63xx_req {
241 struct list_head queue; /* ep's requests */
242 struct usb_request req;
243 unsigned int offset;
244 unsigned int bd_bytes;
245 struct iudma_ch *iudma;
249 * struct bcm63xx_udc - Driver/hardware private context.
250 * @lock: Spinlock to mediate access to this struct, and (most) HW regs.
251 * @dev: Generic Linux device structure.
252 * @pd: Platform data (board/port info).
253 * @usbd_clk: Clock descriptor for the USB device block.
254 * @usbh_clk: Clock descriptor for the USB host block.
255 * @gadget: USB slave device.
256 * @driver: Driver for USB slave devices.
257 * @usbd_regs: Base address of the USBD/USB20D block.
258 * @iudma_regs: Base address of the USBD's associated IUDMA block.
259 * @bep: Array of endpoints, including ep0.
260 * @iudma: Array of all IUDMA channels used by this controller.
261 * @cfg: USB configuration number, from SET_CONFIGURATION wValue.
262 * @iface: USB interface number, from SET_INTERFACE wIndex.
263 * @alt_iface: USB alt interface number, from SET_INTERFACE wValue.
264 * @ep0_ctrl_req: Request object for bcm63xx_udc-initiated ep0 transactions.
265 * @ep0_ctrl_buf: Data buffer for ep0_ctrl_req.
266 * @ep0state: Current state of the ep0 state machine.
267 * @ep0_wq: Workqueue struct used to wake up the ep0 state machine.
268 * @wedgemap: Bitmap of wedged endpoints.
269 * @ep0_req_reset: USB reset is pending.
270 * @ep0_req_set_cfg: Need to spoof a SET_CONFIGURATION packet.
271 * @ep0_req_set_iface: Need to spoof a SET_INTERFACE packet.
272 * @ep0_req_shutdown: Driver is shutting down; requesting ep0 to halt activity.
273 * @ep0_req_completed: ep0 request has completed; worker has not seen it yet.
274 * @ep0_reply: Pending reply from gadget driver.
275 * @ep0_request: Outstanding ep0 request.
276 * @debugfs_root: debugfs directory: /sys/kernel/debug/<DRV_MODULE_NAME>.
277 * @debugfs_usbd: debugfs file "usbd" for controller state.
278 * @debugfs_iudma: debugfs file "iudma" for IUDMA state.
280 struct bcm63xx_udc {
281 spinlock_t lock;
283 struct device *dev;
284 struct bcm63xx_usbd_platform_data *pd;
285 struct clk *usbd_clk;
286 struct clk *usbh_clk;
288 struct usb_gadget gadget;
289 struct usb_gadget_driver *driver;
291 void __iomem *usbd_regs;
292 void __iomem *iudma_regs;
294 struct bcm63xx_ep bep[BCM63XX_NUM_EP];
295 struct iudma_ch iudma[BCM63XX_NUM_IUDMA];
297 int cfg;
298 int iface;
299 int alt_iface;
301 struct bcm63xx_req ep0_ctrl_req;
302 u8 *ep0_ctrl_buf;
304 int ep0state;
305 struct work_struct ep0_wq;
307 unsigned long wedgemap;
309 unsigned ep0_req_reset:1;
310 unsigned ep0_req_set_cfg:1;
311 unsigned ep0_req_set_iface:1;
312 unsigned ep0_req_shutdown:1;
314 unsigned ep0_req_completed:1;
315 struct usb_request *ep0_reply;
316 struct usb_request *ep0_request;
318 struct dentry *debugfs_root;
319 struct dentry *debugfs_usbd;
320 struct dentry *debugfs_iudma;
323 static const struct usb_ep_ops bcm63xx_udc_ep_ops;
325 /***********************************************************************
326 * Convenience functions
327 ***********************************************************************/
/* Map a gadget-layer handle back to the bcm63xx device context embedding it. */
static inline struct bcm63xx_udc *gadget_to_udc(struct usb_gadget *g)
{
	return container_of(g, struct bcm63xx_udc, gadget);
}
/* Map a gadget-layer usb_ep back to the driver-private endpoint wrapping it. */
static inline struct bcm63xx_ep *our_ep(struct usb_ep *ep)
{
	return container_of(ep, struct bcm63xx_ep, ep);
}
/* Map a gadget-layer usb_request back to the driver-private request. */
static inline struct bcm63xx_req *our_req(struct usb_request *req)
{
	return container_of(req, struct bcm63xx_req, req);
}
/* Read a 32-bit register in the USBD (device controller) block. */
static inline u32 usbd_readl(struct bcm63xx_udc *udc, u32 off)
{
	return bcm_readl(udc->usbd_regs + off);
}
/* Write a 32-bit register in the USBD (device controller) block. */
static inline void usbd_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
{
	bcm_writel(val, udc->usbd_regs + off);
}
/* Read a 32-bit register in the IUDMA global (ENETDMA) register space. */
static inline u32 usb_dma_readl(struct bcm63xx_udc *udc, u32 off)
{
	return bcm_readl(udc->iudma_regs + off);
}
/* Write a 32-bit register in the IUDMA global (ENETDMA) register space. */
static inline void usb_dma_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
{
	bcm_writel(val, udc->iudma_regs + off);
}
/* Read a 32-bit per-channel IUDMA control (DMAC) register. */
static inline u32 usb_dmac_readl(struct bcm63xx_udc *udc, u32 off)
{
	return bcm_readl(udc->iudma_regs + IUDMA_DMAC_OFFSET + off);
}
/* Write a 32-bit per-channel IUDMA control (DMAC) register. */
static inline void usb_dmac_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
{
	bcm_writel(val, udc->iudma_regs + IUDMA_DMAC_OFFSET + off);
}
/* Read a 32-bit per-channel IUDMA state (DMAS) register. */
static inline u32 usb_dmas_readl(struct bcm63xx_udc *udc, u32 off)
{
	return bcm_readl(udc->iudma_regs + IUDMA_DMAS_OFFSET + off);
}
/* Write a 32-bit per-channel IUDMA state (DMAS) register. */
static inline void usb_dmas_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
{
	bcm_writel(val, udc->iudma_regs + IUDMA_DMAS_OFFSET + off);
}
/*
 * set_clocks - Gate the USBH and USBD block clocks on or off together.
 * @udc: Reference to the device controller.
 * @is_enabled: true to enable both clocks, false to disable.
 *
 * Disable order is the reverse of enable order.
 */
static inline void set_clocks(struct bcm63xx_udc *udc, bool is_enabled)
{
	if (is_enabled) {
		clk_enable(udc->usbh_clk);
		clk_enable(udc->usbd_clk);
		udelay(10);	/* let the clocks settle before touching regs */
	} else {
		clk_disable(udc->usbd_clk);
		clk_disable(udc->usbh_clk);
	}
}
396 /***********************************************************************
397 * Low-level IUDMA / FIFO operations
398 ***********************************************************************/
/**
 * bcm63xx_ep_dma_select - Helper function to set up the init_sel signal.
 * @udc: Reference to the device controller.
 * @idx: Desired init_sel value.
 *
 * The "init_sel" signal is used as a selection index for both endpoints
 * and IUDMA channels.  Since these do not map 1:1, the use of this signal
 * depends on the context: subsequent accesses to the FIFO/typemap/reset
 * registers operate on whatever entity @idx selected here.
 */
static void bcm63xx_ep_dma_select(struct bcm63xx_udc *udc, int idx)
{
	u32 val = usbd_readl(udc, USBD_CONTROL_REG);

	/* read-modify-write: only the INIT_SEL field changes */
	val &= ~USBD_CONTROL_INIT_SEL_MASK;
	val |= idx << USBD_CONTROL_INIT_SEL_SHIFT;
	usbd_writel(udc, val, USBD_CONTROL_REG);
}
419 * bcm63xx_set_stall - Enable/disable stall on one endpoint.
420 * @udc: Reference to the device controller.
421 * @bep: Endpoint on which to operate.
422 * @is_stalled: true to enable stall, false to disable.
424 * See notes in bcm63xx_update_wedge() regarding automatic clearing of
425 * halt/stall conditions.
427 static void bcm63xx_set_stall(struct bcm63xx_udc *udc, struct bcm63xx_ep *bep,
428 bool is_stalled)
430 u32 val;
432 val = USBD_STALL_UPDATE_MASK |
433 (is_stalled ? USBD_STALL_ENABLE_MASK : 0) |
434 (bep->ep_num << USBD_STALL_EPNUM_SHIFT);
435 usbd_writel(udc, val, USBD_STALL_REG);
/**
 * bcm63xx_fifo_setup - (Re)initialize FIFO boundaries and settings.
 * @udc: Reference to the device controller.
 *
 * These parameters depend on the USB link speed.  Settings are
 * per-IUDMA-channel-pair: each loop iteration programs one RX channel
 * (even index) and its paired TX channel (odd index), carving out
 * consecutive, non-overlapping FIFO slot ranges from iudma_defaults[].
 */
static void bcm63xx_fifo_setup(struct bcm63xx_udc *udc)
{
	int is_hs = udc->gadget.speed == USB_SPEED_HIGH;
	u32 i, val, rx_fifo_slot, tx_fifo_slot;

	/* set up FIFO boundaries and packet sizes; this is done in pairs */
	rx_fifo_slot = tx_fifo_slot = 0;
	for (i = 0; i < BCM63XX_NUM_IUDMA; i += 2) {
		const struct iudma_ch_cfg *rx_cfg = &iudma_defaults[i];
		const struct iudma_ch_cfg *tx_cfg = &iudma_defaults[i + 1];

		/* init_sel addresses the channel pair (i >> 1) here */
		bcm63xx_ep_dma_select(udc, i >> 1);

		/* RX FIFO: [start, end] slot range, then max packet size */
		val = (rx_fifo_slot << USBD_RXFIFO_CONFIG_START_SHIFT) |
			((rx_fifo_slot + rx_cfg->n_fifo_slots - 1) <<
			 USBD_RXFIFO_CONFIG_END_SHIFT);
		rx_fifo_slot += rx_cfg->n_fifo_slots;
		usbd_writel(udc, val, USBD_RXFIFO_CONFIG_REG);
		usbd_writel(udc,
			    is_hs ? rx_cfg->max_pkt_hs : rx_cfg->max_pkt_fs,
			    USBD_RXFIFO_EPSIZE_REG);

		/* TX FIFO: same layout, independent slot allocator */
		val = (tx_fifo_slot << USBD_TXFIFO_CONFIG_START_SHIFT) |
			((tx_fifo_slot + tx_cfg->n_fifo_slots - 1) <<
			 USBD_TXFIFO_CONFIG_END_SHIFT);
		tx_fifo_slot += tx_cfg->n_fifo_slots;
		usbd_writel(udc, val, USBD_TXFIFO_CONFIG_REG);
		usbd_writel(udc,
			    is_hs ? tx_cfg->max_pkt_hs : tx_cfg->max_pkt_fs,
			    USBD_TXFIFO_EPSIZE_REG);

		/* read back to post the writes before moving to next pair */
		usbd_readl(udc, USBD_TXFIFO_EPSIZE_REG);
	}
}
/**
 * bcm63xx_fifo_reset_ep - Flush a single endpoint's FIFO.
 * @udc: Reference to the device controller.
 * @ep_num: Endpoint number.
 */
static void bcm63xx_fifo_reset_ep(struct bcm63xx_udc *udc, int ep_num)
{
	u32 val;

	bcm63xx_ep_dma_select(udc, ep_num);

	val = usbd_readl(udc, USBD_CONTROL_REG);
	val |= USBD_CONTROL_FIFO_RESET_MASK;
	usbd_writel(udc, val, USBD_CONTROL_REG);
	/* read back to make sure the reset pulse reached the hardware */
	usbd_readl(udc, USBD_CONTROL_REG);
}
498 * bcm63xx_fifo_reset - Flush all hardware FIFOs.
499 * @udc: Reference to the device controller.
501 static void bcm63xx_fifo_reset(struct bcm63xx_udc *udc)
503 int i;
505 for (i = 0; i < BCM63XX_NUM_FIFO_PAIRS; i++)
506 bcm63xx_fifo_reset_ep(udc, i);
/**
 * bcm63xx_ep_init - Initial (one-time) endpoint initialization.
 * @udc: Reference to the device controller.
 *
 * Programs the endpoint-number -> {type, IUDMA channel pair} typemap for
 * every channel that is bound to a real endpoint (ep_num >= 0).
 */
static void bcm63xx_ep_init(struct bcm63xx_udc *udc)
{
	u32 i, val;

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		const struct iudma_ch_cfg *cfg = &iudma_defaults[i];

		/* ep_num < 0 means the channel has no USB endpoint (ep0 RX) */
		if (cfg->ep_num < 0)
			continue;

		bcm63xx_ep_dma_select(udc, cfg->ep_num);
		/* channel pair index is i >> 1 (RX/TX share a pair) */
		val = (cfg->ep_type << USBD_EPNUM_TYPEMAP_TYPE_SHIFT) |
			((i >> 1) << USBD_EPNUM_TYPEMAP_DMA_CH_SHIFT);
		usbd_writel(udc, val, USBD_EPNUM_TYPEMAP_REG);
	}
}
/**
 * bcm63xx_ep_setup - Configure per-endpoint settings.
 * @udc: Reference to the device controller.
 *
 * This needs to be rerun if the speed/cfg/intf/altintf changes.
 * For every IUDMA channel it caches the speed-dependent max packet size,
 * and for channels bound to an endpoint it programs the per-endpoint CSR
 * with direction, type, cfg/iface/alt-iface and max packet size.
 */
static void bcm63xx_ep_setup(struct bcm63xx_udc *udc)
{
	u32 val, i;

	usbd_writel(udc, USBD_CSR_SETUPADDR_DEF, USBD_CSR_SETUPADDR_REG);

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		const struct iudma_ch_cfg *cfg = &iudma_defaults[i];
		int max_pkt = udc->gadget.speed == USB_SPEED_HIGH ?
			      cfg->max_pkt_hs : cfg->max_pkt_fs;
		int idx = cfg->ep_num;

		udc->iudma[i].max_pkt = max_pkt;

		/* idx < 0: channel has no gadget-visible endpoint (ep0 RX) */
		if (idx < 0)
			continue;
		udc->bep[idx].ep.maxpacket = max_pkt;

		val = (idx << USBD_CSR_EP_LOG_SHIFT) |
		      (cfg->dir << USBD_CSR_EP_DIR_SHIFT) |
		      (cfg->ep_type << USBD_CSR_EP_TYPE_SHIFT) |
		      (udc->cfg << USBD_CSR_EP_CFG_SHIFT) |
		      (udc->iface << USBD_CSR_EP_IFACE_SHIFT) |
		      (udc->alt_iface << USBD_CSR_EP_ALTIFACE_SHIFT) |
		      (max_pkt << USBD_CSR_EP_MAXPKT_SHIFT);
		usbd_writel(udc, val, USBD_CSR_EP_REG(idx));
	}
}
/**
 * iudma_write - Queue a single IUDMA transaction.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to use.
 * @breq: Request containing the transaction data.
 *
 * For RX IUDMA, this will queue a single buffer descriptor, as RX IUDMA
 * does not honor SOP/EOP so the handling of multiple buffers is ambiguous.
 * So iudma_write() may be called several times to fulfill a single
 * usb_request.
 *
 * For TX IUDMA, this can queue multiple buffer descriptors if needed.
 */
static void iudma_write(struct bcm63xx_udc *udc, struct iudma_ch *iudma,
	struct bcm63xx_req *breq)
{
	int first_bd = 1, last_bd = 0, extra_zero_pkt = 0;
	unsigned int bytes_left = breq->req.length - breq->offset;
	/* non-coalesced RX: one BD per USB packet; otherwise up to 2048B */
	const int max_bd_bytes = !irq_coalesce && !iudma->is_tx ?
		iudma->max_pkt : IUDMA_MAX_FRAGMENT;

	iudma->n_bds_used = 0;
	breq->bd_bytes = 0;
	breq->iudma = iudma;

	/* caller asked for a trailing ZLP and the payload is packet-aligned */
	if ((bytes_left % iudma->max_pkt == 0) && bytes_left && breq->req.zero)
		extra_zero_pkt = 1;

	do {
		struct bcm_enet_desc *d = iudma->write_bd;
		u32 dmaflags = 0;
		unsigned int n_bytes;

		/* advance write_bd, wrapping at the end of the ring */
		if (d == iudma->end_bd) {
			dmaflags |= DMADESC_WRAP_MASK;
			iudma->write_bd = iudma->bd_ring;
		} else {
			iudma->write_bd++;
		}
		iudma->n_bds_used++;

		n_bytes = min_t(int, bytes_left, max_bd_bytes);
		if (n_bytes)
			dmaflags |= n_bytes << DMADESC_LENGTH_SHIFT;
		else
			/* zero-length packet BD (length field must be != 0) */
			dmaflags |= (1 << DMADESC_LENGTH_SHIFT) |
				    DMADESC_USB_ZERO_MASK;

		dmaflags |= DMADESC_OWNER_MASK;
		if (first_bd) {
			dmaflags |= DMADESC_SOP_MASK;
			first_bd = 0;
		}

		/*
		 * extra_zero_pkt forces one more iteration through the loop
		 * after all data is queued up, to send the zero packet
		 */
		if (extra_zero_pkt && !bytes_left)
			extra_zero_pkt = 0;

		/* EOP: RX always; TX when the ring is full or data is done */
		if (!iudma->is_tx || iudma->n_bds_used == iudma->n_bds ||
		    (n_bytes == bytes_left && !extra_zero_pkt)) {
			last_bd = 1;
			dmaflags |= DMADESC_EOP_MASK;
		}

		d->address = breq->req.dma + breq->offset;
		/* address must be visible to HW before ownership transfers */
		mb();
		d->len_stat = dmaflags;

		breq->offset += n_bytes;
		breq->bd_bytes += n_bytes;
		bytes_left -= n_bytes;
	} while (!last_bd);

	/* kick the channel to start processing the new descriptors */
	usb_dmac_writel(udc, ENETDMAC_CHANCFG_EN_MASK,
			ENETDMAC_CHANCFG_REG(iudma->ch_idx));
}
/**
 * iudma_read - Check for IUDMA buffer completion.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to use.
 *
 * This checks to see if ALL of the outstanding BDs on the DMA channel
 * have been filled.  If so, it returns the actual transfer length;
 * otherwise it returns -EBUSY.  Returns -EINVAL if no BDs are outstanding.
 */
static int iudma_read(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
{
	int i, actual_len = 0;
	struct bcm_enet_desc *d = iudma->read_bd;

	if (!iudma->n_bds_used)
		return -EINVAL;

	for (i = 0; i < iudma->n_bds_used; i++) {
		u32 dmaflags;

		dmaflags = d->len_stat;

		/* hardware still owns this BD => transfer not finished */
		if (dmaflags & DMADESC_OWNER_MASK)
			return -EBUSY;

		actual_len += (dmaflags & DMADESC_LENGTH_MASK) >>
			      DMADESC_LENGTH_SHIFT;
		/* walk the ring, wrapping at the last descriptor */
		if (d == iudma->end_bd)
			d = iudma->bd_ring;
		else
			d++;
	}

	/* all BDs reaped; reset bookkeeping for the next transaction */
	iudma->read_bd = d;
	iudma->n_bds_used = 0;
	return actual_len;
}
/**
 * iudma_reset_channel - Stop DMA on a single channel.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to reset.
 *
 * Halts the channel (escalating to a forced BUFHALT after half the
 * timeout), clears all BDs, and reprograms the channel's IRQ mask,
 * burst size and ring base so it is ready for reuse.
 */
static void iudma_reset_channel(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
{
	int timeout = IUDMA_RESET_TIMEOUT_US;
	struct bcm_enet_desc *d;
	int ch_idx = iudma->ch_idx;

	/* ep0 RX has ep_num == -1; clamp to FIFO 0 in that case */
	if (!iudma->is_tx)
		bcm63xx_fifo_reset_ep(udc, max(0, iudma->ep_num));

	/* stop DMA, then wait for the hardware to wrap up */
	usb_dmac_writel(udc, 0, ENETDMAC_CHANCFG_REG(ch_idx));

	while (usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG(ch_idx)) &
	       ENETDMAC_CHANCFG_EN_MASK) {
		udelay(1);

		/* repeatedly flush the FIFO data until the BD completes */
		if (iudma->is_tx && iudma->ep_num >= 0)
			bcm63xx_fifo_reset_ep(udc, iudma->ep_num);

		if (!timeout--) {
			dev_err(udc->dev, "can't reset IUDMA channel %d\n",
				ch_idx);
			break;
		}
		/* halfway through: give up waiting politely, force a halt */
		if (timeout == IUDMA_RESET_TIMEOUT_US / 2) {
			dev_warn(udc->dev, "forcibly halting IUDMA channel %d\n",
				 ch_idx);
			usb_dmac_writel(udc, ENETDMAC_CHANCFG_BUFHALT_MASK,
					ENETDMAC_CHANCFG_REG(ch_idx));
		}
	}
	/* ack any interrupts the channel raised while being stopped */
	usb_dmac_writel(udc, ~0, ENETDMAC_IR_REG(ch_idx));

	/* don't leave "live" HW-owned entries for the next guy to step on */
	for (d = iudma->bd_ring; d <= iudma->end_bd; d++)
		d->len_stat = 0;
	mb();

	iudma->read_bd = iudma->write_bd = iudma->bd_ring;
	iudma->n_bds_used = 0;

	/* set up IRQs, UBUS burst size, and BD base for this channel */
	usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
			ENETDMAC_IRMASK_REG(ch_idx));
	usb_dmac_writel(udc, 8, ENETDMAC_MAXBURST_REG(ch_idx));

	usb_dmas_writel(udc, iudma->bd_ring_dma, ENETDMAS_RSTART_REG(ch_idx));
	usb_dmas_writel(udc, 0, ENETDMAS_SRAM2_REG(ch_idx));
}
/**
 * iudma_init_channel - One-time IUDMA channel initialization.
 * @udc: Reference to the device controller.
 * @ch_idx: Channel to initialize.
 *
 * Binds the channel to its endpoint (if any), and allocates the BD ring
 * via devm-managed coherent memory (freed automatically on detach).
 *
 * Return: 0 on success, -ENOMEM if the BD ring cannot be allocated.
 */
static int iudma_init_channel(struct bcm63xx_udc *udc, unsigned int ch_idx)
{
	struct iudma_ch *iudma = &udc->iudma[ch_idx];
	const struct iudma_ch_cfg *cfg = &iudma_defaults[ch_idx];
	unsigned int n_bds = cfg->n_bds;
	struct bcm63xx_ep *bep = NULL;

	iudma->ep_num = cfg->ep_num;
	iudma->ch_idx = ch_idx;
	/* odd channel indices are TX, even are RX */
	iudma->is_tx = !!(ch_idx & 0x01);
	if (iudma->ep_num >= 0) {
		bep = &udc->bep[iudma->ep_num];
		bep->iudma = iudma;
		INIT_LIST_HEAD(&bep->queue);
	}

	iudma->bep = bep;
	iudma->udc = udc;

	/* ep0 is always active; others are controlled by the gadget driver */
	if (iudma->ep_num <= 0)
		iudma->enabled = true;

	iudma->n_bds = n_bds;
	iudma->bd_ring = dmam_alloc_coherent(udc->dev,
		n_bds * sizeof(struct bcm_enet_desc),
		&iudma->bd_ring_dma, GFP_KERNEL);
	if (!iudma->bd_ring)
		return -ENOMEM;
	iudma->end_bd = &iudma->bd_ring[n_bds - 1];

	return 0;
}
/**
 * iudma_init - One-time initialization of all IUDMA channels.
 * @udc: Reference to the device controller.
 *
 * Enable DMA, flush channels, and enable global IUDMA IRQs.
 *
 * Return: 0 on success, or the error from the first failing channel init.
 */
static int iudma_init(struct bcm63xx_udc *udc)
{
	int i, rc;

	usb_dma_writel(udc, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		rc = iudma_init_channel(udc, i);
		if (rc)
			return rc;
		iudma_reset_channel(udc, &udc->iudma[i]);
	}

	/* unmask the global IRQ bit for every channel */
	usb_dma_writel(udc, BIT(BCM63XX_NUM_IUDMA)-1, ENETDMA_GLB_IRQMASK_REG);
	return 0;
}
/**
 * iudma_uninit - Uninitialize IUDMA channels.
 * @udc: Reference to the device controller.
 *
 * Kill global IUDMA IRQs, flush channels, and kill DMA.
 */
static void iudma_uninit(struct bcm63xx_udc *udc)
{
	int i;

	/* mask the global IRQ first so no channel interrupts mid-teardown */
	usb_dma_writel(udc, 0, ENETDMA_GLB_IRQMASK_REG);

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++)
		iudma_reset_channel(udc, &udc->iudma[i]);

	/* finally disable the DMA engine itself */
	usb_dma_writel(udc, 0, ENETDMA_CFG_REG);
}
819 /***********************************************************************
820 * Other low-level USBD operations
821 ***********************************************************************/
/**
 * bcm63xx_set_ctrl_irqs - Mask/unmask control path interrupts.
 * @udc: Reference to the device controller.
 * @enable_irqs: true to enable, false to disable.
 *
 * Regardless of @enable_irqs, any pending events are acknowledged by
 * writing the event bits back to the status register.
 */
static void bcm63xx_set_ctrl_irqs(struct bcm63xx_udc *udc, bool enable_irqs)
{
	u32 val;

	usbd_writel(udc, 0, USBD_STATUS_REG);

	val = BIT(USBD_EVENT_IRQ_USB_RESET) |
	      BIT(USBD_EVENT_IRQ_SETUP) |
	      BIT(USBD_EVENT_IRQ_SETCFG) |
	      BIT(USBD_EVENT_IRQ_SETINTF) |
	      BIT(USBD_EVENT_IRQ_USB_LINK);
	usbd_writel(udc, enable_irqs ? val : 0, USBD_EVENT_IRQ_MASK_REG);
	/* write-1-to-clear any stale pending events */
	usbd_writel(udc, val, USBD_EVENT_IRQ_STATUS_REG);
}
/**
 * bcm63xx_select_phy_mode - Select between USB device and host mode.
 * @udc: Reference to the device controller.
 * @is_device: true for device, false for host.
 *
 * This should probably be reworked to use the drivers/usb/otg
 * infrastructure.
 *
 * By default, the AFE/pullups are disabled in device mode, until
 * bcm63xx_select_pullup() is called.
 */
static void bcm63xx_select_phy_mode(struct bcm63xx_udc *udc, bool is_device)
{
	u32 val, portmask = BIT(udc->pd->port_no);

	if (BCMCPU_IS_6328()) {
		/* configure pinmux to sense VBUS signal */
		val = bcm_gpio_readl(GPIO_PINMUX_OTHR_REG);
		val &= ~GPIO_PINMUX_OTHR_6328_USB_MASK;
		val |= is_device ? GPIO_PINMUX_OTHR_6328_USB_DEV :
			GPIO_PINMUX_OTHR_6328_USB_HOST;
		bcm_gpio_writel(val, GPIO_PINMUX_OTHR_REG);
	}

	/* hand the port's UTMI interface to the device block (or back) */
	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
	if (is_device) {
		val |= (portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
		/* NODRIV set: pullup stays off until explicitly enabled */
		val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	} else {
		val &= ~(portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
		val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	}
	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);

	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_SWAP_6368_REG);
	if (is_device)
		val |= USBH_PRIV_SWAP_USBD_MASK;
	else
		val &= ~USBH_PRIV_SWAP_USBD_MASK;
	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_SWAP_6368_REG);
}
/**
 * bcm63xx_select_pullup - Enable/disable the pullup on D+
 * @udc: Reference to the device controller.
 * @is_on: true to enable the pullup, false to disable.
 *
 * If the pullup is active, the host will sense a FS/HS device connected to
 * the port.  If the pullup is inactive, the host will think the USB
 * device has been disconnected.
 */
static void bcm63xx_select_pullup(struct bcm63xx_udc *udc, bool is_on)
{
	u32 val, portmask = BIT(udc->pd->port_no);

	/* NODRIV bit is active-low w.r.t. the pullup: clear it to connect */
	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
	if (is_on)
		val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	else
		val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);
}
/**
 * bcm63xx_uninit_udc_hw - Shut down the hardware prior to driver removal.
 * @udc: Reference to the device controller.
 *
 * This just masks the IUDMA IRQs and releases the clocks.  It is assumed
 * that bcm63xx_udc_stop() has already run, and the clocks are stopped.
 * The clocks are re-enabled temporarily because iudma_uninit() touches
 * hardware registers.
 */
static void bcm63xx_uninit_udc_hw(struct bcm63xx_udc *udc)
{
	set_clocks(udc, true);
	iudma_uninit(udc);
	set_clocks(udc, false);

	clk_put(udc->usbd_clk);
	clk_put(udc->usbh_clk);
}
924 * bcm63xx_init_udc_hw - Initialize the controller hardware and data structures.
925 * @udc: Reference to the device controller.
927 static int bcm63xx_init_udc_hw(struct bcm63xx_udc *udc)
929 int i, rc = 0;
930 u32 val;
932 udc->ep0_ctrl_buf = devm_kzalloc(udc->dev, BCM63XX_MAX_CTRL_PKT,
933 GFP_KERNEL);
934 if (!udc->ep0_ctrl_buf)
935 return -ENOMEM;
937 INIT_LIST_HEAD(&udc->gadget.ep_list);
938 for (i = 0; i < BCM63XX_NUM_EP; i++) {
939 struct bcm63xx_ep *bep = &udc->bep[i];
941 bep->ep.name = bcm63xx_ep_name[i];
942 bep->ep_num = i;
943 bep->ep.ops = &bcm63xx_udc_ep_ops;
944 list_add_tail(&bep->ep.ep_list, &udc->gadget.ep_list);
945 bep->halted = 0;
946 bep->ep.maxpacket = BCM63XX_MAX_CTRL_PKT;
947 bep->udc = udc;
948 bep->ep.desc = NULL;
949 INIT_LIST_HEAD(&bep->queue);
952 udc->gadget.ep0 = &udc->bep[0].ep;
953 list_del(&udc->bep[0].ep.ep_list);
955 udc->gadget.speed = USB_SPEED_UNKNOWN;
956 udc->ep0state = EP0_SHUTDOWN;
958 udc->usbh_clk = clk_get(udc->dev, "usbh");
959 if (IS_ERR(udc->usbh_clk))
960 return -EIO;
962 udc->usbd_clk = clk_get(udc->dev, "usbd");
963 if (IS_ERR(udc->usbd_clk)) {
964 clk_put(udc->usbh_clk);
965 return -EIO;
968 set_clocks(udc, true);
970 val = USBD_CONTROL_AUTO_CSRS_MASK |
971 USBD_CONTROL_DONE_CSRS_MASK |
972 (irq_coalesce ? USBD_CONTROL_RXZSCFG_MASK : 0);
973 usbd_writel(udc, val, USBD_CONTROL_REG);
975 val = USBD_STRAPS_APP_SELF_PWR_MASK |
976 USBD_STRAPS_APP_RAM_IF_MASK |
977 USBD_STRAPS_APP_CSRPRGSUP_MASK |
978 USBD_STRAPS_APP_8BITPHY_MASK |
979 USBD_STRAPS_APP_RMTWKUP_MASK;
981 if (udc->gadget.max_speed == USB_SPEED_HIGH)
982 val |= (BCM63XX_SPD_HIGH << USBD_STRAPS_SPEED_SHIFT);
983 else
984 val |= (BCM63XX_SPD_FULL << USBD_STRAPS_SPEED_SHIFT);
985 usbd_writel(udc, val, USBD_STRAPS_REG);
987 bcm63xx_set_ctrl_irqs(udc, false);
989 usbd_writel(udc, 0, USBD_EVENT_IRQ_CFG_LO_REG);
991 val = USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_ENUM_ON) |
992 USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_SET_CSRS);
993 usbd_writel(udc, val, USBD_EVENT_IRQ_CFG_HI_REG);
995 rc = iudma_init(udc);
996 set_clocks(udc, false);
997 if (rc)
998 bcm63xx_uninit_udc_hw(udc);
1000 return 0;
1003 /***********************************************************************
1004 * Standard EP gadget operations
1005 ***********************************************************************/
/**
 * bcm63xx_ep_enable - Enable one endpoint.
 * @ep: Endpoint to enable.
 * @desc: Contains max packet, direction, etc.
 *
 * Most of the endpoint parameters are fixed in this controller, so there
 * isn't much for this function to do.
 *
 * NOTE(review): the !ep check comes after our_ep()/bep->udc have already
 * been evaluated, so a NULL @ep would be dereferenced first — confirm
 * callers never pass NULL here.
 */
static int bcm63xx_ep_enable(struct usb_ep *ep,
	const struct usb_endpoint_descriptor *desc)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	struct iudma_ch *iudma = bep->iudma;
	unsigned long flags;

	/* ep0 is driven by the internal state machine, not this path */
	if (!ep || !desc || ep->name == bcm63xx_ep0name)
		return -EINVAL;

	if (!udc->driver)
		return -ESHUTDOWN;

	spin_lock_irqsave(&udc->lock, flags);
	if (iudma->enabled) {
		spin_unlock_irqrestore(&udc->lock, flags);
		return -EINVAL;
	}

	iudma->enabled = true;
	BUG_ON(!list_empty(&bep->queue));

	iudma_reset_channel(udc, iudma);

	/* fresh endpoint: clear halt/stall/wedge state */
	bep->halted = 0;
	bcm63xx_set_stall(udc, bep, false);
	clear_bit(bep->ep_num, &udc->wedgemap);

	ep->desc = desc;
	ep->maxpacket = usb_endpoint_maxp(desc);

	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}
/**
 * bcm63xx_ep_disable - Disable one endpoint.
 * @ep: Endpoint to disable.
 *
 * Stops the endpoint's IUDMA channel and completes every queued request
 * with -ESHUTDOWN.  The spinlock is dropped around each ->complete()
 * callback, as gadget drivers may requeue or sleep-free from there.
 */
static int bcm63xx_ep_disable(struct usb_ep *ep)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	struct iudma_ch *iudma = bep->iudma;
	struct list_head *pos, *n;
	unsigned long flags;

	if (!ep || !ep->desc)
		return -EINVAL;

	spin_lock_irqsave(&udc->lock, flags);
	if (!iudma->enabled) {
		spin_unlock_irqrestore(&udc->lock, flags);
		return -EINVAL;
	}
	iudma->enabled = false;

	iudma_reset_channel(udc, iudma);

	if (!list_empty(&bep->queue)) {
		list_for_each_safe(pos, n, &bep->queue) {
			struct bcm63xx_req *breq =
				list_entry(pos, struct bcm63xx_req, queue);

			usb_gadget_unmap_request(&udc->gadget, &breq->req,
						 iudma->is_tx);
			list_del(&breq->queue);
			breq->req.status = -ESHUTDOWN;

			/*
			 * NOTE(review): the lock is released while iterating
			 * with list_for_each_safe; if ->complete() queues new
			 * requests they could race with this walk — confirm
			 * gadget drivers don't requeue on a disabled ep.
			 */
			spin_unlock_irqrestore(&udc->lock, flags);
			breq->req.complete(&iudma->bep->ep, &breq->req);
			spin_lock_irqsave(&udc->lock, flags);
		}
	}

	ep->desc = NULL;

	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}
1097 * bcm63xx_udc_alloc_request - Allocate a new request.
1098 * @ep: Endpoint associated with the request.
1099 * @mem_flags: Flags to pass to kzalloc().
1101 static struct usb_request *bcm63xx_udc_alloc_request(struct usb_ep *ep,
1102 gfp_t mem_flags)
1104 struct bcm63xx_req *breq;
1106 breq = kzalloc(sizeof(*breq), mem_flags);
1107 if (!breq)
1108 return NULL;
1109 return &breq->req;
/**
 * bcm63xx_udc_free_request - Free a request.
 * @ep: Endpoint associated with the request.
 * @req: Request to free.
 */
static void bcm63xx_udc_free_request(struct usb_ep *ep,
	struct usb_request *req)
{
	kfree(our_req(req));
}
1125 * bcm63xx_udc_queue - Queue up a new request.
1126 * @ep: Endpoint associated with the request.
1127 * @req: Request to add.
1128 * @mem_flags: Unused.
1130 * If the queue is empty, start this request immediately. Otherwise, add
1131 * it to the list.
1133 * ep0 replies are sent through this function from the gadget driver, but
1134 * they are treated differently because they need to be handled by the ep0
1135 * state machine. (Sometimes they are replies to control requests that
1136 * were spoofed by this driver, and so they shouldn't be transmitted at all.)
1138 static int bcm63xx_udc_queue(struct usb_ep *ep, struct usb_request *req,
1139 gfp_t mem_flags)
1141 struct bcm63xx_ep *bep = our_ep(ep);
1142 struct bcm63xx_udc *udc = bep->udc;
1143 struct bcm63xx_req *breq = our_req(req);
1144 unsigned long flags;
1145 int rc = 0;
1147 if (unlikely(!req || !req->complete || !req->buf || !ep))
1148 return -EINVAL;
1150 req->actual = 0;
1151 req->status = 0;
1152 breq->offset = 0;
1154 if (bep == &udc->bep[0]) {
1155 /* only one reply per request, please */
1156 if (udc->ep0_reply)
1157 return -EINVAL;
1159 udc->ep0_reply = req;
1160 schedule_work(&udc->ep0_wq);
1161 return 0;
1164 spin_lock_irqsave(&udc->lock, flags);
1165 if (!bep->iudma->enabled) {
1166 rc = -ESHUTDOWN;
1167 goto out;
1170 rc = usb_gadget_map_request(&udc->gadget, req, bep->iudma->is_tx);
1171 if (rc == 0) {
1172 list_add_tail(&breq->queue, &bep->queue);
1173 if (list_is_singular(&bep->queue))
1174 iudma_write(udc, bep->iudma, breq);
1177 out:
1178 spin_unlock_irqrestore(&udc->lock, flags);
1179 return rc;
1183 * bcm63xx_udc_dequeue - Remove a pending request from the queue.
1184 * @ep: Endpoint associated with the request.
1185 * @req: Request to remove.
1187 * If the request is not at the head of the queue, this is easy - just nuke
1188 * it. If the request is at the head of the queue, we'll need to stop the
1189 * DMA transaction and then queue up the successor.
1191 static int bcm63xx_udc_dequeue(struct usb_ep *ep, struct usb_request *req)
1193 struct bcm63xx_ep *bep = our_ep(ep);
1194 struct bcm63xx_udc *udc = bep->udc;
1195 struct bcm63xx_req *breq = our_req(req), *cur;
1196 unsigned long flags;
1197 int rc = 0;
1199 spin_lock_irqsave(&udc->lock, flags);
1200 if (list_empty(&bep->queue)) {
1201 rc = -EINVAL;
1202 goto out;
1205 cur = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
1206 usb_gadget_unmap_request(&udc->gadget, &breq->req, bep->iudma->is_tx);
1208 if (breq == cur) {
1209 iudma_reset_channel(udc, bep->iudma);
1210 list_del(&breq->queue);
1212 if (!list_empty(&bep->queue)) {
1213 struct bcm63xx_req *next;
1215 next = list_first_entry(&bep->queue,
1216 struct bcm63xx_req, queue);
1217 iudma_write(udc, bep->iudma, next);
1219 } else {
1220 list_del(&breq->queue);
1223 out:
1224 spin_unlock_irqrestore(&udc->lock, flags);
1226 req->status = -ESHUTDOWN;
1227 req->complete(ep, req);
1229 return rc;
1233 * bcm63xx_udc_set_halt - Enable/disable STALL flag in the hardware.
1234 * @ep: Endpoint to halt.
1235 * @value: Zero to clear halt; nonzero to set halt.
1237 * See comments in bcm63xx_update_wedge().
1239 static int bcm63xx_udc_set_halt(struct usb_ep *ep, int value)
1241 struct bcm63xx_ep *bep = our_ep(ep);
1242 struct bcm63xx_udc *udc = bep->udc;
1243 unsigned long flags;
1245 spin_lock_irqsave(&udc->lock, flags);
1246 bcm63xx_set_stall(udc, bep, !!value);
1247 bep->halted = value;
1248 spin_unlock_irqrestore(&udc->lock, flags);
1250 return 0;
1254 * bcm63xx_udc_set_wedge - Stall the endpoint until the next reset.
1255 * @ep: Endpoint to wedge.
1257 * See comments in bcm63xx_update_wedge().
1259 static int bcm63xx_udc_set_wedge(struct usb_ep *ep)
1261 struct bcm63xx_ep *bep = our_ep(ep);
1262 struct bcm63xx_udc *udc = bep->udc;
1263 unsigned long flags;
1265 spin_lock_irqsave(&udc->lock, flags);
1266 set_bit(bep->ep_num, &udc->wedgemap);
1267 bcm63xx_set_stall(udc, bep, true);
1268 spin_unlock_irqrestore(&udc->lock, flags);
1270 return 0;
1273 static const struct usb_ep_ops bcm63xx_udc_ep_ops = {
1274 .enable = bcm63xx_ep_enable,
1275 .disable = bcm63xx_ep_disable,
1277 .alloc_request = bcm63xx_udc_alloc_request,
1278 .free_request = bcm63xx_udc_free_request,
1280 .queue = bcm63xx_udc_queue,
1281 .dequeue = bcm63xx_udc_dequeue,
1283 .set_halt = bcm63xx_udc_set_halt,
1284 .set_wedge = bcm63xx_udc_set_wedge,
/***********************************************************************
 * EP0 handling
 ***********************************************************************/
1292 * bcm63xx_ep0_setup_callback - Drop spinlock to invoke ->setup callback.
1293 * @udc: Reference to the device controller.
1294 * @ctrl: 8-byte SETUP request.
1296 static int bcm63xx_ep0_setup_callback(struct bcm63xx_udc *udc,
1297 struct usb_ctrlrequest *ctrl)
1299 int rc;
1301 spin_unlock_irq(&udc->lock);
1302 rc = udc->driver->setup(&udc->gadget, ctrl);
1303 spin_lock_irq(&udc->lock);
1304 return rc;
1308 * bcm63xx_ep0_spoof_set_cfg - Synthesize a SET_CONFIGURATION request.
1309 * @udc: Reference to the device controller.
1311 * Many standard requests are handled automatically in the hardware, but
1312 * we still need to pass them to the gadget driver so that it can
1313 * reconfigure the interfaces/endpoints if necessary.
1315 * Unfortunately we are not able to send a STALL response if the host
1316 * requests an invalid configuration. If this happens, we'll have to be
1317 * content with printing a warning.
1319 static int bcm63xx_ep0_spoof_set_cfg(struct bcm63xx_udc *udc)
1321 struct usb_ctrlrequest ctrl;
1322 int rc;
1324 ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_DEVICE;
1325 ctrl.bRequest = USB_REQ_SET_CONFIGURATION;
1326 ctrl.wValue = cpu_to_le16(udc->cfg);
1327 ctrl.wIndex = 0;
1328 ctrl.wLength = 0;
1330 rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
1331 if (rc < 0) {
1332 dev_warn_ratelimited(udc->dev,
1333 "hardware auto-acked bad SET_CONFIGURATION(%d) request\n",
1334 udc->cfg);
1336 return rc;
1340 * bcm63xx_ep0_spoof_set_iface - Synthesize a SET_INTERFACE request.
1341 * @udc: Reference to the device controller.
1343 static int bcm63xx_ep0_spoof_set_iface(struct bcm63xx_udc *udc)
1345 struct usb_ctrlrequest ctrl;
1346 int rc;
1348 ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_INTERFACE;
1349 ctrl.bRequest = USB_REQ_SET_INTERFACE;
1350 ctrl.wValue = cpu_to_le16(udc->alt_iface);
1351 ctrl.wIndex = cpu_to_le16(udc->iface);
1352 ctrl.wLength = 0;
1354 rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
1355 if (rc < 0) {
1356 dev_warn_ratelimited(udc->dev,
1357 "hardware auto-acked bad SET_INTERFACE(%d,%d) request\n",
1358 udc->iface, udc->alt_iface);
1360 return rc;
1364 * bcm63xx_ep0_map_write - dma_map and iudma_write a single request.
1365 * @udc: Reference to the device controller.
1366 * @ch_idx: IUDMA channel number.
1367 * @req: USB gadget layer representation of the request.
1369 static void bcm63xx_ep0_map_write(struct bcm63xx_udc *udc, int ch_idx,
1370 struct usb_request *req)
1372 struct bcm63xx_req *breq = our_req(req);
1373 struct iudma_ch *iudma = &udc->iudma[ch_idx];
1375 BUG_ON(udc->ep0_request);
1376 udc->ep0_request = req;
1378 req->actual = 0;
1379 breq->offset = 0;
1380 usb_gadget_map_request(&udc->gadget, req, iudma->is_tx);
1381 iudma_write(udc, iudma, breq);
1385 * bcm63xx_ep0_complete - Set completion status and "stage" the callback.
1386 * @udc: Reference to the device controller.
1387 * @req: USB gadget layer representation of the request.
1388 * @status: Status to return to the gadget driver.
1390 static void bcm63xx_ep0_complete(struct bcm63xx_udc *udc,
1391 struct usb_request *req, int status)
1393 req->status = status;
1394 if (status)
1395 req->actual = 0;
1396 if (req->complete) {
1397 spin_unlock_irq(&udc->lock);
1398 req->complete(&udc->bep[0].ep, req);
1399 spin_lock_irq(&udc->lock);
1404 * bcm63xx_ep0_nuke_reply - Abort request from the gadget driver due to
1405 * reset/shutdown.
1406 * @udc: Reference to the device controller.
1407 * @is_tx: Nonzero for TX (IN), zero for RX (OUT).
1409 static void bcm63xx_ep0_nuke_reply(struct bcm63xx_udc *udc, int is_tx)
1411 struct usb_request *req = udc->ep0_reply;
1413 udc->ep0_reply = NULL;
1414 usb_gadget_unmap_request(&udc->gadget, req, is_tx);
1415 if (udc->ep0_request == req) {
1416 udc->ep0_req_completed = 0;
1417 udc->ep0_request = NULL;
1419 bcm63xx_ep0_complete(udc, req, -ESHUTDOWN);
1423 * bcm63xx_ep0_read_complete - Close out the pending ep0 request; return
1424 * transfer len.
1425 * @udc: Reference to the device controller.
1427 static int bcm63xx_ep0_read_complete(struct bcm63xx_udc *udc)
1429 struct usb_request *req = udc->ep0_request;
1431 udc->ep0_req_completed = 0;
1432 udc->ep0_request = NULL;
1434 return req->actual;
1438 * bcm63xx_ep0_internal_request - Helper function to submit an ep0 request.
1439 * @udc: Reference to the device controller.
1440 * @ch_idx: IUDMA channel number.
1441 * @length: Number of bytes to TX/RX.
1443 * Used for simple transfers performed by the ep0 worker. This will always
1444 * use ep0_ctrl_req / ep0_ctrl_buf.
1446 static void bcm63xx_ep0_internal_request(struct bcm63xx_udc *udc, int ch_idx,
1447 int length)
1449 struct usb_request *req = &udc->ep0_ctrl_req.req;
1451 req->buf = udc->ep0_ctrl_buf;
1452 req->length = length;
1453 req->complete = NULL;
1455 bcm63xx_ep0_map_write(udc, ch_idx, req);
1459 * bcm63xx_ep0_do_setup - Parse new SETUP packet and decide how to handle it.
1460 * @udc: Reference to the device controller.
1462 * EP0_IDLE probably shouldn't ever happen. EP0_REQUEUE means we're ready
1463 * for the next packet. Anything else means the transaction requires multiple
1464 * stages of handling.
1466 static enum bcm63xx_ep0_state bcm63xx_ep0_do_setup(struct bcm63xx_udc *udc)
1468 int rc;
1469 struct usb_ctrlrequest *ctrl = (void *)udc->ep0_ctrl_buf;
1471 rc = bcm63xx_ep0_read_complete(udc);
1473 if (rc < 0) {
1474 dev_err(udc->dev, "missing SETUP packet\n");
1475 return EP0_IDLE;
1479 * Handle 0-byte IN STATUS acknowledgement. The hardware doesn't
1480 * ALWAYS deliver these 100% of the time, so if we happen to see one,
1481 * just throw it away.
1483 if (rc == 0)
1484 return EP0_REQUEUE;
1486 /* Drop malformed SETUP packets */
1487 if (rc != sizeof(*ctrl)) {
1488 dev_warn_ratelimited(udc->dev,
1489 "malformed SETUP packet (%d bytes)\n", rc);
1490 return EP0_REQUEUE;
1493 /* Process new SETUP packet arriving on ep0 */
1494 rc = bcm63xx_ep0_setup_callback(udc, ctrl);
1495 if (rc < 0) {
1496 bcm63xx_set_stall(udc, &udc->bep[0], true);
1497 return EP0_REQUEUE;
1500 if (!ctrl->wLength)
1501 return EP0_REQUEUE;
1502 else if (ctrl->bRequestType & USB_DIR_IN)
1503 return EP0_IN_DATA_PHASE_SETUP;
1504 else
1505 return EP0_OUT_DATA_PHASE_SETUP;
1509 * bcm63xx_ep0_do_idle - Check for outstanding requests if ep0 is idle.
1510 * @udc: Reference to the device controller.
1512 * In state EP0_IDLE, the RX descriptor is either pending, or has been
1513 * filled with a SETUP packet from the host. This function handles new
1514 * SETUP packets, control IRQ events (which can generate fake SETUP packets),
1515 * and reset/shutdown events.
1517 * Returns 0 if work was done; -EAGAIN if nothing to do.
1519 static int bcm63xx_ep0_do_idle(struct bcm63xx_udc *udc)
1521 if (udc->ep0_req_reset) {
1522 udc->ep0_req_reset = 0;
1523 } else if (udc->ep0_req_set_cfg) {
1524 udc->ep0_req_set_cfg = 0;
1525 if (bcm63xx_ep0_spoof_set_cfg(udc) >= 0)
1526 udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
1527 } else if (udc->ep0_req_set_iface) {
1528 udc->ep0_req_set_iface = 0;
1529 if (bcm63xx_ep0_spoof_set_iface(udc) >= 0)
1530 udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
1531 } else if (udc->ep0_req_completed) {
1532 udc->ep0state = bcm63xx_ep0_do_setup(udc);
1533 return udc->ep0state == EP0_IDLE ? -EAGAIN : 0;
1534 } else if (udc->ep0_req_shutdown) {
1535 udc->ep0_req_shutdown = 0;
1536 udc->ep0_req_completed = 0;
1537 udc->ep0_request = NULL;
1538 iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
1539 usb_gadget_unmap_request(&udc->gadget,
1540 &udc->ep0_ctrl_req.req, 0);
1542 /* bcm63xx_udc_pullup() is waiting for this */
1543 mb();
1544 udc->ep0state = EP0_SHUTDOWN;
1545 } else if (udc->ep0_reply) {
1547 * This could happen if a USB RESET shows up during an ep0
1548 * transaction (especially if a laggy driver like gadgetfs
1549 * is in use).
1551 dev_warn(udc->dev, "nuking unexpected reply\n");
1552 bcm63xx_ep0_nuke_reply(udc, 0);
1553 } else {
1554 return -EAGAIN;
1557 return 0;
1561 * bcm63xx_ep0_one_round - Handle the current ep0 state.
1562 * @udc: Reference to the device controller.
1564 * Returns 0 if work was done; -EAGAIN if nothing to do.
1566 static int bcm63xx_ep0_one_round(struct bcm63xx_udc *udc)
1568 enum bcm63xx_ep0_state ep0state = udc->ep0state;
1569 bool shutdown = udc->ep0_req_reset || udc->ep0_req_shutdown;
1571 switch (udc->ep0state) {
1572 case EP0_REQUEUE:
1573 /* set up descriptor to receive SETUP packet */
1574 bcm63xx_ep0_internal_request(udc, IUDMA_EP0_RXCHAN,
1575 BCM63XX_MAX_CTRL_PKT);
1576 ep0state = EP0_IDLE;
1577 break;
1578 case EP0_IDLE:
1579 return bcm63xx_ep0_do_idle(udc);
1580 case EP0_IN_DATA_PHASE_SETUP:
1582 * Normal case: TX request is in ep0_reply (queued by the
1583 * callback), or will be queued shortly. When it's here,
1584 * send it to the HW and go to EP0_IN_DATA_PHASE_COMPLETE.
1586 * Shutdown case: Stop waiting for the reply. Just
1587 * REQUEUE->IDLE. The gadget driver is NOT expected to
1588 * queue anything else now.
1590 if (udc->ep0_reply) {
1591 bcm63xx_ep0_map_write(udc, IUDMA_EP0_TXCHAN,
1592 udc->ep0_reply);
1593 ep0state = EP0_IN_DATA_PHASE_COMPLETE;
1594 } else if (shutdown) {
1595 ep0state = EP0_REQUEUE;
1597 break;
1598 case EP0_IN_DATA_PHASE_COMPLETE: {
1600 * Normal case: TX packet (ep0_reply) is in flight; wait for
1601 * it to finish, then go back to REQUEUE->IDLE.
1603 * Shutdown case: Reset the TX channel, send -ESHUTDOWN
1604 * completion to the gadget driver, then REQUEUE->IDLE.
1606 if (udc->ep0_req_completed) {
1607 udc->ep0_reply = NULL;
1608 bcm63xx_ep0_read_complete(udc);
1610 * the "ack" sometimes gets eaten (see
1611 * bcm63xx_ep0_do_idle)
1613 ep0state = EP0_REQUEUE;
1614 } else if (shutdown) {
1615 iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
1616 bcm63xx_ep0_nuke_reply(udc, 1);
1617 ep0state = EP0_REQUEUE;
1619 break;
1621 case EP0_OUT_DATA_PHASE_SETUP:
1622 /* Similar behavior to EP0_IN_DATA_PHASE_SETUP */
1623 if (udc->ep0_reply) {
1624 bcm63xx_ep0_map_write(udc, IUDMA_EP0_RXCHAN,
1625 udc->ep0_reply);
1626 ep0state = EP0_OUT_DATA_PHASE_COMPLETE;
1627 } else if (shutdown) {
1628 ep0state = EP0_REQUEUE;
1630 break;
1631 case EP0_OUT_DATA_PHASE_COMPLETE: {
1632 /* Similar behavior to EP0_IN_DATA_PHASE_COMPLETE */
1633 if (udc->ep0_req_completed) {
1634 udc->ep0_reply = NULL;
1635 bcm63xx_ep0_read_complete(udc);
1637 /* send 0-byte ack to host */
1638 bcm63xx_ep0_internal_request(udc, IUDMA_EP0_TXCHAN, 0);
1639 ep0state = EP0_OUT_STATUS_PHASE;
1640 } else if (shutdown) {
1641 iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
1642 bcm63xx_ep0_nuke_reply(udc, 0);
1643 ep0state = EP0_REQUEUE;
1645 break;
1647 case EP0_OUT_STATUS_PHASE:
1649 * Normal case: 0-byte OUT ack packet is in flight; wait
1650 * for it to finish, then go back to REQUEUE->IDLE.
1652 * Shutdown case: just cancel the transmission. Don't bother
1653 * calling the completion, because it originated from this
1654 * function anyway. Then go back to REQUEUE->IDLE.
1656 if (udc->ep0_req_completed) {
1657 bcm63xx_ep0_read_complete(udc);
1658 ep0state = EP0_REQUEUE;
1659 } else if (shutdown) {
1660 iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
1661 udc->ep0_request = NULL;
1662 ep0state = EP0_REQUEUE;
1664 break;
1665 case EP0_IN_FAKE_STATUS_PHASE: {
1667 * Normal case: we spoofed a SETUP packet and are now
1668 * waiting for the gadget driver to send a 0-byte reply.
1669 * This doesn't actually get sent to the HW because the
1670 * HW has already sent its own reply. Once we get the
1671 * response, return to IDLE.
1673 * Shutdown case: return to IDLE immediately.
1675 * Note that the ep0 RX descriptor has remained queued
1676 * (and possibly unfilled) during this entire transaction.
1677 * The HW datapath (IUDMA) never even sees SET_CONFIGURATION
1678 * or SET_INTERFACE transactions.
1680 struct usb_request *r = udc->ep0_reply;
1682 if (!r) {
1683 if (shutdown)
1684 ep0state = EP0_IDLE;
1685 break;
1688 bcm63xx_ep0_complete(udc, r, 0);
1689 udc->ep0_reply = NULL;
1690 ep0state = EP0_IDLE;
1691 break;
1693 case EP0_SHUTDOWN:
1694 break;
1697 if (udc->ep0state == ep0state)
1698 return -EAGAIN;
1700 udc->ep0state = ep0state;
1701 return 0;
1705 * bcm63xx_ep0_process - ep0 worker thread / state machine.
1706 * @w: Workqueue struct.
1708 * bcm63xx_ep0_process is triggered any time an event occurs on ep0. It
1709 * is used to synchronize ep0 events and ensure that both HW and SW events
1710 * occur in a well-defined order. When the ep0 IUDMA queues are idle, it may
1711 * synthesize SET_CONFIGURATION / SET_INTERFACE requests that were consumed
1712 * by the USBD hardware.
1714 * The worker function will continue iterating around the state machine
1715 * until there is nothing left to do. Usually "nothing left to do" means
1716 * that we're waiting for a new event from the hardware.
1718 static void bcm63xx_ep0_process(struct work_struct *w)
1720 struct bcm63xx_udc *udc = container_of(w, struct bcm63xx_udc, ep0_wq);
1721 spin_lock_irq(&udc->lock);
1722 while (bcm63xx_ep0_one_round(udc) == 0)
1724 spin_unlock_irq(&udc->lock);
/***********************************************************************
 * Standard UDC gadget operations
 ***********************************************************************/
1732 * bcm63xx_udc_get_frame - Read current SOF frame number from the HW.
1733 * @gadget: USB slave device.
1735 static int bcm63xx_udc_get_frame(struct usb_gadget *gadget)
1737 struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1739 return (usbd_readl(udc, USBD_STATUS_REG) &
1740 USBD_STATUS_SOF_MASK) >> USBD_STATUS_SOF_SHIFT;
1744 * bcm63xx_udc_pullup - Enable/disable pullup on D+ line.
1745 * @gadget: USB slave device.
1746 * @is_on: 0 to disable pullup, 1 to enable.
1748 * See notes in bcm63xx_select_pullup().
1750 static int bcm63xx_udc_pullup(struct usb_gadget *gadget, int is_on)
1752 struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1753 unsigned long flags;
1754 int i, rc = -EINVAL;
1756 spin_lock_irqsave(&udc->lock, flags);
1757 if (is_on && udc->ep0state == EP0_SHUTDOWN) {
1758 udc->gadget.speed = USB_SPEED_UNKNOWN;
1759 udc->ep0state = EP0_REQUEUE;
1760 bcm63xx_fifo_setup(udc);
1761 bcm63xx_fifo_reset(udc);
1762 bcm63xx_ep_setup(udc);
1764 bitmap_zero(&udc->wedgemap, BCM63XX_NUM_EP);
1765 for (i = 0; i < BCM63XX_NUM_EP; i++)
1766 bcm63xx_set_stall(udc, &udc->bep[i], false);
1768 bcm63xx_set_ctrl_irqs(udc, true);
1769 bcm63xx_select_pullup(gadget_to_udc(gadget), true);
1770 rc = 0;
1771 } else if (!is_on && udc->ep0state != EP0_SHUTDOWN) {
1772 bcm63xx_select_pullup(gadget_to_udc(gadget), false);
1774 udc->ep0_req_shutdown = 1;
1775 spin_unlock_irqrestore(&udc->lock, flags);
1777 while (1) {
1778 schedule_work(&udc->ep0_wq);
1779 if (udc->ep0state == EP0_SHUTDOWN)
1780 break;
1781 msleep(50);
1783 bcm63xx_set_ctrl_irqs(udc, false);
1784 cancel_work_sync(&udc->ep0_wq);
1785 return 0;
1788 spin_unlock_irqrestore(&udc->lock, flags);
1789 return rc;
1793 * bcm63xx_udc_start - Start the controller.
1794 * @gadget: USB slave device.
1795 * @driver: Driver for USB slave devices.
1797 static int bcm63xx_udc_start(struct usb_gadget *gadget,
1798 struct usb_gadget_driver *driver)
1800 struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1801 unsigned long flags;
1803 if (!driver || driver->max_speed < USB_SPEED_HIGH ||
1804 !driver->setup)
1805 return -EINVAL;
1806 if (!udc)
1807 return -ENODEV;
1808 if (udc->driver)
1809 return -EBUSY;
1811 spin_lock_irqsave(&udc->lock, flags);
1813 set_clocks(udc, true);
1814 bcm63xx_fifo_setup(udc);
1815 bcm63xx_ep_init(udc);
1816 bcm63xx_ep_setup(udc);
1817 bcm63xx_fifo_reset(udc);
1818 bcm63xx_select_phy_mode(udc, true);
1820 udc->driver = driver;
1821 driver->driver.bus = NULL;
1822 udc->gadget.dev.driver = &driver->driver;
1823 udc->gadget.dev.of_node = udc->dev->of_node;
1825 spin_unlock_irqrestore(&udc->lock, flags);
1827 return 0;
1831 * bcm63xx_udc_stop - Shut down the controller.
1832 * @gadget: USB slave device.
1833 * @driver: Driver for USB slave devices.
1835 static int bcm63xx_udc_stop(struct usb_gadget *gadget,
1836 struct usb_gadget_driver *driver)
1838 struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1839 unsigned long flags;
1841 spin_lock_irqsave(&udc->lock, flags);
1843 udc->driver = NULL;
1844 udc->gadget.dev.driver = NULL;
1847 * If we switch the PHY too abruptly after dropping D+, the host
1848 * will often complain:
1850 * hub 1-0:1.0: port 1 disabled by hub (EMI?), re-enabling...
1852 msleep(100);
1854 bcm63xx_select_phy_mode(udc, false);
1855 set_clocks(udc, false);
1857 spin_unlock_irqrestore(&udc->lock, flags);
1859 return 0;
1862 static const struct usb_gadget_ops bcm63xx_udc_ops = {
1863 .get_frame = bcm63xx_udc_get_frame,
1864 .pullup = bcm63xx_udc_pullup,
1865 .udc_start = bcm63xx_udc_start,
1866 .udc_stop = bcm63xx_udc_stop,
/***********************************************************************
 * IRQ handling
 ***********************************************************************/
1874 * bcm63xx_update_cfg_iface - Read current configuration/interface settings.
1875 * @udc: Reference to the device controller.
1877 * This controller intercepts SET_CONFIGURATION and SET_INTERFACE messages.
1878 * The driver never sees the raw control packets coming in on the ep0
1879 * IUDMA channel, but at least we get an interrupt event to tell us that
1880 * new values are waiting in the USBD_STATUS register.
1882 static void bcm63xx_update_cfg_iface(struct bcm63xx_udc *udc)
1884 u32 reg = usbd_readl(udc, USBD_STATUS_REG);
1886 udc->cfg = (reg & USBD_STATUS_CFG_MASK) >> USBD_STATUS_CFG_SHIFT;
1887 udc->iface = (reg & USBD_STATUS_INTF_MASK) >> USBD_STATUS_INTF_SHIFT;
1888 udc->alt_iface = (reg & USBD_STATUS_ALTINTF_MASK) >>
1889 USBD_STATUS_ALTINTF_SHIFT;
1890 bcm63xx_ep_setup(udc);
1894 * bcm63xx_update_link_speed - Check to see if the link speed has changed.
1895 * @udc: Reference to the device controller.
1897 * The link speed update coincides with a SETUP IRQ. Returns 1 if the
1898 * speed has changed, so that the caller can update the endpoint settings.
1900 static int bcm63xx_update_link_speed(struct bcm63xx_udc *udc)
1902 u32 reg = usbd_readl(udc, USBD_STATUS_REG);
1903 enum usb_device_speed oldspeed = udc->gadget.speed;
1905 switch ((reg & USBD_STATUS_SPD_MASK) >> USBD_STATUS_SPD_SHIFT) {
1906 case BCM63XX_SPD_HIGH:
1907 udc->gadget.speed = USB_SPEED_HIGH;
1908 break;
1909 case BCM63XX_SPD_FULL:
1910 udc->gadget.speed = USB_SPEED_FULL;
1911 break;
1912 default:
1913 /* this should never happen */
1914 udc->gadget.speed = USB_SPEED_UNKNOWN;
1915 dev_err(udc->dev,
1916 "received SETUP packet with invalid link speed\n");
1917 return 0;
1920 if (udc->gadget.speed != oldspeed) {
1921 dev_info(udc->dev, "link up, %s-speed mode\n",
1922 udc->gadget.speed == USB_SPEED_HIGH ? "high" : "full");
1923 return 1;
1924 } else {
1925 return 0;
1930 * bcm63xx_update_wedge - Iterate through wedged endpoints.
1931 * @udc: Reference to the device controller.
1932 * @new_status: true to "refresh" wedge status; false to clear it.
1934 * On a SETUP interrupt, we need to manually "refresh" the wedge status
1935 * because the controller hardware is designed to automatically clear
1936 * stalls in response to a CLEAR_FEATURE request from the host.
1938 * On a RESET interrupt, we do want to restore all wedged endpoints.
1940 static void bcm63xx_update_wedge(struct bcm63xx_udc *udc, bool new_status)
1942 int i;
1944 for_each_set_bit(i, &udc->wedgemap, BCM63XX_NUM_EP) {
1945 bcm63xx_set_stall(udc, &udc->bep[i], new_status);
1946 if (!new_status)
1947 clear_bit(i, &udc->wedgemap);
1952 * bcm63xx_udc_ctrl_isr - ISR for control path events (USBD).
1953 * @irq: IRQ number (unused).
1954 * @dev_id: Reference to the device controller.
1956 * This is where we handle link (VBUS) down, USB reset, speed changes,
1957 * SET_CONFIGURATION, and SET_INTERFACE events.
1959 static irqreturn_t bcm63xx_udc_ctrl_isr(int irq, void *dev_id)
1961 struct bcm63xx_udc *udc = dev_id;
1962 u32 stat;
1963 bool disconnected = false;
1965 stat = usbd_readl(udc, USBD_EVENT_IRQ_STATUS_REG) &
1966 usbd_readl(udc, USBD_EVENT_IRQ_MASK_REG);
1968 usbd_writel(udc, stat, USBD_EVENT_IRQ_STATUS_REG);
1970 spin_lock(&udc->lock);
1971 if (stat & BIT(USBD_EVENT_IRQ_USB_LINK)) {
1972 /* VBUS toggled */
1974 if (!(usbd_readl(udc, USBD_EVENTS_REG) &
1975 USBD_EVENTS_USB_LINK_MASK) &&
1976 udc->gadget.speed != USB_SPEED_UNKNOWN)
1977 dev_info(udc->dev, "link down\n");
1979 udc->gadget.speed = USB_SPEED_UNKNOWN;
1980 disconnected = true;
1982 if (stat & BIT(USBD_EVENT_IRQ_USB_RESET)) {
1983 bcm63xx_fifo_setup(udc);
1984 bcm63xx_fifo_reset(udc);
1985 bcm63xx_ep_setup(udc);
1987 bcm63xx_update_wedge(udc, false);
1989 udc->ep0_req_reset = 1;
1990 schedule_work(&udc->ep0_wq);
1991 disconnected = true;
1993 if (stat & BIT(USBD_EVENT_IRQ_SETUP)) {
1994 if (bcm63xx_update_link_speed(udc)) {
1995 bcm63xx_fifo_setup(udc);
1996 bcm63xx_ep_setup(udc);
1998 bcm63xx_update_wedge(udc, true);
2000 if (stat & BIT(USBD_EVENT_IRQ_SETCFG)) {
2001 bcm63xx_update_cfg_iface(udc);
2002 udc->ep0_req_set_cfg = 1;
2003 schedule_work(&udc->ep0_wq);
2005 if (stat & BIT(USBD_EVENT_IRQ_SETINTF)) {
2006 bcm63xx_update_cfg_iface(udc);
2007 udc->ep0_req_set_iface = 1;
2008 schedule_work(&udc->ep0_wq);
2010 spin_unlock(&udc->lock);
2012 if (disconnected && udc->driver)
2013 udc->driver->disconnect(&udc->gadget);
2015 return IRQ_HANDLED;
2019 * bcm63xx_udc_data_isr - ISR for data path events (IUDMA).
2020 * @irq: IRQ number (unused).
2021 * @dev_id: Reference to the IUDMA channel that generated the interrupt.
2023 * For the two ep0 channels, we have special handling that triggers the
2024 * ep0 worker thread. For normal bulk/intr channels, either queue up
2025 * the next buffer descriptor for the transaction (incomplete transaction),
2026 * or invoke the completion callback (complete transactions).
2028 static irqreturn_t bcm63xx_udc_data_isr(int irq, void *dev_id)
2030 struct iudma_ch *iudma = dev_id;
2031 struct bcm63xx_udc *udc = iudma->udc;
2032 struct bcm63xx_ep *bep;
2033 struct usb_request *req = NULL;
2034 struct bcm63xx_req *breq = NULL;
2035 int rc;
2036 bool is_done = false;
2038 spin_lock(&udc->lock);
2040 usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
2041 ENETDMAC_IR_REG(iudma->ch_idx));
2042 bep = iudma->bep;
2043 rc = iudma_read(udc, iudma);
2045 /* special handling for EP0 RX (0) and TX (1) */
2046 if (iudma->ch_idx == IUDMA_EP0_RXCHAN ||
2047 iudma->ch_idx == IUDMA_EP0_TXCHAN) {
2048 req = udc->ep0_request;
2049 breq = our_req(req);
2051 /* a single request could require multiple submissions */
2052 if (rc >= 0) {
2053 req->actual += rc;
2055 if (req->actual >= req->length || breq->bd_bytes > rc) {
2056 udc->ep0_req_completed = 1;
2057 is_done = true;
2058 schedule_work(&udc->ep0_wq);
2060 /* "actual" on a ZLP is 1 byte */
2061 req->actual = min(req->actual, req->length);
2062 } else {
2063 /* queue up the next BD (same request) */
2064 iudma_write(udc, iudma, breq);
2067 } else if (!list_empty(&bep->queue)) {
2068 breq = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
2069 req = &breq->req;
2071 if (rc >= 0) {
2072 req->actual += rc;
2074 if (req->actual >= req->length || breq->bd_bytes > rc) {
2075 is_done = true;
2076 list_del(&breq->queue);
2078 req->actual = min(req->actual, req->length);
2080 if (!list_empty(&bep->queue)) {
2081 struct bcm63xx_req *next;
2083 next = list_first_entry(&bep->queue,
2084 struct bcm63xx_req, queue);
2085 iudma_write(udc, iudma, next);
2087 } else {
2088 iudma_write(udc, iudma, breq);
2092 spin_unlock(&udc->lock);
2094 if (is_done) {
2095 usb_gadget_unmap_request(&udc->gadget, req, iudma->is_tx);
2096 if (req->complete)
2097 req->complete(&bep->ep, req);
2100 return IRQ_HANDLED;
/***********************************************************************
 * Debug filesystem
 ***********************************************************************/
2108 * bcm63xx_usbd_dbg_show - Show USBD controller state.
2109 * @s: seq_file to which the information will be written.
2110 * @p: Unused.
2112 * This file nominally shows up as /sys/kernel/debug/bcm63xx_udc/usbd
2114 static int bcm63xx_usbd_dbg_show(struct seq_file *s, void *p)
2116 struct bcm63xx_udc *udc = s->private;
2118 if (!udc->driver)
2119 return -ENODEV;
2121 seq_printf(s, "ep0 state: %s\n",
2122 bcm63xx_ep0_state_names[udc->ep0state]);
2123 seq_printf(s, " pending requests: %s%s%s%s%s%s%s\n",
2124 udc->ep0_req_reset ? "reset " : "",
2125 udc->ep0_req_set_cfg ? "set_cfg " : "",
2126 udc->ep0_req_set_iface ? "set_iface " : "",
2127 udc->ep0_req_shutdown ? "shutdown " : "",
2128 udc->ep0_request ? "pending " : "",
2129 udc->ep0_req_completed ? "completed " : "",
2130 udc->ep0_reply ? "reply " : "");
2131 seq_printf(s, "cfg: %d; iface: %d; alt_iface: %d\n",
2132 udc->cfg, udc->iface, udc->alt_iface);
2133 seq_printf(s, "regs:\n");
2134 seq_printf(s, " control: %08x; straps: %08x; status: %08x\n",
2135 usbd_readl(udc, USBD_CONTROL_REG),
2136 usbd_readl(udc, USBD_STRAPS_REG),
2137 usbd_readl(udc, USBD_STATUS_REG));
2138 seq_printf(s, " events: %08x; stall: %08x\n",
2139 usbd_readl(udc, USBD_EVENTS_REG),
2140 usbd_readl(udc, USBD_STALL_REG));
2142 return 0;
2146 * bcm63xx_iudma_dbg_show - Show IUDMA status and descriptors.
2147 * @s: seq_file to which the information will be written.
2148 * @p: Unused.
2150 * This file nominally shows up as /sys/kernel/debug/bcm63xx_udc/iudma
2152 static int bcm63xx_iudma_dbg_show(struct seq_file *s, void *p)
2154 struct bcm63xx_udc *udc = s->private;
2155 int ch_idx, i;
2156 u32 sram2, sram3;
2158 if (!udc->driver)
2159 return -ENODEV;
2161 for (ch_idx = 0; ch_idx < BCM63XX_NUM_IUDMA; ch_idx++) {
2162 struct iudma_ch *iudma = &udc->iudma[ch_idx];
2163 struct list_head *pos;
2165 seq_printf(s, "IUDMA channel %d -- ", ch_idx);
2166 switch (iudma_defaults[ch_idx].ep_type) {
2167 case BCMEP_CTRL:
2168 seq_printf(s, "control");
2169 break;
2170 case BCMEP_BULK:
2171 seq_printf(s, "bulk");
2172 break;
2173 case BCMEP_INTR:
2174 seq_printf(s, "interrupt");
2175 break;
2177 seq_printf(s, ch_idx & 0x01 ? " tx" : " rx");
2178 seq_printf(s, " [ep%d]:\n",
2179 max_t(int, iudma_defaults[ch_idx].ep_num, 0));
2180 seq_printf(s, " cfg: %08x; irqstat: %08x; irqmask: %08x; maxburst: %08x\n",
2181 usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG(ch_idx)),
2182 usb_dmac_readl(udc, ENETDMAC_IR_REG(ch_idx)),
2183 usb_dmac_readl(udc, ENETDMAC_IRMASK_REG(ch_idx)),
2184 usb_dmac_readl(udc, ENETDMAC_MAXBURST_REG(ch_idx)));
2186 sram2 = usb_dmas_readl(udc, ENETDMAS_SRAM2_REG(ch_idx));
2187 sram3 = usb_dmas_readl(udc, ENETDMAS_SRAM3_REG(ch_idx));
2188 seq_printf(s, " base: %08x; index: %04x_%04x; desc: %04x_%04x %08x\n",
2189 usb_dmas_readl(udc, ENETDMAS_RSTART_REG(ch_idx)),
2190 sram2 >> 16, sram2 & 0xffff,
2191 sram3 >> 16, sram3 & 0xffff,
2192 usb_dmas_readl(udc, ENETDMAS_SRAM4_REG(ch_idx)));
2193 seq_printf(s, " desc: %d/%d used", iudma->n_bds_used,
2194 iudma->n_bds);
2196 if (iudma->bep) {
2197 i = 0;
2198 list_for_each(pos, &iudma->bep->queue)
2199 i++;
2200 seq_printf(s, "; %d queued\n", i);
2201 } else {
2202 seq_printf(s, "\n");
2205 for (i = 0; i < iudma->n_bds; i++) {
2206 struct bcm_enet_desc *d = &iudma->bd_ring[i];
2208 seq_printf(s, " %03x (%02x): len_stat: %04x_%04x; pa %08x",
2209 i * sizeof(*d), i,
2210 d->len_stat >> 16, d->len_stat & 0xffff,
2211 d->address);
2212 if (d == iudma->read_bd)
2213 seq_printf(s, " <<RD");
2214 if (d == iudma->write_bd)
2215 seq_printf(s, " <<WR");
2216 seq_printf(s, "\n");
2219 seq_printf(s, "\n");
2222 return 0;
2225 static int bcm63xx_usbd_dbg_open(struct inode *inode, struct file *file)
2227 return single_open(file, bcm63xx_usbd_dbg_show, inode->i_private);
2230 static int bcm63xx_iudma_dbg_open(struct inode *inode, struct file *file)
2232 return single_open(file, bcm63xx_iudma_dbg_show, inode->i_private);
2235 static const struct file_operations usbd_dbg_fops = {
2236 .owner = THIS_MODULE,
2237 .open = bcm63xx_usbd_dbg_open,
2238 .llseek = seq_lseek,
2239 .read = seq_read,
2240 .release = single_release,
2243 static const struct file_operations iudma_dbg_fops = {
2244 .owner = THIS_MODULE,
2245 .open = bcm63xx_iudma_dbg_open,
2246 .llseek = seq_lseek,
2247 .read = seq_read,
2248 .release = single_release,
2253 * bcm63xx_udc_init_debugfs - Create debugfs entries.
2254 * @udc: Reference to the device controller.
2256 static void bcm63xx_udc_init_debugfs(struct bcm63xx_udc *udc)
2258 struct dentry *root, *usbd, *iudma;
2260 if (!IS_ENABLED(CONFIG_USB_GADGET_DEBUG_FS))
2261 return;
2263 root = debugfs_create_dir(udc->gadget.name, NULL);
2264 if (IS_ERR(root) || !root)
2265 goto err_root;
2267 usbd = debugfs_create_file("usbd", 0400, root, udc,
2268 &usbd_dbg_fops);
2269 if (!usbd)
2270 goto err_usbd;
2271 iudma = debugfs_create_file("iudma", 0400, root, udc,
2272 &iudma_dbg_fops);
2273 if (!iudma)
2274 goto err_iudma;
2276 udc->debugfs_root = root;
2277 udc->debugfs_usbd = usbd;
2278 udc->debugfs_iudma = iudma;
2279 return;
2280 err_iudma:
2281 debugfs_remove(usbd);
2282 err_usbd:
2283 debugfs_remove(root);
2284 err_root:
2285 dev_err(udc->dev, "debugfs is not available\n");
2289 * bcm63xx_udc_cleanup_debugfs - Remove debugfs entries.
2290 * @udc: Reference to the device controller.
2292 * debugfs_remove() is safe to call with a NULL argument.
2294 static void bcm63xx_udc_cleanup_debugfs(struct bcm63xx_udc *udc)
2296 debugfs_remove(udc->debugfs_iudma);
2297 debugfs_remove(udc->debugfs_usbd);
2298 debugfs_remove(udc->debugfs_root);
2299 udc->debugfs_iudma = NULL;
2300 udc->debugfs_usbd = NULL;
2301 udc->debugfs_root = NULL;
/***********************************************************************
 * Driver init/exit
 ***********************************************************************/

/**
 * bcm63xx_udc_gadget_release - Called from device_release().
 * @dev: Unused.
 *
 * We get a warning if this function doesn't exist, but it's empty because
 * we don't have to free any of the memory allocated with the devm_* APIs.
 */
static void bcm63xx_udc_gadget_release(struct device *dev)
{
}
2320 * bcm63xx_udc_probe - Initialize a new instance of the UDC.
2321 * @pdev: Platform device struct from the bcm63xx BSP code.
2323 * Note that platform data is required, because pd.port_no varies from chip
2324 * to chip and is used to switch the correct USB port to device mode.
2326 static int bcm63xx_udc_probe(struct platform_device *pdev)
2328 struct device *dev = &pdev->dev;
2329 struct bcm63xx_usbd_platform_data *pd = dev->platform_data;
2330 struct bcm63xx_udc *udc;
2331 struct resource *res;
2332 int rc = -ENOMEM, i, irq;
2334 udc = devm_kzalloc(dev, sizeof(*udc), GFP_KERNEL);
2335 if (!udc) {
2336 dev_err(dev, "cannot allocate memory\n");
2337 return -ENOMEM;
2340 platform_set_drvdata(pdev, udc);
2341 udc->dev = dev;
2342 udc->pd = pd;
2344 if (!pd) {
2345 dev_err(dev, "missing platform data\n");
2346 return -EINVAL;
2349 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2350 if (!res) {
2351 dev_err(dev, "error finding USBD resource\n");
2352 return -ENXIO;
2355 udc->usbd_regs = devm_ioremap_resource(dev, res);
2356 if (IS_ERR(udc->usbd_regs))
2357 return PTR_ERR(udc->usbd_regs);
2359 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2360 if (!res) {
2361 dev_err(dev, "error finding IUDMA resource\n");
2362 return -ENXIO;
2365 udc->iudma_regs = devm_ioremap_resource(dev, res);
2366 if (IS_ERR(udc->iudma_regs))
2367 return PTR_ERR(udc->iudma_regs);
2369 spin_lock_init(&udc->lock);
2370 INIT_WORK(&udc->ep0_wq, bcm63xx_ep0_process);
2371 dev_set_name(&udc->gadget.dev, "gadget");
2373 udc->gadget.ops = &bcm63xx_udc_ops;
2374 udc->gadget.name = dev_name(dev);
2375 udc->gadget.dev.parent = dev;
2376 udc->gadget.dev.release = bcm63xx_udc_gadget_release;
2377 udc->gadget.dev.dma_mask = dev->dma_mask;
2379 if (!pd->use_fullspeed && !use_fullspeed)
2380 udc->gadget.max_speed = USB_SPEED_HIGH;
2381 else
2382 udc->gadget.max_speed = USB_SPEED_FULL;
2384 /* request clocks, allocate buffers, and clear any pending IRQs */
2385 rc = bcm63xx_init_udc_hw(udc);
2386 if (rc)
2387 return rc;
2389 rc = -ENXIO;
2391 /* IRQ resource #0: control interrupt (VBUS, speed, etc.) */
2392 irq = platform_get_irq(pdev, 0);
2393 if (irq < 0) {
2394 dev_err(dev, "missing IRQ resource #0\n");
2395 goto out_uninit;
2397 if (devm_request_irq(dev, irq, &bcm63xx_udc_ctrl_isr, 0,
2398 dev_name(dev), udc) < 0) {
2399 dev_err(dev, "error requesting IRQ #%d\n", irq);
2400 goto out_uninit;
2403 /* IRQ resources #1-6: data interrupts for IUDMA channels 0-5 */
2404 for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
2405 irq = platform_get_irq(pdev, i + 1);
2406 if (irq < 0) {
2407 dev_err(dev, "missing IRQ resource #%d\n", i + 1);
2408 goto out_uninit;
2410 if (devm_request_irq(dev, irq, &bcm63xx_udc_data_isr, 0,
2411 dev_name(dev), &udc->iudma[i]) < 0) {
2412 dev_err(dev, "error requesting IRQ #%d\n", irq);
2413 goto out_uninit;
2417 rc = device_register(&udc->gadget.dev);
2418 if (rc)
2419 goto out_uninit;
2421 bcm63xx_udc_init_debugfs(udc);
2422 rc = usb_add_gadget_udc(dev, &udc->gadget);
2423 if (!rc)
2424 return 0;
2426 bcm63xx_udc_cleanup_debugfs(udc);
2427 device_unregister(&udc->gadget.dev);
2428 out_uninit:
2429 bcm63xx_uninit_udc_hw(udc);
2430 return rc;
/**
 * bcm63xx_udc_remove - Remove the device from the system.
 * @pdev: Platform device struct from the bcm63xx BSP code.
 *
 * Tears down in reverse order of probe: debugfs entries, then the UDC
 * registration, then the gadget device, and finally the hardware/clock
 * state.  Always returns 0.
 */
static int bcm63xx_udc_remove(struct platform_device *pdev)
{
	struct bcm63xx_udc *udc = platform_get_drvdata(pdev);

	bcm63xx_udc_cleanup_debugfs(udc);
	usb_del_gadget_udc(&udc->gadget);
	device_unregister(&udc->gadget.dev);
	/*
	 * By this point any bound gadget driver should have been detached
	 * (udc->driver NULL); a non-NULL driver here means the unbind path
	 * is broken, so fail loudly rather than free resources in use.
	 */
	BUG_ON(udc->driver);

	platform_set_drvdata(pdev, NULL);
	/* release clocks, DMA buffers, and other probe-time resources */
	bcm63xx_uninit_udc_hw(udc);

	return 0;
}
/*
 * Platform driver glue: matched against the "bcm63xx_udc" platform
 * device registered by the bcm63xx BSP code (see MODULE_ALIAS below
 * for autoloading).
 */
static struct platform_driver bcm63xx_udc_driver = {
	.probe		= bcm63xx_udc_probe,
	.remove		= bcm63xx_udc_remove,
	.driver		= {
		.name	= DRV_MODULE_NAME,
		.owner	= THIS_MODULE,
	},
};
module_platform_driver(bcm63xx_udc_driver);

MODULE_DESCRIPTION("BCM63xx USB Peripheral Controller");
MODULE_AUTHOR("Kevin Cernekee <cernekee@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_MODULE_NAME);