1 From nobody Mon Sep 17 00:00:00 2001
2 From: Håvard Skinnemoen <hskinnemoen@atmel.com>
3 Date: Fri Nov 18 18:13:25 2005 +0100
4 Subject: [PATCH] Driver for the Atmel HUSB2 Device Controller
6 This adds the driver for the Atmel HUSB2 Device Controller.
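
For reference, the patch also adds a gadget_is_husb2dev() helper to
gadget_chips.h so gadget drivers can recognize this controller by name.
A gadget driver might use it roughly as follows (illustrative sketch
only, not part of the diff below; use_dma is a hypothetical driver flag):

	if (gadget_is_husb2dev(gadget))
		use_dma = 1;	/* ep1..ep6 on this UDC are DMA-capable */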
10 drivers/usb/gadget/Kconfig | 10
11 drivers/usb/gadget/Makefile | 1
12 drivers/usb/gadget/gadget_chips.h | 8
13 drivers/usb/gadget/husb2_udc.c | 1998 ++++++++++++++++++++++++++++++++++++++
14 drivers/usb/gadget/husb2_udc.h | 406 +++++++
15 5 files changed, 2423 insertions(+)
17 Index: linux-2.6.18-avr32/drivers/usb/gadget/Kconfig
18 ===================================================================
19 --- linux-2.6.18-avr32.orig/drivers/usb/gadget/Kconfig 2006-11-02 15:54:18.000000000 +0100
20 +++ linux-2.6.18-avr32/drivers/usb/gadget/Kconfig 2006-11-02 15:56:20.000000000 +0100
21 @@ -154,6 +154,16 @@ config USB_LH7A40X
23 select USB_GADGET_SELECTED
25 +config USB_GADGET_HUSB2DEV
26 + boolean "Atmel HUSB2DEVICE"
27 + select USB_GADGET_DUALSPEED
32 + depends on USB_GADGET_HUSB2DEV
34 + select USB_GADGET_SELECTED
36 config USB_GADGET_OMAP
37 boolean "OMAP USB Device Controller"
38 Index: linux-2.6.18-avr32/drivers/usb/gadget/Makefile
39 ===================================================================
40 --- linux-2.6.18-avr32.orig/drivers/usb/gadget/Makefile 2006-11-02 15:54:18.000000000 +0100
41 +++ linux-2.6.18-avr32/drivers/usb/gadget/Makefile 2006-11-02 15:56:20.000000000 +0100
42 @@ -8,6 +8,7 @@ obj-$(CONFIG_USB_GOKU) += goku_udc.o
43 obj-$(CONFIG_USB_OMAP) += omap_udc.o
44 obj-$(CONFIG_USB_LH7A40X) += lh7a40x_udc.o
45 obj-$(CONFIG_USB_AT91) += at91_udc.o
46 +obj-$(CONFIG_USB_HUSB2DEV) += husb2_udc.o
50 Index: linux-2.6.18-avr32/drivers/usb/gadget/gadget_chips.h
51 ===================================================================
52 --- linux-2.6.18-avr32.orig/drivers/usb/gadget/gadget_chips.h 2006-11-02 15:54:18.000000000 +0100
53 +++ linux-2.6.18-avr32/drivers/usb/gadget/gadget_chips.h 2006-11-02 15:56:20.000000000 +0100
55 #define gadget_is_pxa27x(g) 0
58 +#ifdef CONFIG_USB_GADGET_HUSB2DEV
59 +#define gadget_is_husb2dev(g) !strcmp("husb2_udc", (g)->name)
61 +#define gadget_is_husb2dev(g) 0
64 #ifdef CONFIG_USB_GADGET_S3C2410
65 #define gadget_is_s3c2410(g) !strcmp("s3c2410_udc", (g)->name)
67 @@ -169,5 +175,7 @@ static inline int usb_gadget_controller_
69 else if (gadget_is_mpc8272(gadget))
71 + else if (gadget_is_husb2dev(gadget))
75 Index: linux-2.6.18-avr32/drivers/usb/gadget/husb2_udc.c
76 ===================================================================
77 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
78 +++ linux-2.6.18-avr32/drivers/usb/gadget/husb2_udc.c 2006-11-02 16:06:40.000000000 +0100
81 + * Driver for the Atmel HUSB2device high speed USB device controller
83 + * Copyright (C) 2005-2006 Atmel Corporation
85 + * This program is free software; you can redistribute it and/or modify
86 + * it under the terms of the GNU General Public License version 2 as
87 + * published by the Free Software Foundation.
91 +#include <linux/config.h>
92 +#include <linux/clk.h>
93 +#include <linux/module.h>
94 +#include <linux/init.h>
95 +#include <linux/interrupt.h>
96 +#include <linux/device.h>
97 +#include <linux/dma-mapping.h>
98 +#include <linux/list.h>
99 +#include <linux/platform_device.h>
100 +#include <linux/usb_ch9.h>
101 +#include <linux/usb_gadget.h>
102 +#include <linux/dmapool.h>
103 +#include <linux/delay.h>
107 +#include "husb2_udc.h"
109 +#define DRIVER_VERSION "0.9"
111 +#define DMA_ADDR_INVALID (~(dma_addr_t)0)
113 +#define FIFO_IOMEM_ID 0
114 +#define CTRL_IOMEM_ID 1
117 +#define DBG_ERR 0x0001 /* report all error returns */
118 +#define DBG_HW 0x0002 /* debug hardware initialization */
119 +#define DBG_GADGET 0x0004 /* calls to/from gadget driver */
120 +#define DBG_INT 0x0008 /* interrupts */
121 +#define DBG_BUS 0x0010 /* report changes in bus state */
122 +#define DBG_QUEUE 0x0020 /* debug request queue processing */
123 +#define DBG_FIFO 0x0040 /* debug FIFO contents */
124 +#define DBG_DMA 0x0080 /* debug DMA handling */
125 +#define DBG_REQ 0x0100 /* print out queued request length */
126 +#define DBG_ALL 0xffff
127 +#define DBG_NONE 0x0000
129 +#define DEBUG_LEVEL (DBG_ERR|DBG_REQ)
130 +#define DBG(level, fmt, ...) \
132 + if ((level) & DEBUG_LEVEL) \
133 + printk(KERN_DEBUG "udc: " fmt, ## __VA_ARGS__); \
136 +#define DBG(level, fmt...)
139 +static struct husb2_udc the_udc;
141 +#ifdef CONFIG_DEBUG_FS
142 +#include <linux/debugfs.h>
143 +#include <asm/uaccess.h>
145 +static int queue_dbg_open(struct inode *inode, struct file *file)
147 + struct husb2_ep *ep = inode->u.generic_ip;
148 + struct husb2_request *req, *req_copy;
149 + struct list_head *queue_data;
151 + queue_data = kmalloc(sizeof(*queue_data), GFP_KERNEL);
154 + INIT_LIST_HEAD(queue_data);
156 + spin_lock_irq(&ep->udc->lock);
157 + list_for_each_entry(req, &ep->queue, queue) {
158 + req_copy = kmalloc(sizeof(*req_copy), GFP_ATOMIC);
161 + memcpy(req_copy, req, sizeof(*req_copy));
162 + list_add_tail(&req_copy->queue, queue_data);
164 + spin_unlock_irq(&ep->udc->lock);
166 + file->private_data = queue_data;
170 + spin_unlock_irq(&ep->udc->lock);
171 + list_for_each_entry_safe(req, req_copy, queue_data, queue) {
172 + list_del(&req->queue);
180 + * bbbbbbbb llllllll IZS sssss nnnn FDL\n\0
182 + * b: buffer address
184 + * I/i: interrupt/no interrupt
185 + * Z/z: zero/no zero
186 + * S/s: short ok/short not ok
189 + * F/f: submitted/not submitted to FIFO
190 + * D/d: using/not using DMA
191 + * L/l: last transaction/not last transaction
193 +static ssize_t queue_dbg_read(struct file *file, char __user *buf,
194 + size_t nbytes, loff_t *ppos)
196 + struct list_head *queue = file->private_data;
197 + struct husb2_request *req, *tmp_req;
198 + size_t len, remaining, actual = 0;
201 + if (!access_ok(VERIFY_WRITE, buf, nbytes))
204 + mutex_lock(&file->f_dentry->d_inode->i_mutex);
205 + list_for_each_entry_safe(req, tmp_req, queue, queue) {
206 + len = snprintf(tmpbuf, sizeof(tmpbuf),
207 + "%8p %08x %c%c%c %5d %4u %c%c%c\n",
208 + req->req.buf, req->req.length,
209 + req->req.no_interrupt ? 'i' : 'I',
210 + req->req.zero ? 'Z' : 'z',
211 + req->req.short_not_ok ? 's' : 'S',
214 + req->submitted ? 'F' : 'f',
215 + req->using_dma ? 'D' : 'd',
216 + req->last_transaction ? 'L' : 'l');
217 + len = min(len, sizeof(tmpbuf));
221 + list_del(&req->queue);
224 + remaining = __copy_to_user(buf, tmpbuf, len);
225 + actual += len - remaining;
232 + mutex_unlock(&file->f_dentry->d_inode->i_mutex);
237 +static int queue_dbg_release(struct inode *inode, struct file *file)
239 + struct list_head *queue_data = file->private_data;
240 + struct husb2_request *req, *tmp_req;
242 + list_for_each_entry_safe(req, tmp_req, queue_data, queue) {
243 + list_del(&req->queue);
250 +static int regs_dbg_open(struct inode *inode, struct file *file)
252 + struct husb2_udc *udc;
257 + mutex_lock(&inode->i_mutex);
258 + udc = inode->u.generic_ip;
259 + data = kmalloc(inode->i_size, GFP_KERNEL);
263 + spin_lock_irq(&udc->lock);
264 + for (i = 0; i < inode->i_size / 4; i++)
265 + data[i] = __raw_readl(udc->regs + i * 4);
266 + spin_unlock_irq(&udc->lock);
268 + file->private_data = data;
272 + mutex_unlock(&inode->i_mutex);
277 +static ssize_t regs_dbg_read(struct file *file, char __user *buf,
278 + size_t nbytes, loff_t *ppos)
280 + struct inode *inode = file->f_dentry->d_inode;
283 + mutex_lock(&inode->i_mutex);
284 + ret = simple_read_from_buffer(buf, nbytes, ppos,
285 + file->private_data,
286 + file->f_dentry->d_inode->i_size);
287 + mutex_unlock(&inode->i_mutex);
292 +static int regs_dbg_release(struct inode *inode, struct file *file)
294 + kfree(file->private_data);
298 +const struct file_operations queue_dbg_fops = {
299 + .owner = THIS_MODULE,
300 + .open = queue_dbg_open,
301 + .llseek = no_llseek,
302 + .read = queue_dbg_read,
303 + .release = queue_dbg_release,
306 +const struct file_operations regs_dbg_fops = {
307 + .owner = THIS_MODULE,
308 + .open = regs_dbg_open,
309 + .llseek = generic_file_llseek,
310 + .read = regs_dbg_read,
311 + .release = regs_dbg_release,
314 +static void husb2_ep_init_debugfs(struct husb2_udc *udc,
315 + struct husb2_ep *ep)
317 + struct dentry *ep_root;
319 + ep_root = debugfs_create_dir(ep_name(ep), udc->debugfs_root);
322 + ep->debugfs_dir = ep_root;
324 + ep->debugfs_queue = debugfs_create_file("queue", 0400, ep_root,
325 + ep, &queue_dbg_fops);
326 + if (!ep->debugfs_queue)
329 + if (ep_can_dma(ep)) {
330 + ep->debugfs_dma_status
331 + = debugfs_create_u32("dma_status", 0400, ep_root,
332 + &ep->last_dma_status);
333 + if (!ep->debugfs_dma_status)
334 + goto err_dma_status;
340 + debugfs_remove(ep->debugfs_queue);
342 + debugfs_remove(ep_root);
344 + dev_err(&ep->udc->pdev->dev,
345 + "failed to create debugfs directory for %s\n", ep_name(ep));
348 +static void husb2_ep_cleanup_debugfs(struct husb2_ep *ep)
350 + debugfs_remove(ep->debugfs_queue);
351 + debugfs_remove(ep->debugfs_dma_status);
352 + debugfs_remove(ep->debugfs_dir);
353 + ep->debugfs_dma_status = NULL;
354 + ep->debugfs_dir = NULL;
357 +static void husb2_init_debugfs(struct husb2_udc *udc)
359 + struct dentry *root, *regs;
360 + struct resource *regs_resource;
362 + root = debugfs_create_dir(udc->gadget.name, NULL);
363 + if (IS_ERR(root) || !root)
365 + udc->debugfs_root = root;
367 + regs = debugfs_create_file("regs", 0400, root, udc, &regs_dbg_fops);
371 + regs_resource = platform_get_resource(udc->pdev, IORESOURCE_MEM,
373 + regs->d_inode->i_size = regs_resource->end - regs_resource->start + 1;
374 + udc->debugfs_regs = regs;
376 + husb2_ep_init_debugfs(udc, to_husb2_ep(udc->gadget.ep0));
381 + debugfs_remove(root);
383 + udc->debugfs_root = NULL;
384 + dev_err(&udc->pdev->dev, "debugfs is not available\n");
387 +static void husb2_cleanup_debugfs(struct husb2_udc *udc)
389 + husb2_ep_cleanup_debugfs(to_husb2_ep(udc->gadget.ep0));
390 + debugfs_remove(udc->debugfs_regs);
391 + debugfs_remove(udc->debugfs_root);
392 + udc->debugfs_regs = NULL;
393 + udc->debugfs_root = NULL;
396 +static inline void husb2_ep_init_debugfs(struct husb2_udc *udc,
397 + struct husb2_ep *ep)
402 +static inline void husb2_ep_cleanup_debugfs(struct husb2_ep *ep)
407 +static inline void husb2_init_debugfs(struct husb2_udc *udc)
412 +static inline void husb2_cleanup_debugfs(struct husb2_udc *udc)
418 +static void copy_to_fifo(void __iomem *fifo, void *buf, int len)
422 + DBG(DBG_FIFO, "copy to FIFO (len %d):\n", len);
423 + for (; len > 0; len -= 4, buf += 4, fifo += 4) {
424 + tmp = *(unsigned long *)buf;
426 + DBG(DBG_FIFO, " -> %08lx\n", tmp);
427 + __raw_writel(tmp, fifo);
430 + DBG(DBG_FIFO, " -> %02lx\n", tmp >> 24);
431 + __raw_writeb(tmp >> 24, fifo);
440 +static void copy_from_fifo(void *buf, void __iomem *fifo, int len)
448 + DBG(DBG_FIFO, "copy from FIFO (len %d):\n", len);
449 + for (p.w = buf; len > 0; len -= 4, p.w++, fifo += 4) {
451 + tmp = __raw_readl(fifo);
453 + DBG(DBG_FIFO, " -> %08lx\n", tmp);
456 + tmp = __raw_readb(fifo);
458 + DBG(DBG_FIFO, " -> %02lx\n", tmp);
465 +static void next_fifo_transaction(struct husb2_ep *ep,
466 + struct husb2_request *req)
468 + unsigned int transaction_len;
470 + transaction_len = req->req.length - req->req.actual;
471 + req->last_transaction = 1;
472 + if (transaction_len > ep->ep.maxpacket) {
473 + transaction_len = ep->ep.maxpacket;
474 + req->last_transaction = 0;
475 + } else if (transaction_len == ep->ep.maxpacket
476 + && req->req.zero) {
477 + req->last_transaction = 0;
479 + DBG(DBG_QUEUE, "%s: submit_transaction, req %p (length %d)%s\n",
480 + ep_name(ep), req, transaction_len,
481 + req->last_transaction ? ", done" : "");
483 + copy_to_fifo(ep->fifo, req->req.buf + req->req.actual, transaction_len);
484 + husb2_ep_writel(ep, SET_STA, HUSB2_BIT(TX_PK_RDY));
485 + req->req.actual += transaction_len;
488 +static void submit_request(struct husb2_ep *ep, struct husb2_request *req)
490 + DBG(DBG_QUEUE, "%s: submit_request: req %p (length %d)\n",
491 + ep_name(ep), req, req->req.length);
493 + req->req.actual = 0;
494 + req->submitted = 1;
496 + if (req->using_dma) {
497 + if (req->req.length == 0) {
498 + husb2_ep_writel(ep, CTL_ENB, HUSB2_BIT(TX_PK_RDY));
500 + husb2_ep_writel(ep, CTL_DIS, HUSB2_BIT(TX_PK_RDY));
501 + husb2_dma_writel(ep, NXT_DSC,
502 + req->packet[0].desc_dma);
503 + husb2_dma_writel(ep, CONTROL, HUSB2_BIT(DMA_LINK));
506 + next_fifo_transaction(ep, req);
507 + if (req->last_transaction)
508 + husb2_ep_writel(ep, CTL_ENB, HUSB2_BIT(TX_COMPLETE));
510 + husb2_ep_writel(ep, CTL_ENB, HUSB2_BIT(TX_PK_RDY));
514 +static void submit_next_request(struct husb2_ep *ep)
516 + struct husb2_request *req;
518 + if (list_empty(&ep->queue)) {
519 + husb2_ep_writel(ep, CTL_DIS, (HUSB2_BIT(TX_PK_RDY)
520 + | HUSB2_BIT(RX_BK_RDY)));
524 + req = list_entry(ep->queue.next, struct husb2_request, queue);
525 + if (!req->submitted)
526 + submit_request(ep, req);
529 +static void send_status(struct husb2_udc *udc, struct husb2_ep *ep)
531 + ep->state = STATUS_STAGE_IN;
532 + husb2_ep_writel(ep, SET_STA, HUSB2_BIT(TX_PK_RDY));
533 + husb2_ep_writel(ep, CTL_ENB, HUSB2_BIT(TX_COMPLETE));
536 +static void receive_data(struct husb2_ep *ep)
538 + struct husb2_udc *udc = ep->udc;
539 + struct husb2_request *req;
540 + unsigned long status;
541 + unsigned int bytecount, nr_busy;
542 + int is_complete = 0;
544 + status = husb2_ep_readl(ep, STA);
545 + nr_busy = HUSB2_BFEXT(BUSY_BANKS, status);
547 + DBG(DBG_QUEUE, "receive data: nr_busy=%u\n", nr_busy);
549 + while (nr_busy > 0) {
550 + if (list_empty(&ep->queue)) {
551 + husb2_ep_writel(ep, CTL_DIS, HUSB2_BIT(RX_BK_RDY));
554 + req = list_entry(ep->queue.next,
555 + struct husb2_request, queue);
557 + bytecount = HUSB2_BFEXT(BYTE_COUNT, status);
559 + if (status & (1 << 31))
561 + if (req->req.actual + bytecount >= req->req.length) {
563 + bytecount = req->req.length - req->req.actual;
566 + copy_from_fifo(req->req.buf + req->req.actual,
567 + ep->fifo, bytecount);
568 + req->req.actual += bytecount;
570 + husb2_ep_writel(ep, CLR_STA, HUSB2_BIT(RX_BK_RDY));
573 + DBG(DBG_QUEUE, "%s: request done\n", ep_name(ep));
574 + req->req.status = 0;
575 + list_del_init(&req->queue);
576 + req->req.complete(&ep->ep, &req->req);
579 + status = husb2_ep_readl(ep, STA);
580 + nr_busy = HUSB2_BFEXT(BUSY_BANKS, status);
582 + if (is_complete && ep_is_control(ep)) {
583 + BUG_ON(nr_busy != 0);
584 + send_status(udc, ep);
590 +static void request_complete(struct husb2_ep *ep,
591 + struct husb2_request *req,
594 + struct husb2_udc *udc = ep->udc;
597 + BUG_ON(!list_empty(&req->queue));
599 + if (req->req.status == -EINPROGRESS)
600 + req->req.status = status;
603 + for (i = 0; i < req->nr_pkts; i++)
604 + dma_pool_free(udc->desc_pool, req->packet[i].desc,
605 + req->packet[i].desc_dma);
606 + kfree(req->packet);
607 + req->packet = NULL;
608 + dma_unmap_single(&udc->pdev->dev,
609 + req->req.dma, req->req.length,
611 + ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
612 + req->req.dma = DMA_ADDR_INVALID;
615 + DBG(DBG_GADGET | DBG_REQ,
616 + "%s: req %p complete: status %d, actual %u\n",
617 + ep_name(ep), req, req->req.status, req->req.actual);
618 + req->req.complete(&ep->ep, &req->req);
621 +static void request_complete_list(struct husb2_ep *ep,
622 + struct list_head *list,
625 + struct husb2_request *req, *tmp_req;
627 + list_for_each_entry_safe(req, tmp_req, list, queue) {
628 + list_del_init(&req->queue);
629 + request_complete(ep, req, status);
633 +static int husb2_ep_enable(struct usb_ep *_ep,
634 + const struct usb_endpoint_descriptor *desc)
636 + struct husb2_ep *ep = to_husb2_ep(_ep);
637 + struct husb2_udc *udc = ep->udc;
638 + unsigned long flags, ept_cfg, maxpacket;
640 + DBG(DBG_GADGET, "%s: ep_enable: desc=%p\n", ep_name(ep), desc);
642 + maxpacket = le16_to_cpu(desc->wMaxPacketSize);
645 + || desc->bDescriptorType != USB_DT_ENDPOINT
646 + || ((desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK)
649 + || maxpacket > ep->fifo_size) {
650 + DBG(DBG_ERR, "ep_enable: Invalid argument");
654 + if (((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
655 + == USB_ENDPOINT_XFER_ISOC)
656 + && !(ep->capabilities & HUSB2_EP_CAP_ISOC)) {
657 + DBG(DBG_ERR, "ep_enable: %s is not isoc capable\n",
662 + if (maxpacket <= 8)
663 + ept_cfg = HUSB2_BF(EPT_SIZE, HUSB2_EPT_SIZE_8);
665 + /* LSB is bit 1, not 0 */
666 + ept_cfg = HUSB2_BF(EPT_SIZE, fls(maxpacket - 1) - 3);
667 + DBG(DBG_HW, "%s: EPT_SIZE = %lu (maxpacket = %lu)\n",
668 + ep_name(ep), ept_cfg, maxpacket);
670 + if ((desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN)
671 + ept_cfg |= HUSB2_BIT(EPT_DIR);
673 + switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
674 + case USB_ENDPOINT_XFER_CONTROL:
675 + ept_cfg |= HUSB2_BF(EPT_TYPE, HUSB2_EPT_TYPE_CONTROL);
677 + case USB_ENDPOINT_XFER_ISOC:
678 + ept_cfg |= HUSB2_BF(EPT_TYPE, HUSB2_EPT_TYPE_ISO);
680 + case USB_ENDPOINT_XFER_BULK:
681 + ept_cfg |= HUSB2_BF(EPT_TYPE, HUSB2_EPT_TYPE_BULK);
683 + case USB_ENDPOINT_XFER_INT:
684 + ept_cfg |= HUSB2_BF(EPT_TYPE, HUSB2_EPT_TYPE_INT);
687 + ept_cfg |= HUSB2_BF(BK_NUMBER, ep->nr_banks);
689 + spin_lock_irqsave(&ep->udc->lock, flags);
692 + spin_unlock_irqrestore(&ep->udc->lock, flags);
693 + DBG(DBG_ERR, "ep%d already enabled\n", ep->index);
698 + ep->ep.maxpacket = maxpacket;
700 + husb2_ep_writel(ep, CFG, ept_cfg);
701 + husb2_ep_writel(ep, CTL_ENB, HUSB2_BIT(EPT_ENABLE));
703 + if (ep_can_dma(ep)) {
704 + husb2_writel(udc, INT_ENB,
705 + (husb2_readl(udc, INT_ENB)
706 + | HUSB2_BF(EPT_INT, 1 << ep->index)
707 + | HUSB2_BF(DMA_INT, 1 << ep->index)));
708 + husb2_ep_writel(ep, CTL_ENB, HUSB2_BIT(AUTO_VALID));
710 + husb2_writel(udc, INT_ENB,
711 + (husb2_readl(udc, INT_ENB)
712 + | HUSB2_BF(EPT_INT, 1 << ep->index)));
715 + spin_unlock_irqrestore(&udc->lock, flags);
717 + DBG(DBG_HW, "EPT_CFG%d after init: %#08lx\n", ep->index,
718 + (unsigned long)husb2_ep_readl(ep, CFG));
719 + DBG(DBG_HW, "INT_ENB after init: %#08lx\n",
720 + (unsigned long)husb2_readl(udc, INT_ENB));
722 + husb2_ep_init_debugfs(udc, ep);
727 +static int husb2_ep_disable(struct usb_ep *_ep)
729 + struct husb2_ep *ep = to_husb2_ep(_ep);
730 + struct husb2_udc *udc = ep->udc;
731 + LIST_HEAD(req_list);
732 + unsigned long flags;
734 + DBG(DBG_GADGET, "ep_disable: %s\n", ep_name(ep));
736 + husb2_ep_cleanup_debugfs(ep);
738 + spin_lock_irqsave(&udc->lock, flags);
741 + spin_unlock_irqrestore(&udc->lock, flags);
742 + DBG(DBG_ERR, "ep_disable: %s not enabled\n",
748 + list_splice_init(&ep->queue, &req_list);
749 + if (ep_can_dma(ep)) {
750 + husb2_dma_writel(ep, CONTROL, 0);
751 + husb2_dma_writel(ep, ADDRESS, 0);
752 + husb2_dma_readl(ep, STATUS);
754 + husb2_ep_writel(ep, CTL_DIS, HUSB2_BIT(EPT_ENABLE));
755 + husb2_writel(udc, INT_ENB, (husb2_readl(udc, INT_ENB)
756 + & ~HUSB2_BF(EPT_INT, 1 << ep->index)));
758 + spin_unlock_irqrestore(&udc->lock, flags);
760 + request_complete_list(ep, &req_list, -ESHUTDOWN);
765 +static struct usb_request *
766 +husb2_ep_alloc_request(struct usb_ep *_ep, unsigned gfp_flags)
768 + struct husb2_request *req;
770 + DBG(DBG_GADGET, "ep_alloc_request: %p, 0x%x\n", _ep, gfp_flags);
772 + req = kzalloc(sizeof(*req), gfp_flags);
776 + INIT_LIST_HEAD(&req->queue);
777 + req->req.dma = DMA_ADDR_INVALID;
783 +husb2_ep_free_request(struct usb_ep *_ep, struct usb_request *_req)
785 + struct husb2_request *req = to_husb2_req(_req);
787 + DBG(DBG_GADGET, "ep_free_request: %p, %p\n", _ep, _req);
792 +static void *husb2_ep_alloc_buffer(struct usb_ep *_ep, unsigned bytes,
793 + dma_addr_t *dma, unsigned gfp_flags)
795 + struct husb2_ep *ep = to_husb2_ep(_ep);
799 + * We depend on kmalloc() returning cache-aligned memory. This
800 + * is normally guaranteed as long as we allocate a whole
801 + * cacheline or more.
803 + * When CONFIG_DEBUG_SLAB is enabled, however, the slab
804 + * allocator inserts red zones and ownership information,
805 + * causing the slab objects to be misaligned.
807 + * One alternative would be to use dma_alloc_coherent, but
808 + * that would make us unable to allocate anything less than a
811 +#ifdef CONFIG_DEBUG_SLAB
812 +# error The HUSB2 UDC driver breaks with SLAB debugging enabled
815 + if (bytes < L1_CACHE_BYTES)
816 + bytes = L1_CACHE_BYTES;
818 + buf = kmalloc(bytes, gfp_flags);
821 + * Seems like we have to map the buffer any chance we get.
822 + * ether.c wants us to initialize the dma member of a
823 + * different request than the one receiving the buffer, so one
826 + * Ah, screw it. The ether driver is probably wrong, and this
827 + * is not the right place to do the mapping. The driver
828 + * shouldn't mess with our DMA mappings anyway.
830 + *dma = DMA_ADDR_INVALID;
832 + DBG(DBG_GADGET, "ep_alloc_buffer: %s, %u, 0x%x -> %p\n",
833 + ep_name(ep), bytes, gfp_flags, buf);
838 +static void husb2_ep_free_buffer(struct usb_ep *_ep, void *buf,
839 + dma_addr_t dma, unsigned bytes)
841 + DBG(DBG_GADGET, "ep_free_buffer: %s, buf %p (size %u)\n",
842 + _ep->name, buf, bytes);
846 +static int queue_dma(struct husb2_udc *udc, struct husb2_ep *ep,
847 + struct husb2_request *req, unsigned int direction,
850 + struct husb2_packet *pkt, *prev_pkt;
851 + unsigned int pkt_size, nr_pkts, i;
852 + unsigned int residue;
854 + unsigned long flags;
857 + req->using_dma = 1;
859 + if (req->req.length == 0) {
860 + if (!req->req.zero)
864 + spin_lock_irqsave(&udc->lock, flags);
865 + husb2_ep_writel(ep, CTL_ENB, HUSB2_BIT(TX_PK_RDY));
866 + list_add_tail(&req->queue, &ep->queue);
867 + spin_unlock_irqrestore(&udc->lock, flags);
872 + if (req->req.dma == DMA_ADDR_INVALID)
873 + req->req.dma = dma_map_single(&udc->pdev->dev,
878 + dma_sync_single_for_device(&udc->pdev->dev,
883 + pkt_size = ep->ep.maxpacket;
884 + nr_pkts = req->req.length / pkt_size;
885 + residue = req->req.length % pkt_size;
888 + else if (req->req.zero && ep_is_in(ep))
889 + /* ensure last packet is short */
892 + req->nr_pkts = nr_pkts;
894 + req->packet = kzalloc(sizeof(*req->packet) * nr_pkts, gfp_flags);
896 + goto out_of_memory;
898 + addr = req->req.dma;
899 + ctrl = (HUSB2_BF(DMA_BUF_LEN, pkt_size)
900 + | HUSB2_BIT(DMA_CH_EN) | HUSB2_BIT(DMA_LINK)
901 + | HUSB2_BIT(DMA_END_TR_EN) | HUSB2_BIT(DMA_END_TR_IE));
904 + DBG(DBG_DMA, "DMA descriptors:\n");
905 + for (i = 0; i < nr_pkts; i++) {
906 + pkt = &req->packet[i];
907 + pkt->desc = dma_pool_alloc(udc->desc_pool, gfp_flags,
910 + goto out_of_memory;
913 + prev_pkt->desc->next = pkt->desc_dma;
914 + DBG(DBG_DMA, "[%d] n%08x a%08x c%08x\n",
915 + i - 1, prev_pkt->desc->next, prev_pkt->desc->addr,
916 + prev_pkt->desc->ctrl);
920 + pkt->desc->addr = addr;
921 + pkt->desc->ctrl = ctrl;
925 + /* special care is needed for the last packet... */
926 + ctrl = (HUSB2_BIT(DMA_CH_EN)
927 + | HUSB2_BIT(DMA_END_TR_EN) | HUSB2_BIT(DMA_END_TR_IE)
928 + | HUSB2_BIT(DMA_END_BUF_IE));
930 + ctrl |= HUSB2_BIT(DMA_END_BUF_EN);
931 + if (req->req.zero || residue)
932 + ctrl |= HUSB2_BF(DMA_BUF_LEN, residue);
934 + ctrl |= HUSB2_BF(DMA_BUF_LEN, pkt_size);
935 + pkt->desc->ctrl = ctrl;
937 + DBG(DBG_DMA, "[%d] n%08x a%08x c%08x\n",
938 + i - 1, prev_pkt->desc->next, prev_pkt->desc->addr,
939 + prev_pkt->desc->ctrl);
941 + /* Add this request to the queue and try to chain the DMA descriptors */
942 + spin_lock_irqsave(&udc->lock, flags);
944 + /* If the DMA controller is idle, start it */
945 + if (list_empty(&ep->queue)) {
946 + husb2_dma_writel(ep, NXT_DSC, req->packet[0].desc_dma);
947 + husb2_dma_writel(ep, CONTROL, HUSB2_BIT(DMA_LINK));
950 + list_add_tail(&req->queue, &ep->queue);
952 + spin_unlock_irqrestore(&udc->lock, flags);
957 + printk(KERN_ERR "ERROR: Could not allocate DMA memory for endpoint %s\n",
960 + for (i = 0; i < nr_pkts; i++)
961 + if (req->packet[i].desc)
962 + dma_pool_free(udc->desc_pool,
963 + req->packet[i].desc,
964 + req->packet[i].desc_dma);
965 + kfree(req->packet);
971 +static int husb2_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
974 + struct husb2_request *req = to_husb2_req(_req);
975 + struct husb2_ep *ep = to_husb2_ep(_ep);
976 + struct husb2_udc *udc = ep->udc;
977 + unsigned long flags;
978 + int direction_in = 0;
980 + DBG(DBG_GADGET | DBG_QUEUE | DBG_REQ,
981 + "%s: queue req %p, len %u\n", ep_name(ep), req, _req->length);
983 + if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
989 + req->submitted = 0;
990 + req->using_dma = 0;
991 + req->last_transaction = 0;
994 + BUG_ON(req->packet);
997 + || (ep_is_control(ep) && (ep->state == DATA_STAGE_IN
998 + || ep->state == STATUS_STAGE_IN)))
1001 + _req->status = -EINPROGRESS;
1004 + if (ep_can_dma(ep)) {
1005 + return queue_dma(udc, ep, req, (direction_in
1007 + : DMA_FROM_DEVICE),
1010 + spin_lock_irqsave(&udc->lock, flags);
1011 + list_add_tail(&req->queue, &ep->queue);
1014 + husb2_ep_writel(ep, CTL_ENB, HUSB2_BIT(TX_PK_RDY));
1016 + husb2_ep_writel(ep, CTL_ENB, HUSB2_BIT(RX_BK_RDY));
1017 + spin_unlock_irqrestore(&udc->lock, flags);
1023 +static void husb2_update_req(struct husb2_ep *ep, struct husb2_request *req,
1026 + struct husb2_dma_desc *desc;
1032 + addr = husb2_dma_readl(ep, ADDRESS);
1033 + req->req.actual = 0;
1035 + for (i = 0; i < req->nr_pkts; i++) {
1036 + desc = req->packet[i].desc;
1037 + from = desc->addr;
1038 + size = HUSB2_BFEXT(DMA_BUF_LEN, desc->ctrl);
1040 + req->req.actual += size;
1042 + DBG(DBG_DMA, " from=%#08x, size=%#zx\n", from, size);
1044 + if (from <= addr && (from + size) >= addr)
1048 + req->req.actual -= HUSB2_BFEXT(DMA_BUF_LEN, status);
1051 +static int stop_dma(struct husb2_ep *ep, u32 *pstatus)
1053 + unsigned int timeout;
1057 + * Stop the DMA controller. When writing both CH_EN
1058 + * and LINK to 0, the other bits are not affected.
1060 + husb2_dma_writel(ep, CONTROL, 0);
1062 + /* Wait for the FIFO to empty */
1063 + for (timeout = 40; timeout; --timeout) {
1064 + status = husb2_dma_readl(ep, STATUS);
1065 + if (!(status & HUSB2_BIT(DMA_CH_EN)))
1071 + *pstatus = status;
1073 + if (timeout == 0) {
1074 + dev_err(&ep->udc->pdev->dev,
1075 + "%s: timed out waiting for DMA FIFO to empty\n",
1077 + return -ETIMEDOUT;
1083 +static int husb2_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
1085 + struct husb2_ep *ep = to_husb2_ep(_ep);
1086 + struct husb2_udc *udc = ep->udc;
1087 + struct husb2_request *req = to_husb2_req(_req);
1088 + unsigned long flags;
1091 + DBG(DBG_GADGET | DBG_QUEUE, "ep_dequeue: %s, req %p\n", ep_name(ep), req);
1093 + spin_lock_irqsave(&udc->lock, flags);
1095 + if (req->using_dma) {
1097 + * If this request is currently being transferred,
1098 + * stop the DMA controller and reset the FIFO.
1100 + if (ep->queue.next == &req->queue) {
1101 + status = husb2_dma_readl(ep, STATUS);
1102 + if (status & HUSB2_BIT(DMA_CH_EN))
1103 + stop_dma(ep, &status);
1105 +#ifdef CONFIG_DEBUG_FS
1106 + ep->last_dma_status = status;
1109 + husb2_writel(udc, EPT_RST,
1110 + 1 << ep_index(ep));
1112 + husb2_update_req(ep, req, status);
1117 + * Errors should stop the queue from advancing until the
1118 + * completion function returns.
1120 + list_del_init(&req->queue);
1121 + spin_unlock_irqrestore(&udc->lock, flags);
1123 + request_complete(ep, req, -ECONNRESET);
1125 + /* Process the next request if any */
1126 + spin_lock_irqsave(&udc->lock, flags);
1127 + submit_next_request(ep);
1128 + spin_unlock_irqrestore(&udc->lock, flags);
1133 +static int husb2_ep_set_halt(struct usb_ep *_ep, int value)
1135 + struct husb2_ep *ep = to_husb2_ep(_ep);
1136 + struct husb2_udc *udc = ep->udc;
1137 + unsigned long flags;
1140 + DBG(DBG_GADGET, "endpoint %s: %s HALT\n", ep_name(ep),
1141 + value ? "set" : "clear");
1144 + DBG(DBG_ERR, "Attempted to halt uninitialized ep %s\n",
1148 + if (ep_is_isochronous(ep)) {
1149 + DBG(DBG_ERR, "Attempted to halt isochronous ep %s\n",
1154 + spin_lock_irqsave(&udc->lock, flags);
1157 + * We can't halt IN endpoints while there are still data to be
1160 + if (!list_empty(&ep->queue)
1161 + || ((value && ep_is_in(ep)
1162 + && (husb2_ep_readl(ep, STA)
1163 + & HUSB2_BF(BUSY_BANKS, -1L))))) {
1167 + husb2_ep_writel(ep, SET_STA, HUSB2_BIT(FORCE_STALL));
1169 + husb2_ep_writel(ep, CLR_STA, (HUSB2_BIT(FORCE_STALL)
1170 + | HUSB2_BIT(TOGGLE_SEQ)));
1171 + husb2_ep_readl(ep, STA);
1174 + spin_unlock_irqrestore(&udc->lock, flags);
1179 +static int husb2_ep_fifo_status(struct usb_ep *_ep)
1181 + struct husb2_ep *ep = to_husb2_ep(_ep);
1183 + return HUSB2_BFEXT(BYTE_COUNT, husb2_ep_readl(ep, STA));
1186 +static void husb2_ep_fifo_flush(struct usb_ep *_ep)
1188 + struct husb2_ep *ep = to_husb2_ep(_ep);
1189 + struct husb2_udc *udc = ep->udc;
1191 + husb2_writel(udc, EPT_RST, 1 << ep->index);
1194 +struct usb_ep_ops husb2_ep_ops = {
1195 + .enable = husb2_ep_enable,
1196 + .disable = husb2_ep_disable,
1197 + .alloc_request = husb2_ep_alloc_request,
1198 + .free_request = husb2_ep_free_request,
1199 + .alloc_buffer = husb2_ep_alloc_buffer,
1200 + .free_buffer = husb2_ep_free_buffer,
1201 + .queue = husb2_ep_queue,
1202 + .dequeue = husb2_ep_dequeue,
1203 + .set_halt = husb2_ep_set_halt,
1204 + .fifo_status = husb2_ep_fifo_status,
1205 + .fifo_flush = husb2_ep_fifo_flush,
1208 +static int husb2_udc_get_frame(struct usb_gadget *gadget)
1210 + struct husb2_udc *udc = to_husb2_udc(gadget);
1212 + return HUSB2_BFEXT(FRAME_NUMBER, husb2_readl(udc, FNUM));
1215 +struct usb_gadget_ops husb2_udc_ops = {
1216 + .get_frame = husb2_udc_get_frame,
1219 +#define EP(nam, type, idx, caps) { \
1221 + .ops = &husb2_ep_ops, \
1223 + .maxpacket = type##_FIFO_SIZE, \
1225 + .udc = &the_udc, \
1226 + .queue = LIST_HEAD_INIT(husb2_ep[idx].queue), \
1227 + .fifo_size = type##_FIFO_SIZE, \
1228 + .nr_banks = type##_NR_BANKS, \
1230 + .capabilities = caps, \
1233 +static struct husb2_ep husb2_ep[] = {
1234 + EP("ep0", EP0, 0, 0),
1235 + EP("ep1in-bulk", BULK, 1, HUSB2_EP_CAP_DMA),
1236 + EP("ep2out-bulk", BULK, 2, HUSB2_EP_CAP_DMA),
1237 + EP("ep3in-iso", ISO, 3, HUSB2_EP_CAP_DMA | HUSB2_EP_CAP_ISOC),
1238 + EP("ep4out-iso", ISO, 4, HUSB2_EP_CAP_DMA | HUSB2_EP_CAP_ISOC),
1239 + EP("ep5in-int", INT, 5, HUSB2_EP_CAP_DMA),
1240 + EP("ep6out-int", INT, 6, HUSB2_EP_CAP_DMA),
1244 +static struct usb_endpoint_descriptor husb2_ep0_desc = {
1245 + .bLength = USB_DT_ENDPOINT_SIZE,
1246 + .bDescriptorType = USB_DT_ENDPOINT,
1247 + .bEndpointAddress = 0,
1248 + .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
1249 + .wMaxPacketSize = __constant_cpu_to_le16(64),
1250 + /* FIXME: I have no idea what to put here */
1254 +static void nop_release(struct device *dev)
1259 +static struct husb2_udc the_udc = {
1261 + .ops = &husb2_udc_ops,
1262 + .ep0 = &husb2_ep[0].ep,
1263 + .ep_list = LIST_HEAD_INIT(the_udc.gadget.ep_list),
1264 + .is_dualspeed = 1,
1265 + .name = "husb2_udc",
1267 + .bus_id = "gadget",
1268 + .release = nop_release,
1272 + .lock = SPIN_LOCK_UNLOCKED,
1275 +static void udc_enable(struct husb2_udc *udc)
1277 + struct husb2_ep *ep0 = &husb2_ep[0];
1279 + /* Enable the controller */
1280 + husb2_writel(udc, CTRL, HUSB2_BIT(EN_HUSB2));
1282 + /* Reset all endpoints and enable basic interrupts */
1283 + husb2_writel(udc, EPT_RST, ~0UL);
1284 + husb2_writel(udc, INT_ENB, (HUSB2_BIT(DET_SUSPEND)
1285 + | HUSB2_BIT(END_OF_RESET)
1286 + | HUSB2_BIT(END_OF_RESUME)));
1288 + /* Configure endpoint 0 */
1289 + ep0->desc = &husb2_ep0_desc;
1291 + husb2_writel(udc, EPT_RST, 1 << 0);
1292 + husb2_ep_writel(ep0, CTL_ENB, HUSB2_BIT(EPT_ENABLE));
1293 + husb2_ep_writel(ep0, CFG, (HUSB2_BF(EPT_SIZE, EP0_EPT_SIZE)
1294 + | HUSB2_BF(EPT_TYPE, HUSB2_EPT_TYPE_CONTROL)
1295 + | HUSB2_BF(BK_NUMBER, HUSB2_BK_NUMBER_ONE)));
1297 + husb2_ep_writel(ep0, CTL_ENB, HUSB2_BIT(RX_SETUP));
1298 + husb2_writel(udc, INT_ENB, (husb2_readl(udc, INT_ENB)
1299 + | HUSB2_BF(EPT_INT, 1)));
1301 + if (!(husb2_ep_readl(ep0, CFG) & HUSB2_BIT(EPT_MAPPED)))
1302 + dev_warn(&udc->pdev->dev,
1303 + "WARNING: EP0 configuration is invalid!\n");
1306 +static void udc_disable(struct husb2_udc *udc)
1308 + udc->gadget.speed = USB_SPEED_UNKNOWN;
1310 + husb2_writel(udc, CTRL, 0);
1314 + * Called with interrupts disabled and udc->lock held.
1316 +static void reset_all_endpoints(struct husb2_udc *udc)
1318 + struct husb2_ep *ep;
1319 + struct husb2_request *req, *tmp_req;
1321 + husb2_writel(udc, EPT_RST, ~0UL);
1323 + ep = to_husb2_ep(udc->gadget.ep0);
1324 + list_for_each_entry_safe(req, tmp_req, &ep->queue, queue) {
1325 + list_del_init(&req->queue);
1326 + request_complete(ep, req, -ECONNRESET);
1328 + BUG_ON(!list_empty(&ep->queue));
1330 + list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
1332 + husb2_ep_disable(&ep->ep);
1336 +static struct husb2_ep *get_ep_by_addr(struct husb2_udc *udc, u16 wIndex)
1338 + struct husb2_ep *ep;
1340 + if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
1341 + return to_husb2_ep(udc->gadget.ep0);
1343 + list_for_each_entry (ep, &udc->gadget.ep_list, ep.ep_list) {
1344 + u8 bEndpointAddress;
1348 + bEndpointAddress = ep->desc->bEndpointAddress;
1349 + if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
1351 + if ((wIndex & USB_ENDPOINT_NUMBER_MASK)
1352 + == (bEndpointAddress & USB_ENDPOINT_NUMBER_MASK))
1359 +/* Called with interrupts disabled and udc->lock held */
1360 +static inline void set_protocol_stall(struct husb2_udc *udc,
1361 + struct husb2_ep *ep)
1363 + husb2_ep_writel(ep, SET_STA, HUSB2_BIT(FORCE_STALL));
1364 + ep->state = WAIT_FOR_SETUP;
1367 +static inline int is_stalled(struct husb2_udc *udc, struct husb2_ep *ep)
1369 + if (husb2_ep_readl(ep, STA) & HUSB2_BIT(FORCE_STALL))
1374 +static inline void set_address(struct husb2_udc *udc, unsigned int addr)
1378 + DBG(DBG_BUS, "setting address %u...\n", addr);
1379 + regval = husb2_readl(udc, CTRL);
1380 + regval = HUSB2_BFINS(DEV_ADDR, addr, regval);
1381 + husb2_writel(udc, CTRL, regval);
1384 +static int handle_ep0_setup(struct husb2_udc *udc, struct husb2_ep *ep,
1385 + struct usb_ctrlrequest *crq)
1387 + switch (crq->bRequest) {
1388 + case USB_REQ_GET_STATUS: {
1391 + if (crq->bRequestType == (USB_DIR_IN | USB_RECIP_DEVICE)) {
1392 + /* Self-powered, no remote wakeup */
1393 + status = __constant_cpu_to_le16(1 << 0);
1394 + } else if (crq->bRequestType
1395 + == (USB_DIR_IN | USB_RECIP_INTERFACE)) {
1396 + status = __constant_cpu_to_le16(0);
1397 + } else if (crq->bRequestType
1398 + == (USB_DIR_IN | USB_RECIP_ENDPOINT)) {
1399 + struct husb2_ep *target;
1401 + target = get_ep_by_addr(udc, le16_to_cpu(crq->wIndex));
1406 + if (is_stalled(udc, target))
1407 + status |= __constant_cpu_to_le16(1);
1412 + /* Write directly to the FIFO. No queueing is done. */
1413 + if (crq->wLength != __constant_cpu_to_le16(sizeof(status)))
1415 + ep->state = DATA_STAGE_IN;
1416 + __raw_writew(status, ep->fifo);
1417 + husb2_ep_writel(ep, SET_STA, HUSB2_BIT(TX_PK_RDY));
1421 + case USB_REQ_CLEAR_FEATURE: {
1422 + if (crq->bRequestType == USB_RECIP_DEVICE) {
1423 + /* We don't support TEST_MODE */
1425 + } else if (crq->bRequestType == USB_RECIP_ENDPOINT) {
1426 + struct husb2_ep *target;
1428 + if (crq->wValue != __constant_cpu_to_le16(USB_ENDPOINT_HALT)
1429 + || crq->wLength != __constant_cpu_to_le16(0))
1431 + target = get_ep_by_addr(udc, le16_to_cpu(crq->wIndex));
1435 + husb2_ep_writel(target, CLR_STA, (HUSB2_BIT(FORCE_STALL)
1436 + | HUSB2_BIT(TOGGLE_SEQ)));
1441 + send_status(udc, ep);
1445 + case USB_REQ_SET_FEATURE: {
1446 + if (crq->bRequestType == USB_RECIP_DEVICE) {
1447 + /* We don't support TEST_MODE */
1449 + } else if (crq->bRequestType == USB_RECIP_ENDPOINT) {
1450 + struct husb2_ep *target;
1452 + if (crq->wValue != __constant_cpu_to_le16(USB_ENDPOINT_HALT)
1453 + || crq->wLength != __constant_cpu_to_le16(0))
1456 + target = get_ep_by_addr(udc, le16_to_cpu(crq->wIndex));
1460 + husb2_ep_writel(target, SET_STA, HUSB2_BIT(FORCE_STALL));
1464 + send_status(udc, ep);
1468 + case USB_REQ_SET_ADDRESS:
1469 + if (crq->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE))
1472 + set_address(udc, le16_to_cpu(crq->wValue));
1473 + send_status(udc, ep);
1474 + ep->state = STATUS_STAGE_ADDR;
1479 + return udc->driver->setup(&udc->gadget, crq);
1486 + "udc: %s: Invalid setup request: %02x.%02x v%04x i%04x l%d, "
1487 + "halting endpoint...\n",
1488 + ep_name(ep), crq->bRequestType, crq->bRequest,
1489 + le16_to_cpu(crq->wValue), le16_to_cpu(crq->wIndex),
1490 + le16_to_cpu(crq->wLength));
1491 + set_protocol_stall(udc, ep);
1495 +static void husb2_control_irq(struct husb2_udc *udc, struct husb2_ep *ep)
1497 + struct husb2_request *req;
1502 + epstatus = husb2_ep_readl(ep, STA);
1503 + epctrl = husb2_ep_readl(ep, CTL);
1505 + DBG(DBG_INT, "%s: interrupt, status: 0x%08x\n",
1506 + ep_name(ep), epstatus);
1509 + if (!list_empty(&ep->queue))
1510 + req = list_entry(ep->queue.next,
1511 + struct husb2_request, queue);
1513 + if ((epctrl & HUSB2_BIT(TX_PK_RDY))
1514 + && !(epstatus & HUSB2_BIT(TX_PK_RDY))) {
1515 + DBG(DBG_BUS, "tx pk rdy: %d\n", ep->state);
1517 + if (req->submitted)
1518 + next_fifo_transaction(ep, req);
1520 + submit_request(ep, req);
1522 + if (req->last_transaction) {
1523 + husb2_ep_writel(ep, CTL_DIS, HUSB2_BIT(TX_PK_RDY));
1524 + husb2_ep_writel(ep, CTL_ENB, HUSB2_BIT(TX_COMPLETE));
1528 + if ((epstatus & epctrl) & HUSB2_BIT(TX_COMPLETE)) {
1529 + husb2_ep_writel(ep, CLR_STA, HUSB2_BIT(TX_COMPLETE));
1530 + DBG(DBG_BUS, "txc: %d\n", ep->state);
1532 + switch (ep->state) {
1533 + case DATA_STAGE_IN:
1534 + husb2_ep_writel(ep, CTL_ENB, HUSB2_BIT(RX_BK_RDY));
1535 + husb2_ep_writel(ep, CTL_DIS,
1536 + HUSB2_BIT(TX_COMPLETE));
1537 + ep->state = STATUS_STAGE_OUT;
1539 + case STATUS_STAGE_ADDR:
1540 + /* Activate our new address */
1541 + husb2_writel(udc, CTRL, (husb2_readl(udc, CTRL)
1542 + | HUSB2_BIT(FADDR_EN)));
1543 + husb2_ep_writel(ep, CTL_DIS,
1544 + HUSB2_BIT(TX_COMPLETE));
1545 + ep->state = WAIT_FOR_SETUP;
1547 + case STATUS_STAGE_IN:
1549 + list_del_init(&req->queue);
1550 + request_complete(ep, req, 0);
1551 + submit_next_request(ep);
1553 + BUG_ON(!list_empty(&ep->queue));
1554 + husb2_ep_writel(ep, CTL_DIS,
1555 + HUSB2_BIT(TX_COMPLETE));
1556 + ep->state = WAIT_FOR_SETUP;
1560 + "udc: %s: TXCOMP: Invalid endpoint state %d, "
1561 + "halting endpoint...\n",
1562 + ep_name(ep), ep->state);
1563 + set_protocol_stall(udc, ep);
1569 + if ((epstatus & epctrl) & HUSB2_BIT(RX_BK_RDY)) {
1570 + DBG(DBG_BUS, "rxc: %d\n", ep->state);
1572 + switch (ep->state) {
1573 + case STATUS_STAGE_OUT:
1574 + husb2_ep_writel(ep, CLR_STA, HUSB2_BIT(RX_BK_RDY));
1577 + list_del_init(&req->queue);
1578 + request_complete(ep, req, 0);
1580 + husb2_ep_writel(ep, CTL_DIS, HUSB2_BIT(RX_BK_RDY));
1581 + ep->state = WAIT_FOR_SETUP;
1584 + case DATA_STAGE_OUT:
1589 + husb2_ep_writel(ep, CLR_STA, HUSB2_BIT(RX_BK_RDY));
1590 + set_protocol_stall(udc, ep);
1592 + "udc: %s: RXRDY: Invalid endpoint state %d, "
1593 + "halting endpoint...\n",
1594 + ep_name(ep), ep->state);
1600 + if (epstatus & HUSB2_BIT(RX_SETUP)) {
1602 + struct usb_ctrlrequest crq;
1603 + unsigned long data[2];
1605 + unsigned int pkt_len;
1608 + if (ep->state != WAIT_FOR_SETUP) {
1610 + * Didn't expect a SETUP packet at this
1611 + * point. Clean up any pending requests (which
1612 + * may be successful).
1614 + int status = -EPROTO;
1617 + * RXRDY is dropped when SETUP packets arrive.
1618 + * Just pretend we received the status packet.
1620 + if (ep->state == STATUS_STAGE_OUT)
1624 + list_del_init(&req->queue);
1625 + request_complete(ep, req, status);
1627 + BUG_ON(!list_empty(&ep->queue));
1630 + pkt_len = HUSB2_BFEXT(BYTE_COUNT, husb2_ep_readl(ep, STA));
1631 + DBG(DBG_HW, "Packet length: %u\n", pkt_len);
1632 + BUG_ON(pkt_len != sizeof(crq));
1634 + DBG(DBG_FIFO, "Copying ctrl request from 0x%p:\n", ep->fifo);
1635 + copy_from_fifo(crq.data, ep->fifo, sizeof(crq));
1637 + /* Free up one bank in the FIFO so that we can
1638 + * generate or receive a reply right away. */
1639 + husb2_ep_writel(ep, CLR_STA, HUSB2_BIT(RX_SETUP));
1641 + /* printk(KERN_DEBUG "setup: %d: %02x.%02x\n",
1642 + ep->state, crq.crq.bRequestType,
1643 + crq.crq.bRequest); */
1645 + if (crq.crq.bRequestType & USB_DIR_IN) {
1647 + * The USB 2.0 spec states that "if wLength is
1648 + * zero, there is no data transfer phase."
1649 + * However, testusb #14 seems to actually
1650 + * expect a data phase even if wLength = 0...
1652 + ep->state = DATA_STAGE_IN;
1654 + if (crq.crq.wLength != __constant_cpu_to_le16(0))
1655 + ep->state = DATA_STAGE_OUT;
1657 + ep->state = STATUS_STAGE_IN;
1661 + if (ep->index == 0)
1662 + ret = handle_ep0_setup(udc, ep, &crq.crq);
1664 + ret = udc->driver->setup(&udc->gadget, &crq.crq);
1666 + DBG(DBG_BUS, "req %02x.%02x, length %d, state %d, ret %d\n",
1667 + crq.crq.bRequestType, crq.crq.bRequest,
1668 + le16_to_cpu(crq.crq.wLength), ep->state, ret);
1671 + /* Let the host know that we failed */
1672 + set_protocol_stall(udc, ep);
1677 +static void husb2_ep_irq(struct husb2_udc *udc, struct husb2_ep *ep)
1679 + struct husb2_request *req;
1683 + epstatus = husb2_ep_readl(ep, STA);
1684 + epctrl = husb2_ep_readl(ep, CTL);
1686 + DBG(DBG_INT, "%s: interrupt, status: 0x%08x\n",
1687 + ep_name(ep), epstatus);
1689 + while ((epctrl & HUSB2_BIT(TX_PK_RDY))
1690 + && !(epstatus & HUSB2_BIT(TX_PK_RDY))) {
1691 + BUG_ON(!ep_is_in(ep));
1693 + DBG(DBG_BUS, "%s: TX PK ready\n", ep_name(ep));
1695 + if (list_empty(&ep->queue)) {
1696 + dev_warn(&udc->pdev->dev, "ep_irq: queue empty\n");
1697 + husb2_ep_writel(ep, CTL_DIS, HUSB2_BIT(TX_PK_RDY));
1701 + req = list_entry(ep->queue.next, struct husb2_request, queue);
1703 + if (req->using_dma) {
1704 + BUG_ON(!req->send_zlp);
1706 + /* Send a zero-length packet */
1707 + husb2_ep_writel(ep, SET_STA,
1708 + HUSB2_BIT(TX_PK_RDY));
1709 + husb2_ep_writel(ep, CTL_DIS,
1710 + HUSB2_BIT(TX_PK_RDY));
1711 + list_del_init(&req->queue);
1712 + submit_next_request(ep);
1713 + request_complete(ep, req, 0);
1715 + if (req->submitted)
1716 + next_fifo_transaction(ep, req);
1718 + submit_request(ep, req);
1720 + if (req->last_transaction) {
1721 + list_del_init(&req->queue);
1722 + submit_next_request(ep);
1723 + request_complete(ep, req, 0);
1727 + epstatus = husb2_ep_readl(ep, STA);
1728 + epctrl = husb2_ep_readl(ep, CTL);
1730 + if ((epstatus & epctrl) & HUSB2_BIT(RX_BK_RDY)) {
1731 + BUG_ON(ep_is_in(ep));
1733 + DBG(DBG_BUS, "%s: RX data ready\n", ep_name(ep));
1735 + husb2_ep_writel(ep, CLR_STA, HUSB2_BIT(RX_BK_RDY));
1739 +static void husb2_dma_irq(struct husb2_udc *udc, struct husb2_ep *ep)
1741 + struct husb2_request *req;
1742 + u32 status, control, pending;
1744 + status = husb2_dma_readl(ep, STATUS);
1745 + control = husb2_dma_readl(ep, CONTROL);
1746 +#ifdef CONFIG_DEBUG_FS
1747 + ep->last_dma_status = status;
1749 + pending = status & control;
1750 + DBG(DBG_INT, "dma irq, status=%#08x, pending=%#08x, control=%#08x\n",
1751 + status, pending, control);
1753 + BUG_ON(status & HUSB2_BIT(DMA_CH_EN));
1755 + if (list_empty(&ep->queue))
1756 + /* Might happen if a reset comes along at the right moment */
1759 + if (pending & (HUSB2_BIT(DMA_END_TR_ST) | HUSB2_BIT(DMA_END_BUF_ST))) {
1760 + req = list_entry(ep->queue.next, struct husb2_request, queue);
1761 + husb2_update_req(ep, req, status);
1763 + if (req->send_zlp) {
1764 + husb2_ep_writel(ep, CTL_ENB, HUSB2_BIT(TX_PK_RDY));
1766 + list_del_init(&req->queue);
1767 + submit_next_request(ep);
1768 + request_complete(ep, req, 0);
1773 +static irqreturn_t husb2_udc_irq(int irq, void *devid, struct pt_regs *regs)
1775 + struct husb2_udc *udc = devid;
1780 + spin_lock(&udc->lock);
1782 + status = husb2_readl(udc, INT_STA);
1783 + DBG(DBG_INT, "irq, status=%#08x\n", status);
1785 + if (status & HUSB2_BIT(DET_SUSPEND)) {
1786 + husb2_writel(udc, INT_CLR, HUSB2_BIT(DET_SUSPEND));
1787 + //DBG(DBG_BUS, "Suspend detected\n");
1788 + if (udc->gadget.speed != USB_SPEED_UNKNOWN
1789 + && udc->driver && udc->driver->suspend)
1790 + udc->driver->suspend(&udc->gadget);
1793 + if (status & HUSB2_BIT(WAKE_UP)) {
1794 + husb2_writel(udc, INT_CLR, HUSB2_BIT(WAKE_UP));
1795 + //DBG(DBG_BUS, "Wake Up CPU detected\n");
1798 + if (status & HUSB2_BIT(END_OF_RESUME)) {
1799 + husb2_writel(udc, INT_CLR, HUSB2_BIT(END_OF_RESUME));
1800 + DBG(DBG_BUS, "Resume detected\n");
1801 + if (udc->gadget.speed != USB_SPEED_UNKNOWN
1802 + && udc->driver && udc->driver->resume)
1803 + udc->driver->resume(&udc->gadget);
1806 + dma_status = HUSB2_BFEXT(DMA_INT, status);
1810 + for (i = 1; i < HUSB2_NR_ENDPOINTS; i++)
1811 + if (dma_status & (1 << i))
1812 + husb2_dma_irq(udc, &husb2_ep[i]);
1815 + ep_status = HUSB2_BFEXT(EPT_INT, status);
1819 + for (i = 0; i < HUSB2_NR_ENDPOINTS; i++)
1820 + if (ep_status & (1 << i)) {
1821 + if (ep_is_control(&husb2_ep[i]))
1822 + husb2_control_irq(udc, &husb2_ep[i]);
1824 + husb2_ep_irq(udc, &husb2_ep[i]);
1828 + if (status & HUSB2_BIT(END_OF_RESET)) {
1829 + husb2_writel(udc, INT_CLR, HUSB2_BIT(END_OF_RESET));
1830 + if (status & HUSB2_BIT(HIGH_SPEED)) {
1831 + DBG(DBG_BUS, "High-speed bus reset detected\n");
1832 + udc->gadget.speed = USB_SPEED_HIGH;
1834 + DBG(DBG_BUS, "Full-speed bus reset detected\n");
1835 + udc->gadget.speed = USB_SPEED_FULL;
1837 + /* Better start from scratch... */
1838 + reset_all_endpoints(udc);
1839 + husb2_ep[0].state = WAIT_FOR_SETUP;
1843 + spin_unlock(&udc->lock);
1845 + return IRQ_HANDLED;
1848 +int usb_gadget_register_driver(struct usb_gadget_driver *driver)
1850 + struct husb2_udc *udc = &the_udc;
1853 + spin_lock(&udc->lock);
1862 + udc->driver = driver;
1863 + udc->gadget.dev.driver = &driver->driver;
1865 + device_add(&udc->gadget.dev);
1866 + ret = driver->bind(&udc->gadget);
1868 + DBG(DBG_ERR, "Could not bind to driver %s: error %d\n",
1869 + driver->driver.name, ret);
1870 + device_del(&udc->gadget.dev);
1872 + udc->driver = NULL;
1873 + udc->gadget.dev.driver = NULL;
1877 + /* TODO: Create sysfs files */
1879 + DBG(DBG_GADGET, "registered driver `%s'\n", driver->driver.name);
1883 + spin_unlock(&udc->lock);
1886 +EXPORT_SYMBOL(usb_gadget_register_driver);
1888 +int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
1890 + struct husb2_udc *udc = &the_udc;
1893 + spin_lock(&udc->lock);
1899 + if (driver != udc->driver)
1902 + local_irq_disable();
1904 + local_irq_enable();
1906 + driver->unbind(&udc->gadget);
1907 + udc->driver = NULL;
1909 + device_del(&udc->gadget.dev);
1911 + /* TODO: Remove sysfs files */
1913 + DBG(DBG_GADGET, "unregistered driver `%s'\n", driver->driver.name);
1916 + spin_unlock(&udc->lock);
1919 +EXPORT_SYMBOL(usb_gadget_unregister_driver);
1921 +static int __devinit husb2_udc_probe(struct platform_device *pdev)
1923 + struct resource *regs, *fifo;
1924 + struct clk *pclk, *hclk;
1925 + struct husb2_udc *udc = &the_udc;
1928 + regs = platform_get_resource(pdev, IORESOURCE_MEM, CTRL_IOMEM_ID);
1929 + fifo = platform_get_resource(pdev, IORESOURCE_MEM, FIFO_IOMEM_ID);
1930 + if (!regs || !fifo)
1933 + irq = platform_get_irq(pdev, 0);
1937 + pclk = clk_get(&pdev->dev, "pclk");
1939 + return PTR_ERR(pclk);
1940 + hclk = clk_get(&pdev->dev, "hclk");
1941 + if (IS_ERR(hclk)) {
1942 + ret = PTR_ERR(hclk);
1943 + goto out_put_pclk;
1954 + udc->regs = ioremap(regs->start, regs->end - regs->start + 1);
1956 + dev_err(&pdev->dev, "Unable to map I/O memory, aborting.\n");
1957 + goto out_disable_clocks;
1959 + dev_info(&pdev->dev, "MMIO registers at 0x%08lx mapped at %p\n",
1960 + (unsigned long)regs->start, udc->regs);
1961 + udc->fifo = ioremap(fifo->start, fifo->end - fifo->start + 1);
1963 + dev_err(&pdev->dev, "Unable to map FIFO, aborting.\n");
1964 + goto out_unmap_regs;
1966 + dev_info(&pdev->dev, "FIFO at 0x%08lx mapped at %p\n",
1967 + (unsigned long)fifo->start, udc->fifo);
1969 + device_initialize(&udc->gadget.dev);
1970 + udc->gadget.dev.parent = &pdev->dev;
1971 + udc->gadget.dev.dma_mask = pdev->dev.dma_mask;
1973 + /* The 3-word descriptors must be 4-word aligned... */
1974 + udc->desc_pool = dma_pool_create("husb2-desc", &pdev->dev,
1975 + sizeof(struct husb2_dma_desc),
1977 + if (!udc->desc_pool) {
1978 + dev_err(&pdev->dev, "Cannot create descriptor DMA pool\n");
1979 + goto out_unmap_fifo;
1982 + platform_set_drvdata(pdev, udc);
1986 + INIT_LIST_HEAD(&husb2_ep[0].ep.ep_list);
1987 + husb2_ep[0].ep_regs = udc->regs + HUSB2_EPT_BASE(0);
1988 + husb2_ep[0].dma_regs = udc->regs + HUSB2_DMA_BASE(0);
1989 + husb2_ep[0].fifo = udc->fifo + HUSB2_FIFO_BASE(0);
1990 + for (i = 1; i < ARRAY_SIZE(husb2_ep); i++) {
1991 + struct husb2_ep *ep = &husb2_ep[i];
1993 + ep->ep_regs = udc->regs + HUSB2_EPT_BASE(i);
1994 + ep->dma_regs = udc->regs + HUSB2_DMA_BASE(i);
1995 + ep->fifo = udc->fifo + HUSB2_FIFO_BASE(i);
1997 + list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
2000 + ret = request_irq(irq, husb2_udc_irq, SA_SAMPLE_RANDOM,
2001 + "husb2_udc", udc);
2003 + dev_err(&pdev->dev, "Cannot request irq %d (error %d)\n",
2005 + goto out_free_pool;
2009 + husb2_init_debugfs(udc);
2014 + dma_pool_destroy(udc->desc_pool);
2016 + iounmap(udc->fifo);
2018 + iounmap(udc->regs);
2019 +out_disable_clocks:
2020 + clk_disable(hclk);
2021 + clk_disable(pclk);
2026 + platform_set_drvdata(pdev, NULL);
2031 +static int __devexit husb2_udc_remove(struct platform_device *pdev)
2033 + struct husb2_udc *udc;
2035 + udc = platform_get_drvdata(pdev);
2039 + husb2_cleanup_debugfs(udc);
2041 + free_irq(udc->irq, udc);
2042 + dma_pool_destroy(udc->desc_pool);
2043 + iounmap(udc->fifo);
2044 + iounmap(udc->regs);
2045 + clk_disable(udc->hclk);
2046 + clk_disable(udc->pclk);
2047 + clk_put(udc->hclk);
2048 + clk_put(udc->pclk);
2049 + platform_set_drvdata(pdev, NULL);
2054 +static struct platform_driver udc_driver = {
2055 + .probe = husb2_udc_probe,
2056 + .remove = __devexit_p(husb2_udc_remove),
2062 +static int __init udc_init(void)
2064 + printk(KERN_INFO "husb2device: Driver version %s\n", DRIVER_VERSION);
2065 + return platform_driver_register(&udc_driver);
2067 +module_init(udc_init);
2069 +static void __exit udc_exit(void)
2071 + platform_driver_unregister(&udc_driver);
2073 +module_exit(udc_exit);
2075 +MODULE_DESCRIPTION("Atmel HUSB2 Device Controller driver");
2076 +MODULE_AUTHOR("Haavard Skinnemoen <hskinnemoen@atmel.com>");
2077 +MODULE_LICENSE("GPL");
2078 Index: linux-2.6.18-avr32/drivers/usb/gadget/husb2_udc.h
2079 ===================================================================
2080 --- /dev/null 1970-01-01 00:00:00.000000000 +0000
2081 +++ linux-2.6.18-avr32/drivers/usb/gadget/husb2_udc.h 2006-11-02 16:03:44.000000000 +0100
2084 + * Driver for the Atmel HUSB2device high speed USB device controller
2086 + * Copyright (C) 2005-2006 Atmel Corporation
2088 + * This program is free software; you can redistribute it and/or modify
2089 + * it under the terms of the GNU General Public License version 2 as
2090 + * published by the Free Software Foundation.
2092 +#ifndef __LINUX_USB_GADGET_HUSB2_UDC_H__
2093 +#define __LINUX_USB_GADGET_HUSB2_UDC_H__
2095 +/* USB register offsets */
2096 +#define HUSB2_CTRL 0x0000
2097 +#define HUSB2_FNUM 0x0004
2098 +#define HUSB2_INT_ENB 0x0010
2099 +#define HUSB2_INT_STA 0x0014
2100 +#define HUSB2_INT_CLR 0x0018
2101 +#define HUSB2_EPT_RST 0x001c
2102 +#define HUSB2_TST_SOF_CNT 0x00d0
2103 +#define HUSB2_TST_CNT_A 0x00d4
2104 +#define HUSB2_TST_CNT_B 0x00d8
2105 +#define HUSB2_TST_MODE_REG 0x00dc
2106 +#define HUSB2_TST 0x00f0
2108 +/* USB endpoint register offsets */
2109 +#define HUSB2_EPT_CFG 0x0000
2110 +#define HUSB2_EPT_CTL_ENB 0x0004
2111 +#define HUSB2_EPT_CTL_DIS 0x0008
2112 +#define HUSB2_EPT_CTL 0x000c
2113 +#define HUSB2_EPT_SET_STA 0x0014
2114 +#define HUSB2_EPT_CLR_STA 0x0018
2115 +#define HUSB2_EPT_STA 0x001c
2117 +/* USB DMA register offsets */
2118 +#define HUSB2_DMA_NXT_DSC 0x0000
2119 +#define HUSB2_DMA_ADDRESS 0x0004
2120 +#define HUSB2_DMA_CONTROL 0x0008
2121 +#define HUSB2_DMA_STATUS 0x000c
2123 +/* Bitfields in CTRL */
2124 +#define HUSB2_DEV_ADDR_OFFSET 0
2125 +#define HUSB2_DEV_ADDR_SIZE 7
2126 +#define HUSB2_FADDR_EN_OFFSET 7
2127 +#define HUSB2_FADDR_EN_SIZE 1
2128 +#define HUSB2_EN_HUSB2_OFFSET 8
2129 +#define HUSB2_EN_HUSB2_SIZE 1
2130 +#define HUSB2_DETACH_OFFSET 9
2131 +#define HUSB2_DETACH_SIZE 1
2132 +#define HUSB2_REMOTE_WAKE_UP_OFFSET 10
2133 +#define HUSB2_REMOTE_WAKE_UP_SIZE 1
2135 +/* Bitfields in FNUM */
2136 +#define HUSB2_MICRO_FRAME_NUM_OFFSET 0
2137 +#define HUSB2_MICRO_FRAME_NUM_SIZE 3
2138 +#define HUSB2_FRAME_NUMBER_OFFSET 3
2139 +#define HUSB2_FRAME_NUMBER_SIZE 11
2140 +#define HUSB2_FRAME_NUM_ERROR_OFFSET 31
2141 +#define HUSB2_FRAME_NUM_ERROR_SIZE 1
2143 +/* Bitfields in INT_ENB/INT_STA/INT_CLR */
2144 +#define HUSB2_HIGH_SPEED_OFFSET 0
2145 +#define HUSB2_HIGH_SPEED_SIZE 1
2146 +#define HUSB2_DET_SUSPEND_OFFSET 1
2147 +#define HUSB2_DET_SUSPEND_SIZE 1
2148 +#define HUSB2_MICRO_SOF_OFFSET 2
2149 +#define HUSB2_MICRO_SOF_SIZE 1
2150 +#define HUSB2_SOF_OFFSET 3
2151 +#define HUSB2_SOF_SIZE 1
2152 +#define HUSB2_END_OF_RESET_OFFSET 4
2153 +#define HUSB2_END_OF_RESET_SIZE 1
2154 +#define HUSB2_WAKE_UP_OFFSET 5
2155 +#define HUSB2_WAKE_UP_SIZE 1
2156 +#define HUSB2_END_OF_RESUME_OFFSET 6
2157 +#define HUSB2_END_OF_RESUME_SIZE 1
2158 +#define HUSB2_UPSTREAM_RESUME_OFFSET 7
2159 +#define HUSB2_UPSTREAM_RESUME_SIZE 1
2160 +#define HUSB2_EPT_INT_OFFSET 8
2161 +#define HUSB2_EPT_INT_SIZE 16
2162 +#define HUSB2_DMA_INT_OFFSET 24
2163 +#define HUSB2_DMA_INT_SIZE 8
2165 +/* Bitfields in EPT_RST */
2166 +#define HUSB2_RST_OFFSET 0
2167 +#define HUSB2_RST_SIZE 16
2169 +/* Bitfields in TST_SOF_CNT */
2170 +#define HUSB2_SOF_CNT_MAX_OFFSET 0
2171 +#define HUSB2_SOF_CNT_MAX_SIZE 7
2172 +#define HUSB2_SOF_CNT_LOAD_OFFSET 7
2173 +#define HUSB2_SOF_CNT_LOAD_SIZE 1
2175 +/* Bitfields in TST_CNT_A */
2176 +#define HUSB2_CNT_A_MAX_OFFSET 0
2177 +#define HUSB2_CNT_A_MAX_SIZE 7
2178 +#define HUSB2_CNT_A_LOAD_OFFSET 7
2179 +#define HUSB2_CNT_A_LOAD_SIZE 1
2181 +/* Bitfields in TST_CNT_B */
2182 +#define HUSB2_CNT_B_MAX_OFFSET 0
2183 +#define HUSB2_CNT_B_MAX_SIZE 7
2184 +#define HUSB2_CNT_B_LOAD_OFFSET 7
2185 +#define HUSB2_CNT_B_LOAD_SIZE 1
2187 +/* Bitfields in TST_MODE_REG */
2188 +#define HUSB2_TST_MODE_OFFSET 0
2189 +#define HUSB2_TST_MODE_SIZE 6
2191 +/* Bitfields in HUSB2_TST */
2192 +#define HUSB2_SPEED_CFG_OFFSET 0
2193 +#define HUSB2_SPEED_CFG_SIZE 2
2194 +#define HUSB2_TST_J_MODE_OFFSET 2
2195 +#define HUSB2_TST_J_MODE_SIZE 1
2196 +#define HUSB2_TST_K_MODE_OFFSET 3
2197 +#define HUSB2_TST_K_MODE_SIZE 1
2198 +#define HUSB2_TST_PKT_MODE_OFFSET 4
2199 +#define HUSB2_TST_PKT_MODE_SIZE 1
2200 +#define HUSB2_OPMODE2_OFFSET 5
2201 +#define HUSB2_OPMODE2_SIZE 1
2203 +/* Bitfields in EPT_CFG */
2204 +#define HUSB2_EPT_SIZE_OFFSET 0
2205 +#define HUSB2_EPT_SIZE_SIZE 3
2206 +#define HUSB2_EPT_DIR_OFFSET 3
2207 +#define HUSB2_EPT_DIR_SIZE 1
2208 +#define HUSB2_EPT_TYPE_OFFSET 4
2209 +#define HUSB2_EPT_TYPE_SIZE 2
2210 +#define HUSB2_BK_NUMBER_OFFSET 6
2211 +#define HUSB2_BK_NUMBER_SIZE 2
2212 +#define HUSB2_NB_TRANS_OFFSET 8
2213 +#define HUSB2_NB_TRANS_SIZE 2
2214 +#define HUSB2_EPT_MAPPED_OFFSET 31
2215 +#define HUSB2_EPT_MAPPED_SIZE 1
2217 +/* Bitfields in EPT_CTL/EPT_CTL_ENB/EPT_CTL_DIS */
2218 +#define HUSB2_EPT_ENABLE_OFFSET 0
2219 +#define HUSB2_EPT_ENABLE_SIZE 1
2220 +#define HUSB2_AUTO_VALID_OFFSET 1
2221 +#define HUSB2_AUTO_VALID_SIZE 1
2222 +#define HUSB2_INT_DIS_DMA_OFFSET 3
2223 +#define HUSB2_INT_DIS_DMA_SIZE 1
2224 +#define HUSB2_NYET_DIS_OFFSET 4
2225 +#define HUSB2_NYET_DIS_SIZE 1
2226 +#define HUSB2_DATAX_RX_OFFSET 6
2227 +#define HUSB2_DATAX_RX_SIZE 1
2228 +#define HUSB2_MDATA_RX_OFFSET 7
2229 +#define HUSB2_MDATA_RX_SIZE 1
2230 +/* Bits 8-15 and 31 enable interrupts for respective bits in EPT_STA */
2231 +#define HUSB2_BUSY_BANK_IE_OFFSET 18
2232 +#define HUSB2_BUSY_BANK_IE_SIZE 1
2234 +/* Bitfields in EPT_SET_STA/EPT_CLR_STA/EPT_STA */
2235 +#define HUSB2_FORCE_STALL_OFFSET 5
2236 +#define HUSB2_FORCE_STALL_SIZE 1
2237 +#define HUSB2_TOGGLE_SEQ_OFFSET 6
2238 +#define HUSB2_TOGGLE_SEQ_SIZE 2
2239 +#define HUSB2_ERR_OVFLW_OFFSET 8
2240 +#define HUSB2_ERR_OVFLW_SIZE 1
2241 +#define HUSB2_RX_BK_RDY_OFFSET 9
2242 +#define HUSB2_RX_BK_RDY_SIZE 1
2243 +#define HUSB2_KILL_BANK_OFFSET 9
2244 +#define HUSB2_KILL_BANK_SIZE 1
2245 +#define HUSB2_TX_COMPLETE_OFFSET 10
2246 +#define HUSB2_TX_COMPLETE_SIZE 1
2247 +#define HUSB2_TX_PK_RDY_OFFSET 11
2248 +#define HUSB2_TX_PK_RDY_SIZE 1
2249 +#define HUSB2_ISO_ERR_TRANS_OFFSET 11
2250 +#define HUSB2_ISO_ERR_TRANS_SIZE 1
2251 +#define HUSB2_RX_SETUP_OFFSET 12
2252 +#define HUSB2_RX_SETUP_SIZE 1
2253 +#define HUSB2_ISO_ERR_FLOW_OFFSET 12
2254 +#define HUSB2_ISO_ERR_FLOW_SIZE 1
2255 +#define HUSB2_STALL_SENT_OFFSET 13
2256 +#define HUSB2_STALL_SENT_SIZE 1
2257 +#define HUSB2_ISO_ERR_CRC_OFFSET 13
2258 +#define HUSB2_ISO_ERR_CRC_SIZE 1
2259 +#define HUSB2_ISO_ERR_NBTRANS_OFFSET 13
2260 +#define HUSB2_ISO_ERR_NBTRANS_SIZE 1
2261 +#define HUSB2_NAK_IN_OFFSET 14
2262 +#define HUSB2_NAK_IN_SIZE 1
2263 +#define HUSB2_ISO_ERR_FLUSH_OFFSET 14
2264 +#define HUSB2_ISO_ERR_FLUSH_SIZE 1
2265 +#define HUSB2_NAK_OUT_OFFSET 15
2266 +#define HUSB2_NAK_OUT_SIZE 1
2267 +#define HUSB2_CURRENT_BANK_OFFSET 16
2268 +#define HUSB2_CURRENT_BANK_SIZE 2
2269 +#define HUSB2_BUSY_BANKS_OFFSET 18
2270 +#define HUSB2_BUSY_BANKS_SIZE 2
2271 +#define HUSB2_BYTE_COUNT_OFFSET 20
2272 +#define HUSB2_BYTE_COUNT_SIZE 11
2273 +#define HUSB2_SHORT_PACKET_OFFSET 31
2274 +#define HUSB2_SHORT_PACKET_SIZE 1
2276 +/* Bitfields in DMA_CONTROL */
2277 +#define HUSB2_DMA_CH_EN_OFFSET 0
2278 +#define HUSB2_DMA_CH_EN_SIZE 1
2279 +#define HUSB2_DMA_LINK_OFFSET 1
2280 +#define HUSB2_DMA_LINK_SIZE 1
2281 +#define HUSB2_DMA_END_TR_EN_OFFSET 2
2282 +#define HUSB2_DMA_END_TR_EN_SIZE 1
2283 +#define HUSB2_DMA_END_BUF_EN_OFFSET 3
2284 +#define HUSB2_DMA_END_BUF_EN_SIZE 1
2285 +#define HUSB2_DMA_END_TR_IE_OFFSET 4
2286 +#define HUSB2_DMA_END_TR_IE_SIZE 1
2287 +#define HUSB2_DMA_END_BUF_IE_OFFSET 5
2288 +#define HUSB2_DMA_END_BUF_IE_SIZE 1
2289 +#define HUSB2_DMA_DESC_LOAD_IE_OFFSET 6
2290 +#define HUSB2_DMA_DESC_LOAD_IE_SIZE 1
2291 +#define HUSB2_DMA_BURST_LOCK_OFFSET 7
2292 +#define HUSB2_DMA_BURST_LOCK_SIZE 1
2293 +#define HUSB2_DMA_BUF_LEN_OFFSET 16
2294 +#define HUSB2_DMA_BUF_LEN_SIZE 16
2296 +/* Bitfields in DMA_STATUS */
2297 +#define HUSB2_DMA_CH_ACTIVE_OFFSET 1
2298 +#define HUSB2_DMA_CH_ACTIVE_SIZE 1
2299 +#define HUSB2_DMA_END_TR_ST_OFFSET 4
2300 +#define HUSB2_DMA_END_TR_ST_SIZE 1
2301 +#define HUSB2_DMA_END_BUF_ST_OFFSET 5
2302 +#define HUSB2_DMA_END_BUF_ST_SIZE 1
2303 +#define HUSB2_DMA_DESC_LOAD_ST_OFFSET 6
2304 +#define HUSB2_DMA_DESC_LOAD_ST_SIZE 1
2306 +/* Constants for SPEED_CFG */
2307 +#define HUSB2_SPEED_CFG_NORMAL 0
2308 +#define HUSB2_SPEED_CFG_FORCE_HIGH 2
2309 +#define HUSB2_SPEED_CFG_FORCE_FULL 3
2311 +/* Constants for EPT_SIZE */
2312 +#define HUSB2_EPT_SIZE_8 0
2313 +#define HUSB2_EPT_SIZE_16 1
2314 +#define HUSB2_EPT_SIZE_32 2
2315 +#define HUSB2_EPT_SIZE_64 3
2316 +#define HUSB2_EPT_SIZE_128 4
2317 +#define HUSB2_EPT_SIZE_256 5
2318 +#define HUSB2_EPT_SIZE_512 6
2319 +#define HUSB2_EPT_SIZE_1024 7
2321 +/* Constants for EPT_TYPE */
2322 +#define HUSB2_EPT_TYPE_CONTROL 0
2323 +#define HUSB2_EPT_TYPE_ISO 1
2324 +#define HUSB2_EPT_TYPE_BULK 2
2325 +#define HUSB2_EPT_TYPE_INT 3
2327 +/* Constants for BK_NUMBER */
2328 +#define HUSB2_BK_NUMBER_ZERO 0
2329 +#define HUSB2_BK_NUMBER_ONE 1
2330 +#define HUSB2_BK_NUMBER_DOUBLE 2
2331 +#define HUSB2_BK_NUMBER_TRIPLE 3
2333 +/* Bit manipulation macros */
2334 +#define HUSB2_BIT(name) \
2335 + (1 << HUSB2_##name##_OFFSET)
2336 +#define HUSB2_BF(name,value) \
2337 + (((value) & ((1 << HUSB2_##name##_SIZE) - 1)) \
2338 + << HUSB2_##name##_OFFSET)
2339 +#define HUSB2_BFEXT(name,value) \
2340 + (((value) >> HUSB2_##name##_OFFSET) \
2341 + & ((1 << HUSB2_##name##_SIZE) - 1))
2342 +#define HUSB2_BFINS(name,value,old) \
2343 + (((old) & ~(((1 << HUSB2_##name##_SIZE) - 1) \
2344 + << HUSB2_##name##_OFFSET)) \
2345 + | HUSB2_BF(name,value))
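+
+/*
+ * Illustrative expansions (added for clarity; values follow from the
+ * OFFSET/SIZE definitions above):
+ *   HUSB2_BIT(TX_PK_RDY)                 == 1 << 11              == 0x00000800
+ *   HUSB2_BF(DMA_BUF_LEN, 512)           == (512 & 0xffff) << 16 == 0x02000000
+ *   HUSB2_BFEXT(DMA_BUF_LEN, 0x02000000) == 512
+ */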
2347 +/* Register access macros */
2348 +#define husb2_readl(udc,reg) \
2349 + __raw_readl((udc)->regs + HUSB2_##reg)
2350 +#define husb2_writel(udc,reg,value) \
2351 + __raw_writel((value), (udc)->regs + HUSB2_##reg)
2352 +#define husb2_ep_readl(ep,reg) \
2353 + __raw_readl((ep)->ep_regs + HUSB2_EPT_##reg)
2354 +#define husb2_ep_writel(ep,reg,value) \
2355 + __raw_writel((value), (ep)->ep_regs + HUSB2_EPT_##reg)
2356 +#define husb2_dma_readl(ep,reg) \
2357 + __raw_readl((ep)->dma_regs + HUSB2_DMA_##reg)
2358 +#define husb2_dma_writel(ep,reg,value) \
2359 + __raw_writel((value), (ep)->dma_regs + HUSB2_DMA_##reg)
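+
+/*
+ * Example (illustration only): husb2_ep_writel(ep, CTL_ENB, HUSB2_BIT(EPT_ENABLE))
+ * token-pastes the register name into HUSB2_EPT_CTL_ENB (offset 0x0004) and
+ * writes the value 0x1 to ep->ep_regs + 0x0004.
+ */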
2361 +/* Calculate base address for a given endpoint or DMA controller */
2362 +#define HUSB2_EPT_BASE(x) (0x100 + (x) * 0x20)
2363 +#define HUSB2_DMA_BASE(x) (0x300 + (x) * 0x10)
2364 +#define HUSB2_FIFO_BASE(x) ((x) << 16)
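+
+/*
+ * Example (for illustration): endpoint 2 has its per-endpoint registers at
+ * 0x100 + 2 * 0x20 = 0x140, its DMA registers at 0x300 + 2 * 0x10 = 0x320,
+ * and its FIFO window at 2 << 16 = 0x20000 within the FIFO resource.
+ */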
2366 +/* Synth parameters */
2367 +#define HUSB2_NR_ENDPOINTS 7
2369 +#define EP0_FIFO_SIZE 64
2370 +#define EP0_EPT_SIZE HUSB2_EPT_SIZE_64
2371 +#define EP0_NR_BANKS 1
2372 +#define BULK_FIFO_SIZE 512
2373 +#define BULK_EPT_SIZE HUSB2_EPT_SIZE_512
2374 +#define BULK_NR_BANKS 2
2375 +#define ISO_FIFO_SIZE 1024
2376 +#define ISO_EPT_SIZE HUSB2_EPT_SIZE_1024
2377 +#define ISO_NR_BANKS 3
2378 +#define INT_FIFO_SIZE 64
2379 +#define INT_EPT_SIZE HUSB2_EPT_SIZE_64
2380 +#define INT_NR_BANKS 3
2382 +enum husb2_ctrl_state {
2388 + STATUS_STAGE_ADDR,
2394 + EP_STATE_OUT_DATA,
2395 + EP_STATE_SET_ADDR_STATUS,
2396 + EP_STATE_RX_STATUS,
2397 + EP_STATE_TX_STATUS,
2401 +struct husb2_dma_desc {
2409 + void __iomem *ep_regs;
2410 + void __iomem *dma_regs;
2411 + void __iomem *fifo;
2413 + struct husb2_udc *udc;
2415 + struct list_head queue;
2416 + const struct usb_endpoint_descriptor *desc;
2423 +#ifdef CONFIG_DEBUG_FS
2424 + u32 last_dma_status;
2425 + struct dentry *debugfs_dir;
2426 + struct dentry *debugfs_queue;
2427 + struct dentry *debugfs_dma_status;
2430 +#define HUSB2_EP_CAP_ISOC 0x0001
2431 +#define HUSB2_EP_CAP_DMA 0x0002
2433 +struct husb2_packet {
2434 + struct husb2_dma_desc *desc;
2435 + dma_addr_t desc_dma;
2438 +struct husb2_request {
2439 + struct usb_request req;
2440 + struct list_head queue;
2442 + struct husb2_packet *packet;
2443 + unsigned int nr_pkts;
2445 + unsigned int submitted:1;
2446 + unsigned int using_dma:1;
2447 + unsigned int last_transaction:1;
2448 + unsigned int send_zlp:1;
2454 + void __iomem *regs;
2455 + void __iomem *fifo;
2457 + struct dma_pool *desc_pool;
2459 + struct usb_gadget gadget;
2460 + struct usb_gadget_driver *driver;
2461 + struct platform_device *pdev;
2466 +#ifdef CONFIG_DEBUG_FS
2467 + struct dentry *debugfs_root;
2468 + struct dentry *debugfs_regs;
2472 +#define to_husb2_ep(x) container_of((x), struct husb2_ep, ep)
2473 +#define to_husb2_req(x) container_of((x), struct husb2_request, req)
2474 +#define to_husb2_udc(x) container_of((x), struct husb2_udc, gadget)
2476 +#define ep_index(ep) ((ep)->index)
2477 +#define ep_can_dma(ep) ((ep)->capabilities & HUSB2_EP_CAP_DMA)
2478 +#define ep_is_in(ep) (((ep)->desc->bEndpointAddress \
2479 + & USB_ENDPOINT_DIR_MASK) \
2481 +#define ep_is_isochronous(ep) \
2482 + (((ep)->desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) \
2483 + == USB_ENDPOINT_XFER_ISOC)
2484 +#define ep_is_control(ep) (ep_index(ep) == 0)
2485 +#define ep_name(ep) ((ep)->ep.name)
2486 +#define ep_is_idle(ep) ((ep)->state == EP_STATE_IDLE)
2488 +#endif /* __LINUX_USB_GADGET_HUSB2_UDC_H__ */