2 * Host Controller Driver for the Elan Digital Systems U132 adapter
4 * Copyright(C) 2006 Elan Digital Systems Limited
5 * http://www.elandigitalsystems.com
7 * Author and Maintainer - Tony Olech - Elan Digital Systems
8 * tony.olech@elandigitalsystems.com
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License as
12 * published by the Free Software Foundation, version 2.
15 * This driver was written by Tony Olech(tony.olech@elandigitalsystems.com)
16 * based on various USB host drivers in the 2.6.15 linux kernel
17 * with constant reference to the 3rd Edition of Linux Device Drivers
18 * published by O'Reilly
20 * The U132 adapter is a USB to CardBus adapter specifically designed
21 * for PC cards that contain an OHCI host controller. Typical PC cards
22 * are the Orange Mobile 3G Option GlobeTrotter Fusion card.
24 * The U132 adapter will *NOT* work with PC cards that do not contain
25 * an OHCI controller. A simple way to test whether a PC card has an
26 * OHCI controller as an interface is to insert the PC card directly
27 * into a laptop(or desktop) with a CardBus slot and if "lspci" shows
28 * a new USB controller and "lsusb -v" shows a new OHCI Host Controller
29 * then there is a good chance that the U132 adapter will support the
30 * PC card.(you also need the specific client driver for the PC card)
32 * Please inform the Author and Maintainer about any PC cards that
33 * contain OHCI Host Controller and work when directly connected to
34 * an embedded CardBus slot but do not work when they are connected
35 * via an ELAN U132 adapter.
38 #include <linux/kernel.h>
39 #include <linux/module.h>
40 #include <linux/moduleparam.h>
41 #include <linux/delay.h>
42 #include <linux/ioport.h>
43 #include <linux/pci_ids.h>
44 #include <linux/sched.h>
45 #include <linux/slab.h>
46 #include <linux/errno.h>
47 #include <linux/init.h>
48 #include <linux/timer.h>
49 #include <linux/list.h>
50 #include <linux/interrupt.h>
51 #include <linux/usb.h>
52 #include <linux/usb/hcd.h>
53 #include <linux/workqueue.h>
54 #include <linux/platform_device.h>
55 #include <linux/mutex.h>
58 #include <asm/system.h>
59 #include <asm/byteorder.h>
63 #define OHCI_CONTROL_INIT OHCI_CTRL_CBSR
64 #define OHCI_INTR_INIT (OHCI_INTR_MIE | OHCI_INTR_UE | OHCI_INTR_RD | \
66 MODULE_AUTHOR("Tony Olech - Elan Digital Systems Limited");
67 MODULE_DESCRIPTION("U132 USB Host Controller Driver");
68 MODULE_LICENSE("GPL");
/* Helper that declares and registers a read-only (0444) integer module
 * parameter in one line. */
69 #define INT_MODULE_PARM(n, v) static int n = v;module_param(n, int, 0444)
/* "testing" module parameter, default 0 (off). */
70 INT_MODULE_PARM(testing
, 0);
71 /* Some boards misreport power switching/overcurrent*/
/* Declared bool (not int) because it is registered below with
 * module_param(..., bool, ...); registering an int variable as a bool
 * parameter is a type mismatch (modern kernels reject it at build time). */
72 static bool distrust_firmware = true;
73 module_param(distrust_firmware, bool, 0);
74 MODULE_PARM_DESC(distrust_firmware
, "true to distrust firmware power/overcurren"
/* Wait queue used to synchronize with module-wide events. */
76 static DECLARE_WAIT_QUEUE_HEAD(u132_hcd_wait
);
78 * u132_module_lock exists to protect access to global variables
81 static struct mutex u132_module_lock
;
/* Nonzero while the module is being unloaded. */
82 static int u132_exiting
;
/* Count of live adapter instances. */
83 static int u132_instances
;
/* List of all u132 instances (linked via u132->u132_list). */
84 static struct list_head u132_static_list
;
86 * end of the global variables protected by u132_module_lock
/* Private workqueue on which all scheduler/monitor delayed work runs. */
88 static struct workqueue_struct
*workqueue
;
/* Fixed table sizes for ports, device addresses, devices, endpoints and
 * transfer rings on the U132 adapter. */
89 #define MAX_U132_PORTS 7
90 #define MAX_U132_ADDRS 128
91 #define MAX_U132_UDEVS 4
92 #define MAX_U132_ENDPS 100
93 #define MAX_U132_RINGS 4
94 static const char *cc_to_text
[16] = {
124 struct usb_device
*usb_device
;
129 u8 endp_number_in
[16];
130 u8 endp_number_out
[16];
132 #define ENDP_QUEUE_SHIFT 3
133 #define ENDP_QUEUE_SIZE (1<<ENDP_QUEUE_SHIFT)
134 #define ENDP_QUEUE_MASK (ENDP_QUEUE_SIZE-1)
136 struct list_head urb_more
;
149 struct list_head endp_ring
;
150 struct u132_ring
*ring
;
151 unsigned toggle_bits
:2;
157 unsigned dequeueing
:1;
158 unsigned edset_flush
:1;
159 unsigned spare_bits
:14;
160 unsigned long jiffies
;
161 struct usb_host_endpoint
*hep
;
162 struct u132_spin queue_lock
;
166 struct urb
*urb_list
[ENDP_QUEUE_SIZE
];
167 struct list_head urb_more
;
168 struct delayed_work scheduler
;
175 struct u132_endp
*curr_endp
;
176 struct delayed_work scheduler
;
180 struct list_head u132_list
;
181 struct mutex sw_lock
;
182 struct mutex scheduler_lock
;
183 struct u132_platform_data
*board
;
184 struct platform_device
*platform_dev
;
185 struct u132_ring ring
[MAX_U132_RINGS
];
193 u32 hc_roothub_status
;
195 u32 hc_roothub_portstatus
[MAX_ROOT_PORTS
];
197 unsigned long next_statechange
;
198 struct delayed_work monitor
;
200 struct u132_addr addr
[MAX_U132_ADDRS
];
201 struct u132_udev udev
[MAX_U132_UDEVS
];
202 struct u132_port port
[MAX_U132_PORTS
];
203 struct u132_endp
*endp
[MAX_U132_ENDPS
];
207 * these cannot be inlines because we need the structure offset!!
208 * Does anyone have a better way?????
/* Accessors for OHCI registers reached through the FTDI/ELAN transport;
 * the register is named by struct ohci_regs member so offsetof() computes
 * the PCI-memory offset.
 * NOTE(review): each macro body ends in ';', so every expansion produces a
 * trailing empty statement when the caller also writes ';' — harmless as a
 * plain statement, but these macros cannot be used inside expressions or an
 * unbraced if/else. */
210 #define ftdi_read_pcimem(pdev, member, data) usb_ftdi_elan_read_pcimem(pdev, \
211 offsetof(struct ohci_regs, member), 0, data);
212 #define ftdi_write_pcimem(pdev, member, data) usb_ftdi_elan_write_pcimem(pdev, \
213 offsetof(struct ohci_regs, member), 0, data);
214 #define u132_read_pcimem(u132, member, data) \
215 usb_ftdi_elan_read_pcimem(u132->platform_dev, offsetof(struct \
216 ohci_regs, member), 0, data);
217 #define u132_write_pcimem(u132, member, data) \
218 usb_ftdi_elan_write_pcimem(u132->platform_dev, offsetof(struct \
219 ohci_regs, member), 0, data);
/* Map a struct u132_udev back to the struct u132 that contains it: the
 * udev lives in u132->udev[udev_number], so container_of() on that array
 * slot recovers the parent. */
220 static inline struct u132
*udev_to_u132(struct u132_udev
*udev
)
222 u8 udev_number
= udev
->udev_number
;
223 return container_of(udev
, struct u132
, udev
[udev_number
]);
/* The driver's per-adapter state is stored in the usb_hcd's hcd_priv
 * area; cast it back to struct u132. */
226 static inline struct u132
*hcd_to_u132(struct usb_hcd
*hcd
)
228 return (struct u132
*)(hcd
->hcd_priv
);
/* Inverse of hcd_to_u132(): recover the usb_hcd from the u132 state,
 * which overlays the hcd's hcd_priv member. */
231 static inline struct usb_hcd
*u132_to_hcd(struct u132
*u132
)
233 return container_of((void *)u132
, struct usb_hcd
, hcd_priv
);
/* Mark the host controller halted by setting the hcd state. */
236 static inline void u132_disable(struct u132
*u132
)
238 u132_to_hcd(u132
)->state
= HC_STATE_HALT
;
242 #define kref_to_u132(d) container_of(d, struct u132, kref)
243 #define kref_to_u132_endp(d) container_of(d, struct u132_endp, kref)
244 #define kref_to_u132_udev(d) container_of(d, struct u132_udev, kref)
245 #include "../misc/usb_u132.h"
246 static const char hcd_name
[] = "u132_hcd";
247 #define PORT_C_MASK ((USB_PORT_STAT_C_CONNECTION | USB_PORT_STAT_C_ENABLE | \
248 USB_PORT_STAT_C_SUSPEND | USB_PORT_STAT_C_OVERCURRENT | \
249 USB_PORT_STAT_C_RESET) << 16)
250 static void u132_hcd_delete(struct kref
*kref
)
252 struct u132
*u132
= kref_to_u132(kref
);
253 struct platform_device
*pdev
= u132
->platform_dev
;
254 struct usb_hcd
*hcd
= u132_to_hcd(u132
);
256 mutex_lock(&u132_module_lock
);
257 list_del_init(&u132
->u132_list
);
259 mutex_unlock(&u132_module_lock
);
260 dev_warn(&u132
->platform_dev
->dev
, "FREEING the hcd=%p and thus the u13"
261 "2=%p going=%d pdev=%p\n", hcd
, u132
, u132
->going
, pdev
);
/* Drop one reference on the adapter; u132_hcd_delete() runs when the
 * count reaches zero. */
265 static inline void u132_u132_put_kref(struct u132
*u132
)
267 kref_put(&u132
->kref
, u132_hcd_delete
);
/* Initialize the adapter's refcount to 1. */
270 static inline void u132_u132_init_kref(struct u132
*u132
)
272 kref_init(&u132
->kref
);
/* kref release callback for a u132_udev: clear its slot assignment and
 * detach the usb_device so the slot can be reused. Nothing is freed here —
 * udevs are embedded in struct u132. */
275 static void u132_udev_delete(struct kref
*kref
)
277 struct u132_udev
*udev
= kref_to_u132_udev(kref
);
278 udev
->udev_number
= 0;
279 udev
->usb_device
= NULL
;
281 udev
->enumeration
= 0;
/* Drop one reference on a udev; u132_udev_delete() runs at zero.
 * The u132 argument is unused here, kept for API symmetry. */
284 static inline void u132_udev_put_kref(struct u132
*u132
, struct u132_udev
*udev
)
286 kref_put(&udev
->kref
, u132_udev_delete
);
/* Take one extra reference on a udev. */
289 static inline void u132_udev_get_kref(struct u132
*u132
, struct u132_udev
*udev
)
291 kref_get(&udev
->kref
);
/* Initialize a udev's refcount to 1. */
294 static inline void u132_udev_init_kref(struct u132
*u132
,
295 struct u132_udev
*udev
)
297 kref_init(&udev
->kref
);
/* Rings carry no kref of their own: releasing a "ring reference" drops
 * the owning adapter's kref instead (note &u132->kref below, not ring). */
300 static inline void u132_ring_put_kref(struct u132
*u132
, struct u132_ring
*ring
)
302 kref_put(&u132
->kref
, u132_hcd_delete
);
/* Requeue the ring's scheduler work, first with the requested delay and,
 * failing that, immediately; if neither queues, the caller's reference on
 * the adapter is dropped here so it is not leaked.
 * NOTE(review): some interior lines are missing from this extraction, so
 * the exact branch bodies cannot be confirmed from here. */
305 static void u132_ring_requeue_work(struct u132
*u132
, struct u132_ring
*ring
,
309 if (queue_delayed_work(workqueue
, &ring
->scheduler
, delta
))
311 } else if (queue_delayed_work(workqueue
, &ring
->scheduler
, 0))
313 kref_put(&u132
->kref
, u132_hcd_delete
);
/* Queue ring scheduler work after taking an adapter reference; the
 * reference is released by the work itself or by requeue failure handling
 * in u132_ring_requeue_work(). */
317 static void u132_ring_queue_work(struct u132
*u132
, struct u132_ring
*ring
,
320 kref_get(&u132
->kref
);
321 u132_ring_requeue_work(u132
, ring
, delta
);
/* Cancel pending ring scheduler work; if it was actually dequeued, drop
 * the adapter reference that was taken when the work was queued. */
325 static void u132_ring_cancel_work(struct u132
*u132
, struct u132_ring
*ring
)
327 if (cancel_delayed_work(&ring
->scheduler
))
328 kref_put(&u132
->kref
, u132_hcd_delete
);
/* kref release callback for an endpoint: unlink it from its ring's
 * endpoint list (advancing or clearing ring->curr_endp if it was current),
 * clear the udev's endpoint-number mapping for it, release the udev
 * reference, clear the adapter's endp[] slot, and finally drop the adapter
 * reference taken in u132_endp_init_kref().
 * NOTE(review): the extraction is missing the lines that select between
 * the IN and OUT mapping-clear branches; both branches are shown below. */
331 static void u132_endp_delete(struct kref
*kref
)
333 struct u132_endp
*endp
= kref_to_u132_endp(kref
);
334 struct u132
*u132
= endp
->u132
;
335 u8 usb_addr
= endp
->usb_addr
;
336 u8 usb_endp
= endp
->usb_endp
;
337 u8 address
= u132
->addr
[usb_addr
].address
;
338 struct u132_udev
*udev
= &u132
->udev
[address
];
339 u8 endp_number
= endp
->endp_number
;
340 struct usb_host_endpoint
*hep
= endp
->hep
;
341 struct u132_ring
*ring
= endp
->ring
;
342 struct list_head
*head
= &endp
->endp_ring
;
344 if (endp
== ring
->curr_endp
) {
345 if (list_empty(head
)) {
346 ring
->curr_endp
= NULL
;
349 struct u132_endp
*next_endp
= list_entry(head
->next
,
350 struct u132_endp
, endp_ring
);
351 ring
->curr_endp
= next_endp
;
357 udev
->endp_number_in
[usb_endp
] = 0;
358 u132_udev_put_kref(u132
, udev
);
361 udev
->endp_number_out
[usb_endp
] = 0;
362 u132_udev_put_kref(u132
, udev
);
364 u132
->endp
[endp_number
- 1] = NULL
;
367 u132_u132_put_kref(u132
);
/* Drop one reference on an endpoint; u132_endp_delete() runs at zero. */
370 static inline void u132_endp_put_kref(struct u132
*u132
, struct u132_endp
*endp
)
372 kref_put(&endp
->kref
, u132_endp_delete
);
/* Take one extra reference on an endpoint. */
375 static inline void u132_endp_get_kref(struct u132
*u132
, struct u132_endp
*endp
)
377 kref_get(&endp
->kref
);
/* Initialize an endpoint's refcount to 1 and pin the adapter with an
 * extra reference; the matching put is in u132_endp_delete(). */
380 static inline void u132_endp_init_kref(struct u132
*u132
,
381 struct u132_endp
*endp
)
383 kref_init(&endp
->kref
);
384 kref_get(&u132
->kref
);
/* Queue the endpoint's scheduler work; take an endpoint reference only if
 * the work was actually queued (queue_delayed_work returns false if it was
 * already pending). */
387 static void u132_endp_queue_work(struct u132
*u132
, struct u132_endp
*endp
,
390 if (queue_delayed_work(workqueue
, &endp
->scheduler
, delta
))
391 kref_get(&endp
->kref
);
/* Cancel pending endpoint scheduler work; if it was dequeued, drop the
 * reference taken in u132_endp_queue_work(). */
394 static void u132_endp_cancel_work(struct u132
*u132
, struct u132_endp
*endp
)
396 if (cancel_delayed_work(&endp
->scheduler
))
397 kref_put(&endp
->kref
, u132_endp_delete
);
/* Release the adapter reference held on behalf of the monitor work. */
400 static inline void u132_monitor_put_kref(struct u132
*u132
)
402 kref_put(&u132
->kref
, u132_hcd_delete
);
/* Queue the roothub monitor work; take an adapter reference only when the
 * work was actually queued. */
405 static void u132_monitor_queue_work(struct u132
*u132
, unsigned int delta
)
407 if (queue_delayed_work(workqueue
, &u132
->monitor
, delta
))
408 kref_get(&u132
->kref
);
/* Requeue monitor work from within the work handler itself. The test is
 * deliberately inverted relative to u132_monitor_queue_work(): the handler
 * already holds a reference, so it is dropped only when requeueing FAILS
 * (note the '!'), and kept alive by the pending work otherwise. */
411 static void u132_monitor_requeue_work(struct u132
*u132
, unsigned int delta
)
413 if (!queue_delayed_work(workqueue
, &u132
->monitor
, delta
))
414 kref_put(&u132
->kref
, u132_hcd_delete
);
/* Cancel pending monitor work; if it was dequeued, drop the reference the
 * queued work was holding. */
417 static void u132_monitor_cancel_work(struct u132
*u132
)
419 if (cancel_delayed_work(&u132
->monitor
))
420 kref_put(&u132
->kref
, u132_hcd_delete
);
423 static int read_roothub_info(struct u132
*u132
)
427 retval
= u132_read_pcimem(u132
, revision
, &revision
);
429 dev_err(&u132
->platform_dev
->dev
, "error %d accessing device co"
432 } else if ((revision
& 0xFF) == 0x10) {
433 } else if ((revision
& 0xFF) == 0x11) {
435 dev_err(&u132
->platform_dev
->dev
, "device revision is not valid"
436 " %08X\n", revision
);
439 retval
= u132_read_pcimem(u132
, control
, &u132
->hc_control
);
441 dev_err(&u132
->platform_dev
->dev
, "error %d accessing device co"
445 retval
= u132_read_pcimem(u132
, roothub
.status
,
446 &u132
->hc_roothub_status
);
448 dev_err(&u132
->platform_dev
->dev
, "error %d accessing device re"
449 "g roothub.status\n", retval
);
452 retval
= u132_read_pcimem(u132
, roothub
.a
, &u132
->hc_roothub_a
);
454 dev_err(&u132
->platform_dev
->dev
, "error %d accessing device re"
455 "g roothub.a\n", retval
);
459 int I
= u132
->num_ports
;
462 retval
= u132_read_pcimem(u132
, roothub
.portstatus
[i
],
463 &u132
->hc_roothub_portstatus
[i
]);
465 dev_err(&u132
->platform_dev
->dev
, "error %d acc"
466 "essing device roothub.portstatus[%d]\n"
476 static void u132_hcd_monitor_work(struct work_struct
*work
)
478 struct u132
*u132
= container_of(work
, struct u132
, monitor
.work
);
479 if (u132
->going
> 1) {
480 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
482 u132_monitor_put_kref(u132
);
484 } else if (u132
->going
> 0) {
485 dev_err(&u132
->platform_dev
->dev
, "device is being removed\n");
486 u132_monitor_put_kref(u132
);
490 mutex_lock(&u132
->sw_lock
);
491 retval
= read_roothub_info(u132
);
493 struct usb_hcd
*hcd
= u132_to_hcd(u132
);
496 mutex_unlock(&u132
->sw_lock
);
498 ftdi_elan_gone_away(u132
->platform_dev
);
499 u132_monitor_put_kref(u132
);
502 u132_monitor_requeue_work(u132
, 500);
503 mutex_unlock(&u132
->sw_lock
);
509 static void u132_hcd_giveback_urb(struct u132
*u132
, struct u132_endp
*endp
,
510 struct urb
*urb
, int status
)
512 struct u132_ring
*ring
;
514 struct usb_hcd
*hcd
= u132_to_hcd(u132
);
515 urb
->error_count
= 0;
516 spin_lock_irqsave(&endp
->queue_lock
.slock
, irqs
);
517 usb_hcd_unlink_urb_from_ep(hcd
, urb
);
518 endp
->queue_next
+= 1;
519 if (ENDP_QUEUE_SIZE
> --endp
->queue_size
) {
521 spin_unlock_irqrestore(&endp
->queue_lock
.slock
, irqs
);
523 struct list_head
*next
= endp
->urb_more
.next
;
524 struct u132_urbq
*urbq
= list_entry(next
, struct u132_urbq
,
527 endp
->urb_list
[ENDP_QUEUE_MASK
& endp
->queue_last
++] =
530 spin_unlock_irqrestore(&endp
->queue_lock
.slock
, irqs
);
533 mutex_lock(&u132
->scheduler_lock
);
536 u132_ring_cancel_work(u132
, ring
);
537 u132_ring_queue_work(u132
, ring
, 0);
538 mutex_unlock(&u132
->scheduler_lock
);
539 u132_endp_put_kref(u132
, endp
);
540 usb_hcd_giveback_urb(hcd
, urb
, status
);
544 static void u132_hcd_forget_urb(struct u132
*u132
, struct u132_endp
*endp
,
545 struct urb
*urb
, int status
)
547 u132_endp_put_kref(u132
, endp
);
550 static void u132_hcd_abandon_urb(struct u132
*u132
, struct u132_endp
*endp
,
551 struct urb
*urb
, int status
)
554 struct usb_hcd
*hcd
= u132_to_hcd(u132
);
555 urb
->error_count
= 0;
556 spin_lock_irqsave(&endp
->queue_lock
.slock
, irqs
);
557 usb_hcd_unlink_urb_from_ep(hcd
, urb
);
558 endp
->queue_next
+= 1;
559 if (ENDP_QUEUE_SIZE
> --endp
->queue_size
) {
561 spin_unlock_irqrestore(&endp
->queue_lock
.slock
, irqs
);
563 struct list_head
*next
= endp
->urb_more
.next
;
564 struct u132_urbq
*urbq
= list_entry(next
, struct u132_urbq
,
567 endp
->urb_list
[ENDP_QUEUE_MASK
& endp
->queue_last
++] =
570 spin_unlock_irqrestore(&endp
->queue_lock
.slock
, irqs
);
572 } usb_hcd_giveback_urb(hcd
, urb
, status
);
576 static inline int edset_input(struct u132
*u132
, struct u132_ring
*ring
,
577 struct u132_endp
*endp
, struct urb
*urb
, u8 address
, u8 toggle_bits
,
578 void (*callback
) (void *endp
, struct urb
*urb
, u8
*buf
, int len
,
579 int toggle_bits
, int error_count
, int condition_code
, int repeat_number
,
580 int halted
, int skipped
, int actual
, int non_null
))
582 return usb_ftdi_elan_edset_input(u132
->platform_dev
, ring
->number
, endp
,
583 urb
, address
, endp
->usb_endp
, toggle_bits
, callback
);
586 static inline int edset_setup(struct u132
*u132
, struct u132_ring
*ring
,
587 struct u132_endp
*endp
, struct urb
*urb
, u8 address
, u8 toggle_bits
,
588 void (*callback
) (void *endp
, struct urb
*urb
, u8
*buf
, int len
,
589 int toggle_bits
, int error_count
, int condition_code
, int repeat_number
,
590 int halted
, int skipped
, int actual
, int non_null
))
592 return usb_ftdi_elan_edset_setup(u132
->platform_dev
, ring
->number
, endp
,
593 urb
, address
, endp
->usb_endp
, toggle_bits
, callback
);
596 static inline int edset_single(struct u132
*u132
, struct u132_ring
*ring
,
597 struct u132_endp
*endp
, struct urb
*urb
, u8 address
, u8 toggle_bits
,
598 void (*callback
) (void *endp
, struct urb
*urb
, u8
*buf
, int len
,
599 int toggle_bits
, int error_count
, int condition_code
, int repeat_number
,
600 int halted
, int skipped
, int actual
, int non_null
))
602 return usb_ftdi_elan_edset_single(u132
->platform_dev
, ring
->number
,
603 endp
, urb
, address
, endp
->usb_endp
, toggle_bits
, callback
);
606 static inline int edset_output(struct u132
*u132
, struct u132_ring
*ring
,
607 struct u132_endp
*endp
, struct urb
*urb
, u8 address
, u8 toggle_bits
,
608 void (*callback
) (void *endp
, struct urb
*urb
, u8
*buf
, int len
,
609 int toggle_bits
, int error_count
, int condition_code
, int repeat_number
,
610 int halted
, int skipped
, int actual
, int non_null
))
612 return usb_ftdi_elan_edset_output(u132
->platform_dev
, ring
->number
,
613 endp
, urb
, address
, endp
->usb_endp
, toggle_bits
, callback
);
618 * must not LOCK sw_lock
621 static void u132_hcd_interrupt_recv(void *data
, struct urb
*urb
, u8
*buf
,
622 int len
, int toggle_bits
, int error_count
, int condition_code
,
623 int repeat_number
, int halted
, int skipped
, int actual
, int non_null
)
625 struct u132_endp
*endp
= data
;
626 struct u132
*u132
= endp
->u132
;
627 u8 address
= u132
->addr
[endp
->usb_addr
].address
;
628 struct u132_udev
*udev
= &u132
->udev
[address
];
629 mutex_lock(&u132
->scheduler_lock
);
630 if (u132
->going
> 1) {
631 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
633 mutex_unlock(&u132
->scheduler_lock
);
634 u132_hcd_forget_urb(u132
, endp
, urb
, -ENODEV
);
636 } else if (endp
->dequeueing
) {
637 endp
->dequeueing
= 0;
638 mutex_unlock(&u132
->scheduler_lock
);
639 u132_hcd_giveback_urb(u132
, endp
, urb
, -EINTR
);
641 } else if (u132
->going
> 0) {
642 dev_err(&u132
->platform_dev
->dev
, "device is being removed "
644 mutex_unlock(&u132
->scheduler_lock
);
645 u132_hcd_giveback_urb(u132
, endp
, urb
, -ENODEV
);
647 } else if (!urb
->unlinked
) {
648 struct u132_ring
*ring
= endp
->ring
;
649 u8
*u
= urb
->transfer_buffer
+ urb
->actual_length
;
656 urb
->actual_length
+= len
;
657 if ((condition_code
== TD_CC_NOERROR
) &&
658 (urb
->transfer_buffer_length
> urb
->actual_length
)) {
659 endp
->toggle_bits
= toggle_bits
;
660 usb_settoggle(udev
->usb_device
, endp
->usb_endp
, 0,
662 if (urb
->actual_length
> 0) {
664 mutex_unlock(&u132
->scheduler_lock
);
665 retval
= edset_single(u132
, ring
, endp
, urb
,
666 address
, endp
->toggle_bits
,
667 u132_hcd_interrupt_recv
);
669 u132_hcd_giveback_urb(u132
, endp
, urb
,
674 endp
->jiffies
= jiffies
+
675 msecs_to_jiffies(urb
->interval
);
676 u132_ring_cancel_work(u132
, ring
);
677 u132_ring_queue_work(u132
, ring
, 0);
678 mutex_unlock(&u132
->scheduler_lock
);
679 u132_endp_put_kref(u132
, endp
);
682 } else if ((condition_code
== TD_DATAUNDERRUN
) &&
683 ((urb
->transfer_flags
& URB_SHORT_NOT_OK
) == 0)) {
684 endp
->toggle_bits
= toggle_bits
;
685 usb_settoggle(udev
->usb_device
, endp
->usb_endp
, 0,
687 mutex_unlock(&u132
->scheduler_lock
);
688 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
691 if (condition_code
== TD_CC_NOERROR
) {
692 endp
->toggle_bits
= toggle_bits
;
693 usb_settoggle(udev
->usb_device
, endp
->usb_endp
,
695 } else if (condition_code
== TD_CC_STALL
) {
696 endp
->toggle_bits
= 0x2;
697 usb_settoggle(udev
->usb_device
, endp
->usb_endp
,
700 endp
->toggle_bits
= 0x2;
701 usb_settoggle(udev
->usb_device
, endp
->usb_endp
,
703 dev_err(&u132
->platform_dev
->dev
, "urb=%p givin"
704 "g back INTERRUPT %s\n", urb
,
705 cc_to_text
[condition_code
]);
707 mutex_unlock(&u132
->scheduler_lock
);
708 u132_hcd_giveback_urb(u132
, endp
, urb
,
709 cc_to_error
[condition_code
]);
713 dev_err(&u132
->platform_dev
->dev
, "CALLBACK called urb=%p "
714 "unlinked=%d\n", urb
, urb
->unlinked
);
715 mutex_unlock(&u132
->scheduler_lock
);
716 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
721 static void u132_hcd_bulk_output_sent(void *data
, struct urb
*urb
, u8
*buf
,
722 int len
, int toggle_bits
, int error_count
, int condition_code
,
723 int repeat_number
, int halted
, int skipped
, int actual
, int non_null
)
725 struct u132_endp
*endp
= data
;
726 struct u132
*u132
= endp
->u132
;
727 u8 address
= u132
->addr
[endp
->usb_addr
].address
;
728 mutex_lock(&u132
->scheduler_lock
);
729 if (u132
->going
> 1) {
730 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
732 mutex_unlock(&u132
->scheduler_lock
);
733 u132_hcd_forget_urb(u132
, endp
, urb
, -ENODEV
);
735 } else if (endp
->dequeueing
) {
736 endp
->dequeueing
= 0;
737 mutex_unlock(&u132
->scheduler_lock
);
738 u132_hcd_giveback_urb(u132
, endp
, urb
, -EINTR
);
740 } else if (u132
->going
> 0) {
741 dev_err(&u132
->platform_dev
->dev
, "device is being removed "
743 mutex_unlock(&u132
->scheduler_lock
);
744 u132_hcd_giveback_urb(u132
, endp
, urb
, -ENODEV
);
746 } else if (!urb
->unlinked
) {
747 struct u132_ring
*ring
= endp
->ring
;
748 urb
->actual_length
+= len
;
749 endp
->toggle_bits
= toggle_bits
;
750 if (urb
->transfer_buffer_length
> urb
->actual_length
) {
752 mutex_unlock(&u132
->scheduler_lock
);
753 retval
= edset_output(u132
, ring
, endp
, urb
, address
,
754 endp
->toggle_bits
, u132_hcd_bulk_output_sent
);
756 u132_hcd_giveback_urb(u132
, endp
, urb
, retval
);
759 mutex_unlock(&u132
->scheduler_lock
);
760 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
764 dev_err(&u132
->platform_dev
->dev
, "CALLBACK called urb=%p "
765 "unlinked=%d\n", urb
, urb
->unlinked
);
766 mutex_unlock(&u132
->scheduler_lock
);
767 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
772 static void u132_hcd_bulk_input_recv(void *data
, struct urb
*urb
, u8
*buf
,
773 int len
, int toggle_bits
, int error_count
, int condition_code
,
774 int repeat_number
, int halted
, int skipped
, int actual
, int non_null
)
776 struct u132_endp
*endp
= data
;
777 struct u132
*u132
= endp
->u132
;
778 u8 address
= u132
->addr
[endp
->usb_addr
].address
;
779 struct u132_udev
*udev
= &u132
->udev
[address
];
780 mutex_lock(&u132
->scheduler_lock
);
781 if (u132
->going
> 1) {
782 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
784 mutex_unlock(&u132
->scheduler_lock
);
785 u132_hcd_forget_urb(u132
, endp
, urb
, -ENODEV
);
787 } else if (endp
->dequeueing
) {
788 endp
->dequeueing
= 0;
789 mutex_unlock(&u132
->scheduler_lock
);
790 u132_hcd_giveback_urb(u132
, endp
, urb
, -EINTR
);
792 } else if (u132
->going
> 0) {
793 dev_err(&u132
->platform_dev
->dev
, "device is being removed "
795 mutex_unlock(&u132
->scheduler_lock
);
796 u132_hcd_giveback_urb(u132
, endp
, urb
, -ENODEV
);
798 } else if (!urb
->unlinked
) {
799 struct u132_ring
*ring
= endp
->ring
;
800 u8
*u
= urb
->transfer_buffer
+ urb
->actual_length
;
807 urb
->actual_length
+= len
;
808 if ((condition_code
== TD_CC_NOERROR
) &&
809 (urb
->transfer_buffer_length
> urb
->actual_length
)) {
811 endp
->toggle_bits
= toggle_bits
;
812 usb_settoggle(udev
->usb_device
, endp
->usb_endp
, 0,
814 mutex_unlock(&u132
->scheduler_lock
);
815 retval
= usb_ftdi_elan_edset_input(u132
->platform_dev
,
816 ring
->number
, endp
, urb
, address
,
817 endp
->usb_endp
, endp
->toggle_bits
,
818 u132_hcd_bulk_input_recv
);
820 u132_hcd_giveback_urb(u132
, endp
, urb
, retval
);
822 } else if (condition_code
== TD_CC_NOERROR
) {
823 endp
->toggle_bits
= toggle_bits
;
824 usb_settoggle(udev
->usb_device
, endp
->usb_endp
, 0,
826 mutex_unlock(&u132
->scheduler_lock
);
827 u132_hcd_giveback_urb(u132
, endp
, urb
,
828 cc_to_error
[condition_code
]);
830 } else if ((condition_code
== TD_DATAUNDERRUN
) &&
831 ((urb
->transfer_flags
& URB_SHORT_NOT_OK
) == 0)) {
832 endp
->toggle_bits
= toggle_bits
;
833 usb_settoggle(udev
->usb_device
, endp
->usb_endp
, 0,
835 mutex_unlock(&u132
->scheduler_lock
);
836 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
838 } else if (condition_code
== TD_DATAUNDERRUN
) {
839 endp
->toggle_bits
= toggle_bits
;
840 usb_settoggle(udev
->usb_device
, endp
->usb_endp
, 0,
842 dev_warn(&u132
->platform_dev
->dev
, "urb=%p(SHORT NOT OK"
843 ") giving back BULK IN %s\n", urb
,
844 cc_to_text
[condition_code
]);
845 mutex_unlock(&u132
->scheduler_lock
);
846 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
848 } else if (condition_code
== TD_CC_STALL
) {
849 endp
->toggle_bits
= 0x2;
850 usb_settoggle(udev
->usb_device
, endp
->usb_endp
, 0, 0);
851 mutex_unlock(&u132
->scheduler_lock
);
852 u132_hcd_giveback_urb(u132
, endp
, urb
,
853 cc_to_error
[condition_code
]);
856 endp
->toggle_bits
= 0x2;
857 usb_settoggle(udev
->usb_device
, endp
->usb_endp
, 0, 0);
858 dev_err(&u132
->platform_dev
->dev
, "urb=%p giving back B"
859 "ULK IN code=%d %s\n", urb
, condition_code
,
860 cc_to_text
[condition_code
]);
861 mutex_unlock(&u132
->scheduler_lock
);
862 u132_hcd_giveback_urb(u132
, endp
, urb
,
863 cc_to_error
[condition_code
]);
867 dev_err(&u132
->platform_dev
->dev
, "CALLBACK called urb=%p "
868 "unlinked=%d\n", urb
, urb
->unlinked
);
869 mutex_unlock(&u132
->scheduler_lock
);
870 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
875 static void u132_hcd_configure_empty_sent(void *data
, struct urb
*urb
, u8
*buf
,
876 int len
, int toggle_bits
, int error_count
, int condition_code
,
877 int repeat_number
, int halted
, int skipped
, int actual
, int non_null
)
879 struct u132_endp
*endp
= data
;
880 struct u132
*u132
= endp
->u132
;
881 mutex_lock(&u132
->scheduler_lock
);
882 if (u132
->going
> 1) {
883 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
885 mutex_unlock(&u132
->scheduler_lock
);
886 u132_hcd_forget_urb(u132
, endp
, urb
, -ENODEV
);
888 } else if (endp
->dequeueing
) {
889 endp
->dequeueing
= 0;
890 mutex_unlock(&u132
->scheduler_lock
);
891 u132_hcd_giveback_urb(u132
, endp
, urb
, -EINTR
);
893 } else if (u132
->going
> 0) {
894 dev_err(&u132
->platform_dev
->dev
, "device is being removed "
896 mutex_unlock(&u132
->scheduler_lock
);
897 u132_hcd_giveback_urb(u132
, endp
, urb
, -ENODEV
);
899 } else if (!urb
->unlinked
) {
900 mutex_unlock(&u132
->scheduler_lock
);
901 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
904 dev_err(&u132
->platform_dev
->dev
, "CALLBACK called urb=%p "
905 "unlinked=%d\n", urb
, urb
->unlinked
);
906 mutex_unlock(&u132
->scheduler_lock
);
907 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
912 static void u132_hcd_configure_input_recv(void *data
, struct urb
*urb
, u8
*buf
,
913 int len
, int toggle_bits
, int error_count
, int condition_code
,
914 int repeat_number
, int halted
, int skipped
, int actual
, int non_null
)
916 struct u132_endp
*endp
= data
;
917 struct u132
*u132
= endp
->u132
;
918 u8 address
= u132
->addr
[endp
->usb_addr
].address
;
919 mutex_lock(&u132
->scheduler_lock
);
920 if (u132
->going
> 1) {
921 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
923 mutex_unlock(&u132
->scheduler_lock
);
924 u132_hcd_forget_urb(u132
, endp
, urb
, -ENODEV
);
926 } else if (endp
->dequeueing
) {
927 endp
->dequeueing
= 0;
928 mutex_unlock(&u132
->scheduler_lock
);
929 u132_hcd_giveback_urb(u132
, endp
, urb
, -EINTR
);
931 } else if (u132
->going
> 0) {
932 dev_err(&u132
->platform_dev
->dev
, "device is being removed "
934 mutex_unlock(&u132
->scheduler_lock
);
935 u132_hcd_giveback_urb(u132
, endp
, urb
, -ENODEV
);
937 } else if (!urb
->unlinked
) {
938 struct u132_ring
*ring
= endp
->ring
;
939 u8
*u
= urb
->transfer_buffer
;
946 urb
->actual_length
= len
;
947 if ((condition_code
== TD_CC_NOERROR
) || ((condition_code
==
948 TD_DATAUNDERRUN
) && ((urb
->transfer_flags
&
949 URB_SHORT_NOT_OK
) == 0))) {
951 mutex_unlock(&u132
->scheduler_lock
);
952 retval
= usb_ftdi_elan_edset_empty(u132
->platform_dev
,
953 ring
->number
, endp
, urb
, address
,
955 u132_hcd_configure_empty_sent
);
957 u132_hcd_giveback_urb(u132
, endp
, urb
, retval
);
959 } else if (condition_code
== TD_CC_STALL
) {
960 mutex_unlock(&u132
->scheduler_lock
);
961 dev_warn(&u132
->platform_dev
->dev
, "giving back SETUP I"
962 "NPUT STALL urb %p\n", urb
);
963 u132_hcd_giveback_urb(u132
, endp
, urb
,
964 cc_to_error
[condition_code
]);
967 mutex_unlock(&u132
->scheduler_lock
);
968 dev_err(&u132
->platform_dev
->dev
, "giving back SETUP IN"
969 "PUT %s urb %p\n", cc_to_text
[condition_code
],
971 u132_hcd_giveback_urb(u132
, endp
, urb
,
972 cc_to_error
[condition_code
]);
976 dev_err(&u132
->platform_dev
->dev
, "CALLBACK called urb=%p "
977 "unlinked=%d\n", urb
, urb
->unlinked
);
978 mutex_unlock(&u132
->scheduler_lock
);
979 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
984 static void u132_hcd_configure_empty_recv(void *data
, struct urb
*urb
, u8
*buf
,
985 int len
, int toggle_bits
, int error_count
, int condition_code
,
986 int repeat_number
, int halted
, int skipped
, int actual
, int non_null
)
988 struct u132_endp
*endp
= data
;
989 struct u132
*u132
= endp
->u132
;
990 mutex_lock(&u132
->scheduler_lock
);
991 if (u132
->going
> 1) {
992 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
994 mutex_unlock(&u132
->scheduler_lock
);
995 u132_hcd_forget_urb(u132
, endp
, urb
, -ENODEV
);
997 } else if (endp
->dequeueing
) {
998 endp
->dequeueing
= 0;
999 mutex_unlock(&u132
->scheduler_lock
);
1000 u132_hcd_giveback_urb(u132
, endp
, urb
, -EINTR
);
1002 } else if (u132
->going
> 0) {
1003 dev_err(&u132
->platform_dev
->dev
, "device is being removed "
1005 mutex_unlock(&u132
->scheduler_lock
);
1006 u132_hcd_giveback_urb(u132
, endp
, urb
, -ENODEV
);
1008 } else if (!urb
->unlinked
) {
1009 mutex_unlock(&u132
->scheduler_lock
);
1010 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
1013 dev_err(&u132
->platform_dev
->dev
, "CALLBACK called urb=%p "
1014 "unlinked=%d\n", urb
, urb
->unlinked
);
1015 mutex_unlock(&u132
->scheduler_lock
);
1016 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
1021 static void u132_hcd_configure_setup_sent(void *data
, struct urb
*urb
, u8
*buf
,
1022 int len
, int toggle_bits
, int error_count
, int condition_code
,
1023 int repeat_number
, int halted
, int skipped
, int actual
, int non_null
)
1025 struct u132_endp
*endp
= data
;
1026 struct u132
*u132
= endp
->u132
;
1027 u8 address
= u132
->addr
[endp
->usb_addr
].address
;
1028 mutex_lock(&u132
->scheduler_lock
);
1029 if (u132
->going
> 1) {
1030 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
1032 mutex_unlock(&u132
->scheduler_lock
);
1033 u132_hcd_forget_urb(u132
, endp
, urb
, -ENODEV
);
1035 } else if (endp
->dequeueing
) {
1036 endp
->dequeueing
= 0;
1037 mutex_unlock(&u132
->scheduler_lock
);
1038 u132_hcd_giveback_urb(u132
, endp
, urb
, -EINTR
);
1040 } else if (u132
->going
> 0) {
1041 dev_err(&u132
->platform_dev
->dev
, "device is being removed "
1043 mutex_unlock(&u132
->scheduler_lock
);
1044 u132_hcd_giveback_urb(u132
, endp
, urb
, -ENODEV
);
1046 } else if (!urb
->unlinked
) {
1047 if (usb_pipein(urb
->pipe
)) {
1049 struct u132_ring
*ring
= endp
->ring
;
1050 mutex_unlock(&u132
->scheduler_lock
);
1051 retval
= usb_ftdi_elan_edset_input(u132
->platform_dev
,
1052 ring
->number
, endp
, urb
, address
,
1054 u132_hcd_configure_input_recv
);
1056 u132_hcd_giveback_urb(u132
, endp
, urb
, retval
);
1060 struct u132_ring
*ring
= endp
->ring
;
1061 mutex_unlock(&u132
->scheduler_lock
);
1062 retval
= usb_ftdi_elan_edset_input(u132
->platform_dev
,
1063 ring
->number
, endp
, urb
, address
,
1065 u132_hcd_configure_empty_recv
);
1067 u132_hcd_giveback_urb(u132
, endp
, urb
, retval
);
1071 dev_err(&u132
->platform_dev
->dev
, "CALLBACK called urb=%p "
1072 "unlinked=%d\n", urb
, urb
->unlinked
);
1073 mutex_unlock(&u132
->scheduler_lock
);
1074 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
/* Completion callback run after the empty (status) stage of the SET_ADDRESS
 * enumeration transfer: commits address 0 release and the endpoint's new
 * usb_addr, then gives the URB back. NOTE(review): this extraction is missing
 * original source lines between the fused numbers (e.g. 1082, 1090, 1093);
 * code below is kept verbatim. */
1079 static void u132_hcd_enumeration_empty_recv(void *data
, struct urb
*urb
,
1080 u8
*buf
, int len
, int toggle_bits
, int error_count
, int condition_code
,
1081 int repeat_number
, int halted
, int skipped
, int actual
, int non_null
)
1083 struct u132_endp
*endp
= data
;
1084 struct u132
*u132
= endp
->u132
;
1085 u8 address
= u132
->addr
[endp
->usb_addr
].address
;
1086 struct u132_udev
*udev
= &u132
->udev
[address
];
1087 mutex_lock(&u132
->scheduler_lock
);
/* device already gone: forget the URB instead of giving it back */
1088 if (u132
->going
> 1) {
1089 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
1091 mutex_unlock(&u132
->scheduler_lock
);
1092 u132_hcd_forget_urb(u132
, endp
, urb
, -ENODEV
);
/* a dequeue was requested: complete with -EINTR */
1094 } else if (endp
->dequeueing
) {
1095 endp
->dequeueing
= 0;
1096 mutex_unlock(&u132
->scheduler_lock
);
1097 u132_hcd_giveback_urb(u132
, endp
, urb
, -EINTR
);
1099 } else if (u132
->going
> 0) {
1100 dev_err(&u132
->platform_dev
->dev
, "device is being removed "
1102 mutex_unlock(&u132
->scheduler_lock
);
1103 u132_hcd_giveback_urb(u132
, endp
, urb
, -ENODEV
);
/* normal path: device address update takes effect */
1105 } else if (!urb
->unlinked
) {
1106 u132
->addr
[0].address
= 0;
1107 endp
->usb_addr
= udev
->usb_addr
;
1108 mutex_unlock(&u132
->scheduler_lock
);
1109 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
1112 dev_err(&u132
->platform_dev
->dev
, "CALLBACK called urb=%p "
1113 "unlinked=%d\n", urb
, urb
->unlinked
);
1114 mutex_unlock(&u132
->scheduler_lock
);
1115 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
/* Completion callback run after the SET_ADDRESS setup packet has been sent:
 * schedules the empty input (status) stage via usb_ftdi_elan_edset_input.
 * NOTE(review): lossy extraction — original lines between the fused numbers
 * are missing; code kept verbatim. */
1120 static void u132_hcd_enumeration_address_sent(void *data
, struct urb
*urb
,
1121 u8
*buf
, int len
, int toggle_bits
, int error_count
, int condition_code
,
1122 int repeat_number
, int halted
, int skipped
, int actual
, int non_null
)
1124 struct u132_endp
*endp
= data
;
1125 struct u132
*u132
= endp
->u132
;
1126 mutex_lock(&u132
->scheduler_lock
);
1127 if (u132
->going
> 1) {
1128 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
1130 mutex_unlock(&u132
->scheduler_lock
);
1131 u132_hcd_forget_urb(u132
, endp
, urb
, -ENODEV
);
1133 } else if (endp
->dequeueing
) {
1134 endp
->dequeueing
= 0;
1135 mutex_unlock(&u132
->scheduler_lock
);
1136 u132_hcd_giveback_urb(u132
, endp
, urb
, -EINTR
);
1138 } else if (u132
->going
> 0) {
1139 dev_err(&u132
->platform_dev
->dev
, "device is being removed "
1141 mutex_unlock(&u132
->scheduler_lock
);
1142 u132_hcd_giveback_urb(u132
, endp
, urb
, -ENODEV
);
/* normal path: queue the status stage; address 0 is used because the
 * device has not yet switched to its new address */
1144 } else if (!urb
->unlinked
) {
1146 struct u132_ring
*ring
= endp
->ring
;
1147 mutex_unlock(&u132
->scheduler_lock
);
1148 retval
= usb_ftdi_elan_edset_input(u132
->platform_dev
,
1149 ring
->number
, endp
, urb
, 0, endp
->usb_endp
, 0,
1150 u132_hcd_enumeration_empty_recv
);
1152 u132_hcd_giveback_urb(u132
, endp
, urb
, retval
);
1155 dev_err(&u132
->platform_dev
->dev
, "CALLBACK called urb=%p "
1156 "unlinked=%d\n", urb
, urb
->unlinked
);
1157 mutex_unlock(&u132
->scheduler_lock
);
1158 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
/* Completion callback for the final (empty/status) stage of the initial
 * control transfer: gives the URB back with the appropriate status.
 * NOTE(review): lossy extraction — interior source lines missing; code kept
 * verbatim. */
1163 static void u132_hcd_initial_empty_sent(void *data
, struct urb
*urb
, u8
*buf
,
1164 int len
, int toggle_bits
, int error_count
, int condition_code
,
1165 int repeat_number
, int halted
, int skipped
, int actual
, int non_null
)
1167 struct u132_endp
*endp
= data
;
1168 struct u132
*u132
= endp
->u132
;
1169 mutex_lock(&u132
->scheduler_lock
);
1170 if (u132
->going
> 1) {
1171 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
1173 mutex_unlock(&u132
->scheduler_lock
);
1174 u132_hcd_forget_urb(u132
, endp
, urb
, -ENODEV
);
1176 } else if (endp
->dequeueing
) {
1177 endp
->dequeueing
= 0;
1178 mutex_unlock(&u132
->scheduler_lock
);
1179 u132_hcd_giveback_urb(u132
, endp
, urb
, -EINTR
);
1181 } else if (u132
->going
> 0) {
1182 dev_err(&u132
->platform_dev
->dev
, "device is being removed "
1184 mutex_unlock(&u132
->scheduler_lock
);
1185 u132_hcd_giveback_urb(u132
, endp
, urb
, -ENODEV
);
/* normal completion */
1187 } else if (!urb
->unlinked
) {
1188 mutex_unlock(&u132
->scheduler_lock
);
1189 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
1192 dev_err(&u132
->platform_dev
->dev
, "CALLBACK called urb=%p "
1193 "unlinked=%d\n", urb
, urb
->unlinked
);
1194 mutex_unlock(&u132
->scheduler_lock
);
1195 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
/* Completion callback for the data (input) stage of the initial control
 * transfer: records the received length and queues the empty status stage.
 * NOTE(review): lossy extraction — lines 1229-1234 (presumably the copy of
 * buf into the transfer buffer) are missing; code kept verbatim. */
1200 static void u132_hcd_initial_input_recv(void *data
, struct urb
*urb
, u8
*buf
,
1201 int len
, int toggle_bits
, int error_count
, int condition_code
,
1202 int repeat_number
, int halted
, int skipped
, int actual
, int non_null
)
1204 struct u132_endp
*endp
= data
;
1205 struct u132
*u132
= endp
->u132
;
1206 u8 address
= u132
->addr
[endp
->usb_addr
].address
;
1207 mutex_lock(&u132
->scheduler_lock
);
1208 if (u132
->going
> 1) {
1209 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
1211 mutex_unlock(&u132
->scheduler_lock
);
1212 u132_hcd_forget_urb(u132
, endp
, urb
, -ENODEV
);
1214 } else if (endp
->dequeueing
) {
1215 endp
->dequeueing
= 0;
1216 mutex_unlock(&u132
->scheduler_lock
);
1217 u132_hcd_giveback_urb(u132
, endp
, urb
, -EINTR
);
1219 } else if (u132
->going
> 0) {
1220 dev_err(&u132
->platform_dev
->dev
, "device is being removed "
1222 mutex_unlock(&u132
->scheduler_lock
);
1223 u132_hcd_giveback_urb(u132
, endp
, urb
, -ENODEV
);
1225 } else if (!urb
->unlinked
) {
1227 struct u132_ring
*ring
= endp
->ring
;
1228 u8
*u
= urb
->transfer_buffer
;
1235 urb
->actual_length
= len
;
1236 mutex_unlock(&u132
->scheduler_lock
);
/* 0x3 toggle: status stage always uses DATA1 — TODO confirm */
1237 retval
= usb_ftdi_elan_edset_empty(u132
->platform_dev
,
1238 ring
->number
, endp
, urb
, address
, endp
->usb_endp
, 0x3,
1239 u132_hcd_initial_empty_sent
);
1241 u132_hcd_giveback_urb(u132
, endp
, urb
, retval
);
1244 dev_err(&u132
->platform_dev
->dev
, "CALLBACK called urb=%p "
1245 "unlinked=%d\n", urb
, urb
->unlinked
);
1246 mutex_unlock(&u132
->scheduler_lock
);
1247 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
/* Completion callback run after the setup packet of the initial control
 * transfer has been sent: queues the data (input) stage.
 * NOTE(review): lossy extraction — interior source lines missing; code kept
 * verbatim. */
1252 static void u132_hcd_initial_setup_sent(void *data
, struct urb
*urb
, u8
*buf
,
1253 int len
, int toggle_bits
, int error_count
, int condition_code
,
1254 int repeat_number
, int halted
, int skipped
, int actual
, int non_null
)
1256 struct u132_endp
*endp
= data
;
1257 struct u132
*u132
= endp
->u132
;
1258 u8 address
= u132
->addr
[endp
->usb_addr
].address
;
1259 mutex_lock(&u132
->scheduler_lock
);
1260 if (u132
->going
> 1) {
1261 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
1263 mutex_unlock(&u132
->scheduler_lock
);
1264 u132_hcd_forget_urb(u132
, endp
, urb
, -ENODEV
);
1266 } else if (endp
->dequeueing
) {
1267 endp
->dequeueing
= 0;
1268 mutex_unlock(&u132
->scheduler_lock
);
1269 u132_hcd_giveback_urb(u132
, endp
, urb
, -EINTR
);
1271 } else if (u132
->going
> 0) {
1272 dev_err(&u132
->platform_dev
->dev
, "device is being removed "
1274 mutex_unlock(&u132
->scheduler_lock
);
1275 u132_hcd_giveback_urb(u132
, endp
, urb
, -ENODEV
);
1277 } else if (!urb
->unlinked
) {
1279 struct u132_ring
*ring
= endp
->ring
;
1280 mutex_unlock(&u132
->scheduler_lock
);
1281 retval
= usb_ftdi_elan_edset_input(u132
->platform_dev
,
1282 ring
->number
, endp
, urb
, address
, endp
->usb_endp
, 0,
1283 u132_hcd_initial_input_recv
);
1285 u132_hcd_giveback_urb(u132
, endp
, urb
, retval
);
1288 dev_err(&u132
->platform_dev
->dev
, "CALLBACK called urb=%p "
1289 "unlinked=%d\n", urb
, urb
->unlinked
);
1290 mutex_unlock(&u132
->scheduler_lock
);
1291 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
/* NOTE(review): lossy extraction — interior source lines (conditions,
 * closing braces, wakeup updates) are missing; code kept verbatim. */
1297 * this work function is only executed from the work queue
/* Ring work item: walks the endpoints chained on this ring looking for one
 * with queued work whose delay has expired, hands it the ring
 * (ring->curr_endp) and requeues that endpoint's work; otherwise requeues
 * itself with the computed wakeup delay. */
1300 static void u132_hcd_ring_work_scheduler(struct work_struct
*work
)
1302 struct u132_ring
*ring
=
1303 container_of(work
, struct u132_ring
, scheduler
.work
);
1304 struct u132
*u132
= ring
->u132
;
1305 mutex_lock(&u132
->scheduler_lock
);
1307 mutex_unlock(&u132
->scheduler_lock
);
1308 u132_ring_put_kref(u132
, ring
);
1310 } else if (ring
->curr_endp
) {
1311 struct u132_endp
*last_endp
= ring
->curr_endp
;
1312 struct list_head
*scan
;
1313 struct list_head
*head
= &last_endp
->endp_ring
;
1314 unsigned long wakeup
= 0;
/* scan every other endpoint on this ring for runnable work */
1315 list_for_each(scan
, head
) {
1316 struct u132_endp
*endp
= list_entry(scan
,
1317 struct u132_endp
, endp_ring
);
1318 if (endp
->queue_next
== endp
->queue_last
) {
1319 } else if ((endp
->delayed
== 0)
1320 || time_after_eq(jiffies
, endp
->jiffies
)) {
1321 ring
->curr_endp
= endp
;
1322 u132_endp_cancel_work(u132
, last_endp
);
1323 u132_endp_queue_work(u132
, last_endp
, 0);
1324 mutex_unlock(&u132
->scheduler_lock
);
1325 u132_ring_put_kref(u132
, ring
);
1328 unsigned long delta
= endp
->jiffies
- jiffies
;
/* no other endpoint ready: check the current one */
1333 if (last_endp
->queue_next
== last_endp
->queue_last
) {
1334 } else if ((last_endp
->delayed
== 0) || time_after_eq(jiffies
,
1335 last_endp
->jiffies
)) {
1336 u132_endp_cancel_work(u132
, last_endp
);
1337 u132_endp_queue_work(u132
, last_endp
, 0);
1338 mutex_unlock(&u132
->scheduler_lock
);
1339 u132_ring_put_kref(u132
, ring
);
1342 unsigned long delta
= last_endp
->jiffies
- jiffies
;
1347 u132_ring_requeue_work(u132
, ring
, wakeup
);
1348 mutex_unlock(&u132
->scheduler_lock
);
1351 mutex_unlock(&u132
->scheduler_lock
);
1352 u132_ring_put_kref(u132
, ring
);
1356 mutex_unlock(&u132
->scheduler_lock
);
1357 u132_ring_put_kref(u132
, ring
);
/* Endpoint work item: dispatches the next queued URB on this endpoint to the
 * FTDI/ELAN adapter according to pipe type (interrupt / control / bulk),
 * choosing the completion callback chain per transfer phase.
 * NOTE(review): lossy extraction — many interior lines are missing (e.g.
 * 1369, the ring->in_use assignments, retval checks, braces); code kept
 * verbatim. */
1362 static void u132_hcd_endp_work_scheduler(struct work_struct
*work
)
1364 struct u132_ring
*ring
;
1365 struct u132_endp
*endp
=
1366 container_of(work
, struct u132_endp
, scheduler
.work
);
1367 struct u132
*u132
= endp
->u132
;
1368 mutex_lock(&u132
->scheduler_lock
);
/* a flush was requested before this work ran */
1370 if (endp
->edset_flush
) {
1371 endp
->edset_flush
= 0;
1372 if (endp
->dequeueing
)
1373 usb_ftdi_elan_edset_flush(u132
->platform_dev
,
1374 ring
->number
, endp
);
1375 mutex_unlock(&u132
->scheduler_lock
);
1376 u132_endp_put_kref(u132
, endp
);
1378 } else if (endp
->active
) {
1379 mutex_unlock(&u132
->scheduler_lock
);
1380 u132_endp_put_kref(u132
, endp
);
1382 } else if (ring
->in_use
) {
1383 mutex_unlock(&u132
->scheduler_lock
);
1384 u132_endp_put_kref(u132
, endp
);
1386 } else if (endp
->queue_next
== endp
->queue_last
) {
1387 mutex_unlock(&u132
->scheduler_lock
);
1388 u132_endp_put_kref(u132
, endp
);
/* interrupt pipe: single-shot edset transfer */
1390 } else if (endp
->pipetype
== PIPE_INTERRUPT
) {
1391 u8 address
= u132
->addr
[endp
->usb_addr
].address
;
1393 mutex_unlock(&u132
->scheduler_lock
);
1394 u132_endp_put_kref(u132
, endp
);
1398 struct urb
*urb
= endp
->urb_list
[ENDP_QUEUE_MASK
&
1401 ring
->curr_endp
= endp
;
1403 mutex_unlock(&u132
->scheduler_lock
);
1404 retval
= edset_single(u132
, ring
, endp
, urb
, address
,
1405 endp
->toggle_bits
, u132_hcd_interrupt_recv
);
1407 u132_hcd_giveback_urb(u132
, endp
, urb
, retval
);
/* control pipe: pick the setup-sent callback depending on whether the
 * device is still at address 0 (enumeration) or already configured */
1410 } else if (endp
->pipetype
== PIPE_CONTROL
) {
1411 u8 address
= u132
->addr
[endp
->usb_addr
].address
;
1413 mutex_unlock(&u132
->scheduler_lock
);
1414 u132_endp_put_kref(u132
, endp
);
1416 } else if (address
== 0) {
1418 struct urb
*urb
= endp
->urb_list
[ENDP_QUEUE_MASK
&
1421 ring
->curr_endp
= endp
;
1423 mutex_unlock(&u132
->scheduler_lock
);
1424 retval
= edset_setup(u132
, ring
, endp
, urb
, address
,
1425 0x2, u132_hcd_initial_setup_sent
);
1427 u132_hcd_giveback_urb(u132
, endp
, urb
, retval
);
1429 } else if (endp
->usb_addr
== 0) {
1431 struct urb
*urb
= endp
->urb_list
[ENDP_QUEUE_MASK
&
1434 ring
->curr_endp
= endp
;
1436 mutex_unlock(&u132
->scheduler_lock
);
1437 retval
= edset_setup(u132
, ring
, endp
, urb
, 0, 0x2,
1438 u132_hcd_enumeration_address_sent
);
1440 u132_hcd_giveback_urb(u132
, endp
, urb
, retval
);
1444 struct urb
*urb
= endp
->urb_list
[ENDP_QUEUE_MASK
&
1446 address
= u132
->addr
[endp
->usb_addr
].address
;
1448 ring
->curr_endp
= endp
;
1450 mutex_unlock(&u132
->scheduler_lock
);
1451 retval
= edset_setup(u132
, ring
, endp
, urb
, address
,
1452 0x2, u132_hcd_configure_setup_sent
);
1454 u132_hcd_giveback_urb(u132
, endp
, urb
, retval
);
/* presumably the bulk input branch — TODO confirm against full source */
1459 u8 address
= u132
->addr
[endp
->usb_addr
].address
;
1461 mutex_unlock(&u132
->scheduler_lock
);
1462 u132_endp_put_kref(u132
, endp
);
1466 struct urb
*urb
= endp
->urb_list
[
1467 ENDP_QUEUE_MASK
& endp
->queue_next
];
1469 ring
->curr_endp
= endp
;
1471 mutex_unlock(&u132
->scheduler_lock
);
1472 retval
= edset_input(u132
, ring
, endp
, urb
,
1473 address
, endp
->toggle_bits
,
1474 u132_hcd_bulk_input_recv
);
1477 u132_hcd_giveback_urb(u132
, endp
, urb
,
1481 } else { /* output pipe */
1482 u8 address
= u132
->addr
[endp
->usb_addr
].address
;
1484 mutex_unlock(&u132
->scheduler_lock
);
1485 u132_endp_put_kref(u132
, endp
);
1489 struct urb
*urb
= endp
->urb_list
[
1490 ENDP_QUEUE_MASK
& endp
->queue_next
];
1492 ring
->curr_endp
= endp
;
1494 mutex_unlock(&u132
->scheduler_lock
);
1495 retval
= edset_output(u132
, ring
, endp
, urb
,
1496 address
, endp
->toggle_bits
,
1497 u132_hcd_bulk_output_sent
);
1500 u132_hcd_giveback_urb(u132
, endp
, urb
,
/* Record the software power state of root-hub port pn.
 * NOTE(review): lossy extraction — surrounding braces/lines missing; code
 * kept verbatim. */
1509 static void port_power(struct u132
*u132
, int pn
, int is_on
)
1511 u132
->port
[pn
].power
= is_on
;
/* Switch the (software) power state of the adapter; when powered off the
 * hcd state becomes HC_STATE_HALT. NOTE(review): lossy extraction — interior
 * lines 1520-1525 missing; code kept verbatim. */
1516 static void u132_power(struct u132
*u132
, int is_on
)
1518 struct usb_hcd
*hcd
= u132_to_hcd(u132
)
1519 ; /* hub is inactive unless the port is powered */
1526 hcd
->state
= HC_STATE_HALT
;
/* Reprogram OHCI frame timing: rewrite fminterval (toggling FIT) and set
 * periodicstart to 90% of the frame interval, per the OHCI spec.
 * NOTE(review): lossy extraction — retval checks and return lines missing;
 * code kept verbatim. */
1530 static int u132_periodic_reinit(struct u132
*u132
)
1533 u32 fi
= u132
->hc_fminterval
& 0x03fff;
1536 retval
= u132_read_pcimem(u132
, fminterval
, &fminterval
);
1539 fit
= fminterval
& FIT
;
1540 retval
= u132_write_pcimem(u132
, fminterval
,
1541 (fit
^ FIT
) | u132
->hc_fminterval
);
1544 retval
= u132_write_pcimem(u132
, periodicstart
,
1545 ((9 * fi
) / 10) & 0x3fff);
/* Map an OHCI host-controller functional state to a printable name.
 * NOTE(review): lossy extraction — switch braces, default case and several
 * return strings missing; code kept verbatim. */
1551 static char *hcfs2string(int state
)
1554 case OHCI_USB_RESET
:
1556 case OHCI_USB_RESUME
:
1559 return "operational";
1560 case OHCI_USB_SUSPEND
:
/* One-time controller init: mask all OHCI interrupts and discover the
 * number of root-hub ports from roothub.a (capped at MAX_U132_PORTS).
 * NOTE(review): lossy extraction — retval checks and the over-limit branch
 * missing; code kept verbatim. */
1566 static int u132_init(struct u132
*u132
)
1571 u132
->next_statechange
= jiffies
;
1572 retval
= u132_write_pcimem(u132
, intrdisable
, OHCI_INTR_MIE
);
1575 retval
= u132_read_pcimem(u132
, control
, &control
);
1578 if (u132
->num_ports
== 0) {
1580 retval
= u132_read_pcimem(u132
, roothub
.a
, &rh_a
);
1583 u132
->num_ports
= rh_a
& RH_A_NDP
;
1584 retval
= read_roothub_info(u132
);
1588 if (u132
->num_ports
> MAX_U132_PORTS
)
1595 /* Start an OHCI controller, set the BUS operational
1596 * resets USB and controller
/* Bring the OHCI controller to the operational state: save frame timing,
 * resume or reset depending on the current HCFS state, perform the HC reset
 * with a bounded retry (reset_timeout), reinitialize list heads and frame
 * timing, enable interrupts, then apply root-hub power/quirk handling.
 * NOTE(review): lossy extraction — very many interior lines (retval checks,
 * waits, braces, remaining switch cases) are missing; code kept verbatim. */
1599 static int u132_run(struct u132
*u132
)
1608 int mask
= OHCI_INTR_INIT
;
1609 int first
= u132
->hc_fminterval
== 0;
1611 int reset_timeout
= 30; /* ... allow extra time */
1615 retval
= u132_read_pcimem(u132
, fminterval
, &temp
);
1618 u132
->hc_fminterval
= temp
& 0x3fff;
1619 u132
->hc_fminterval
|= FSMP(u132
->hc_fminterval
) << 16;
1621 retval
= u132_read_pcimem(u132
, control
, &u132
->hc_control
);
1624 dev_info(&u132
->platform_dev
->dev
, "resetting from state '%s', control "
1625 "= %08X\n", hcfs2string(u132
->hc_control
& OHCI_CTRL_HCFS
),
1627 switch (u132
->hc_control
& OHCI_CTRL_HCFS
) {
1631 case OHCI_USB_SUSPEND
:
1632 case OHCI_USB_RESUME
:
1633 u132
->hc_control
&= OHCI_CTRL_RWC
;
1634 u132
->hc_control
|= OHCI_USB_RESUME
;
1638 u132
->hc_control
&= OHCI_CTRL_RWC
;
1639 u132
->hc_control
|= OHCI_USB_RESET
;
1643 retval
= u132_write_pcimem(u132
, control
, u132
->hc_control
);
1646 retval
= u132_read_pcimem(u132
, control
, &control
);
1650 retval
= u132_read_pcimem(u132
, roothub
.a
, &roothub_a
);
/* ports without per-port power switching are powered down first */
1653 if (!(roothub_a
& RH_A_NPS
)) {
1654 int temp
; /* power down each port */
1655 for (temp
= 0; temp
< u132
->num_ports
; temp
++) {
1656 retval
= u132_write_pcimem(u132
,
1657 roothub
.portstatus
[temp
], RH_PS_LSDA
);
1662 retval
= u132_read_pcimem(u132
, control
, &control
);
1666 retval
= u132_read_pcimem(u132
, cmdstatus
, &status
);
/* issue host-controller reset and poll until the HCR bit clears */
1669 retval
= u132_write_pcimem(u132
, cmdstatus
, OHCI_HCR
);
1673 retval
= u132_read_pcimem(u132
, cmdstatus
, &status
);
1676 if (0 != (status
& OHCI_HCR
)) {
1677 if (--reset_timeout
== 0) {
1678 dev_err(&u132
->platform_dev
->dev
, "USB HC reset"
1687 if (u132
->flags
& OHCI_QUIRK_INITRESET
) {
1688 retval
= u132_write_pcimem(u132
, control
, u132
->hc_control
);
1691 retval
= u132_read_pcimem(u132
, control
, &control
);
1695 retval
= u132_write_pcimem(u132
, ed_controlhead
, 0x00000000);
1698 retval
= u132_write_pcimem(u132
, ed_bulkhead
, 0x11000000);
1701 retval
= u132_write_pcimem(u132
, hcca
, 0x00000000);
1704 retval
= u132_periodic_reinit(u132
);
1707 retval
= u132_read_pcimem(u132
, fminterval
, &fminterval
);
1710 retval
= u132_read_pcimem(u132
, periodicstart
, &periodicstart
);
/* bad frame timing after reset: retry once with the INITRESET quirk */
1713 if (0 == (fminterval
& 0x3fff0000) || 0 == periodicstart
) {
1714 if (!(u132
->flags
& OHCI_QUIRK_INITRESET
)) {
1715 u132
->flags
|= OHCI_QUIRK_INITRESET
;
1718 dev_err(&u132
->platform_dev
->dev
, "init err(%08x %04x)"
1719 "\n", fminterval
, periodicstart
);
1720 } /* start controller operations */
1721 u132
->hc_control
&= OHCI_CTRL_RWC
;
1722 u132
->hc_control
|= OHCI_CONTROL_INIT
| OHCI_CTRL_BLE
| OHCI_USB_OPER
;
1723 retval
= u132_write_pcimem(u132
, control
, u132
->hc_control
);
1726 retval
= u132_write_pcimem(u132
, cmdstatus
, OHCI_BLF
);
1729 retval
= u132_read_pcimem(u132
, cmdstatus
, &cmdstatus
);
1732 retval
= u132_read_pcimem(u132
, control
, &control
);
1735 u132_to_hcd(u132
)->state
= HC_STATE_RUNNING
;
1736 retval
= u132_write_pcimem(u132
, roothub
.status
, RH_HS_DRWE
);
1739 retval
= u132_write_pcimem(u132
, intrstatus
, mask
);
1742 retval
= u132_write_pcimem(u132
, intrdisable
,
1743 OHCI_INTR_MIE
| OHCI_INTR_OC
| OHCI_INTR_RHSC
| OHCI_INTR_FNO
|
1744 OHCI_INTR_UE
| OHCI_INTR_RD
| OHCI_INTR_SF
| OHCI_INTR_WDH
|
1747 return retval
; /* handle root hub init quirks ... */
1748 retval
= u132_read_pcimem(u132
, roothub
.a
, &roothub_a
);
1751 roothub_a
&= ~(RH_A_PSM
| RH_A_OCPM
);
1752 if (u132
->flags
& OHCI_QUIRK_SUPERIO
) {
1753 roothub_a
|= RH_A_NOCP
;
1754 roothub_a
&= ~(RH_A_POTPGT
| RH_A_NPS
);
1755 retval
= u132_write_pcimem(u132
, roothub
.a
, roothub_a
);
1758 } else if ((u132
->flags
& OHCI_QUIRK_AMD756
) || distrust_firmware
) {
1759 roothub_a
|= RH_A_NPS
;
1760 retval
= u132_write_pcimem(u132
, roothub
.a
, roothub_a
);
1764 retval
= u132_write_pcimem(u132
, roothub
.status
, RH_HS_LPSC
);
1767 retval
= u132_write_pcimem(u132
, roothub
.b
,
1768 (roothub_a
& RH_A_NPS
) ? 0 : RH_B_PPCM
);
1771 retval
= u132_read_pcimem(u132
, control
, &control
);
/* POTPGT (upper bits of roothub.a) gives power-on-to-good time in 2ms units */
1774 mdelay((roothub_a
>> 23) & 0x1fe);
1775 u132_to_hcd(u132
)->state
= HC_STATE_RUNNING
;
/* hc_driver .stop: power the adapter down under sw_lock unless the device
 * is already gone / being removed. NOTE(review): lossy extraction —
 * interior lines missing; code kept verbatim. */
1779 static void u132_hcd_stop(struct usb_hcd
*hcd
)
1781 struct u132
*u132
= hcd_to_u132(hcd
);
1782 if (u132
->going
> 1) {
1783 dev_err(&u132
->platform_dev
->dev
, "u132 device %p(hcd=%p) has b"
1784 "een removed %d\n", u132
, hcd
, u132
->going
);
1785 } else if (u132
->going
> 0) {
1786 dev_err(&u132
->platform_dev
->dev
, "device hcd=%p is being remov"
1789 mutex_lock(&u132
->sw_lock
);
1791 u132_power(u132
, 0);
1792 mutex_unlock(&u132
->sw_lock
);
/* hc_driver .start: read the bridged PCI vendor/device from the platform
 * data, apply per-chip OHCI quirks (AMD756 / OPTi / Compaq ZFMicro), then
 * run the controller under sw_lock. NOTE(review): lossy extraction —
 * retval handling and return lines missing; code kept verbatim. */
1796 static int u132_hcd_start(struct usb_hcd
*hcd
)
1798 struct u132
*u132
= hcd_to_u132(hcd
);
1799 if (u132
->going
> 1) {
1800 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
1803 } else if (u132
->going
> 0) {
1804 dev_err(&u132
->platform_dev
->dev
, "device is being removed\n");
1806 } else if (hcd
->self
.controller
) {
1808 struct platform_device
*pdev
=
1809 to_platform_device(hcd
->self
.controller
);
1810 u16 vendor
= ((struct u132_platform_data
*)
1811 (pdev
->dev
.platform_data
))->vendor
;
1812 u16 device
= ((struct u132_platform_data
*)
1813 (pdev
->dev
.platform_data
))->device
;
1814 mutex_lock(&u132
->sw_lock
);
1816 if (vendor
== PCI_VENDOR_ID_AMD
&& device
== 0x740c) {
1817 u132
->flags
= OHCI_QUIRK_AMD756
;
1818 } else if (vendor
== PCI_VENDOR_ID_OPTI
&& device
== 0xc861) {
1819 dev_err(&u132
->platform_dev
->dev
, "WARNING: OPTi workar"
1820 "ounds unavailable\n");
1821 } else if (vendor
== PCI_VENDOR_ID_COMPAQ
&& device
== 0xa0f8)
1822 u132
->flags
|= OHCI_QUIRK_ZFMICRO
;
1823 retval
= u132_run(u132
);
1829 mutex_unlock(&u132
->sw_lock
);
1832 dev_err(&u132
->platform_dev
->dev
, "platform_device missing\n");
/* hc_driver .reset: run u132_init() under sw_lock unless the device is
 * gone / being removed. NOTE(review): lossy extraction — interior lines
 * missing; code kept verbatim. */
1837 static int u132_hcd_reset(struct usb_hcd
*hcd
)
1839 struct u132
*u132
= hcd_to_u132(hcd
);
1840 if (u132
->going
> 1) {
1841 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
1844 } else if (u132
->going
> 0) {
1845 dev_err(&u132
->platform_dev
->dev
, "device is being removed\n");
1849 mutex_lock(&u132
->sw_lock
);
1850 retval
= u132_init(u132
);
1855 mutex_unlock(&u132
->sw_lock
);
/* Allocate and initialize a new interrupt endpoint structure, link the URB
 * to it, attach it to ring[0], record it in the udev in/out tables, and
 * schedule its work after the URB's interval.
 * NOTE(review): lossy extraction — NULL-check after kmalloc, rc handling and
 * several assignments missing; code kept verbatim. */
1860 static int create_endpoint_and_queue_int(struct u132
*u132
,
1861 struct u132_udev
*udev
, struct urb
*urb
,
1862 struct usb_device
*usb_dev
, u8 usb_addr
, u8 usb_endp
, u8 address
,
1865 struct u132_ring
*ring
;
1869 struct u132_endp
*endp
= kmalloc(sizeof(struct u132_endp
), mem_flags
);
1874 spin_lock_init(&endp
->queue_lock
.slock
);
1875 spin_lock_irqsave(&endp
->queue_lock
.slock
, irqs
);
1876 rc
= usb_hcd_link_urb_to_ep(u132_to_hcd(u132
), urb
);
1878 spin_unlock_irqrestore(&endp
->queue_lock
.slock
, irqs
);
1883 endp_number
= ++u132
->num_endpoints
;
1884 urb
->ep
->hcpriv
= u132
->endp
[endp_number
- 1] = endp
;
1885 INIT_DELAYED_WORK(&endp
->scheduler
, u132_hcd_endp_work_scheduler
);
1886 INIT_LIST_HEAD(&endp
->urb_more
);
/* interrupt endpoints always share ring 0 */
1887 ring
= endp
->ring
= &u132
->ring
[0];
1888 if (ring
->curr_endp
) {
1889 list_add_tail(&endp
->endp_ring
, &ring
->curr_endp
->endp_ring
);
1891 INIT_LIST_HEAD(&endp
->endp_ring
);
1892 ring
->curr_endp
= endp
;
1895 endp
->dequeueing
= 0;
1896 endp
->edset_flush
= 0;
1899 endp
->endp_number
= endp_number
;
1901 endp
->hep
= urb
->ep
;
1902 endp
->pipetype
= usb_pipetype(urb
->pipe
);
1903 u132_endp_init_kref(u132
, endp
);
1904 if (usb_pipein(urb
->pipe
)) {
1905 endp
->toggle_bits
= 0x2;
1906 usb_settoggle(udev
->usb_device
, usb_endp
, 0, 0);
1909 udev
->endp_number_in
[usb_endp
] = endp_number
;
1910 u132_udev_get_kref(u132
, udev
);
1912 endp
->toggle_bits
= 0x2;
1913 usb_settoggle(udev
->usb_device
, usb_endp
, 1, 0);
1916 udev
->endp_number_out
[usb_endp
] = endp_number
;
1917 u132_udev_get_kref(u132
, udev
);
1921 endp
->jiffies
= jiffies
+ msecs_to_jiffies(urb
->interval
);
1922 endp
->udev_number
= address
;
1923 endp
->usb_addr
= usb_addr
;
1924 endp
->usb_endp
= usb_endp
;
1925 endp
->queue_size
= 1;
1926 endp
->queue_last
= 0;
1927 endp
->queue_next
= 0;
1928 endp
->urb_list
[ENDP_QUEUE_MASK
& endp
->queue_last
++] = urb
;
1929 spin_unlock_irqrestore(&endp
->queue_lock
.slock
, irqs
);
1930 u132_endp_queue_work(u132
, endp
, msecs_to_jiffies(urb
->interval
));
/* Queue an interrupt URB on an endpoint that already exists: into the fixed
 * ring buffer if there is room, otherwise onto the urb_more overflow list.
 * NOTE(review): lossy extraction — kmalloc failure branch and returns
 * missing; code kept verbatim. */
1934 static int queue_int_on_old_endpoint(struct u132
*u132
,
1935 struct u132_udev
*udev
, struct urb
*urb
,
1936 struct usb_device
*usb_dev
, struct u132_endp
*endp
, u8 usb_addr
,
1937 u8 usb_endp
, u8 address
)
1941 endp
->jiffies
= jiffies
+ msecs_to_jiffies(urb
->interval
);
1942 if (endp
->queue_size
++ < ENDP_QUEUE_SIZE
) {
1943 endp
->urb_list
[ENDP_QUEUE_MASK
& endp
->queue_last
++] = urb
;
1945 struct u132_urbq
*urbq
= kmalloc(sizeof(struct u132_urbq
),
1948 endp
->queue_size
-= 1;
1951 list_add_tail(&urbq
->urb_more
, &endp
->urb_more
);
/* Allocate and initialize a new bulk endpoint, link the URB, attach it to
 * a ring selected by ring_number (computed in the missing lines), record it
 * in the udev tables and queue its work immediately.
 * NOTE(review): lossy extraction — kmalloc NULL-check, rc handling and the
 * ring_number computation are missing; code kept verbatim. */
1958 static int create_endpoint_and_queue_bulk(struct u132
*u132
,
1959 struct u132_udev
*udev
, struct urb
*urb
,
1960 struct usb_device
*usb_dev
, u8 usb_addr
, u8 usb_endp
, u8 address
,
1964 struct u132_ring
*ring
;
1968 struct u132_endp
*endp
= kmalloc(sizeof(struct u132_endp
), mem_flags
);
1973 spin_lock_init(&endp
->queue_lock
.slock
);
1974 spin_lock_irqsave(&endp
->queue_lock
.slock
, irqs
);
1975 rc
= usb_hcd_link_urb_to_ep(u132_to_hcd(u132
), urb
);
1977 spin_unlock_irqrestore(&endp
->queue_lock
.slock
, irqs
);
1982 endp_number
= ++u132
->num_endpoints
;
1983 urb
->ep
->hcpriv
= u132
->endp
[endp_number
- 1] = endp
;
1984 INIT_DELAYED_WORK(&endp
->scheduler
, u132_hcd_endp_work_scheduler
);
1985 INIT_LIST_HEAD(&endp
->urb_more
);
1986 endp
->dequeueing
= 0;
1987 endp
->edset_flush
= 0;
1990 endp
->endp_number
= endp_number
;
1992 endp
->hep
= urb
->ep
;
1993 endp
->pipetype
= usb_pipetype(urb
->pipe
);
1994 u132_endp_init_kref(u132
, endp
);
1995 if (usb_pipein(urb
->pipe
)) {
1996 endp
->toggle_bits
= 0x2;
1997 usb_settoggle(udev
->usb_device
, usb_endp
, 0, 0);
2001 udev
->endp_number_in
[usb_endp
] = endp_number
;
2002 u132_udev_get_kref(u132
, udev
);
2004 endp
->toggle_bits
= 0x2;
2005 usb_settoggle(udev
->usb_device
, usb_endp
, 1, 0);
2009 udev
->endp_number_out
[usb_endp
] = endp_number
;
2010 u132_udev_get_kref(u132
, udev
);
2012 ring
= endp
->ring
= &u132
->ring
[ring_number
- 1];
2013 if (ring
->curr_endp
) {
2014 list_add_tail(&endp
->endp_ring
, &ring
->curr_endp
->endp_ring
);
2016 INIT_LIST_HEAD(&endp
->endp_ring
);
2017 ring
->curr_endp
= endp
;
2021 endp
->udev_number
= address
;
2022 endp
->usb_addr
= usb_addr
;
2023 endp
->usb_endp
= usb_endp
;
2024 endp
->queue_size
= 1;
2025 endp
->queue_last
= 0;
2026 endp
->queue_next
= 0;
2027 endp
->urb_list
[ENDP_QUEUE_MASK
& endp
->queue_last
++] = urb
;
2028 spin_unlock_irqrestore(&endp
->queue_lock
.slock
, irqs
);
2029 u132_endp_queue_work(u132
, endp
, 0);
/* Queue a bulk URB on an existing endpoint: ring buffer if room, otherwise
 * the urb_more overflow list. NOTE(review): lossy extraction — kmalloc
 * failure branch and returns missing; code kept verbatim. */
2033 static int queue_bulk_on_old_endpoint(struct u132
*u132
, struct u132_udev
*udev
,
2035 struct usb_device
*usb_dev
, struct u132_endp
*endp
, u8 usb_addr
,
2036 u8 usb_endp
, u8 address
)
2039 if (endp
->queue_size
++ < ENDP_QUEUE_SIZE
) {
2040 endp
->urb_list
[ENDP_QUEUE_MASK
& endp
->queue_last
++] = urb
;
2042 struct u132_urbq
*urbq
= kmalloc(sizeof(struct u132_urbq
),
2045 endp
->queue_size
-= 1;
2048 list_add_tail(&urbq
->urb_more
, &endp
->urb_more
);
/* Allocate and initialize a new control endpoint on ring[0]; the usb_addr==0
 * branch handles the default (pre-enumeration) address, the else branch an
 * already-addressed device (udev->enumeration = 2).
 * NOTE(review): lossy extraction — kmalloc NULL-check, rc handling and some
 * assignments missing; code kept verbatim. */
2055 static int create_endpoint_and_queue_control(struct u132
*u132
,
2057 struct usb_device
*usb_dev
, u8 usb_addr
, u8 usb_endp
,
2060 struct u132_ring
*ring
;
2064 struct u132_endp
*endp
= kmalloc(sizeof(struct u132_endp
), mem_flags
);
2069 spin_lock_init(&endp
->queue_lock
.slock
);
2070 spin_lock_irqsave(&endp
->queue_lock
.slock
, irqs
);
2071 rc
= usb_hcd_link_urb_to_ep(u132_to_hcd(u132
), urb
);
2073 spin_unlock_irqrestore(&endp
->queue_lock
.slock
, irqs
);
2078 endp_number
= ++u132
->num_endpoints
;
2079 urb
->ep
->hcpriv
= u132
->endp
[endp_number
- 1] = endp
;
2080 INIT_DELAYED_WORK(&endp
->scheduler
, u132_hcd_endp_work_scheduler
);
2081 INIT_LIST_HEAD(&endp
->urb_more
);
2082 ring
= endp
->ring
= &u132
->ring
[0];
2083 if (ring
->curr_endp
) {
2084 list_add_tail(&endp
->endp_ring
, &ring
->curr_endp
->endp_ring
);
2086 INIT_LIST_HEAD(&endp
->endp_ring
);
2087 ring
->curr_endp
= endp
;
2090 endp
->dequeueing
= 0;
2091 endp
->edset_flush
= 0;
2094 endp
->endp_number
= endp_number
;
2096 endp
->hep
= urb
->ep
;
2097 u132_endp_init_kref(u132
, endp
);
2098 u132_endp_get_kref(u132
, endp
);
/* default address: device not yet enumerated */
2099 if (usb_addr
== 0) {
2100 u8 address
= u132
->addr
[usb_addr
].address
;
2101 struct u132_udev
*udev
= &u132
->udev
[address
];
2102 endp
->udev_number
= address
;
2103 endp
->usb_addr
= usb_addr
;
2104 endp
->usb_endp
= usb_endp
;
2107 endp
->pipetype
= usb_pipetype(urb
->pipe
);
2108 u132_udev_init_kref(u132
, udev
);
2109 u132_udev_get_kref(u132
, udev
);
2110 udev
->endp_number_in
[usb_endp
] = endp_number
;
2111 udev
->endp_number_out
[usb_endp
] = endp_number
;
2113 endp
->queue_size
= 1;
2114 endp
->queue_last
= 0;
2115 endp
->queue_next
= 0;
2116 endp
->urb_list
[ENDP_QUEUE_MASK
& endp
->queue_last
++] = urb
;
2117 spin_unlock_irqrestore(&endp
->queue_lock
.slock
, irqs
);
2118 u132_endp_queue_work(u132
, endp
, 0);
2120 } else { /*(usb_addr > 0) */
2121 u8 address
= u132
->addr
[usb_addr
].address
;
2122 struct u132_udev
*udev
= &u132
->udev
[address
];
2123 endp
->udev_number
= address
;
2124 endp
->usb_addr
= usb_addr
;
2125 endp
->usb_endp
= usb_endp
;
2128 endp
->pipetype
= usb_pipetype(urb
->pipe
);
2129 u132_udev_get_kref(u132
, udev
);
2130 udev
->enumeration
= 2;
2131 udev
->endp_number_in
[usb_endp
] = endp_number
;
2132 udev
->endp_number_out
[usb_endp
] = endp_number
;
2134 endp
->queue_size
= 1;
2135 endp
->queue_last
= 0;
2136 endp
->queue_next
= 0;
2137 endp
->urb_list
[ENDP_QUEUE_MASK
& endp
->queue_last
++] = urb
;
2138 spin_unlock_irqrestore(&endp
->queue_lock
.slock
, irqs
);
2139 u132_endp_queue_work(u132
, endp
, 0);
/* Queue a control URB on an existing endpoint. For address 0 OUT transfers
 * (SET_ADDRESS during enumeration) it also claims a free udev slot, assigns
 * the internal device number and patches the setup packet's address byte.
 * NOTE(review): lossy extraction — loop/brace lines, returns and kmalloc
 * failure branches missing; code kept verbatim. */
2144 static int queue_control_on_old_endpoint(struct u132
*u132
,
2146 struct usb_device
*usb_dev
, struct u132_endp
*endp
, u8 usb_addr
,
2149 if (usb_addr
== 0) {
2150 if (usb_pipein(urb
->pipe
)) {
2152 if (endp
->queue_size
++ < ENDP_QUEUE_SIZE
) {
2153 endp
->urb_list
[ENDP_QUEUE_MASK
&
2154 endp
->queue_last
++] = urb
;
2156 struct u132_urbq
*urbq
=
2157 kmalloc(sizeof(struct u132_urbq
),
2160 endp
->queue_size
-= 1;
2163 list_add_tail(&urbq
->urb_more
,
2169 } else { /* usb_pipeout(urb->pipe) */
2170 struct u132_addr
*addr
= &u132
->addr
[usb_dev
->devnum
];
2171 int I
= MAX_U132_UDEVS
;
/* search for an unused udev slot */
2174 struct u132_udev
*udev
= &u132
->udev
[++i
];
2175 if (udev
->usb_device
) {
2178 udev
->enumeration
= 1;
2179 u132
->addr
[0].address
= i
;
2180 endp
->udev_number
= i
;
2181 udev
->udev_number
= i
;
2182 udev
->usb_addr
= usb_dev
->devnum
;
2183 u132_udev_init_kref(u132
, udev
);
2184 udev
->endp_number_in
[usb_endp
] =
2186 u132_udev_get_kref(u132
, udev
);
2187 udev
->endp_number_out
[usb_endp
] =
2189 udev
->usb_device
= usb_dev
;
/* byte 2 of the SET_ADDRESS setup packet is the new address */
2190 ((u8
*) (urb
->setup_packet
))[2] =
2192 u132_udev_get_kref(u132
, udev
);
2197 dev_err(&u132
->platform_dev
->dev
, "run out of d"
2202 if (endp
->queue_size
++ < ENDP_QUEUE_SIZE
) {
2203 endp
->urb_list
[ENDP_QUEUE_MASK
&
2204 endp
->queue_last
++] = urb
;
2206 struct u132_urbq
*urbq
=
2207 kmalloc(sizeof(struct u132_urbq
),
2210 endp
->queue_size
-= 1;
2213 list_add_tail(&urbq
->urb_more
,
2220 } else { /*(usb_addr > 0) */
2221 u8 address
= u132
->addr
[usb_addr
].address
;
2222 struct u132_udev
*udev
= &u132
->udev
[address
];
2224 if (udev
->enumeration
!= 2)
2225 udev
->enumeration
= 2;
2226 if (endp
->queue_size
++ < ENDP_QUEUE_SIZE
) {
2227 endp
->urb_list
[ENDP_QUEUE_MASK
& endp
->queue_last
++] =
2230 struct u132_urbq
*urbq
=
2231 kmalloc(sizeof(struct u132_urbq
), GFP_ATOMIC
);
2233 endp
->queue_size
-= 1;
2236 list_add_tail(&urbq
->urb_more
, &endp
->urb_more
);
/* hc_driver .urb_enqueue: validates context and adapter state, then routes
 * the URB by pipe type — interrupt and bulk reuse an existing endpoint or
 * create one; isochronous is rejected; control builds a hex dump of the
 * setup packet (debug) before queueing.
 * NOTE(review): lossy extraction — many interior lines (returns, retval
 * checks, braces, parts of the dump loop) are missing; code kept verbatim. */
2244 static int u132_urb_enqueue(struct usb_hcd
*hcd
, struct urb
*urb
,
2247 struct u132
*u132
= hcd_to_u132(hcd
);
/* sleeping allocation with IRQs off would deadlock */
2248 if (irqs_disabled()) {
2249 if (__GFP_WAIT
& mem_flags
) {
2250 printk(KERN_ERR
"invalid context for function that migh"
2255 if (u132
->going
> 1) {
2256 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
2259 } else if (u132
->going
> 0) {
2260 dev_err(&u132
->platform_dev
->dev
, "device is being removed "
2264 u8 usb_addr
= usb_pipedevice(urb
->pipe
);
2265 u8 usb_endp
= usb_pipeendpoint(urb
->pipe
);
2266 struct usb_device
*usb_dev
= urb
->dev
;
2267 if (usb_pipetype(urb
->pipe
) == PIPE_INTERRUPT
) {
2268 u8 address
= u132
->addr
[usb_addr
].address
;
2269 struct u132_udev
*udev
= &u132
->udev
[address
];
2270 struct u132_endp
*endp
= urb
->ep
->hcpriv
;
2271 urb
->actual_length
= 0;
2275 spin_lock_irqsave(&endp
->queue_lock
.slock
,
2277 retval
= usb_hcd_link_urb_to_ep(hcd
, urb
);
2279 retval
= queue_int_on_old_endpoint(
2285 usb_hcd_unlink_urb_from_ep(
2288 spin_unlock_irqrestore(&endp
->queue_lock
.slock
,
2293 u132_endp_queue_work(u132
, endp
,
2294 msecs_to_jiffies(urb
->interval
))
2298 } else if (u132
->num_endpoints
== MAX_U132_ENDPS
) {
2300 } else { /*(endp == NULL) */
2301 return create_endpoint_and_queue_int(u132
, udev
,
2302 urb
, usb_dev
, usb_addr
,
2303 usb_endp
, address
, mem_flags
);
2305 } else if (usb_pipetype(urb
->pipe
) == PIPE_ISOCHRONOUS
) {
2306 dev_err(&u132
->platform_dev
->dev
, "the hardware does no"
2307 "t support PIPE_ISOCHRONOUS\n");
2309 } else if (usb_pipetype(urb
->pipe
) == PIPE_BULK
) {
2310 u8 address
= u132
->addr
[usb_addr
].address
;
2311 struct u132_udev
*udev
= &u132
->udev
[address
];
2312 struct u132_endp
*endp
= urb
->ep
->hcpriv
;
2313 urb
->actual_length
= 0;
2317 spin_lock_irqsave(&endp
->queue_lock
.slock
,
2319 retval
= usb_hcd_link_urb_to_ep(hcd
, urb
);
2321 retval
= queue_bulk_on_old_endpoint(
2327 usb_hcd_unlink_urb_from_ep(
2330 spin_unlock_irqrestore(&endp
->queue_lock
.slock
,
2335 u132_endp_queue_work(u132
, endp
, 0);
2338 } else if (u132
->num_endpoints
== MAX_U132_ENDPS
) {
2341 return create_endpoint_and_queue_bulk(u132
,
2342 udev
, urb
, usb_dev
, usb_addr
,
2343 usb_endp
, address
, mem_flags
);
/* control pipe: build a truncated hex dump of the setup packet */
2345 struct u132_endp
*endp
= urb
->ep
->hcpriv
;
2347 u8
*b
= urb
->setup_packet
;
2349 char data
[30 * 3 + 4];
2351 int m
= (sizeof(data
) - 1) / 3;
2354 while (urb_size
-- > 0) {
2356 } else if (i
++ < m
) {
2357 int w
= sprintf(d
, " %02X", *b
++);
2361 d
+= sprintf(d
, " ..");
2366 spin_lock_irqsave(&endp
->queue_lock
.slock
,
2368 retval
= usb_hcd_link_urb_to_ep(hcd
, urb
);
2370 retval
= queue_control_on_old_endpoint(
2375 usb_hcd_unlink_urb_from_ep(
2378 spin_unlock_irqrestore(&endp
->queue_lock
.slock
,
2383 u132_endp_queue_work(u132
, endp
, 0);
2386 } else if (u132
->num_endpoints
== MAX_U132_ENDPS
) {
2389 return create_endpoint_and_queue_control(u132
,
2390 urb
, usb_dev
, usb_addr
, usb_endp
,
2396 static int dequeue_from_overflow_chain(struct u132
*u132
,
2397 struct u132_endp
*endp
, struct urb
*urb
)
2399 struct list_head
*scan
;
2400 struct list_head
*head
= &endp
->urb_more
;
2401 list_for_each(scan
, head
) {
2402 struct u132_urbq
*urbq
= list_entry(scan
, struct u132_urbq
,
2404 if (urbq
->urb
== urb
) {
2405 struct usb_hcd
*hcd
= u132_to_hcd(u132
);
2407 endp
->queue_size
-= 1;
2408 urb
->error_count
= 0;
2409 usb_hcd_giveback_urb(hcd
, urb
, 0);
2414 dev_err(&u132
->platform_dev
->dev
, "urb=%p not found in endp[%d]=%p ring"
2415 "[%d] %c%c usb_endp=%d usb_addr=%d size=%d next=%04X last=%04X"
2416 "\n", urb
, endp
->endp_number
, endp
, endp
->ring
->number
,
2417 endp
->input
? 'I' : ' ', endp
->output
? 'O' : ' ',
2418 endp
->usb_endp
, endp
->usb_addr
, endp
->queue_size
,
2419 endp
->queue_next
, endp
->queue_last
);
2423 static int u132_endp_urb_dequeue(struct u132
*u132
, struct u132_endp
*endp
,
2424 struct urb
*urb
, int status
)
2429 spin_lock_irqsave(&endp
->queue_lock
.slock
, irqs
);
2430 rc
= usb_hcd_check_unlink_urb(u132_to_hcd(u132
), urb
, status
);
2432 spin_unlock_irqrestore(&endp
->queue_lock
.slock
, irqs
);
2435 if (endp
->queue_size
== 0) {
2436 dev_err(&u132
->platform_dev
->dev
, "urb=%p not found in endp[%d]"
2437 "=%p ring[%d] %c%c usb_endp=%d usb_addr=%d\n", urb
,
2438 endp
->endp_number
, endp
, endp
->ring
->number
,
2439 endp
->input
? 'I' : ' ', endp
->output
? 'O' : ' ',
2440 endp
->usb_endp
, endp
->usb_addr
);
2441 spin_unlock_irqrestore(&endp
->queue_lock
.slock
, irqs
);
2444 if (urb
== endp
->urb_list
[ENDP_QUEUE_MASK
& endp
->queue_next
]) {
2446 endp
->dequeueing
= 1;
2447 endp
->edset_flush
= 1;
2448 u132_endp_queue_work(u132
, endp
, 0);
2449 spin_unlock_irqrestore(&endp
->queue_lock
.slock
, irqs
);
2452 spin_unlock_irqrestore(&endp
->queue_lock
.slock
, irqs
);
2453 u132_hcd_abandon_urb(u132
, endp
, urb
, status
);
2458 u16 queue_size
= endp
->queue_size
;
2459 u16 queue_scan
= endp
->queue_next
;
2460 struct urb
**urb_slot
= NULL
;
2461 while (++queue_list
< ENDP_QUEUE_SIZE
&& --queue_size
> 0) {
2462 if (urb
== endp
->urb_list
[ENDP_QUEUE_MASK
&
2464 urb_slot
= &endp
->urb_list
[ENDP_QUEUE_MASK
&
2470 while (++queue_list
< ENDP_QUEUE_SIZE
&& --queue_size
> 0) {
2471 *urb_slot
= endp
->urb_list
[ENDP_QUEUE_MASK
&
2473 urb_slot
= &endp
->urb_list
[ENDP_QUEUE_MASK
&
2477 struct usb_hcd
*hcd
= u132_to_hcd(u132
);
2479 usb_hcd_unlink_urb_from_ep(hcd
, urb
);
2480 endp
->queue_size
-= 1;
2481 if (list_empty(&endp
->urb_more
)) {
2482 spin_unlock_irqrestore(&endp
->queue_lock
.slock
,
2485 struct list_head
*next
= endp
->urb_more
.next
;
2486 struct u132_urbq
*urbq
= list_entry(next
,
2487 struct u132_urbq
, urb_more
);
2489 *urb_slot
= urbq
->urb
;
2490 spin_unlock_irqrestore(&endp
->queue_lock
.slock
,
2493 } urb
->error_count
= 0;
2494 usb_hcd_giveback_urb(hcd
, urb
, status
);
2496 } else if (list_empty(&endp
->urb_more
)) {
2497 dev_err(&u132
->platform_dev
->dev
, "urb=%p not found in "
2498 "endp[%d]=%p ring[%d] %c%c usb_endp=%d usb_addr"
2499 "=%d size=%d next=%04X last=%04X\n", urb
,
2500 endp
->endp_number
, endp
, endp
->ring
->number
,
2501 endp
->input
? 'I' : ' ',
2502 endp
->output
? 'O' : ' ', endp
->usb_endp
,
2503 endp
->usb_addr
, endp
->queue_size
,
2504 endp
->queue_next
, endp
->queue_last
);
2505 spin_unlock_irqrestore(&endp
->queue_lock
.slock
, irqs
);
2510 usb_hcd_unlink_urb_from_ep(u132_to_hcd(u132
), urb
);
2511 retval
= dequeue_from_overflow_chain(u132
, endp
,
2513 spin_unlock_irqrestore(&endp
->queue_lock
.slock
, irqs
);
2519 static int u132_urb_dequeue(struct usb_hcd
*hcd
, struct urb
*urb
, int status
)
2521 struct u132
*u132
= hcd_to_u132(hcd
);
2522 if (u132
->going
> 2) {
2523 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
2527 u8 usb_addr
= usb_pipedevice(urb
->pipe
);
2528 u8 usb_endp
= usb_pipeendpoint(urb
->pipe
);
2529 u8 address
= u132
->addr
[usb_addr
].address
;
2530 struct u132_udev
*udev
= &u132
->udev
[address
];
2531 if (usb_pipein(urb
->pipe
)) {
2532 u8 endp_number
= udev
->endp_number_in
[usb_endp
];
2533 struct u132_endp
*endp
= u132
->endp
[endp_number
- 1];
2534 return u132_endp_urb_dequeue(u132
, endp
, urb
, status
);
2536 u8 endp_number
= udev
->endp_number_out
[usb_endp
];
2537 struct u132_endp
*endp
= u132
->endp
[endp_number
- 1];
2538 return u132_endp_urb_dequeue(u132
, endp
, urb
, status
);
2543 static void u132_endpoint_disable(struct usb_hcd
*hcd
,
2544 struct usb_host_endpoint
*hep
)
2546 struct u132
*u132
= hcd_to_u132(hcd
);
2547 if (u132
->going
> 2) {
2548 dev_err(&u132
->platform_dev
->dev
, "u132 device %p(hcd=%p hep=%p"
2549 ") has been removed %d\n", u132
, hcd
, hep
,
2552 struct u132_endp
*endp
= hep
->hcpriv
;
2554 u132_endp_put_kref(u132
, endp
);
2558 static int u132_get_frame(struct usb_hcd
*hcd
)
2560 struct u132
*u132
= hcd_to_u132(hcd
);
2561 if (u132
->going
> 1) {
2562 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
2565 } else if (u132
->going
> 0) {
2566 dev_err(&u132
->platform_dev
->dev
, "device is being removed\n");
2570 dev_err(&u132
->platform_dev
->dev
, "TODO: u132_get_frame\n");
2576 static int u132_roothub_descriptor(struct u132
*u132
,
2577 struct usb_hub_descriptor
*desc
)
2583 retval
= u132_read_pcimem(u132
, roothub
.a
, &rh_a
);
2586 desc
->bDescriptorType
= 0x29;
2587 desc
->bPwrOn2PwrGood
= (rh_a
& RH_A_POTPGT
) >> 24;
2588 desc
->bHubContrCurrent
= 0;
2589 desc
->bNbrPorts
= u132
->num_ports
;
2590 temp
= 1 + (u132
->num_ports
/ 8);
2591 desc
->bDescLength
= 7 + 2 * temp
;
2593 if (rh_a
& RH_A_NPS
)
2595 if (rh_a
& RH_A_PSM
)
2597 if (rh_a
& RH_A_NOCP
)
2599 else if (rh_a
& RH_A_OCPM
)
2601 desc
->wHubCharacteristics
= cpu_to_le16(temp
);
2602 retval
= u132_read_pcimem(u132
, roothub
.b
, &rh_b
);
2605 memset(desc
->bitmap
, 0xff, sizeof(desc
->bitmap
));
2606 desc
->bitmap
[0] = rh_b
& RH_B_DR
;
2607 if (u132
->num_ports
> 7) {
2608 desc
->bitmap
[1] = (rh_b
& RH_B_DR
) >> 8;
2609 desc
->bitmap
[2] = 0xff;
2611 desc
->bitmap
[1] = 0xff;
2615 static int u132_roothub_status(struct u132
*u132
, __le32
*desc
)
2618 int ret_status
= u132_read_pcimem(u132
, roothub
.status
, &rh_status
);
2619 *desc
= cpu_to_le32(rh_status
);
2623 static int u132_roothub_portstatus(struct u132
*u132
, __le32
*desc
, u16 wIndex
)
2625 if (wIndex
== 0 || wIndex
> u132
->num_ports
) {
2628 int port
= wIndex
- 1;
2629 u32 rh_portstatus
= -1;
2630 int ret_portstatus
= u132_read_pcimem(u132
,
2631 roothub
.portstatus
[port
], &rh_portstatus
);
2632 *desc
= cpu_to_le32(rh_portstatus
);
2633 if (*(u16
*) (desc
+ 2)) {
2634 dev_info(&u132
->platform_dev
->dev
, "Port %d Status Chan"
2635 "ge = %08X\n", port
, *desc
);
2637 return ret_portstatus
;
2642 /* this timer value might be vendor-specific ... */
2643 #define PORT_RESET_HW_MSEC 10
2644 #define PORT_RESET_MSEC 10
2645 /* wrap-aware logic morphed from <linux/jiffies.h> */
2646 #define tick_before(t1, t2) ((s16)(((s16)(t1))-((s16)(t2))) < 0)
2647 static int u132_roothub_portreset(struct u132
*u132
, int port_index
)
2653 retval
= u132_read_pcimem(u132
, fmnumber
, &fmnumber
);
2657 reset_done
= now
+ PORT_RESET_MSEC
;
2661 retval
= u132_read_pcimem(u132
,
2662 roothub
.portstatus
[port_index
], &portstat
);
2665 if (RH_PS_PRS
& portstat
)
2669 } while (tick_before(now
, reset_done
));
2670 if (RH_PS_PRS
& portstat
)
2672 if (RH_PS_CCS
& portstat
) {
2673 if (RH_PS_PRSC
& portstat
) {
2674 retval
= u132_write_pcimem(u132
,
2675 roothub
.portstatus
[port_index
],
2681 break; /* start the next reset,
2682 sleep till it's probably done */
2683 retval
= u132_write_pcimem(u132
, roothub
.portstatus
[port_index
],
2687 msleep(PORT_RESET_HW_MSEC
);
2688 retval
= u132_read_pcimem(u132
, fmnumber
, &fmnumber
);
2692 } while (tick_before(now
, reset_done
));
2696 static int u132_roothub_setportfeature(struct u132
*u132
, u16 wValue
,
2699 if (wIndex
== 0 || wIndex
> u132
->num_ports
) {
2703 int port_index
= wIndex
- 1;
2704 struct u132_port
*port
= &u132
->port
[port_index
];
2705 port
->Status
&= ~(1 << wValue
);
2707 case USB_PORT_FEAT_SUSPEND
:
2708 retval
= u132_write_pcimem(u132
,
2709 roothub
.portstatus
[port_index
], RH_PS_PSS
);
2713 case USB_PORT_FEAT_POWER
:
2714 retval
= u132_write_pcimem(u132
,
2715 roothub
.portstatus
[port_index
], RH_PS_PPS
);
2719 case USB_PORT_FEAT_RESET
:
2720 retval
= u132_roothub_portreset(u132
, port_index
);
2730 static int u132_roothub_clearportfeature(struct u132
*u132
, u16 wValue
,
2733 if (wIndex
== 0 || wIndex
> u132
->num_ports
) {
2736 int port_index
= wIndex
- 1;
2739 struct u132_port
*port
= &u132
->port
[port_index
];
2740 port
->Status
&= ~(1 << wValue
);
2742 case USB_PORT_FEAT_ENABLE
:
2745 case USB_PORT_FEAT_C_ENABLE
:
2748 case USB_PORT_FEAT_SUSPEND
:
2750 if ((u132
->hc_control
& OHCI_CTRL_HCFS
)
2752 dev_err(&u132
->platform_dev
->dev
, "TODO resume_"
2756 case USB_PORT_FEAT_C_SUSPEND
:
2759 case USB_PORT_FEAT_POWER
:
2762 case USB_PORT_FEAT_C_CONNECTION
:
2765 case USB_PORT_FEAT_C_OVER_CURRENT
:
2768 case USB_PORT_FEAT_C_RESET
:
2774 retval
= u132_write_pcimem(u132
, roothub
.portstatus
[port_index
],
2783 /* the virtual root hub timer IRQ checks for hub status*/
2784 static int u132_hub_status_data(struct usb_hcd
*hcd
, char *buf
)
2786 struct u132
*u132
= hcd_to_u132(hcd
);
2787 if (u132
->going
> 1) {
2788 dev_err(&u132
->platform_dev
->dev
, "device hcd=%p has been remov"
2789 "ed %d\n", hcd
, u132
->going
);
2791 } else if (u132
->going
> 0) {
2792 dev_err(&u132
->platform_dev
->dev
, "device hcd=%p is being remov"
2796 int i
, changed
= 0, length
= 1;
2797 if (u132
->flags
& OHCI_QUIRK_AMD756
) {
2798 if ((u132
->hc_roothub_a
& RH_A_NDP
) > MAX_ROOT_PORTS
) {
2799 dev_err(&u132
->platform_dev
->dev
, "bogus NDP, r"
2800 "ereads as NDP=%d\n",
2801 u132
->hc_roothub_a
& RH_A_NDP
);
2805 if (u132
->hc_roothub_status
& (RH_HS_LPSC
| RH_HS_OCIC
))
2806 buf
[0] = changed
= 1;
2809 if (u132
->num_ports
> 7) {
2813 for (i
= 0; i
< u132
->num_ports
; i
++) {
2814 if (u132
->hc_roothub_portstatus
[i
] & (RH_PS_CSC
|
2815 RH_PS_PESC
| RH_PS_PSSC
| RH_PS_OCIC
|
2819 buf
[0] |= 1 << (i
+ 1);
2821 buf
[1] |= 1 << (i
- 7);
2824 if (!(u132
->hc_roothub_portstatus
[i
] & RH_PS_CCS
))
2827 if ((u132
->hc_roothub_portstatus
[i
] & RH_PS_PSS
))
2831 return changed
? length
: 0;
2835 static int u132_hub_control(struct usb_hcd
*hcd
, u16 typeReq
, u16 wValue
,
2836 u16 wIndex
, char *buf
, u16 wLength
)
2838 struct u132
*u132
= hcd_to_u132(hcd
);
2839 if (u132
->going
> 1) {
2840 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
2843 } else if (u132
->going
> 0) {
2844 dev_err(&u132
->platform_dev
->dev
, "device is being removed\n");
2848 mutex_lock(&u132
->sw_lock
);
2850 case ClearHubFeature
:
2852 case C_HUB_OVER_CURRENT
:
2853 case C_HUB_LOCAL_POWER
:
2861 case C_HUB_OVER_CURRENT
:
2862 case C_HUB_LOCAL_POWER
:
2868 case ClearPortFeature
:{
2869 retval
= u132_roothub_clearportfeature(u132
,
2875 case GetHubDescriptor
:{
2876 retval
= u132_roothub_descriptor(u132
,
2877 (struct usb_hub_descriptor
*)buf
);
2883 retval
= u132_roothub_status(u132
,
2889 case GetPortStatus
:{
2890 retval
= u132_roothub_portstatus(u132
,
2891 (__le32
*) buf
, wIndex
);
2896 case SetPortFeature
:{
2897 retval
= u132_roothub_setportfeature(u132
,
2913 mutex_unlock(&u132
->sw_lock
);
2918 static int u132_start_port_reset(struct usb_hcd
*hcd
, unsigned port_num
)
2920 struct u132
*u132
= hcd_to_u132(hcd
);
2921 if (u132
->going
> 1) {
2922 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
2925 } else if (u132
->going
> 0) {
2926 dev_err(&u132
->platform_dev
->dev
, "device is being removed\n");
2934 static int u132_bus_suspend(struct usb_hcd
*hcd
)
2936 struct u132
*u132
= hcd_to_u132(hcd
);
2937 if (u132
->going
> 1) {
2938 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
2941 } else if (u132
->going
> 0) {
2942 dev_err(&u132
->platform_dev
->dev
, "device is being removed\n");
2948 static int u132_bus_resume(struct usb_hcd
*hcd
)
2950 struct u132
*u132
= hcd_to_u132(hcd
);
2951 if (u132
->going
> 1) {
2952 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
2955 } else if (u132
->going
> 0) {
2956 dev_err(&u132
->platform_dev
->dev
, "device is being removed\n");
2963 #define u132_bus_suspend NULL
2964 #define u132_bus_resume NULL
2966 static struct hc_driver u132_hc_driver
= {
2967 .description
= hcd_name
,
2968 .hcd_priv_size
= sizeof(struct u132
),
2970 .flags
= HCD_USB11
| HCD_MEMORY
,
2971 .reset
= u132_hcd_reset
,
2972 .start
= u132_hcd_start
,
2973 .stop
= u132_hcd_stop
,
2974 .urb_enqueue
= u132_urb_enqueue
,
2975 .urb_dequeue
= u132_urb_dequeue
,
2976 .endpoint_disable
= u132_endpoint_disable
,
2977 .get_frame_number
= u132_get_frame
,
2978 .hub_status_data
= u132_hub_status_data
,
2979 .hub_control
= u132_hub_control
,
2980 .bus_suspend
= u132_bus_suspend
,
2981 .bus_resume
= u132_bus_resume
,
2982 .start_port_reset
= u132_start_port_reset
,
2986 * This function may be called by the USB core whilst the "usb_all_devices_rwsem"
2987 * is held for writing, thus this module must not call usb_remove_hcd()
2988 * synchronously - but instead should immediately stop activity to the
2989 * device and asynchronously call usb_remove_hcd()
2991 static int __devexit
u132_remove(struct platform_device
*pdev
)
2993 struct usb_hcd
*hcd
= platform_get_drvdata(pdev
);
2995 struct u132
*u132
= hcd_to_u132(hcd
);
2996 if (u132
->going
++ > 1) {
2997 dev_err(&u132
->platform_dev
->dev
, "already being remove"
3001 int rings
= MAX_U132_RINGS
;
3002 int endps
= MAX_U132_ENDPS
;
3003 dev_err(&u132
->platform_dev
->dev
, "removing device u132"
3004 ".%d\n", u132
->sequence_num
);
3006 mutex_lock(&u132
->sw_lock
);
3007 u132_monitor_cancel_work(u132
);
3008 while (rings
-- > 0) {
3009 struct u132_ring
*ring
= &u132
->ring
[rings
];
3010 u132_ring_cancel_work(u132
, ring
);
3011 } while (endps
-- > 0) {
3012 struct u132_endp
*endp
= u132
->endp
[endps
];
3014 u132_endp_cancel_work(u132
, endp
);
3017 printk(KERN_INFO
"removing device u132.%d\n",
3018 u132
->sequence_num
);
3019 mutex_unlock(&u132
->sw_lock
);
3020 usb_remove_hcd(hcd
);
3021 u132_u132_put_kref(u132
);
3028 static void u132_initialise(struct u132
*u132
, struct platform_device
*pdev
)
3030 int rings
= MAX_U132_RINGS
;
3031 int ports
= MAX_U132_PORTS
;
3032 int addrs
= MAX_U132_ADDRS
;
3033 int udevs
= MAX_U132_UDEVS
;
3034 int endps
= MAX_U132_ENDPS
;
3035 u132
->board
= pdev
->dev
.platform_data
;
3036 u132
->platform_dev
= pdev
;
3039 mutex_init(&u132
->sw_lock
);
3040 mutex_init(&u132
->scheduler_lock
);
3041 while (rings
-- > 0) {
3042 struct u132_ring
*ring
= &u132
->ring
[rings
];
3044 ring
->number
= rings
+ 1;
3046 ring
->curr_endp
= NULL
;
3047 INIT_DELAYED_WORK(&ring
->scheduler
,
3048 u132_hcd_ring_work_scheduler
);
3050 mutex_lock(&u132
->sw_lock
);
3051 INIT_DELAYED_WORK(&u132
->monitor
, u132_hcd_monitor_work
);
3052 while (ports
-- > 0) {
3053 struct u132_port
*port
= &u132
->port
[ports
];
3060 while (addrs
-- > 0) {
3061 struct u132_addr
*addr
= &u132
->addr
[addrs
];
3064 while (udevs
-- > 0) {
3065 struct u132_udev
*udev
= &u132
->udev
[udevs
];
3066 int i
= ARRAY_SIZE(udev
->endp_number_in
);
3067 int o
= ARRAY_SIZE(udev
->endp_number_out
);
3068 udev
->usb_device
= NULL
;
3069 udev
->udev_number
= 0;
3071 udev
->portnumber
= 0;
3073 udev
->endp_number_in
[i
] = 0;
3076 udev
->endp_number_out
[o
] = 0;
3080 u132
->endp
[endps
] = NULL
;
3082 mutex_unlock(&u132
->sw_lock
);
3086 static int __devinit
u132_probe(struct platform_device
*pdev
)
3088 struct usb_hcd
*hcd
;
3095 if (u132_exiting
> 0)
3098 retval
= ftdi_write_pcimem(pdev
, intrdisable
, OHCI_INTR_MIE
);
3101 retval
= ftdi_read_pcimem(pdev
, control
, &control
);
3104 retval
= ftdi_read_pcimem(pdev
, roothub
.a
, &rh_a
);
3107 num_ports
= rh_a
& RH_A_NDP
; /* refuse to confuse usbcore */
3108 if (pdev
->dev
.dma_mask
)
3111 hcd
= usb_create_hcd(&u132_hc_driver
, &pdev
->dev
, dev_name(&pdev
->dev
));
3113 printk(KERN_ERR
"failed to create the usb hcd struct for U132\n"
3115 ftdi_elan_gone_away(pdev
);
3118 struct u132
*u132
= hcd_to_u132(hcd
);
3120 hcd
->rsrc_start
= 0;
3121 mutex_lock(&u132_module_lock
);
3122 list_add_tail(&u132
->u132_list
, &u132_static_list
);
3123 u132
->sequence_num
= ++u132_instances
;
3124 mutex_unlock(&u132_module_lock
);
3125 u132_u132_init_kref(u132
);
3126 u132_initialise(u132
, pdev
);
3127 hcd
->product_desc
= "ELAN U132 Host Controller";
3128 retval
= usb_add_hcd(hcd
, 0, 0);
3130 dev_err(&u132
->platform_dev
->dev
, "init error %d\n",
3132 u132_u132_put_kref(u132
);
3135 u132_monitor_queue_work(u132
, 100);
3143 /* for this device there's no useful distinction between the controller
3144 * and its root hub, except that the root hub only gets direct PM calls
3145 * when CONFIG_USB_SUSPEND is enabled.
3147 static int u132_suspend(struct platform_device
*pdev
, pm_message_t state
)
3149 struct usb_hcd
*hcd
= platform_get_drvdata(pdev
);
3150 struct u132
*u132
= hcd_to_u132(hcd
);
3151 if (u132
->going
> 1) {
3152 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
3155 } else if (u132
->going
> 0) {
3156 dev_err(&u132
->platform_dev
->dev
, "device is being removed\n");
3159 int retval
= 0, ports
;
3161 switch (state
.event
) {
3162 case PM_EVENT_FREEZE
:
3163 retval
= u132_bus_suspend(hcd
);
3165 case PM_EVENT_SUSPEND
:
3166 case PM_EVENT_HIBERNATE
:
3167 ports
= MAX_U132_PORTS
;
3168 while (ports
-- > 0) {
3169 port_power(u132
, ports
, 0);
3177 static int u132_resume(struct platform_device
*pdev
)
3179 struct usb_hcd
*hcd
= platform_get_drvdata(pdev
);
3180 struct u132
*u132
= hcd_to_u132(hcd
);
3181 if (u132
->going
> 1) {
3182 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
3185 } else if (u132
->going
> 0) {
3186 dev_err(&u132
->platform_dev
->dev
, "device is being removed\n");
3190 if (!u132
->port
[0].power
) {
3191 int ports
= MAX_U132_PORTS
;
3192 while (ports
-- > 0) {
3193 port_power(u132
, ports
, 1);
3197 retval
= u132_bus_resume(hcd
);
3204 #define u132_suspend NULL
3205 #define u132_resume NULL
3208 * this driver is loaded explicitly by ftdi_u132
3210 * the platform_driver struct is static because it is per type of module
3212 static struct platform_driver u132_platform_driver
= {
3213 .probe
= u132_probe
,
3214 .remove
= __devexit_p(u132_remove
),
3215 .suspend
= u132_suspend
,
3216 .resume
= u132_resume
,
3218 .name
= (char *)hcd_name
,
3219 .owner
= THIS_MODULE
,
3222 static int __init
u132_hcd_init(void)
3225 INIT_LIST_HEAD(&u132_static_list
);
3228 mutex_init(&u132_module_lock
);
3231 printk(KERN_INFO
"driver %s built at %s on %s\n", hcd_name
, __TIME__
,
3233 workqueue
= create_singlethread_workqueue("u132");
3234 retval
= platform_driver_register(&u132_platform_driver
);
3239 module_init(u132_hcd_init
);
3240 static void __exit
u132_hcd_exit(void)
3244 mutex_lock(&u132_module_lock
);
3246 mutex_unlock(&u132_module_lock
);
3247 list_for_each_entry_safe(u132
, temp
, &u132_static_list
, u132_list
) {
3248 platform_device_unregister(u132
->platform_dev
);
3250 platform_driver_unregister(&u132_platform_driver
);
3251 printk(KERN_INFO
"u132-hcd driver deregistered\n");
3252 wait_event(u132_hcd_wait
, u132_instances
== 0);
3253 flush_workqueue(workqueue
);
3254 destroy_workqueue(workqueue
);
3258 module_exit(u132_hcd_exit
);
3259 MODULE_LICENSE("GPL");
3260 MODULE_ALIAS("platform:u132_hcd");