/*
 * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/usb.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/slab.h>
#include <linux/usb/cdc.h>

#include "gdm_mux.h"

static struct workqueue_struct *mux_rx_wq;

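/* On-wire MUX packet type carried for each TTY index (see packet_type_to_index()) */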
static u16 packet_type[TTY_MAX_COUNT] = {0xF011, 0xF010};

#define USB_DEVICE_CDC_DATA(vid, pid) \
        .match_flags = USB_DEVICE_ID_MATCH_DEVICE |\
                USB_DEVICE_ID_MATCH_INT_CLASS |\
                USB_DEVICE_ID_MATCH_INT_SUBCLASS,\
        .idVendor = vid,\
        .idProduct = pid,\
        .bInterfaceClass = USB_CLASS_COMM,\
        .bInterfaceSubClass = USB_CDC_SUBCLASS_ACM

static const struct usb_device_id id_table[] = {
        { USB_DEVICE_CDC_DATA(0x1076, 0x8000) }, /* GCT GDM7240 */
        { USB_DEVICE_CDC_DATA(0x1076, 0x8f00) }, /* GCT GDM7243 */
        { USB_DEVICE_CDC_DATA(0x1076, 0x9000) }, /* GCT GDM7243 */
        { USB_DEVICE_CDC_DATA(0x1d74, 0x2300) }, /* LGIT Phoenix */
        {}
};

MODULE_DEVICE_TABLE(usb, id_table);

static int packet_type_to_index(u16 packetType)
{
        int i;

        for (i = 0; i < TTY_MAX_COUNT; i++) {
                if (packet_type[i] == packetType)
                        return i;
        }

        return -1;
}

static struct mux_tx *alloc_mux_tx(int len)
{
        struct mux_tx *t = NULL;

        t = kzalloc(sizeof(struct mux_tx), GFP_ATOMIC);
        if (!t)
                return NULL;

        t->urb = usb_alloc_urb(0, GFP_ATOMIC);
        t->buf = kmalloc(MUX_TX_MAX_SIZE, GFP_ATOMIC);
        if (!t->urb || !t->buf) {
                usb_free_urb(t->urb);
                kfree(t->buf);
                kfree(t);
                return NULL;
        }

        return t;
}

static void free_mux_tx(struct mux_tx *t)
{
        if (t) {
                usb_free_urb(t->urb);
                kfree(t->buf);
                kfree(t);
        }
}

static struct mux_rx *alloc_mux_rx(void)
{
        struct mux_rx *r = NULL;

        r = kzalloc(sizeof(struct mux_rx), GFP_KERNEL);
        if (!r)
                return NULL;

        r->urb = usb_alloc_urb(0, GFP_KERNEL);
        r->buf = kmalloc(MUX_RX_MAX_SIZE, GFP_KERNEL);
        if (!r->urb || !r->buf) {
                usb_free_urb(r->urb);
                kfree(r->buf);
                kfree(r);
                return NULL;
        }

        return r;
}

static void free_mux_rx(struct mux_rx *r)
{
        if (r) {
                usb_free_urb(r->urb);
                kfree(r->buf);
                kfree(r);
        }
}

static struct mux_rx *get_rx_struct(struct rx_cxt *rx)
{
        struct mux_rx *r;
        unsigned long flags;

        spin_lock_irqsave(&rx->free_list_lock, flags);

        if (list_empty(&rx->rx_free_list)) {
                spin_unlock_irqrestore(&rx->free_list_lock, flags);
                return NULL;
        }

        r = list_entry(rx->rx_free_list.prev, struct mux_rx, free_list);
        list_del(&r->free_list);

        spin_unlock_irqrestore(&rx->free_list_lock, flags);

        return r;
}

static void put_rx_struct(struct rx_cxt *rx, struct mux_rx *r)
{
        unsigned long flags;

        spin_lock_irqsave(&rx->free_list_lock, flags);
        list_add_tail(&r->free_list, &rx->rx_free_list);
        spin_unlock_irqrestore(&rx->free_list_lock, flags);
}

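/*
 * Parse the MUX packets in a completed receive buffer and hand each payload
 * to the TTY layer callback for the matching TTY index.
 */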
static int up_to_host(struct mux_rx *r)
{
        struct mux_dev *mux_dev = (struct mux_dev *)r->mux_dev;
        struct mux_pkt_header *mux_header;
        unsigned int start_flag;
        unsigned int payload_size;
        unsigned short packet_type;
        int remain;
        int dummy_cnt;
        u32 packet_size_sum = r->offset;
        int index;
        int ret = TO_HOST_INVALID_PACKET;
        int len = r->len;

        while (1) {
                mux_header = (struct mux_pkt_header *)(r->buf + packet_size_sum);
                start_flag = __le32_to_cpu(mux_header->start_flag);
                payload_size = __le32_to_cpu(mux_header->payload_size);
                packet_type = __le16_to_cpu(mux_header->packet_type);

                if (start_flag != START_FLAG) {
                        pr_err("invalid START_FLAG %x\n", start_flag);
                        break;
                }

                /* payloads are padded to a 4-byte boundary on the wire */
                remain = (MUX_HEADER_SIZE + payload_size) % 4;
                dummy_cnt = remain ? (4 - remain) : 0;

                if (len - packet_size_sum <
                    MUX_HEADER_SIZE + payload_size + dummy_cnt) {
                        pr_err("invalid payload : %d %d %04x\n",
                               payload_size, len, packet_type);
                        break;
                }

                index = packet_type_to_index(packet_type);
                if (index < 0) {
                        pr_err("invalid index %d\n", index);
                        break;
                }

                ret = r->callback(mux_header->data,
                                  payload_size,
                                  index,
                                  mux_dev->tty_dev,
                                  RECV_PACKET_PROCESS_CONTINUE);
                if (ret == TO_HOST_BUFFER_REQUEST_FAIL) {
                        r->offset += packet_size_sum;
                        break;
                }

                packet_size_sum += MUX_HEADER_SIZE + payload_size + dummy_cnt;
                if (len - packet_size_sum <= MUX_HEADER_SIZE + 2) {
                        ret = r->callback(NULL,
                                          0,
                                          index,
                                          mux_dev->tty_dev,
                                          RECV_PACKET_PROCESS_COMPLETE);
                        break;
                }
        }

        return ret;
}

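/* Receive work handler: drain to_host_list and push each buffer up to the host */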
static void do_rx(struct work_struct *work)
{
        struct mux_dev *mux_dev =
                container_of(work, struct mux_dev, work_rx.work);
        struct mux_rx *r;
        struct rx_cxt *rx = (struct rx_cxt *)&mux_dev->rx;
        unsigned long flags;
        int ret = 0;

        while (1) {
                spin_lock_irqsave(&rx->to_host_lock, flags);
                if (list_empty(&rx->to_host_list)) {
                        spin_unlock_irqrestore(&rx->to_host_lock, flags);
                        break;
                }
                r = list_entry(rx->to_host_list.next, struct mux_rx,
                               to_host_list);
                list_del(&r->to_host_list);
                spin_unlock_irqrestore(&rx->to_host_lock, flags);

                ret = up_to_host(r);
                if (ret == TO_HOST_BUFFER_REQUEST_FAIL)
                        pr_err("failed to send mux data to host\n");
                else
                        put_rx_struct(rx, r);
        }
}

static void remove_rx_submit_list(struct mux_rx *r, struct rx_cxt *rx)
{
        unsigned long flags;
        struct mux_rx *r_remove, *r_remove_next;

        spin_lock_irqsave(&rx->submit_list_lock, flags);
        list_for_each_entry_safe(r_remove, r_remove_next,
                                 &rx->rx_submit_list, rx_submit_list) {
                if (r == r_remove)
                        list_del(&r->rx_submit_list);
        }
        spin_unlock_irqrestore(&rx->submit_list_lock, flags);
}

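/* Bulk-in completion: queue the buffer for do_rx(), or recycle it on error */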
static void gdm_mux_rcv_complete(struct urb *urb)
{
        struct mux_rx *r = urb->context;
        struct mux_dev *mux_dev = (struct mux_dev *)r->mux_dev;
        struct rx_cxt *rx = &mux_dev->rx;
        unsigned long flags;

        remove_rx_submit_list(r, rx);

        if (urb->status) {
                if (mux_dev->usb_state == PM_NORMAL)
                        pr_err("%s: urb status error %d\n",
                               __func__, urb->status);
                put_rx_struct(rx, r);
        } else {
                r->len = r->urb->actual_length;
                spin_lock_irqsave(&rx->to_host_lock, flags);
                list_add_tail(&r->to_host_list, &rx->to_host_list);
                queue_work(mux_rx_wq, &mux_dev->work_rx.work);
                spin_unlock_irqrestore(&rx->to_host_lock, flags);
        }
}

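/*
 * Submit a bulk-in URB on the data-in endpoint (0x86) and track it on the
 * submit list so it can be killed on suspend or disconnect.
 */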
static int gdm_mux_recv(void *priv_dev,
                        int (*cb)(void *data, int len, int tty_index,
                                  struct tty_dev *tty_dev, int complete))
{
        struct mux_dev *mux_dev = priv_dev;
        struct usb_device *usbdev = mux_dev->usbdev;
        struct mux_rx *r;
        struct rx_cxt *rx = &mux_dev->rx;
        unsigned long flags;
        int ret;

        if (!usbdev) {
                pr_err("device is disconnected\n");
                return -ENODEV;
        }

        r = get_rx_struct(rx);
        if (!r) {
                pr_err("get_rx_struct fail\n");
                return -ENOMEM;
        }

        r->offset = 0;
        r->mux_dev = (void *)mux_dev;
        r->callback = cb;
        mux_dev->rx_cb = cb;

        usb_fill_bulk_urb(r->urb,
                          usbdev,
                          usb_rcvbulkpipe(usbdev, 0x86),
                          r->buf,
                          MUX_RX_MAX_SIZE,
                          gdm_mux_rcv_complete,
                          r);

        spin_lock_irqsave(&rx->submit_list_lock, flags);
        list_add_tail(&r->rx_submit_list, &rx->rx_submit_list);
        spin_unlock_irqrestore(&rx->submit_list_lock, flags);

        ret = usb_submit_urb(r->urb, GFP_KERNEL);

        if (ret) {
                spin_lock_irqsave(&rx->submit_list_lock, flags);
                list_del(&r->rx_submit_list);
                spin_unlock_irqrestore(&rx->submit_list_lock, flags);

                put_rx_struct(rx, r);

                pr_err("usb_submit_urb ret=%d\n", ret);
        }

        usb_mark_last_busy(usbdev);

        return ret;
}

static void gdm_mux_send_complete(struct urb *urb)
{
        struct mux_tx *t = urb->context;

        if (urb->status == -ECONNRESET) {
                pr_info("CONNRESET\n");
                free_mux_tx(t);
                return;
        }

        if (t->callback)
                t->callback(t->cb_data);

        free_mux_tx(t);
}

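/*
 * Build one MUX packet (header plus payload padded to a 4-byte boundary)
 * and submit it as a bulk-out URB on endpoint 5.
 */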
static int gdm_mux_send(void *priv_dev, void *data, int len, int tty_index,
                        void (*cb)(void *data), void *cb_data)
{
        struct mux_dev *mux_dev = priv_dev;
        struct usb_device *usbdev = mux_dev->usbdev;
        struct mux_pkt_header *mux_header;
        struct mux_tx *t = NULL;
        static u32 seq_num = 1;
        int total_len;
        int ret;
        unsigned long flags;
        int remain;
        int dummy_cnt;

        if (mux_dev->usb_state == PM_SUSPEND) {
                ret = usb_autopm_get_interface(mux_dev->intf);
                if (!ret)
                        usb_autopm_put_interface(mux_dev->intf);
        }

        spin_lock_irqsave(&mux_dev->write_lock, flags);

        remain = (MUX_HEADER_SIZE + len) % 4;
        dummy_cnt = remain ? (4 - remain) : 0;

        total_len = len + MUX_HEADER_SIZE + dummy_cnt;

        t = alloc_mux_tx(total_len);
        if (!t) {
                pr_err("alloc_mux_tx fail\n");
                spin_unlock_irqrestore(&mux_dev->write_lock, flags);
                return -ENOMEM;
        }

        mux_header = (struct mux_pkt_header *)t->buf;
        mux_header->start_flag = __cpu_to_le32(START_FLAG);
        mux_header->seq_num = __cpu_to_le32(seq_num++);
        mux_header->payload_size = __cpu_to_le32((u32)len);
        mux_header->packet_type = __cpu_to_le16(packet_type[tty_index]);

        memcpy(t->buf + MUX_HEADER_SIZE, data, len);
        memset(t->buf + MUX_HEADER_SIZE + len, 0, dummy_cnt);

        t->len = total_len;
        t->callback = cb;
        t->cb_data = cb_data;

        usb_fill_bulk_urb(t->urb,
                          usbdev,
                          usb_sndbulkpipe(usbdev, 5),
                          t->buf,
                          total_len,
                          gdm_mux_send_complete,
                          t);

        ret = usb_submit_urb(t->urb, GFP_ATOMIC);

        spin_unlock_irqrestore(&mux_dev->write_lock, flags);

        if (ret)
                pr_err("usb_submit_urb Error: %d\n", ret);

        usb_mark_last_busy(usbdev);

        return ret;
}

static int gdm_mux_send_control(void *priv_dev, int request, int value,
                                void *buf, int len)
{
        struct mux_dev *mux_dev = priv_dev;
        struct usb_device *usbdev = mux_dev->usbdev;
        int ret;

        /* class request directed at the ACM communications interface */
        ret = usb_control_msg(usbdev,
                              usb_sndctrlpipe(usbdev, 0),
                              request,
                              USB_RT_ACM,
                              value,
                              2,
                              buf,
                              len,
                              5000);

        if (ret < 0)
                pr_err("usb_control_msg error: %d\n", ret);

        return ret < 0 ? ret : 0;
}

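/* Kill outstanding receive URBs and free every mux_rx owned by this device */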
static void release_usb(struct mux_dev *mux_dev)
{
        struct rx_cxt *rx = &mux_dev->rx;
        struct mux_rx *r, *r_next;
        unsigned long flags;

        cancel_delayed_work(&mux_dev->work_rx);

        spin_lock_irqsave(&rx->submit_list_lock, flags);
        list_for_each_entry_safe(r, r_next, &rx->rx_submit_list, rx_submit_list) {
                spin_unlock_irqrestore(&rx->submit_list_lock, flags);
                usb_kill_urb(r->urb);
                spin_lock_irqsave(&rx->submit_list_lock, flags);
        }
        spin_unlock_irqrestore(&rx->submit_list_lock, flags);

        spin_lock_irqsave(&rx->free_list_lock, flags);
        list_for_each_entry_safe(r, r_next, &rx->rx_free_list, free_list) {
                list_del(&r->free_list);
                free_mux_rx(r);
        }
        spin_unlock_irqrestore(&rx->free_list_lock, flags);

        spin_lock_irqsave(&rx->to_host_lock, flags);
        list_for_each_entry_safe(r, r_next, &rx->to_host_list, to_host_list) {
                if (r->mux_dev == (void *)mux_dev) {
                        list_del(&r->to_host_list);
                        free_mux_rx(r);
                }
        }
        spin_unlock_irqrestore(&rx->to_host_lock, flags);
}

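/* Initialise locks, receive lists and the pool of pre-allocated mux_rx buffers */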
static int init_usb(struct mux_dev *mux_dev)
{
        struct mux_rx *r;
        struct rx_cxt *rx = &mux_dev->rx;
        int ret = 0;
        int i;

        spin_lock_init(&mux_dev->write_lock);
        INIT_LIST_HEAD(&rx->to_host_list);
        INIT_LIST_HEAD(&rx->rx_submit_list);
        INIT_LIST_HEAD(&rx->rx_free_list);
        spin_lock_init(&rx->to_host_lock);
        spin_lock_init(&rx->submit_list_lock);
        spin_lock_init(&rx->free_list_lock);

        for (i = 0; i < MAX_ISSUE_NUM * 2; i++) {
                r = alloc_mux_rx();
                if (r == NULL) {
                        ret = -ENOMEM;
                        break;
                }

                list_add(&r->free_list, &rx->rx_free_list);
        }

        INIT_DELAYED_WORK(&mux_dev->work_rx, do_rx);

        return ret;
}

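/* Bind to USB interface 2 of the modem and register the LTE TTY devices */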
static int gdm_mux_probe(struct usb_interface *intf,
                         const struct usb_device_id *id)
{
        struct mux_dev *mux_dev;
        struct tty_dev *tty_dev;
        u16 idVendor, idProduct;
        int bInterfaceNumber;
        int ret;
        int i;
        struct usb_device *usbdev = interface_to_usbdev(intf);

        bInterfaceNumber = intf->cur_altsetting->desc.bInterfaceNumber;

        idVendor = __le16_to_cpu(usbdev->descriptor.idVendor);
        idProduct = __le16_to_cpu(usbdev->descriptor.idProduct);

        pr_info("mux vid = 0x%04x pid = 0x%04x\n", idVendor, idProduct);

        if (bInterfaceNumber != 2)
                return -ENODEV;

        mux_dev = kzalloc(sizeof(struct mux_dev), GFP_KERNEL);
        if (!mux_dev)
                return -ENOMEM;

        tty_dev = kzalloc(sizeof(struct tty_dev), GFP_KERNEL);
        if (!tty_dev) {
                ret = -ENOMEM;
                goto err_free_mux;
        }

        mux_dev->usbdev = usbdev;
        mux_dev->control_intf = intf;

        ret = init_usb(mux_dev);
        if (ret)
                goto err_free_usb;

        tty_dev->priv_dev = (void *)mux_dev;
        tty_dev->send_func = gdm_mux_send;
        tty_dev->recv_func = gdm_mux_recv;
        tty_dev->send_control = gdm_mux_send_control;

        ret = register_lte_tty_device(tty_dev, &intf->dev);
        if (ret)
                goto err_unregister_tty;

        for (i = 0; i < TTY_MAX_COUNT; i++)
                mux_dev->tty_dev = tty_dev;

        mux_dev->intf = intf;
        mux_dev->usb_state = PM_NORMAL;

        usb_get_dev(usbdev);
        usb_set_intfdata(intf, tty_dev);

        return ret;

err_unregister_tty:
        unregister_lte_tty_device(tty_dev);
err_free_usb:
        release_usb(mux_dev);
        kfree(tty_dev);
err_free_mux:
        kfree(mux_dev);

        return ret;
}

static void gdm_mux_disconnect(struct usb_interface *intf)
{
        struct tty_dev *tty_dev;
        struct mux_dev *mux_dev;
        struct usb_device *usbdev = interface_to_usbdev(intf);

        tty_dev = usb_get_intfdata(intf);

        mux_dev = tty_dev->priv_dev;

        release_usb(mux_dev);
        unregister_lte_tty_device(tty_dev);

        kfree(mux_dev);
        kfree(tty_dev);

        usb_put_dev(usbdev);
}

static int gdm_mux_suspend(struct usb_interface *intf, pm_message_t pm_msg)
{
        struct tty_dev *tty_dev;
        struct mux_dev *mux_dev;
        struct rx_cxt *rx;
        struct mux_rx *r, *r_next;
        unsigned long flags;

        tty_dev = usb_get_intfdata(intf);
        mux_dev = tty_dev->priv_dev;
        rx = &mux_dev->rx;

        if (mux_dev->usb_state != PM_NORMAL) {
                pr_err("usb suspend - invalid state\n");
                return -1;
        }

        mux_dev->usb_state = PM_SUSPEND;

        spin_lock_irqsave(&rx->submit_list_lock, flags);
        list_for_each_entry_safe(r, r_next, &rx->rx_submit_list, rx_submit_list) {
                spin_unlock_irqrestore(&rx->submit_list_lock, flags);
                usb_kill_urb(r->urb);
                spin_lock_irqsave(&rx->submit_list_lock, flags);
        }
        spin_unlock_irqrestore(&rx->submit_list_lock, flags);

        return 0;
}

static int gdm_mux_resume(struct usb_interface *intf)
{
        struct tty_dev *tty_dev;
        struct mux_dev *mux_dev;
        int i;

        tty_dev = usb_get_intfdata(intf);
        mux_dev = tty_dev->priv_dev;

        if (mux_dev->usb_state != PM_SUSPEND) {
                pr_err("usb resume - invalid state\n");
                return -1;
        }

        mux_dev->usb_state = PM_NORMAL;

        /* re-issue the receive URBs that were killed during suspend */
        for (i = 0; i < MAX_ISSUE_NUM; i++)
                gdm_mux_recv(mux_dev, mux_dev->rx_cb);

        return 0;
}

static struct usb_driver gdm_mux_driver = {
        .name = "gdm_mux",
        .probe = gdm_mux_probe,
        .disconnect = gdm_mux_disconnect,
        .id_table = id_table,
        .supports_autosuspend = 1,
        .suspend = gdm_mux_suspend,
        .resume = gdm_mux_resume,
        .reset_resume = gdm_mux_resume,
};

static int __init gdm_usb_mux_init(void)
{
        mux_rx_wq = create_workqueue("mux_rx_wq");
        if (mux_rx_wq == NULL) {
                pr_err("work queue create fail\n");
                return -1;
        }

        register_lte_tty_driver();

        return usb_register(&gdm_mux_driver);
}

static void __exit gdm_usb_mux_exit(void)
{
        unregister_lte_tty_driver();

        if (mux_rx_wq) {
                flush_workqueue(mux_rx_wq);
                destroy_workqueue(mux_rx_wq);
        }

        usb_deregister(&gdm_mux_driver);
}

module_init(gdm_usb_mux_init);
module_exit(gdm_usb_mux_exit);

MODULE_DESCRIPTION("GCT LTE TTY Device Driver");
MODULE_LICENSE("GPL");