2 * CDC Ethernet based driver for the networking peripherals of Huawei data card devices
3 * This driver is developed based on usbnet.c and cdc_ether.c
4 * Copyright (C) 2009 by Franko Fang (Huawei Technologies Co., Ltd.)
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will support Huawei data card devices for Linux networking,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 #include <linux/module.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/ethtool.h>
28 #include <linux/workqueue.h>
29 #include <linux/mii.h>
30 #include <linux/usb.h>
31 #include <linux/sched.h>
32 #include <linux/ctype.h>
33 #include <linux/usb/cdc.h>
34 #include <linux/usbdevice_fs.h>
36 #include <linux/version.h>
37 /////////////////////////////////////////////////////////////////////////////////////////////////
38 #define DRIVER_VERSION "v2.07.00.00"
39 #define DRIVER_AUTHOR "Franko Fang <huananhu@huawei.com>"
40 #define DRIVER_DESC "Huawei ether driver for 3G data card ether device"
41 //////////////////////////////////////////////////////////////////////////////////////////////////////
42 #define RX_MAX_QUEUE_MEMORY (60 * 1518)
43 #define RX_QLEN(dev) ( ((dev)->udev->speed == USB_SPEED_HIGH) ? \
44 (RX_MAX_QUEUE_MEMORY / (dev)->rx_urb_size) : 4)
45 #define TX_QLEN(dev) (((dev)->udev->speed == USB_SPEED_HIGH) ? \
46 (RX_MAX_QUEUE_MEMORY / (dev)->hard_mtu) : 4)
48 // reawaken network queue this soon after stopping; else watchdog barks
49 #define TX_TIMEOUT_JIFFIES (5 * HZ)
51 // throttle rx/tx briefly after some faults, so khubd might disconnect()
52 // us (it polls at HZ/4 usually) before we report too many false errors.
53 #define THROTTLE_JIFFIES (HZ / 8)
56 #define UNLINK_TIMEOUT_MS 3
57 //////////////////////////////////////////////////////////////////////////////////////////////
58 // randomly generated ethernet address
59 static u8 node_id
[ETH_ALEN
];
61 static const char driver_name
[] = "hw_cdc_net";
63 /* use ethtool to change the level for any given device */
64 static int msg_level
= -1;
65 module_param (msg_level
, int, 0);
66 MODULE_PARM_DESC (msg_level
, "Override default message level");
67 //////////////////////////////////////////////////////////////////////////////////////////
68 #define HW_TLP_MASK_SYNC 0xF800
69 #define HW_TLP_MASK_LENGTH 0x07FF
70 #define HW_TLP_BITS_SYNC 0xF800
74 unsigned short pktlength
;
75 unsigned char payload
;
77 #define HW_TLP_HDR_LENGTH sizeof(unsigned short)
80 typedef enum __HW_TLP_BUF_STATE
{
81 HW_TLP_BUF_STATE_IDLE
= 0,
82 HW_TLP_BUF_STATE_PARTIAL_FILL
,
83 HW_TLP_BUF_STATE_PARTIAL_HDR
,
84 HW_TLP_BUF_STATE_HDR_ONLY
,
85 HW_TLP_BUF_STATE_ERROR
88 struct hw_cdc_tlp_tmp
{
90 unsigned short pktlength
;
91 unsigned short bytesneeded
;
93 /*max ethernet pkt size 1514*/
94 #define HW_USB_RECEIVE_BUFFER_SIZE 1600L
95 /*for Thin-layer-protocol (TLP)*/
96 #define HW_USB_MRECEIVE_BUFFER_SIZE 4096L
98 #define HW_USB_MRECEIVE_MAX_BUFFER_SIZE (1024 * 16)
100 #define HW_JUNGO_BCDDEVICE_VALUE 0x0102
101 #define BINTERFACESUBCLASS 0x02
102 ///////////////////////////////////////////////////////////////////////////////////////////
103 #define EVENT_TX_HALT 0
104 #define EVENT_RX_HALT 1
105 #define EVENT_RX_MEMORY 2
106 #define EVENT_STS_SPLIT 3
107 #define EVENT_LINK_RESET 4
110 #define NCM_TX_DEFAULT_TIMEOUT_MS 2
112 static int ncm_prefer_32
= 1;
113 //module_param(ncm_prefer_32, bool, S_IRUGO);
114 module_param(ncm_prefer_32
, int, S_IRUGO
);
116 static int ncm_prefer_crc
= 0;
117 //module_param(ncm_prefer_crc, bool, S_IRUGO);
118 module_param(ncm_prefer_crc
, int, S_IRUGO
);
120 static unsigned long ncm_tx_timeout
= NCM_TX_DEFAULT_TIMEOUT_MS
;
121 module_param(ncm_tx_timeout
, ulong
, S_IRUGO
);
123 static unsigned int ncm_read_buf_count
= 4;
124 module_param(ncm_read_buf_count
, uint
, S_IRUGO
);
126 static unsigned short ncm_read_size_in1k
= 4;
127 module_param(ncm_read_size_in1k
, short , S_IRUGO
);
129 static int rt_debug
= 0;
130 //module_param(rt_debug, bool, S_IRUGO|S_IWUSR);
131 module_param(rt_debug
, int, S_IRUGO
| S_IWUSR
);
134 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
135 //#include <linux/unaligned/access_ok.h>
137 static inline u16
get_unaligned_le16(const void *p
)
139 return le16_to_cpup((__le16
*)p
);
142 static inline u32
get_unaligned_le32(const void *p
)
144 return le32_to_cpup((__le32
*)p
);
147 static inline void put_unaligned_le16(u16 val
, void *p
)
149 *((__le16
*)p
) = cpu_to_le16(val
);
152 static inline void put_unaligned_le32(u32 val
, void *p
)
154 *((__le32
*)p
) = cpu_to_le32(val
);
158 /* Add for DTS2011050903736 lxz 20110520 start*/
159 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)
160 #define LINUX_VERSION37_LATER 1
162 #define LINUX_VERSION37_LATER 0
164 /* Add for DTS2011050903736 lxz 20110520 end*/
168 >2.6.36 some systems do not find ncm.h but do find cdc.h
169 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)
170 #include <linux/usb/ncm.h>
173 #define USB_CDC_NCM_TYPE 0x1a
175 /* NCM Functional Descriptor */
176 /* change usb_cdc_ncm_desc -> usb_cdc_ncm_desc_hw ,prevent cdc.h redefinition 11-05*/
177 struct usb_cdc_ncm_desc_hw
{
179 __u8 bDescriptorType
;
180 __u8 bDescriptorSubType
;
181 __le16 bcdNcmVersion
;
182 __u8 bmNetworkCapabilities
;
183 } __attribute__ ((packed
));
185 #ifdef NCM_NCAP_ETH_FILTER
186 #undef NCM_NCAP_ETH_FILTER
188 #ifdef NCM_NCAP_NET_ADDRESS
189 #undef NCM_NCAP_NET_ADDRESS
191 #ifdef NCM_NCAP_ENCAP_COMM
192 #undef NCM_NCAP_ENCAP_COMM
194 #ifdef NCM_NCAP_MAX_DGRAM
195 #undef NCM_NCAP_MAX_DGRAM
197 #ifdef NCM_NCAP_CRC_MODE
198 #undef NCM_NCAP_CRC_MODE
201 #define NCM_NCAP_ETH_FILTER (1 << 0)
202 #define NCM_NCAP_NET_ADDRESS (1 << 1)
203 #define NCM_NCAP_ENCAP_COMM (1 << 2)
204 #define NCM_NCAP_MAX_DGRAM (1 << 3)
205 #define NCM_NCAP_CRC_MODE (1 << 4)
207 #ifdef USB_CDC_GET_NTB_PARAMETERS
208 #undef USB_CDC_GET_NTB_PARAMETERS
210 #ifdef USB_CDC_GET_NET_ADDRESS
211 #undef USB_CDC_GET_NET_ADDRESS
213 #ifdef USB_CDC_SET_NET_ADDRESS
214 #undef USB_CDC_SET_NET_ADDRESS
216 #ifdef USB_CDC_GET_NTB_FORMAT
217 #undef USB_CDC_GET_NTB_FORMAT
219 #ifdef USB_CDC_SET_NTB_FORMAT
220 #undef USB_CDC_SET_NTB_FORMAT
222 #ifdef USB_CDC_GET_NTB_INPUT_SIZE
223 #undef USB_CDC_GET_NTB_INPUT_SIZE
225 #ifdef USB_CDC_SET_NTB_INPUT_SIZE
226 #undef USB_CDC_SET_NTB_INPUT_SIZE
228 #ifdef USB_CDC_GET_MAX_DATAGRAM_SIZE
229 #undef USB_CDC_GET_MAX_DATAGRAM_SIZE
231 #ifdef USB_CDC_SET_MAX_DATAGRAM_SIZE
232 #undef USB_CDC_SET_MAX_DATAGRAM_SIZE
234 #ifdef USB_CDC_GET_CRC_MODE
235 #undef USB_CDC_GET_CRC_MODE
237 #ifdef USB_CDC_SET_CRC_MODE
238 #undef USB_CDC_SET_CRC_MODE
241 #define USB_CDC_GET_NTB_PARAMETERS 0x80
242 #define USB_CDC_GET_NET_ADDRESS 0x81
243 #define USB_CDC_SET_NET_ADDRESS 0x82
244 #define USB_CDC_GET_NTB_FORMAT 0x83
245 #define USB_CDC_SET_NTB_FORMAT 0x84
246 #define USB_CDC_GET_NTB_INPUT_SIZE 0x85
247 #define USB_CDC_SET_NTB_INPUT_SIZE 0x86
248 #define USB_CDC_GET_MAX_DATAGRAM_SIZE 0x87
249 #define USB_CDC_SET_MAX_DATAGRAM_SIZE 0x88
250 #define USB_CDC_GET_CRC_MODE 0x89
251 #define USB_CDC_SET_CRC_MODE 0x8a
254 * Class Specific structures and constants
256 * CDC NCM parameter structure, CDC NCM subclass 6.2.1
259 struct usb_cdc_ncm_ntb_parameter_hw
{
261 __le16 bmNtbFormatSupported
;
262 __le32 dwNtbInMaxSize
;
263 __le16 wNdpInDivisor
;
264 __le16 wNdpInPayloadRemainder
;
265 __le16 wNdpInAlignment
;
267 __le32 dwNtbOutMaxSize
;
268 __le16 wNdpOutDivisor
;
269 __le16 wNdpOutPayloadRemainder
;
270 __le16 wNdpOutAlignment
;
272 } __attribute__ ((packed
));
275 * CDC NCM transfer headers, CDC NCM subclass 3.2
277 #ifdef NCM_NTH16_SIGN
278 #undef NCM_NTH16_SIGN
280 #ifdef NCM_NTH32_SIGN
281 #undef NCM_NTH32_SIGN
284 #define NCM_NTH16_SIGN 0x484D434E /* NCMH */
285 #define NCM_NTH32_SIGN 0x686D636E /* ncmh */
287 /* change usb_cdc_ncm_nth16 -> usb_cdc_ncm_nth16_hw ,prevent cdc.h redefinition */
288 struct usb_cdc_ncm_nth16_hw
{
290 __le16 wHeaderLength
;
294 } __attribute__ ((packed
));
296 /* change usb_cdc_ncm_nth32 -> usb_cdc_ncm_nth_hw ,prevent cdc.h redefinition */
297 struct usb_cdc_ncm_nth32_hw
{
299 __le16 wHeaderLength
;
301 __le32 dwBlockLength
;
303 } __attribute__ ((packed
));
306 * CDC NCM datagram pointers, CDC NCM subclass 3.3
308 #ifdef NCM_NDP16_CRC_SIGN
309 #undef NCM_NDP16_CRC_SIGN
311 #ifdef NCM_NDP16_NOCRC_SIGN
312 #undef NCM_NDP16_NOCRC_SIGN
314 #ifdef NCM_NDP32_CRC_SIGN
315 #undef NCM_NDP32_CRC_SIGN
317 #ifdef NCM_NDP32_NOCRC_SIGN
318 #undef NCM_NDP32_NOCRC_SIGN
321 #define NCM_NDP16_CRC_SIGN 0x314D434E /* NCM1 */
322 #define NCM_NDP16_NOCRC_SIGN 0x304D434E /* NCM0 */
323 #define NCM_NDP32_CRC_SIGN 0x316D636E /* ncm1 */
324 #define NCM_NDP32_NOCRC_SIGN 0x306D636E /* ncm0 */
326 /* change usb_cdc_ncm_ndp16 -> usb_cdc_ncm_ndp16_hw ,prevent cdc.h redefinition */
327 struct usb_cdc_ncm_ndp16_hw
{
332 } __attribute__ ((packed
));
334 /* change usb_cdc_ncm_ndp32 -> usb_cdc_ncm_ndp32_hw ,prevent cdc.h redefinition */
335 struct usb_cdc_ncm_ndp32_hw
{
339 __le32 dwNextFpIndex
;
342 } __attribute__ ((packed
));
345 * Here are options for NCM Datagram Pointer table (NDP) parser.
346 * There are 2 different formats: NDP16 and NDP32 in the spec (ch. 3),
347 * in NDP16 offsets and sizes fields are 1 16bit word wide,
348 * in NDP32 -- 2 16bit words wide. Also signatures are different.
349 * To make the parser code the same, put the differences in the structure,
350 * and switch pointers to the structures when the format is changed.
353 /* change usb_cdc_ncm_ndp32 -> usb_cdc_ncm_ndp32_hw ,prevent redefinition */
354 struct ndp_parser_opts_hw
{
359 unsigned ndplen_align
;
360 /* sizes in u16 units */
361 unsigned dgram_item_len
; /* index or length */
362 unsigned block_length
;
366 unsigned next_fp_index
;
369 #ifdef INIT_NDP16_OPTS
370 #undef INIT_NDP16_OPTS
372 #ifdef INIT_NDP32_OPTS
373 #undef INIT_NDP32_OPTS
376 #define INIT_NDP16_OPTS { \
377 .nth_sign = NCM_NTH16_SIGN, \
378 .ndp_sign = NCM_NDP16_NOCRC_SIGN, \
379 .nth_size = sizeof(struct usb_cdc_ncm_nth16_hw), \
380 .ndp_size = sizeof(struct usb_cdc_ncm_ndp16_hw), \
382 .dgram_item_len = 1, \
387 .next_fp_index = 1, \
390 #define INIT_NDP32_OPTS { \
391 .nth_sign = NCM_NTH32_SIGN, \
392 .ndp_sign = NCM_NDP32_NOCRC_SIGN, \
393 .nth_size = sizeof(struct usb_cdc_ncm_nth32_hw), \
394 .ndp_size = sizeof(struct usb_cdc_ncm_ndp32_hw), \
396 .dgram_item_len = 2, \
401 .next_fp_index = 2, \
404 static inline void put_ncm(__le16
**p
, unsigned size
, unsigned val
)
408 put_unaligned_le16((u16
)val
, *p
);
411 put_unaligned_le32((u32
)val
, *p
);
421 static inline unsigned get_ncm(__le16
**p
, unsigned size
)
427 tmp
= get_unaligned_le16(*p
);
430 tmp
= get_unaligned_le32(*p
);
440 #ifdef NCM_CONTROL_TIMEOUT
441 #undef NCM_CONTROL_TIMEOUT
444 #define NCM_CONTROL_TIMEOUT (5 * 1000)
447 /* 'u' must be of unsigned type */
448 #define IS_POWER2(u) (((u) > 0) && !((u) & ((u) - 1)))
450 /* 'p' must designate a variable of type * __le16 (in all get/put_ncm_leXX) */
451 #define get_ncm_le16(p) \
452 ({ __le16 val = get_unaligned_le16(p); p += 1; val; })
454 #define get_ncm_le32(p) \
455 ({ __le32 val = get_unaligned_le32(p); p += 2; val; })
457 #define put_ncm_le16(val, p) \
458 ({ put_unaligned_le16((val), p); p += 1; })
460 #define put_ncm_le32(val, p) \
461 ({ put_unaligned_le32((val), p); p += 2; })
463 #define NCM_NDP_MIN_ALIGNMENT 4
465 #ifdef NCM_NTB_MIN_IN_SIZE
466 #undef NCM_NTB_MIN_IN_SIZE
468 #define NCM_NTB_MIN_IN_SIZE 2048
470 #ifdef NCM_NTB_MIN_OUT_SIZE
471 #undef NCM_NTB_MIN_OUT_SIZE
474 #define NCM_NDP16_ENTRY_LEN 4
476 /* NTB16 must include: NTB16 header, NDP16 header, datagram pointer entry,
477 * terminating (NULL) datagram entry
479 #define NCM_NTB_MIN_OUT_SIZE (sizeof(struct usb_cdc_ncm_nth16_hw) \
480 + sizeof(struct usb_cdc_ncm_ndp16_hw) + 2 * NCM_NDP16_ENTRY_LEN)
483 #define max(_a, _b) (((_a) > (_b)) ? (_a) : (_b))
487 #define min(_a, _b) (((_a) < (_b)) ? (_a) : (_b))
490 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)
491 #define NCM_NTB_HARD_MAX_IN_SIZE ((u32)(max(16,(int)ncm_read_size_in1k) * 1024))
493 #define NCM_NTB_HARD_MAX_IN_SIZE ((u32)(max(2,(int)ncm_read_size_in1k) * 1024))
496 #define RX_QLEN_NCM ncm_read_buf_count
497 #define TX_QLEN_NCM 4
499 /* These are actually defined in usbnet.c and we need to redefine these here in
500 * order to calculate the size of the SKB pool
504 static struct ndp_parser_opts_hw ndp16_opts
= INIT_NDP16_OPTS
;
505 static struct ndp_parser_opts_hw ndp32_opts
= INIT_NDP32_OPTS
;
508 struct list_head list
;
514 /* Maximum possible length of this NTB */
516 /* The current offset of the NDP */
518 /* The current length of the NDP */
520 /* End of the datagrams section */
522 /* Entries list (datagram index/length pairs) */
523 struct list_head entries
;
524 /* Number of datagrams in this NTB */
526 /* The SKB with the actual NTB data */
530 #define NTB_LEN(n) ((n)->ndp_off + (n)->ndp_len)
531 #define NTB_IS_EMPTY(n) ((n)->ndgrams == 0)
534 struct usb_cdc_ncm_desc_hw
*ncm_desc
;
535 //struct usbnet *unet;
536 struct hw_cdc_net
*ndev
;
537 struct usb_interface
*control
;
538 struct usb_interface
*data
;
540 #define NTB_FORMAT_SUPPORTED_16BIT 0x0001
541 #define NTB_FORMAT_SUPPORTED_32BIT 0x0002
549 #define NCM_BIT_MODE_16 0
550 #define NCM_BIT_MODE_32 1
552 #define NCM_CRC_MODE_NO 0
553 #define NCM_CRC_MODE_YES 1
556 struct ndp_parser_opts_hw popts
;
560 struct sk_buff
**skb_pool
;
561 unsigned skb_pool_size
;
562 struct timer_list tx_timer
;
563 /* The maximum amount of jiffies that a datagram can be held (in the
564 * current-NTB) before it must be sent on the bus
566 unsigned long tx_timeout_jiffies
;
567 #ifdef CONFIG_CDC_ENCAP_COMMAND
568 struct cdc_encap
*cdc_encap_ctx
;
575 struct usb_device
*udev
;
576 struct usb_interface
*intf
;
577 const char *driver_name
;
578 const char *driver_desc
;
580 wait_queue_head_t
*wait
;
581 struct mutex phy_mutex
;
582 unsigned char suspend_count
;
584 /* i/o info: pipes etc */
586 struct usb_host_endpoint
*status
;
588 struct timer_list delay
;
590 /* protocol/interface state */
591 struct net_device
*net
;
592 struct net_device_stats stats
;
594 unsigned long data
[5];
596 u32 hard_mtu
; /* count any extra framing */
597 size_t rx_urb_size
; /* size for rx urbs */
598 struct mii_if_info mii
;
600 /* various kinds of pending driver work */
601 struct sk_buff_head rxq
;
602 struct sk_buff_head txq
;
603 struct sk_buff_head done
;
604 struct urb
*interrupt
;
605 struct tasklet_struct bh
;
607 struct work_struct kevent
;
608 struct delayed_work status_work
;//fangxiaozhi added for work
612 /*The state and buffer for the data of TLP*/
613 HW_TLP_BUF_STATE hw_tlp_buffer_state
;
614 struct hw_cdc_tlp_tmp hw_tlp_tmp_buf
;
615 /*indicate the download tlp feature is activated or not*/
616 int hw_tlp_download_is_actived
;
620 struct ncm_ctx
*ncm_ctx
;
624 static inline struct usb_driver
*driver_of(struct usb_interface
*intf
)
626 return to_usb_driver(intf
->dev
.driver
);
630 /* Drivers that reuse some of the standard USB CDC infrastructure
631 * (notably, using multiple interfaces according to the CDC
632 * union descriptor) get some helper code.
634 struct hw_dev_state
{
635 struct usb_cdc_header_desc
*header
;
636 struct usb_cdc_union_desc
*u
;
637 struct usb_cdc_ether_desc
*ether
;
638 struct usb_interface
*control
;
639 struct usb_interface
*data
;
643 /* we record the state for each of our queued skbs */
647 rx_start
, rx_done
, rx_cleanup
650 struct skb_data
{ /* skb->cb is one of these */
652 struct hw_cdc_net
*dev
;
653 enum skb_state state
;
656 ////////////////////////////////////////////////////////////////////////////////////////////////////////////////
657 #define devdbg(hw_cdc_net, fmt, arg...) \
658 ((void)(rt_debug && printk(KERN_ERR "Hw_cdc_driver######: " fmt "\n" , ## arg)))
661 #define deverr(hw_cdc_net, fmt, arg...) \
662 printk(KERN_ERR "%s: " fmt "\n" , (hw_cdc_net)->net->name , ## arg)
663 #define devwarn(hw_cdc_net, fmt, arg...) \
664 printk(KERN_WARNING "%s: " fmt "\n" , (hw_cdc_net)->net->name , ## arg)
666 #define devinfo(hw_cdc_net, fmt, arg...) \
667 printk(KERN_INFO "%s: " fmt "\n" , (hw_cdc_net)->net->name , ## arg); \
670 ////////////////////////////////////////////////////////////////////////////////
671 static void hw_cdc_status(struct hw_cdc_net
*dev
, struct urb
*urb
);
672 static inline int hw_get_ethernet_addr(struct hw_cdc_net
*dev
);
673 static int hw_cdc_bind(struct hw_cdc_net
*dev
, struct usb_interface
*intf
);
674 void hw_cdc_unbind(struct hw_cdc_net
*dev
, struct usb_interface
*intf
);
675 int cdc_ncm_rx_fixup(struct hw_cdc_net
*dev
, struct sk_buff
*skb
);
676 struct sk_buff
* cdc_ncm_tx_fixup(struct hw_cdc_net
*dev
, struct sk_buff
*skb
,
678 ///////////////////////////
679 int hw_get_endpoints(struct hw_cdc_net
*, struct usb_interface
*);
680 void hw_skb_return (struct hw_cdc_net
*, struct sk_buff
*);
681 void hw_unlink_rx_urbs(struct hw_cdc_net
*);
682 void hw_defer_kevent (struct hw_cdc_net
*, int );
683 int hw_get_settings (struct net_device
*, struct ethtool_cmd
*);
684 int hw_set_settings (struct net_device
*, struct ethtool_cmd
*);
685 u32
hw_get_link (struct net_device
*);
686 int hw_nway_reset(struct net_device
*);
687 void hw_get_drvinfo (struct net_device
*, struct ethtool_drvinfo
*);
688 u32
hw_get_msglevel (struct net_device
*);
689 void hw_set_msglevel (struct net_device
*, u32
);
690 void hw_disconnect (struct usb_interface
*);
691 int hw_cdc_probe (struct usb_interface
*, const struct usb_device_id
*);
692 int hw_resume (struct usb_interface
*);
693 int hw_suspend (struct usb_interface
*, pm_message_t
);
694 //////////////////////////
696 /*Begin : fangxiaozhi added for work*/
697 static void hw_cdc_check_status_work(struct work_struct
*work
);
699 struct delayed_work *option_suspend_wq
702 /*End : fangxiaozhi added for work*/
710 /* handles CDC Ethernet and many other network "bulk data" interfaces */
711 int hw_get_endpoints(struct hw_cdc_net
*dev
, struct usb_interface
*intf
)
714 struct usb_host_interface
*alt
= NULL
;
715 struct usb_host_endpoint
*in
= NULL
, *out
= NULL
;
716 struct usb_host_endpoint
*status
= NULL
;
718 for (tmp
= 0; tmp
< intf
->num_altsetting
; tmp
++) {
721 //in = out = status = NULL;
725 alt
= intf
->altsetting
+ tmp
;
727 /* take the first altsetting with in-bulk + out-bulk;
728 * remember any status endpoint, just in case;
729 * ignore other endpoints and altsettings.
731 for (ep
= 0; ep
< alt
->desc
.bNumEndpoints
; ep
++) {
733 struct usb_host_endpoint
*e
;
736 e
= alt
->endpoint
+ ep
;
737 switch (e
->desc
.bmAttributes
) {
738 case USB_ENDPOINT_XFER_INT
:
739 if (!usb_endpoint_dir_in(&e
->desc
)){
744 case USB_ENDPOINT_XFER_BULK
:
749 if (usb_endpoint_dir_in(&e
->desc
)) {
752 }else if (intr
&& !status
){
765 if (!alt
|| !in
|| !out
){
768 if (alt
->desc
.bAlternateSetting
!= 0) {
769 tmp
= usb_set_interface (dev
->udev
, alt
->desc
.bInterfaceNumber
,
770 alt
->desc
.bAlternateSetting
);
776 dev
->in
= usb_rcvbulkpipe (dev
->udev
,
777 in
->desc
.bEndpointAddress
& USB_ENDPOINT_NUMBER_MASK
);
778 dev
->out
= usb_sndbulkpipe (dev
->udev
,
779 out
->desc
.bEndpointAddress
& USB_ENDPOINT_NUMBER_MASK
);
780 dev
->status
= status
;
783 EXPORT_SYMBOL_GPL(hw_get_endpoints
);
785 static void intr_complete (struct urb
*urb
);
787 static int init_status (struct hw_cdc_net
*dev
, struct usb_interface
*intf
)
795 pipe
= usb_rcvintpipe (dev
->udev
,
796 dev
->status
->desc
.bEndpointAddress
797 & USB_ENDPOINT_NUMBER_MASK
);
798 maxp
= usb_maxpacket (dev
->udev
, pipe
, 0);
800 /* avoid 1 msec chatter: min 8 msec poll rate */
801 period
= max ((int) dev
->status
->desc
.bInterval
,
802 (dev
->udev
->speed
== USB_SPEED_HIGH
) ? 7 : 3);
804 buf
= kmalloc (maxp
, GFP_KERNEL
);
806 dev
->interrupt
= usb_alloc_urb (0, GFP_KERNEL
);
807 if (!dev
->interrupt
) {
811 usb_fill_int_urb(dev
->interrupt
, dev
->udev
, pipe
,
812 buf
, maxp
, intr_complete
, dev
, period
);
814 "status ep%din, %d bytes period %d\n",
815 usb_pipeendpoint(pipe
), maxp
, period
);
822 /* Passes this packet up the stack, updating its accounting.
823 * Some link protocols batch packets, so their rx_fixup paths
824 * can return clones as well as just modify the original skb.
826 void hw_skb_return (struct hw_cdc_net
*dev
, struct sk_buff
*skb
)
833 sn
= be32_to_cpu(*(u32
*)(skb
->data
+ 0x26));
834 devdbg(dev
,"hw_skb_return,len:%d receive sn:%x, time:%ld-%ld",
835 skb
->len
,sn
,current_kernel_time().tv_sec
,current_kernel_time().tv_nsec
);
839 sn
= be32_to_cpu(*(u32
*)(skb
->data
+ 0x2a));
840 devdbg(dev
,"hw_skb_return,len:%d receive ack sn:%x, time:%ld-%ld",
841 skb
->len
,sn
,current_kernel_time().tv_sec
,current_kernel_time().tv_nsec
);
844 skb
->protocol
= eth_type_trans (skb
, dev
->net
);
845 dev
->stats
.rx_packets
++;
846 dev
->stats
.rx_bytes
+= skb
->len
;
848 if (netif_msg_rx_status (dev
)){
849 devdbg (dev
, "< rx, len %zu, type 0x%x",
850 skb
->len
+ sizeof (struct ethhdr
), skb
->protocol
);
852 memset (skb
->cb
, 0, sizeof (struct skb_data
));
853 status
= netif_rx (skb
);
854 if (status
!= NET_RX_SUCCESS
&& netif_msg_rx_err (dev
)){
855 devdbg (dev
, "netif_rx status %d", status
);
858 EXPORT_SYMBOL_GPL(hw_skb_return
);
860 // unlink pending rx/tx; completion handlers do all other cleanup
862 static int unlink_urbs (struct hw_cdc_net
*dev
, struct sk_buff_head
*q
)
865 struct sk_buff
*skb
, *skbnext
;
868 spin_lock_irqsave (&q
->lock
, flags
);
869 for (skb
= q
->next
; skb
!= (struct sk_buff
*) q
; skb
= skbnext
) {
870 struct skb_data
*entry
;
874 entry
= (struct skb_data
*) skb
->cb
;
878 // during some PM-driven resume scenarios,
879 // these (async) unlinks complete immediately
880 retval
= usb_unlink_urb (urb
);
881 if (retval
!= -EINPROGRESS
&& retval
!= 0){
882 devdbg (dev
, "unlink urb err, %d", retval
);
889 spin_unlock_irqrestore (&q
->lock
, flags
);
894 // Flush all pending rx urbs
895 // minidrivers may need to do this when the MTU changes
897 void hw_unlink_rx_urbs(struct hw_cdc_net
*dev
)
899 if (netif_running(dev
->net
)) {
900 (void) unlink_urbs (dev
, &dev
->rxq
);
901 tasklet_schedule(&dev
->bh
);
904 EXPORT_SYMBOL_GPL(hw_unlink_rx_urbs
);
907 /*-------------------------------------------------------------------------
909 * Network Device Driver (peer link to "Host Device", from USB host)
911 *-------------------------------------------------------------------------*/
913 static int hw_change_mtu (struct net_device
*net
, int new_mtu
)
915 struct hw_cdc_net
*dev
= netdev_priv(net
);
916 int ll_mtu
= new_mtu
+ net
->hard_header_len
;
917 int old_hard_mtu
= dev
->hard_mtu
;
918 int old_rx_urb_size
= dev
->rx_urb_size
;
924 // no second zero-length packet read wanted after mtu-sized packets
925 if ((ll_mtu
% dev
->maxpacket
) == 0){
930 dev
->hard_mtu
= net
->mtu
+ net
->hard_header_len
;
931 if (dev
->rx_urb_size
== old_hard_mtu
&& !dev
->is_ncm
) {
932 dev
->rx_urb_size
= dev
->hard_mtu
;
933 if (dev
->rx_urb_size
> old_rx_urb_size
)
935 hw_unlink_rx_urbs(dev
);
939 devdbg(dev
,"change mtu :%d, urb_size:%u",new_mtu
,(u32
)dev
->rx_urb_size
);
944 /*-------------------------------------------------------------------------*/
945 //#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30)
946 static struct net_device_stats
*hw_get_stats (struct net_device
*net
)
948 struct hw_cdc_net
*dev
= netdev_priv(net
);
952 /*-------------------------------------------------------------------------*/
954 static void tx_defer_bh(struct hw_cdc_net
*dev
,
956 struct sk_buff_head
*list
)
960 spin_lock_irqsave(&list
->lock
, flags
);
961 __skb_unlink(skb
, list
);
962 spin_unlock(&list
->lock
);
963 spin_lock(&dev
->done
.lock
);
964 __skb_queue_tail(&dev
->done
, skb
);
965 if (1 <= dev
->done
.qlen
){
966 tasklet_schedule(&dev
->bh
);
968 spin_unlock_irqrestore(&dev
->done
.lock
, flags
);
970 ////////////////////////////////////////////
971 static HW_TLP_BUF_STATE
submit_skb(struct hw_cdc_net
*dev
,
976 struct skb_data
* entry
;
980 if (len
> dev
->rx_urb_size
){
981 devdbg(dev
, "The package length is too large\n");
982 return HW_TLP_BUF_STATE_ERROR
;
985 if ((skb
= alloc_skb (len
+ NET_IP_ALIGN
, GFP_ATOMIC
)) == NULL
) {
986 return HW_TLP_BUF_STATE_ERROR
;
988 skb_reserve (skb
, NET_IP_ALIGN
);
991 entry
= (struct skb_data
*) skb
->cb
;
994 entry
->state
= rx_done
;
995 entry
->length
= skb
->len
;
997 memcpy(skb
->data
, data
, len
);
1000 spin_lock_irqsave(&dev
->done
.lock
, flags
);
1001 __skb_queue_tail(&dev
->done
, skb
);
1002 if (1 <= dev
->done
.qlen
){
1003 tasklet_schedule(&dev
->bh
);
1005 spin_unlock_irqrestore(&dev
->done
.lock
, flags
);
1006 return HW_TLP_BUF_STATE_IDLE
;
1008 static void reset_tlp_tmp_buf(struct hw_cdc_net
*dev
)
1010 dev
->hw_tlp_tmp_buf
.bytesneeded
= 0;
1011 dev
->hw_tlp_tmp_buf
.pktlength
= 0;
1013 static void rx_tlp_parse(struct hw_cdc_net
*dev
, struct sk_buff
*skb
)
1015 struct hw_cdc_tlp
*tlp
= NULL
;
1016 int remain_bytes
= (int)skb
->len
;
1017 unsigned short pktlen
= 0;
1018 unsigned char *cur_ptr
= skb
->data
;
1019 unsigned char *payload_ptr
= NULL
;
1020 unsigned char *buf_start
= skb
->data
;
1021 unsigned char *buf_end
= buf_start
+ skb
->len
;
1022 unsigned char *ptr
= NULL
;
1024 /*decoding the TLP packets into the ether packet*/
1025 while (remain_bytes
> 0){
1026 switch (dev
->hw_tlp_buffer_state
){
1027 case HW_TLP_BUF_STATE_IDLE
:
1029 if (HW_TLP_HDR_LENGTH
< remain_bytes
){
1030 tlp
= (struct hw_cdc_tlp
*)cur_ptr
;
1031 pktlen
= (tlp
->pktlength
& HW_TLP_MASK_LENGTH
);
1032 payload_ptr
= (unsigned char *)&(tlp
->payload
);
1034 //validate the tlp packet header
1035 if (HW_TLP_BITS_SYNC
!= (tlp
->pktlength
& HW_TLP_MASK_SYNC
)){
1036 devdbg(dev
, "The pktlength is error");
1037 dev
->hw_tlp_buffer_state
= HW_TLP_BUF_STATE_ERROR
;
1040 /*The received buffer has the whole ether packet */
1041 if ( (payload_ptr
+ pktlen
) <= buf_end
){
1042 /*Get the ether packet from the TLP packet, and put it into the done queue*/
1043 submit_skb(dev
, payload_ptr
, pktlen
);
1044 cur_ptr
= payload_ptr
+ pktlen
;
1045 remain_bytes
= buf_end
- cur_ptr
;
1046 }else{/*has the part of the ether packet*/
1047 if (pktlen
> dev
->rx_urb_size
){
1048 devdbg(dev
, "The pktlen is invalid");
1049 dev
->hw_tlp_buffer_state
= HW_TLP_BUF_STATE_ERROR
;
1052 dev
->hw_tlp_tmp_buf
.bytesneeded
= (payload_ptr
+ pktlen
) - buf_end
;
1053 dev
->hw_tlp_tmp_buf
.pktlength
= buf_end
- payload_ptr
;
1054 memcpy(dev
->hw_tlp_tmp_buf
.buffer
, payload_ptr
,
1055 dev
->hw_tlp_tmp_buf
.pktlength
);
1056 dev
->hw_tlp_buffer_state
= HW_TLP_BUF_STATE_PARTIAL_FILL
;
1060 else if (HW_TLP_HDR_LENGTH
== remain_bytes
){
1061 memcpy(dev
->hw_tlp_tmp_buf
.buffer
, cur_ptr
, remain_bytes
);
1062 dev
->hw_tlp_tmp_buf
.bytesneeded
= 0;
1063 dev
->hw_tlp_tmp_buf
.pktlength
= remain_bytes
;
1064 dev
->hw_tlp_buffer_state
= HW_TLP_BUF_STATE_HDR_ONLY
;
1067 else if (remain_bytes
> 0){
1068 memcpy(dev
->hw_tlp_tmp_buf
.buffer
, cur_ptr
, remain_bytes
);
1069 dev
->hw_tlp_tmp_buf
.bytesneeded
= HW_TLP_HDR_LENGTH
- remain_bytes
;
1070 dev
->hw_tlp_tmp_buf
.pktlength
= remain_bytes
;
1071 dev
->hw_tlp_buffer_state
= HW_TLP_BUF_STATE_PARTIAL_HDR
;
1075 dev
->hw_tlp_buffer_state
= HW_TLP_BUF_STATE_ERROR
;
1079 case HW_TLP_BUF_STATE_HDR_ONLY
:
1081 tlp
->pktlength
= *((unsigned short*)dev
->hw_tlp_tmp_buf
.buffer
);
1082 pktlen
= (tlp
->pktlength
& HW_TLP_MASK_LENGTH
);
1083 payload_ptr
= cur_ptr
;
1084 reset_tlp_tmp_buf(dev
);
1085 /*validate the tlp packet header*/
1086 if (HW_TLP_BITS_SYNC
!= (tlp
->pktlength
& HW_TLP_MASK_SYNC
)){
1087 devdbg(dev
, "The pktlength is error");
1088 dev
->hw_tlp_buffer_state
= HW_TLP_BUF_STATE_ERROR
;
1091 if ( (payload_ptr
+ pktlen
) <= buf_end
){
1092 submit_skb(dev
, payload_ptr
, pktlen
);
1093 cur_ptr
= payload_ptr
+ pktlen
;
1094 remain_bytes
= buf_end
- cur_ptr
;
1095 dev
->hw_tlp_buffer_state
= HW_TLP_BUF_STATE_IDLE
;
1097 if (pktlen
> dev
->rx_urb_size
){
1098 dev
->hw_tlp_buffer_state
= HW_TLP_BUF_STATE_ERROR
;
1101 dev
->hw_tlp_tmp_buf
.bytesneeded
= (payload_ptr
+ pktlen
) - buf_end
;
1102 dev
->hw_tlp_tmp_buf
.pktlength
= buf_end
- payload_ptr
;
1103 memcpy(dev
->hw_tlp_tmp_buf
.buffer
, payload_ptr
,
1104 dev
->hw_tlp_tmp_buf
.pktlength
);
1105 dev
->hw_tlp_buffer_state
= HW_TLP_BUF_STATE_PARTIAL_FILL
;
1110 case HW_TLP_BUF_STATE_PARTIAL_HDR
:
1112 memcpy(dev
->hw_tlp_tmp_buf
.buffer
+ dev
->hw_tlp_tmp_buf
.pktlength
,
1113 cur_ptr
, dev
->hw_tlp_tmp_buf
.bytesneeded
);
1114 cur_ptr
+= dev
->hw_tlp_tmp_buf
.bytesneeded
;
1115 dev
->hw_tlp_buffer_state
= HW_TLP_BUF_STATE_HDR_ONLY
;
1116 remain_bytes
-= dev
->hw_tlp_tmp_buf
.bytesneeded
;
1119 case HW_TLP_BUF_STATE_PARTIAL_FILL
:
1121 if (remain_bytes
< dev
->hw_tlp_tmp_buf
.bytesneeded
){
1122 memcpy(dev
->hw_tlp_tmp_buf
.buffer
+ dev
->hw_tlp_tmp_buf
.pktlength
,
1123 cur_ptr
, remain_bytes
);
1124 dev
->hw_tlp_tmp_buf
.pktlength
+= remain_bytes
;
1125 dev
->hw_tlp_tmp_buf
.bytesneeded
-= remain_bytes
;
1126 dev
->hw_tlp_buffer_state
= HW_TLP_BUF_STATE_PARTIAL_FILL
;
1127 cur_ptr
+= remain_bytes
;
1130 unsigned short tmplen
= dev
->hw_tlp_tmp_buf
.bytesneeded
1131 + dev
->hw_tlp_tmp_buf
.pktlength
;
1132 if (HW_USB_RECEIVE_BUFFER_SIZE
< tmplen
){
1133 devdbg(dev
, "The tlp length is larger than 1600");
1134 ptr
= (unsigned char *)kmalloc(dev
->hw_tlp_tmp_buf
.bytesneeded
1135 + dev
->hw_tlp_tmp_buf
.pktlength
,GFP_KERNEL
);
1137 memcpy(ptr
, dev
->hw_tlp_tmp_buf
.buffer
,
1138 dev
->hw_tlp_tmp_buf
.pktlength
);
1139 memcpy(ptr
+ dev
->hw_tlp_tmp_buf
.pktlength
, cur_ptr
,
1140 dev
->hw_tlp_tmp_buf
.bytesneeded
);
1141 submit_skb(dev
, ptr
, tmplen
);
1146 memcpy(dev
->hw_tlp_tmp_buf
.buffer
+ dev
->hw_tlp_tmp_buf
.pktlength
,
1147 cur_ptr
, dev
->hw_tlp_tmp_buf
.bytesneeded
);
1148 submit_skb(dev
, dev
->hw_tlp_tmp_buf
.buffer
, tmplen
);
1150 remain_bytes
-= dev
->hw_tlp_tmp_buf
.bytesneeded
;
1151 cur_ptr
+= dev
->hw_tlp_tmp_buf
.bytesneeded
;
1152 dev
->hw_tlp_buffer_state
= HW_TLP_BUF_STATE_IDLE
;
1153 reset_tlp_tmp_buf(dev
);
1157 case HW_TLP_BUF_STATE_ERROR
:
1161 reset_tlp_tmp_buf(dev
);
1162 dev
->hw_tlp_buffer_state
= HW_TLP_BUF_STATE_IDLE
;
1169 static void rx_defer_bh(struct hw_cdc_net
*dev
,
1170 struct sk_buff
*skb
,
1171 struct sk_buff_head
*list
)
1173 unsigned long flags
;
1174 spin_lock_irqsave(&list
->lock
, flags
);
1175 __skb_unlink(skb
, list
);
1176 spin_unlock_irqrestore(&list
->lock
, flags
);
1178 /*deal with the download tlp feature*/
1179 if (1 == dev
->hw_tlp_download_is_actived
){
1180 rx_tlp_parse(dev
, skb
);
1181 dev_kfree_skb_any(skb
);
1183 spin_lock_irqsave(&dev
->done
.lock
, flags
);
1184 __skb_queue_tail(&dev
->done
, skb
);
1185 if (1 <= dev
->done
.qlen
){
1186 tasklet_schedule(&dev
->bh
);
1188 spin_unlock_irqrestore(&dev
->done
.lock
, flags
);
1191 ////////////////////////
1193 /* some work can't be done in tasklets, so we use keventd
1195 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
1196 * but tasklet_schedule() doesn't. hope the failure is rare.
1198 void hw_defer_kevent (struct hw_cdc_net
*dev
, int work
)
1200 set_bit (work
, &dev
->flags
);
1201 if (!schedule_work (&dev
->kevent
)){
1202 deverr (dev
, "kevent %d may have been dropped", work
);
1205 devdbg (dev
, "kevent %d scheduled", work
);
1208 EXPORT_SYMBOL_GPL(hw_defer_kevent
);
/*-------------------------------------------------------------------------*/

static void rx_complete (struct urb *urb);

/* Allocate a receive skb, bind it to @urb, and submit the bulk-in URB.
 * @dev: driver state; @urb: URB to (re)use; @flags: skb allocation flags.
 * Failures are deferred rather than handled inline: no memory ->
 * EVENT_RX_MEMORY keventd work; stall -> EVENT_RX_HALT; device gone ->
 * netif_device_detach().
 * NOTE(review): several brace/case-label lines are elided in this
 * extraction; all visible code tokens are preserved verbatim.
 */
static void rx_submit (struct hw_cdc_net *dev, struct urb *urb, gfp_t flags)
    struct sk_buff *skb;
    struct skb_data *entry;
    unsigned long lockflags;
    size_t size = dev->rx_urb_size;
    if ((skb = alloc_skb (size + NET_IP_ALIGN, flags)) == NULL) {
        deverr (dev, "no rx skb");
        hw_defer_kevent (dev, EVENT_RX_MEMORY);
    /* keep the IP header aligned after the ethernet header */
    skb_reserve (skb, NET_IP_ALIGN);
    entry = (struct skb_data *) skb->cb;
    entry->state = rx_start;
    usb_fill_bulk_urb (urb, dev->udev, dev->in,
        skb->data, size, rx_complete, skb);
    spin_lock_irqsave (&dev->rxq.lock, lockflags);
    if (netif_running (dev->net)
            && netif_device_present (dev->net)
            && !test_bit (EVENT_RX_HALT, &dev->flags)) {
        switch (retval = usb_submit_urb (urb, GFP_ATOMIC)) {
        case 0: /* submit successfully */
            __skb_queue_tail (&dev->rxq, skb);
            /* [case label elided] stalled endpoint: clear via keventd */
            hw_defer_kevent (dev, EVENT_RX_HALT);
            /* [case label elided] out of memory: retry from keventd */
            hw_defer_kevent (dev, EVENT_RX_MEMORY);
            /* [case label elided] device gone */
            if (netif_msg_ifdown (dev)){
                devdbg (dev, "device gone");
            netif_device_detach (dev->net);
            /* [default case elided] unexpected error: let the bh retry */
            if (netif_msg_rx_err (dev)){
                devdbg (dev, "rx submit, %d", retval);
            tasklet_schedule (&dev->bh);
        /* [else elided — interface stopped/halted: do not submit] */
        if (netif_msg_ifdown (dev)){
            devdbg (dev, "rx: stopped");
    spin_unlock_irqrestore (&dev->rxq.lock, lockflags);
    devdbg (dev, "usb_submit_urb status:%x, time:%ld-%ld",
        retval,current_kernel_time().tv_sec,current_kernel_time().tv_nsec);
    /* on the failure paths the skb was never queued; release it here */
    dev_kfree_skb_any (skb);
/*-------------------------------------------------------------------------*/

/* Post-process one completed rx buffer: cdc_ncm_rx_fixup() validates and
 * splits it; good frames go to the stack via hw_skb_return(), bad ones are
 * counted in rx_errors and left on dev->done for cleanup by the bottom half.
 * NOTE(review): brace/label lines are elided in this extraction; visible
 * tokens preserved verbatim.
 */
static inline void rx_process (struct hw_cdc_net *dev, struct sk_buff *skb)
    if(!cdc_ncm_rx_fixup(dev, skb)){
        /* [error path elided] */
    hw_skb_return (dev, skb);
    /* [error label elided] */
    if (netif_msg_rx_err (dev)){
        devdbg (dev, "drop");
    dev->stats.rx_errors++;
    skb_queue_tail (&dev->done, skb);
/*-------------------------------------------------------------------------*/

/* Bulk-in URB completion handler. Classifies urb->status, updates rx
 * statistics, sets entry->state, queues the skb via rx_defer_bh(), and
 * resubmits the URB when the interface is still up and not halted.
 * NOTE(review): case labels, breaks and braces are elided in this
 * extraction; all visible code tokens are preserved verbatim.
 */
static void rx_complete (struct urb *urb)
    struct sk_buff *skb = (struct sk_buff *) urb->context;
    struct skb_data *entry = (struct skb_data *) skb->cb;
    struct hw_cdc_net *dev = entry->dev;
    int urb_status = urb->status;
    devdbg (dev, "rx_complete,urb:%p,rx length %d, time %ld-%ld",
        urb, urb->actual_length,current_kernel_time().tv_sec,
        current_kernel_time().tv_nsec);
    skb_put (skb, urb->actual_length);
    entry->state = rx_done;
    switch (urb_status) {
    /* [success case label elided] runt frames are counted and dropped */
    if (skb->len < dev->net->hard_header_len) {
        entry->state = rx_cleanup;
        dev->stats.rx_errors++;
        dev->stats.rx_length_errors++;
        if (netif_msg_rx_err (dev)){
            devdbg (dev, "rx length %d", skb->len);
    /* stalls need manual reset. this is rare ... except that
     * when going through USB 2.0 TTs, unplug appears this way.
     * we avoid the highspeed version of the ETIMEOUT/EILSEQ
     * storm, recovering as needed.
     */
    dev->stats.rx_errors++;
    hw_defer_kevent (dev, EVENT_RX_HALT);
    /* software-driven interface shutdown */
    case -ECONNRESET:	/* async unlink */
    case -ESHUTDOWN:	/* hardware gone */
    if (netif_msg_ifdown (dev)){
        devdbg (dev, "rx shutdown, code %d", urb_status);
    /* we get controller i/o faults during khubd disconnect() delays.
     * throttle down resubmits, to avoid log floods; just temporarily,
     * so we still recover when the fault isn't a khubd delay.
     */
    dev->stats.rx_errors++;
    if (!timer_pending (&dev->delay)) {
        mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES);
        if (netif_msg_link (dev)){
            devdbg (dev, "rx throttle %d", urb_status);
    entry->state = rx_cleanup;
    /* data overrun ... flush fifo? */
    dev->stats.rx_over_errors++;
    /* [default case elided] any other error: clean up and count it */
    entry->state = rx_cleanup;
    dev->stats.rx_errors++;
    if (netif_msg_rx_err (dev)){
        devdbg (dev, "rx status %d", urb_status);
    rx_defer_bh(dev, skb, &dev->rxq);
    /* recycle the URB while the interface is still receiving */
    if (netif_running (dev->net)
            && !test_bit (EVENT_RX_HALT, &dev->flags)) {
        rx_submit (dev, urb, GFP_ATOMIC);
    if (netif_msg_rx_err (dev)){
        devdbg (dev, "no read resubmitted");
/* Interrupt (status) endpoint completion handler: feeds link/status
 * notifications to hw_cdc_status() and resubmits the interrupt URB while
 * the interface is running.
 * NOTE(review): switch header, case labels and braces are partially elided
 * in this extraction; visible tokens preserved verbatim.
 */
static void intr_complete (struct urb *urb)
    struct hw_cdc_net *dev = urb->context;
    int status = urb->status;
    /* [success case elided] process the status notification */
    hw_cdc_status(dev, urb);
    /* software-driven interface shutdown */
    case -ENOENT:	/* urb killed */
    case -ESHUTDOWN:	/* hardware gone */
    if (netif_msg_ifdown (dev)){
        devdbg (dev, "intr shutdown, code %d", status);
    /* NOTE: not throttling like RX/TX, since this endpoint
     * already polls infrequently
     */
    devdbg (dev, "intr status %d", status);
    if (!netif_running (dev->net)){
    /* zero the buffer so stale status bytes are never re-parsed */
    memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
    status = usb_submit_urb (urb, GFP_ATOMIC);
    if (status != 0 && netif_msg_timer (dev)){
        deverr(dev, "intr resubmit --> %d", status);
/*-------------------------------------------------------------------------*/

/*-------------------------------------------------------------------------*/

// precondition: never called in_interrupt

/* ndo_stop: stop the tx queue, unlink all pending rx/tx URBs and wait for
 * them to drain, free the TLP scratch buffer, kill the interrupt URB, and
 * quiesce deferred work (timer, tasklet) before dropping the pm reference.
 * NOTE(review): brace and declaration lines are elided in this extraction;
 * visible tokens preserved verbatim.
 */
static int hw_stop (struct net_device *net)
    struct hw_cdc_net *dev = netdev_priv(net);
    DECLARE_WAIT_QUEUE_HEAD_ONSTACK (unlink_wakeup);
    DECLARE_WAITQUEUE (wait, current);
    netif_stop_queue (net);
    if (netif_msg_ifdown (dev)){
        devinfo (dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld",
            dev->stats.rx_packets, dev->stats.tx_packets,
            dev->stats.rx_errors, dev->stats.tx_errors
    // ensure there are no more active urbs
    add_wait_queue (&unlink_wakeup, &wait);
    dev->wait = &unlink_wakeup;
    temp = unlink_urbs (dev, &dev->txq) + unlink_urbs (dev, &dev->rxq);
    // maybe wait for deletions to finish.
    /* NOTE(review): the '&&' chain exits as soon as ANY queue is empty;
     * waiting for all three to drain would need '||' — confirm against
     * upstream usbnet before changing. */
    while (!skb_queue_empty(&dev->rxq)
            && !skb_queue_empty(&dev->txq)
            && !skb_queue_empty(&dev->done)) {
        msleep(UNLINK_TIMEOUT_MS);
        if (netif_msg_ifdown (dev)){
            devdbg (dev, "waited for %d urb completions", temp);
    remove_wait_queue (&unlink_wakeup, &wait);
    /* cleanup the data for TLP */
    dev->hw_tlp_buffer_state = HW_TLP_BUF_STATE_IDLE;
    if (NULL != dev->hw_tlp_tmp_buf.buffer){
        kfree(dev->hw_tlp_tmp_buf.buffer);
        dev->hw_tlp_tmp_buf.buffer = NULL;
    dev->hw_tlp_tmp_buf.pktlength = 0;
    dev->hw_tlp_tmp_buf.bytesneeded = 0;
    usb_kill_urb(dev->interrupt);
    /* deferred work (task, timer, softirq) must also stop.
     * can't flush_scheduled_work() until we drop rtnl (later),
     * else workers could deadlock; so make workers a NOP.
     */
    del_timer_sync (&dev->delay);
    tasklet_kill (&dev->bh);
    usb_autopm_put_interface(dev->intf);
/*-------------------------------------------------------------------------*/

// posts reads, and enables write queuing

// precondition: never called in_interrupt

/* ndo_open: resume the interface (autopm), allocate and zero the TLP
 * reassembly buffer, start the status interrupt URB if present, enable the
 * tx queue, and schedule the bottom half to post the initial reads.
 * NOTE(review): brace and error-path lines are elided in this extraction;
 * visible tokens preserved verbatim.
 */
static int hw_open (struct net_device *net)
    struct hw_cdc_net *dev = netdev_priv(net);
    if ((retval = usb_autopm_get_interface(dev->intf)) < 0) {
        if (netif_msg_ifup (dev)){
            "resumption fail (%d) hw_cdc_net usb-%s-%s, %s",
            dev->udev->bus->bus_name, dev->udev->devpath,
    /* Initialize the data for TLP */
    dev->hw_tlp_buffer_state = HW_TLP_BUF_STATE_IDLE;
    dev->hw_tlp_tmp_buf.buffer = kmalloc(HW_USB_RECEIVE_BUFFER_SIZE, GFP_KERNEL);
    if (NULL != dev->hw_tlp_tmp_buf.buffer){
        memset(dev->hw_tlp_tmp_buf.buffer, 0, HW_USB_RECEIVE_BUFFER_SIZE);
    dev->hw_tlp_tmp_buf.pktlength = 0;
    dev->hw_tlp_tmp_buf.bytesneeded = 0;
    /* start any status interrupt transfer */
    if (dev->interrupt) {
        retval = usb_submit_urb (dev->interrupt, GFP_KERNEL);
        if (netif_msg_ifup (dev)){
            deverr (dev, "intr submit %d", retval);
    netif_start_queue (net);
    // delay posting reads until we're fully open
    tasklet_schedule (&dev->bh);
    usb_autopm_put_interface(dev->intf);
/*-------------------------------------------------------------------------*/

/* ethtool methods; minidrivers may need to add some more, but
 * they'll probably want to use this base set.
 */

/* ethtool .get_settings: delegate to the generic MII helper; the error
 * return for devices without mdio_read is elided in this extraction. */
int hw_get_settings (struct net_device *net, struct ethtool_cmd *cmd)
    struct hw_cdc_net *dev = netdev_priv(net);
    if (!dev->mii.mdio_read){
        /* [error return elided] */
    return mii_ethtool_gset(&dev->mii, cmd);
EXPORT_SYMBOL_GPL(hw_get_settings);
/* ethtool .set_settings: delegate to the generic MII helper; the error
 * return for devices without mdio_write and the final return of retval are
 * elided in this extraction; visible tokens preserved verbatim. */
int hw_set_settings (struct net_device *net, struct ethtool_cmd *cmd)
    struct hw_cdc_net *dev = netdev_priv(net);
    if (!dev->mii.mdio_write){
        /* [error return elided] */
    retval = mii_ethtool_sset(&dev->mii, cmd);
EXPORT_SYMBOL_GPL(hw_set_settings);
/* ethtool .get_link: report MII link state when mdio_read exists; the
 * fallback "always up" return is elided in this extraction. */
u32 hw_get_link (struct net_device *net)
    struct hw_cdc_net *dev = netdev_priv(net);
    /* if the device has mii operations, use those */
    if (dev->mii.mdio_read){
        return mii_link_ok(&dev->mii);
    /* Otherwise, say we're up (to avoid breaking scripts) */
EXPORT_SYMBOL_GPL(hw_get_link);
/* ethtool .nway_reset: restart MII autonegotiation; the error return for
 * devices without mdio_write is elided in this extraction. */
int hw_nway_reset(struct net_device *net)
    struct hw_cdc_net *dev = netdev_priv(net);
    if (!dev->mii.mdio_write){
        /* [error return elided] */
    return mii_nway_restart(&dev->mii);
EXPORT_SYMBOL_GPL(hw_nway_reset);
1625 void hw_get_drvinfo (struct net_device
*net
, struct ethtool_drvinfo
*info
)
1627 struct hw_cdc_net
*dev
= netdev_priv(net
);
1629 strncpy (info
->driver
, dev
->driver_name
, sizeof info
->driver
);
1630 strncpy (info
->version
, DRIVER_VERSION
, sizeof info
->version
);
1631 strncpy (info
->fw_version
, dev
->driver_desc
,
1632 sizeof info
->fw_version
);
1633 usb_make_path (dev
->udev
, info
->bus_info
, sizeof info
->bus_info
);
1635 EXPORT_SYMBOL_GPL(hw_get_drvinfo
);
1637 u32
hw_get_msglevel (struct net_device
*net
)
1639 struct hw_cdc_net
*dev
= netdev_priv(net
);
1641 return dev
->msg_enable
;
1643 EXPORT_SYMBOL_GPL(hw_get_msglevel
);
1645 void hw_set_msglevel (struct net_device
*net
, u32 level
)
1647 struct hw_cdc_net
*dev
= netdev_priv(net
);
1649 dev
->msg_enable
= level
;
1651 EXPORT_SYMBOL_GPL(hw_set_msglevel
);
/* drivers may override default ethtool_ops in their bind() routine */
/* NOTE(review): the closing '};' of this initializer is elided in this
 * extraction; visible tokens preserved verbatim. */
static struct ethtool_ops hw_ethtool_ops = {
    .get_settings		= hw_get_settings,
    .set_settings		= hw_set_settings,
    .get_link		= hw_get_link,
    .nway_reset		= hw_nway_reset,
    .get_drvinfo		= hw_get_drvinfo,
    .get_msglevel		= hw_get_msglevel,
    .set_msglevel		= hw_set_msglevel,
/*-------------------------------------------------------------------------*/

/* work that cannot be done in interrupt context uses keventd.
 *
 * NOTE: with 2.5 we could do more of this using completion callbacks,
 * especially now that control transfers can be queued.
 */
/* keventd worker: handles the EVENT_* bits set via hw_defer_kevent() —
 * clearing tx/rx endpoint stalls, refilling rx memory, and link reset.
 * NOTE(review): the return-type line, error-check headers, braces and some
 * statements are elided in this extraction; visible tokens preserved
 * verbatim.
 */
kevent (struct work_struct *work)
    struct hw_cdc_net *dev =
        container_of(work, struct hw_cdc_net, kevent);
    /* usb_clear_halt() needs a thread context */
    if (test_bit (EVENT_TX_HALT, &dev->flags)) {
        unlink_urbs (dev, &dev->txq);
        status = usb_clear_halt (dev->udev, dev->out);
            && status != -ESHUTDOWN) {
            if (netif_msg_tx_err (dev)){
                deverr (dev, "can't clear tx halt, status %d",
        clear_bit (EVENT_TX_HALT, &dev->flags);
        if (status != -ESHUTDOWN){
            netif_wake_queue (dev->net);
    if (test_bit (EVENT_RX_HALT, &dev->flags)) {
        unlink_urbs (dev, &dev->rxq);
        status = usb_clear_halt (dev->udev, dev->in);
            && status != -ESHUTDOWN) {
            if (netif_msg_rx_err (dev)){
                deverr (dev, "can't clear rx halt, status %d",
        clear_bit (EVENT_RX_HALT, &dev->flags);
        tasklet_schedule (&dev->bh);
    /* tasklet could resubmit itself forever if memory is tight */
    if (test_bit (EVENT_RX_MEMORY, &dev->flags)) {
        struct urb *urb = NULL;
        if (netif_running (dev->net)){
            urb = usb_alloc_urb (0, GFP_KERNEL);
            /* [allocation-failure branch elided] */
            clear_bit (EVENT_RX_MEMORY, &dev->flags);
            clear_bit (EVENT_RX_MEMORY, &dev->flags);
            rx_submit (dev, urb, GFP_KERNEL);
            tasklet_schedule (&dev->bh);
    if (test_bit (EVENT_LINK_RESET, &dev->flags)) {
        clear_bit (EVENT_LINK_RESET, &dev->flags);
    devdbg (dev, "kevent done, flags = 0x%lx",
/*-------------------------------------------------------------------------*/

/* Bulk-out URB completion handler: account tx stats, classify errors
 * (stall -> EVENT_TX_HALT, i/o faults -> throttle via dev->delay timer),
 * then mark the entry done and defer cleanup via tx_defer_bh().
 * NOTE(review): case labels, breaks and braces are elided in this
 * extraction; visible tokens preserved verbatim.
 */
static void tx_complete (struct urb *urb)
    struct sk_buff *skb = (struct sk_buff *) urb->context;
    struct skb_data *entry = (struct skb_data *) skb->cb;
    struct hw_cdc_net *dev = entry->dev;
    devdbg(dev,"tx_complete,status:%d,len:%d, *********time:%ld-%ld",
        urb->status,(int)entry->length,
        current_kernel_time().tv_sec,
        current_kernel_time().tv_nsec);
    if (urb->status == 0) {
        dev->stats.tx_packets++;
        dev->stats.tx_bytes += entry->length;
        /* [else branch elided] */
        dev->stats.tx_errors++;
        switch (urb->status) {
            /* [stall case label elided] */
            hw_defer_kevent (dev, EVENT_TX_HALT);
        /* software-driven interface shutdown */
        case -ECONNRESET:		// async unlink
        case -ESHUTDOWN:		// hardware gone
        // like rx, tx gets controller i/o faults during khubd delays
        // and so it uses the same throttling mechanism.
            if (!timer_pending (&dev->delay)) {
                mod_timer (&dev->delay,
                    jiffies + THROTTLE_JIFFIES);
                if (netif_msg_link (dev)){
                    devdbg (dev, "tx throttle %d",
            netif_stop_queue (dev->net);
            /* [default case elided] */
            if (netif_msg_tx_err (dev)){
                devdbg (dev, "tx err %d", entry->urb->status);
    entry->state = tx_done;
    tx_defer_bh(dev, skb, &dev->txq);
1795 /*-------------------------------------------------------------------------*/
1797 static void hw_tx_timeout (struct net_device
*net
)
1799 struct hw_cdc_net
*dev
= netdev_priv(net
);
1801 unlink_urbs (dev
, &dev
->txq
);
1802 tasklet_schedule (&dev
->bh
);
1804 // FIXME: device recovery -- reset?
/*-------------------------------------------------------------------------*/

/* ndo_start_xmit: wrap the skb into an NCM transfer block via
 * cdc_ncm_tx_fixup(), allocate a tx URB, pad to avoid relying on hardware
 * ZLP handling, submit it, and manage queue flow control / drop accounting.
 * NOTE(review): braces, case labels and the final return are elided in this
 * extraction; visible tokens preserved verbatim ('length' is declared in an
 * elided line).
 */
static int hw_start_xmit (struct sk_buff *skb, struct net_device *net)
    struct hw_cdc_net *dev = netdev_priv(net);
    int retval = NET_XMIT_SUCCESS;
    struct urb *urb = NULL;
    struct skb_data *entry;
    unsigned long flags;
    skb = cdc_ncm_tx_fixup (dev, skb, GFP_ATOMIC);
    /* [fixup-failure branch elided] */
    if (netif_msg_tx_err (dev)){
        devdbg (dev, "can't tx_fixup skb");
    if (!(urb = usb_alloc_urb (0, GFP_ATOMIC))) {
        if (netif_msg_tx_err (dev)){
            devdbg (dev, "no urb");
    entry = (struct skb_data *) skb->cb;
    entry->state = tx_start;
    entry->length = length;
    usb_fill_bulk_urb (urb, dev->udev, dev->out,
        skb->data, skb->len, tx_complete, skb);
    /* don't assume the hardware handles USB_ZERO_PACKET
     * NOTE: strictly conforming cdc-ether devices should expect
     * the ZLP here, but ignore the one-byte packet.
     */
    if ((length % dev->maxpacket) == 0) {
        /* send one extra byte instead of a zero-length packet */
        urb->transfer_buffer_length++;
        if (skb_tailroom(skb)) {
            skb->data[skb->len] = 0;
    devdbg(dev,"hw_start_xmit ,usb_submit_urb,len:%d, time:%ld-%ld",
        skb->len,current_kernel_time().tv_sec,current_kernel_time().tv_nsec);
    spin_lock_irqsave (&dev->txq.lock, flags);
    switch ((retval = usb_submit_urb (urb, GFP_ATOMIC))) {
        /* [stall case label elided] */
        netif_stop_queue (net);
        hw_defer_kevent (dev, EVENT_TX_HALT);
        /* [default case elided] */
        if (netif_msg_tx_err (dev)){
            devdbg (dev, "tx: submit urb err %d", retval);
        /* [success case elided] queued: timestamp and apply flow control */
        net->trans_start = jiffies;
        __skb_queue_tail (&dev->txq, skb);
        if (dev->txq.qlen >= TX_QLEN (dev)){
            netif_stop_queue (net);
    spin_unlock_irqrestore (&dev->txq.lock, flags);
    /* [drop path elided] report success to the stack but count the drop */
    if (netif_msg_tx_err (dev)){
        devdbg (dev, "drop, code %d", retval);
    retval = NET_XMIT_SUCCESS;
    dev->stats.tx_dropped++;
    dev_kfree_skb_any (skb);
    } else if (netif_msg_tx_queued (dev)) {
        devdbg (dev, "> tx, len %d, type 0x%x",
            length, skb->protocol);
/*-------------------------------------------------------------------------*/

// tasklet (work deferred from completions, in_irq) or timer

/* Bottom half: drain dev->done (process rx buffers, free cleaned-up skbs
 * and their URBs), wake any waiter in hw_stop() once all queues are empty,
 * refill the rx queue in batches, and re-enable the tx queue when it has
 * drained below its limit.
 * NOTE(review): case labels, braces and some declarations ('i', 'urb') are
 * elided in this extraction; visible tokens preserved verbatim.
 */
static void hw_bh (unsigned long param)
    struct hw_cdc_net *dev = (struct hw_cdc_net *) param;
    struct sk_buff *skb;
    struct skb_data *entry;
    while ((skb = skb_dequeue (&dev->done))) {
        entry = (struct skb_data *) skb->cb;
        switch (entry->state) {
            /* [rx_done case elided] */
            entry->state = rx_cleanup;
            rx_process (dev, skb);
            /* [cleanup cases elided] release URB and skb */
            usb_free_urb (entry->urb);
            dev_kfree_skb (skb);
            /* [default case elided] */
            devdbg (dev, "bogus skb state %d", entry->state);
    // waiting for all pending urbs to complete?
    if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
        wake_up (dev->wait);
    // or are we maybe short a few urbs?
    } else if (netif_running (dev->net)
            && netif_device_present (dev->net)
            && !timer_pending (&dev->delay)
            && !test_bit (EVENT_RX_HALT, &dev->flags)) {
        int temp = dev->rxq.qlen;
        int qlen = dev->is_ncm ? RX_QLEN_NCM : RX_QLEN (dev);
        // don't refill the queue all at once
        for (i = 0; i < 10 && dev->rxq.qlen < qlen; i++) {
            urb = usb_alloc_urb (0, GFP_ATOMIC);
            rx_submit (dev, urb, GFP_ATOMIC);
        if (temp != dev->rxq.qlen && netif_msg_link (dev)){
            devdbg (dev, "rxqlen %d --> %d",
                temp, dev->rxq.qlen);
        if (dev->rxq.qlen < qlen){
            tasklet_schedule (&dev->bh);
    if (dev->txq.qlen < (dev->is_ncm ? TX_QLEN_NCM :TX_QLEN (dev))){
        netif_wake_queue (dev->net);
/*-------------------------------------------------------------------------
 *
 * USB Device Driver support
 *
 *-------------------------------------------------------------------------*/

// precondition: never called in_interrupt

/* USB disconnect handler: detach driver data from the interface, cancel
 * deferred work, unregister the netdev, flush remaining work, and unbind.
 * NOTE(review): braces and a NULL-check around usb_get_intfdata() are
 * elided in this extraction; visible tokens preserved verbatim.
 */
void hw_disconnect (struct usb_interface *intf)
    struct hw_cdc_net *dev;
    struct usb_device *xdev;
    struct net_device *net;
    dev = usb_get_intfdata(intf);
    usb_set_intfdata(intf, NULL);
    xdev = interface_to_usbdev (intf);
    if (netif_msg_probe (dev)){
        devinfo (dev, "unregister '%s' usb-%s-%s, %s",
            intf->dev.driver->name,
            xdev->bus->bus_name, xdev->devpath,
    /* Synchronously cancel the deferred work item that may have been
     * scheduled; if that work is already running, wait here for it to
     * finish before disconnect continues. (translated from Chinese) */
    cancel_work_sync(&dev->kevent);
    unregister_netdev (net);
    /* we don't hold rtnl here ... */
    flush_scheduled_work ();
    hw_cdc_unbind(dev, intf);
EXPORT_SYMBOL_GPL(hw_disconnect);
/*-------------------------------------------------------------------------*/
#if !(LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30))
/* ndo_set_mac_address hook: the requested address in @p is ignored and the
 * fixed address 00:1e:10:1f:00:01 is always programmed — presumably a
 * deliberate Huawei convention; confirm before changing.
 * NOTE(review): the function tail (return value) is elided in this
 * extraction; visible tokens preserved verbatim.
 */
static int hw_eth_mac_addr(struct net_device *dev, void *p)
    dev->dev_addr[0] = 0x00;
    dev->dev_addr[1] = 0x1e;
    dev->dev_addr[2] = 0x10;
    dev->dev_addr[3] = 0x1f;
    dev->dev_addr[4] = 0x00;
    dev->dev_addr[5] = 0x01;
/* net_device_ops table wiring the driver's ndo_* implementations.
 * NOTE(review): the closing '};' is elided in this extraction. */
static const struct net_device_ops hw_netdev_ops = {
    .ndo_open		= hw_open,
    .ndo_stop		= hw_stop,
    .ndo_start_xmit	= hw_start_xmit,
    .ndo_tx_timeout	= hw_tx_timeout,
    .ndo_change_mtu	= hw_change_mtu,
    .ndo_set_mac_address	= hw_eth_mac_addr,
    .ndo_validate_addr	= eth_validate_addr,
    .ndo_get_stats	= hw_get_stats, /* used for traffic statistics */
/* Forward declarations: request TLP-mode downloads from the device, and
 * query its connection status (both defined elsewhere in the file). */
int hw_send_tlp_download_request(struct usb_interface *intf);
// precondition: never called in_interrupt
int hw_check_conn_status(struct usb_interface *intf);
/* Return whether @intf is an NCM data interface: either the standard
 * CDC/NCM pair (class 0x02, subclass 0x0d) or one of Huawei's
 * vendor-specific encodings (class 0xff, subclass 0x02, protocol
 * 0x16/0x46/0x76).
 * NOTE(review): declarations of bif_* and the return statements are elided
 * in this extraction; visible tokens preserved verbatim.
 */
static int is_ncm_interface(struct usb_interface *intf)
    bif_class = intf->cur_altsetting->desc.bInterfaceClass;
    bif_subclass = intf->cur_altsetting->desc.bInterfaceSubClass;
    bif_protocol = intf->cur_altsetting->desc.bInterfaceProtocol;
    if(( bif_class == 0x02 && bif_subclass == 0x0d)
        ||( bif_class == 0xff && bif_subclass == 0x02 && bif_protocol == 0x16)
        ||( bif_class == 0xff && bif_subclass == 0x02 && bif_protocol == 0x46)
        ||( bif_class == 0xff && bif_subclass == 0x02 && bif_protocol == 0x76)
/* Negotiate NCM transfer parameters with the device:
 * GET_NTB_PARAMETERS, validate/clamp the advertised limits, optionally
 * SET_NTB_INPUT_SIZE, choose 16/32-bit NTB format (SET_NTB_FORMAT),
 * optionally enable CRC mode (SET_CRC_MODE), and install the matching
 * NDP parser options in ctx->popts.
 * NOTE(review): declarations (b, err, net_caps, control_if), error labels,
 * braces, breaks and the final return are elided in this extraction;
 * visible tokens preserved verbatim.
 */
static int cdc_ncm_config(struct ncm_ctx *ctx)
    struct usb_device *udev = ctx->ndev->udev;
    unsigned int tx_pipe;
    unsigned int rx_pipe;
    struct usb_cdc_ncm_ntb_parameter_hw *ntb_params;
#define NCM_MAX_CONTROL_MSG sizeof (*ntb_params)
    b = kmalloc(NCM_MAX_CONTROL_MSG, GFP_KERNEL);
    if (unlikely(b == NULL)){
    net_caps = ctx->ncm_desc->bmNetworkCapabilities;
    control_if = ctx->control->cur_altsetting->desc.bInterfaceNumber;
    tx_pipe = usb_sndctrlpipe(udev, 0);
    rx_pipe = usb_rcvctrlpipe(udev, 0);
    err = usb_control_msg(udev, rx_pipe, USB_CDC_GET_NTB_PARAMETERS,
        USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN, 0,
        control_if, b, sizeof(*ntb_params), NCM_CONTROL_TIMEOUT);
        /* [negative-err check elided] */
        dev_dbg(&udev->dev, "cannot read NTB params\n");
    /* NOTE(review): 'err' is signed and sizeof() unsigned — a negative err
     * should already be rejected by the elided check above; confirm. */
    if (err < sizeof(*ntb_params)) {
        dev_dbg(&udev->dev, "the read NTB params block is too short\n");
    ntb_params = (void *)b;
    ctx->formats = le16_to_cpu(ntb_params->bmNtbFormatSupported);
    ctx->rx_max_ntb = le32_to_cpu(ntb_params->dwNtbInMaxSize);
    ctx->tx_max_ntb = le32_to_cpu(ntb_params->dwNtbOutMaxSize);
    ctx->tx_divisor = le16_to_cpu(ntb_params->wNdpOutDivisor);
    ctx->tx_remainder = le16_to_cpu(ntb_params->wNdpOutPayloadRemainder);
    ctx->tx_align = le16_to_cpu(ntb_params->wNdpOutAlignment);
    devdbg(ctx->ndev,"rx_max_ntb:%d,tx_max_ntb:%d,tx_align:%d",
        ctx->rx_max_ntb,ctx->tx_max_ntb,ctx->tx_align);
    /* 16-bit mode is mandatory per the NCM spec */
    if (unlikely(!(ctx->formats & NTB_FORMAT_SUPPORTED_16BIT))) {
        deverr(ctx->ndev, "device does not support 16-bit mode\n");
    if (unlikely(ctx->tx_align < NCM_NDP_MIN_ALIGNMENT)) {
        deverr(ctx->ndev, "wNdpOutAlignment (%u) must be at least "
            "%u\n", ctx->tx_align, NCM_NDP_MIN_ALIGNMENT);
    if (unlikely(!IS_POWER2(ctx->tx_align))) {
        deverr(ctx->ndev, "wNdpOutAlignment (%u) must be a power of "
            "2\n", ctx->tx_align);
    if (unlikely(ctx->rx_max_ntb < NCM_NTB_MIN_IN_SIZE)) {
        deverr(ctx->ndev, "dwNtbInMaxSize (%u) must be at least "
            "%u\n", ctx->rx_max_ntb, NCM_NTB_MIN_IN_SIZE);
    /* clamp oversized device buffers and tell the device the new limit */
    if (ctx->rx_max_ntb > (u32)NCM_NTB_HARD_MAX_IN_SIZE) {
        devdbg(ctx->ndev, "dwNtbInMaxSize (%u) must be at most %u "
            ", setting the device to %u\n",
            ctx->rx_max_ntb, NCM_NTB_HARD_MAX_IN_SIZE,
            NCM_NTB_HARD_MAX_IN_SIZE);
        ctx->rx_max_ntb = NCM_NTB_HARD_MAX_IN_SIZE;
        put_unaligned_le32(ctx->rx_max_ntb, b);
        err = usb_control_msg(udev, tx_pipe,
            USB_CDC_SET_NTB_INPUT_SIZE,
            USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
            0, control_if, b, 4,
            NCM_CONTROL_TIMEOUT);
            /* [error check elided] */
            deverr(ctx->ndev, "failed setting NTB input size\n");
    if (unlikely(ctx->tx_max_ntb < NCM_NTB_MIN_OUT_SIZE)) {
        deverr(ctx->ndev, "dwNtbOutMaxSize (%u) must be at least "
            "%u\n", ctx->tx_max_ntb, (u32)NCM_NTB_MIN_OUT_SIZE);
    ctx->bit_mode = NCM_BIT_MODE_16;
    if (ncm_prefer_32) {
        if (ctx->formats & NTB_FORMAT_SUPPORTED_32BIT) {
            ctx->bit_mode = NCM_BIT_MODE_32;
            /* [else branch elided] */
            devinfo(ctx->ndev, "device does not support 32-bit "
                "mode, using 16-bit mode\n");
    /* The spec defines a USB_CDC_SET_NTB_FORMAT as an optional feature.
     * The test for 32-bit support is actually a test if the device
     * implements this request
     */
    if (ctx->formats & NTB_FORMAT_SUPPORTED_32BIT) {
        err = usb_control_msg(udev, tx_pipe,
            USB_CDC_SET_NTB_FORMAT,
            USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
            ctx->bit_mode, control_if, NULL, 0,
            NCM_CONTROL_TIMEOUT);
            /* [error check elided] */
            deverr(ctx->ndev, "failed setting bit-mode\n");
    ctx->crc_mode = NCM_CRC_MODE_NO;
    if (ncm_prefer_crc && (net_caps & NCM_NCAP_CRC_MODE)) {
        ctx->crc_mode = NCM_CRC_MODE_YES;
        err = usb_control_msg(udev, tx_pipe,
            USB_CDC_SET_CRC_MODE,
            USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
            NCM_CRC_MODE_YES, control_if, NULL, 0,
            NCM_CONTROL_TIMEOUT);
            /* [error check elided] */
            deverr(ctx->ndev, "failed setting crc-mode\n");
    /* install the matching parser option table for the chosen mode */
    switch (ctx->bit_mode)
    case NCM_BIT_MODE_16:
        memcpy(&ctx->popts, &ndp16_opts,
            sizeof (struct ndp_parser_opts_hw));
        if (ctx->crc_mode == NCM_CRC_MODE_YES){
            ctx->popts.ndp_sign = NCM_NDP16_CRC_SIGN;
    case NCM_BIT_MODE_32:
        memcpy(&ctx->popts, &ndp32_opts,
            sizeof (struct ndp_parser_opts_hw));
        if (ctx->crc_mode == NCM_CRC_MODE_YES){
            ctx->popts.ndp_sign = NCM_NDP32_CRC_SIGN;
#undef NCM_MAX_CONTROL_MSG
/* TODO: add crc support */
/* Unpack one received NTB (NCM Transfer Block) in @skb:
 * validate the NTH header and NDP table, then walk the (index,len)
 * datagram pairs — every datagram except the last is cloned and passed to
 * the stack via hw_skb_return(); @skb itself is trimmed down to the last
 * datagram for the caller to forward.
 * NOTE(review): declarations (idx, ndp_len, rep_len), error labels, braces
 * and the return statements are elided in this extraction; visible tokens
 * preserved verbatim.
 */
int cdc_ncm_rx_fixup(struct hw_cdc_net *dev, struct sk_buff *skb)
#define NCM_BITS(ctx) (((ctx)->bit_mode == NCM_BIT_MODE_16) ? 16 : 32)
/* Minimal NDP has a header and two entries (each entry has 2 items). */
#define MIN_NDP_LEN(ndp_hdr_size, item_len) ((ndp_hdr_size) + \
    2 * 2 * (sizeof(__le16) * (item_len)))
    struct ncm_ctx *ctx = dev->ncm_ctx;
    struct usb_device *udev = dev->udev;
    struct ndp_parser_opts_hw *popts = &ctx->popts;
    struct sk_buff *skb2;
    unsigned skb_len = skb->len;
    __le16 *p = (void *)skb->data;
    unsigned dgram_item_len = popts->dgram_item_len;
    unsigned curr_dgram_idx;
    unsigned curr_dgram_len;
    unsigned next_dgram_idx;
    unsigned next_dgram_len;
    /* --- NTH header validation --- */
    if (unlikely(skb_len < popts->nth_size)) {
        dev_dbg(&udev->dev, "skb len (%u) is shorter than NTH%u len "
            "(%u)\n", skb_len, NCM_BITS(ctx), popts->nth_size);
    if (get_ncm_le32(p) != popts->nth_sign) {
        dev_dbg(&udev->dev, "corrupt NTH%u signature\n", NCM_BITS(ctx));
    if (get_ncm_le16(p) != popts->nth_size) {
        dev_dbg(&udev->dev, "wrong NTH%u len\n", NCM_BITS(ctx));
    /* skip sequence num */
    if (unlikely(get_ncm(&p, popts->block_length) > skb_len)) {
        dev_dbg(&udev->dev, "bogus NTH%u block length\n",
    idx = get_ncm(&p, popts->fp_index);
    if (unlikely(idx > skb_len)) {
        dev_dbg(&udev->dev, "NTH%u fp_index (%u) bigger than skb len "
            "(%u)\n", NCM_BITS(ctx), idx, skb_len);
    /* --- NDP (datagram pointer table) validation --- */
    p = (void *)(skb->data + idx);
    if (get_ncm_le32(p) != popts->ndp_sign) {
        dev_dbg(&udev->dev, "corrupt NDP%u signature\n", NCM_BITS(ctx));
    ndp_len = get_ncm_le16(p);
    if (((ndp_len + popts->nth_size) > skb_len)
        || (ndp_len < (MIN_NDP_LEN(popts->ndp_size, dgram_item_len)))) {
        dev_dbg(&udev->dev, "bogus NDP%u len (%u)\n", NCM_BITS(ctx),
    p += popts->reserved1;
    /* next_fp_index is defined as reserved in the spec */
    p += popts->next_fp_index;
    p += popts->reserved2;
    curr_dgram_idx = get_ncm(&p, dgram_item_len);
    curr_dgram_len = get_ncm(&p, dgram_item_len);
    next_dgram_idx = get_ncm(&p, dgram_item_len);
    next_dgram_len = get_ncm(&p, dgram_item_len);
    /* Parse all the datagrams in the NTB except for the last one. Pass
     * all the parsed datagrams to the networking stack directly
     */
    while (next_dgram_idx && next_dgram_len) {
        if (unlikely((curr_dgram_idx + curr_dgram_len) > skb_len)){
        skb2 = skb_clone(skb, GFP_ATOMIC);
        if (unlikely(!skb2)){
        if (unlikely(!skb_pull(skb2, curr_dgram_idx))){
        skb_trim(skb2, curr_dgram_len);
        rep_len += skb2->len;
        hw_skb_return(dev, skb2);
        curr_dgram_idx = next_dgram_idx;
        curr_dgram_len = next_dgram_len;
        next_dgram_idx = get_ncm(&p, dgram_item_len);
        next_dgram_len = get_ncm(&p, dgram_item_len);
    /* Update 'skb' to represent the last datagram in the NTB and forward
     * it to usbnet which in turn will push it up to the networking stack.
     */
    if (unlikely((curr_dgram_idx + curr_dgram_len) > skb_len)){
    if (unlikely(!skb_pull(skb, curr_dgram_idx))){
    skb_trim(skb, curr_dgram_len);
    rep_len += skb->len;
    /* [error label elided] */
    dev_kfree_skb(skb2);
    devdbg(dev,"cdc_ncm_rx_fixup error\n");
/* Compute the padding needed so a datagram placed after @dgram_off
 * satisfies the device's wNdpOutDivisor/wNdpOutPayloadRemainder alignment
 * rule (offset % divisor == remainder).
 * NOTE(review): the final 'return' line is elided in this extraction. */
static inline unsigned ndp_dgram_pad(struct ncm_ctx *ctx, unsigned dgram_off)
    unsigned rem = dgram_off % ctx->tx_divisor;
    unsigned tmp = ctx->tx_remainder;
    /* already past the remainder in this stride: move to the next one */
    if (rem > ctx->tx_remainder){
        tmp += ctx->tx_divisor;
/* Reset an NTB descriptor to the empty state (field resets partially
 * elided in this extraction); the entry list is re-initialized here. */
static inline void ntb_clear(struct ntb *n)
    INIT_LIST_HEAD(&n->entries);
/* Initialize NTB @n for building an outgoing transfer block of at most
 * @size bytes: datagrams start right after the NTH header, and the NDP
 * (initially just its header plus the null terminator entry) is placed at
 * the next tx_align boundary.
 * NOTE(review): braces, the max_len assignment and return statements are
 * elided in this extraction; visible tokens preserved verbatim. */
static inline int ntb_init(struct ncm_ctx *ctx, struct ntb *n, unsigned size)
    struct ndp_parser_opts_hw *popts = &ctx->popts;
    unsigned dgrams_end;
    dgrams_end = popts->nth_size;
    n->ndp_off = ALIGN(dgrams_end, ctx->tx_align);
    /* NDP header + one (idx,len) pair worth of terminator items */
    n->ndp_len = popts->ndp_size + 2 * 2 * popts->dgram_item_len;
    n->dgrams_end = dgrams_end;
    /* even an empty NTB must fit within the negotiated maximum */
    if (NTB_LEN(n)> n->max_len){
/* Append one datagram (@data, @dgram_len) to NTB @n:
 * compute the padded offset, check that payload plus the grown NDP still
 * fit in max_len, record an (idx,len) entry, copy the payload into the
 * NTB's skb, and advance the bookkeeping offsets.
 * NOTE(review): 'dgram_off' declaration, error returns and braces are
 * elided in this extraction; visible tokens preserved verbatim. */
static inline int ntb_add_dgram(struct ncm_ctx *ctx, struct ntb *n,
    unsigned dgram_len, u8 *data, gfp_t flags)
    struct ndp_parser_opts_hw *popts = &ctx->popts;
    unsigned new_ndp_off;
    unsigned new_ndp_len;
    unsigned new_dgrams_end;
    struct ndp_entry *entry;
    dgram_off = n->dgrams_end + ndp_dgram_pad(ctx, n->dgrams_end);
    new_dgrams_end = dgram_off + dgram_len;
    new_ndp_off = ALIGN(new_dgrams_end, ctx->tx_align);
    /* each datagram adds one (idx,len) pair to the NDP */
    new_ndp_len = n->ndp_len + 2 * 2 * popts->dgram_item_len;
    if ((new_ndp_off + new_ndp_len) > n->max_len){
        /* [no-space return elided] */
    /* TODO: optimize to use a kernel lookaside cache (kmem_cache) */
    entry = kmalloc(sizeof(*entry), flags);
    if (unlikely(entry == NULL)){
    entry->idx = dgram_off;
    entry->len = dgram_len;
    list_add_tail(&entry->list, &n->entries);
    memcpy(n->skb->data + dgram_off, data, dgram_len);
    /* commit the new layout only after the copy succeeded */
    n->ndp_off = new_ndp_off;
    n->ndp_len = new_ndp_len;
    n->dgrams_end = new_dgrams_end;
/* Free every ndp_entry queued on @n->entries; the safe iterator allows
 * unlinking while walking (the kfree/unlink lines are elided in this
 * extraction; visible tokens preserved verbatim). */
static inline void ntb_free_dgram_list(struct ntb *n)
    struct list_head *p;
    struct list_head *tmp;
    list_for_each_safe(p, tmp, &n->entries) {
        struct ndp_entry *e = list_entry(p, struct ndp_entry, list);
/* Serialize NTB @n into its skb: write the NTH header (signature, size,
 * total block length, NDP offset), then the NDP at ndp_off (signature,
 * length, reserved words, one (idx,len) pair per queued datagram, and the
 * null terminator pair), release the entry list, and expose the full block
 * via __skb_put().
 * NOTE(review): braces, sequence-number lines and the final return of the
 * skb are elided in this extraction; visible tokens preserved verbatim. */
static struct sk_buff *ntb_finalize(struct ncm_ctx *ctx, struct ntb *n)
    struct ndp_parser_opts_hw *popts = &ctx->popts;
    __le16 *p = (void *)n->skb->data;
    struct ndp_entry *entry;
    struct sk_buff *skb;
    put_ncm_le32(popts->nth_sign, p);
    put_ncm_le16(popts->nth_size, p);
    /* TODO: add sequence numbers */
    put_ncm(&p, popts->block_length, NTB_LEN(n));
    put_ncm(&p, popts->fp_index, n->ndp_off);
    /* jump to the NDP location and zero it before writing */
    p = (void *)(n->skb->data + n->ndp_off);
    memset(p, 0, popts->ndp_size);
    put_ncm_le32(popts->ndp_sign, p);
    put_ncm_le16(n->ndp_len, p);
    p += popts->reserved1;
    p += popts->next_fp_index;
    p += popts->reserved2;
    list_for_each_entry(entry, &n->entries, list) {
        put_ncm(&p, popts->dgram_item_len, entry->idx);
        put_ncm(&p, popts->dgram_item_len, entry->len);
    /* the all-zero pair terminates the datagram table */
    put_ncm(&p, popts->dgram_item_len, 0);
    put_ncm(&p, popts->dgram_item_len, 0);
    ntb_free_dgram_list(n);
    __skb_put(n->skb, NTB_LEN(n));
/* Take an unused skb from the context's preallocated pool, or NULL if
 * every pool entry is still in flight.
 * NOTE(review): the 'i' declaration, loop body, braces and the fallback
 * path after the NULL check are elided in this extraction; visible tokens
 * preserved verbatim. */
static inline struct sk_buff *ncm_get_skb(struct ncm_ctx *ctx)
    struct sk_buff *skb = NULL;
    /* 'skb_shared' will return 0 for an SKB after this SKB was
     * deallocated by usbnet
     */
    for (i = 0; i < ctx->skb_pool_size && skb_shared(ctx->skb_pool[i]);
    if (likely(i < ctx->skb_pool_size)){
        /* take an extra reference so the pool keeps the skb alive */
        skb = skb_get(ctx->skb_pool[i]);
    if (likely(skb != NULL)){
/* Must be run with tx_lock held */
/* Prepare ctx->curr_ntb for a new outgoing transfer block: size its layout
 * via ntb_init() and attach a pooled skb as its backing buffer.
 * NOTE(review): 'err' declaration, error returns and braces are elided in
 * this extraction; visible tokens preserved verbatim. */
static inline int ncm_init_curr_ntb(struct ncm_ctx *ctx)
    struct usb_device *udev = ctx->ndev->udev;
    err = ntb_init(ctx, &ctx->curr_ntb, ctx->tx_max_ntb);
    if (unlikely(err < 0)) {
        dev_dbg(&udev->dev, "error initializing current-NTB with size "
            "%u\n", ctx->tx_max_ntb);
    ctx->curr_ntb.skb = ncm_get_skb(ctx);
    if (unlikely(ctx->curr_ntb.skb == NULL)) {
        dev_dbg(&udev->dev, "failed getting an SKB from the pool\n");
2548 static inline void ncm_uninit_curr_ntb(struct ncm_ctx
*ctx
)
2550 dev_kfree_skb_any(ctx
->curr_ntb
.skb
);
2551 ntb_clear(&ctx
->curr_ntb
);
2555 /* if 'skb' is NULL (timer context), we will finish the current ntb and
2556 * return it to usbnet
2558 struct sk_buff
* cdc_ncm_tx_fixup(struct hw_cdc_net
*dev
, struct sk_buff
*skb
,
2561 struct ncm_ctx
*ctx
= dev
->ncm_ctx
;
2562 struct ntb
*curr_ntb
= &ctx
->curr_ntb
;
2563 struct sk_buff
*skb2
= NULL
;
2565 unsigned long flags
;
2566 unsigned ndgrams
= 0;
2567 unsigned is_skb_added
= 0;
2568 unsigned is_curr_ntb_new
= 0;
2571 spin_lock_irqsave(&ctx
->tx_lock
, flags
);
2575 if (NTB_IS_EMPTY(curr_ntb
)) {
2576 /* we have nothing to send */
2579 ndgrams
= curr_ntb
->ndgrams
;
2580 skb2
= ntb_finalize(ctx
, curr_ntb
);
2584 /* non-timer context */
2585 if (NTB_IS_EMPTY(curr_ntb
)) {
2586 err
= ncm_init_curr_ntb(ctx
);
2587 if (unlikely(err
< 0)){
2590 is_curr_ntb_new
= 1;
2596 sn
= be32_to_cpu(*(u32
*)(skb
->data
+ 0x2a));
2597 devdbg(dev
, "get pc ACK SN:%x time:%ld-%ld",
2598 sn
,current_kernel_time().tv_sec
,current_kernel_time().tv_nsec
);
2602 sn
= be32_to_cpu(*(u32
*)(skb
->data
+ 0x26));
2603 devdbg(dev
, "get pc PACKETS SN:%x, time:%ld-%ld",
2604 sn
,current_kernel_time().tv_sec
,current_kernel_time().tv_nsec
);
2607 err
= ntb_add_dgram(ctx
, curr_ntb
, skb
->len
, skb
->data
, GFP_ATOMIC
);
2610 /* The datagram was successfully added to the current-NTB */
2612 if(!ctx
->tx_timeout_jiffies
)
2614 ndgrams
= curr_ntb
->ndgrams
;
2615 skb2
= ntb_finalize(ctx
, curr_ntb
);
2619 /* not enough space in current-NTB */
2620 ndgrams
= curr_ntb
->ndgrams
;
2621 /* finalize the current-NTB */
2622 skb2
= ntb_finalize(ctx
, curr_ntb
);
2623 /* setup a new current-NTB */
2624 err
= ncm_init_curr_ntb(ctx
);
2625 if (unlikely(err
< 0)){
2629 is_curr_ntb_new
= 1;
2631 err
= ntb_add_dgram(ctx
, curr_ntb
, skb
->len
, skb
->data
,
2633 if (unlikely(err
< 0)) {
2634 ncm_uninit_curr_ntb(ctx
);
2641 if (is_curr_ntb_new
){
2642 ncm_uninit_curr_ntb(ctx
);
2649 devdbg(dev
, "tx fixup failed (err %d)\n", err
);
2653 dev_kfree_skb_any(skb
);
2656 /* When NULL is returned, usbnet will increment the drop count of the
2657 * net device. If 'skb' was successfully added to the current-NTB,
2658 * decrement the drop-count ahead
2660 if (skb2
== NULL
&& (is_skb_added
|| skb
== NULL
))
2663 dev
->stats
.tx_dropped
--;
2666 /* If a finished NTB is returned to usbnet, it will add 1 to packet
2667 * count. All other packets that we previously 'dropped' by usbnet must
2671 dev
->stats
.tx_packets
+= ndgrams
- 1;
2674 /* reschedule the timer if successfully added a first datagram to a
2675 * newly allocated current-NTB
2677 if (is_curr_ntb_new
&& is_skb_added
&& ctx
->tx_timeout_jiffies
){
2678 mod_timer(&ctx
->tx_timer
, jiffies
+ ctx
->tx_timeout_jiffies
);
2681 spin_unlock_irqrestore(&ctx
->tx_lock
, flags
);
2686 static void ncm_tx_timer_cb(unsigned long param
)
2688 struct ncm_ctx
*ctx
= (void *)param
;
2689 if (!netif_queue_stopped(ctx
->ndev
->net
)){
2690 hw_start_xmit(NULL
, ctx
->ndev
->net
);
2697 hw_cdc_probe (struct usb_interface
*udev
, const struct usb_device_id
*prod
)
2699 struct hw_cdc_net
*dev
;
2700 struct net_device
*net
;
2701 struct usb_host_interface
*interface
;
2702 struct usb_device
*xdev
;
2705 // DECLARE_MAC_BUF(mac);
2707 name
= udev
->dev
.driver
->name
;
2708 xdev
= interface_to_usbdev (udev
);
2709 interface
= udev
->cur_altsetting
;
2715 // set up our own records
2716 net
= alloc_etherdev(sizeof(*dev
));
2718 dbg ("can't kmalloc dev");
2722 dev
= netdev_priv(net
);
2725 /* Add for DTS2011050903736 lxz 20110520 start*/
2726 /* linux kernel > 2.6.37: PowerManager needs disable_depth ==0 */
2727 #ifdef CONFIG_PM_RUNTIME
2728 if(LINUX_VERSION37_LATER
)
2730 dev
->intf
->dev
.power
.disable_depth
= 0;
2733 /* Add for DTS2011050903736 lxz 20110520 end*/
2735 dev
->driver_name
= name
;
2736 dev
->driver_desc
= "Huawei Ethernet Device";
2737 dev
->msg_enable
= netif_msg_init (msg_level
, NETIF_MSG_DRV
2738 | NETIF_MSG_PROBE
| NETIF_MSG_LINK
);
2739 skb_queue_head_init (&dev
->rxq
);
2740 skb_queue_head_init (&dev
->txq
);
2741 skb_queue_head_init (&dev
->done
);
2742 dev
->bh
.func
= hw_bh
;
2743 dev
->bh
.data
= (unsigned long) dev
;
2744 INIT_WORK (&dev
->kevent
, kevent
);
2745 dev
->delay
.function
= hw_bh
;
2746 dev
->delay
.data
= (unsigned long) dev
;
2747 init_timer (&dev
->delay
);
2748 mutex_init (&dev
->phy_mutex
);
2751 //strcpy (net->name, "eth%d");
2752 memcpy (net
->dev_addr
, node_id
, sizeof node_id
);
2754 /* rx and tx sides can use different message sizes;
2755 * bind() should set rx_urb_size in that case.
2757 dev
->hard_mtu
= net
->mtu
+ net
->hard_header_len
;
2759 #if !(LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30))
2760 net
->netdev_ops
= &hw_netdev_ops
;
2762 net
->change_mtu
= hw_change_mtu
;
2763 net
->get_stats
= hw_get_stats
;
2764 net
->hard_start_xmit
= hw_start_xmit
;
2765 net
->open
= hw_open
;
2766 net
->stop
= hw_stop
;
2767 net
->tx_timeout
= hw_tx_timeout
;
2769 net
->watchdog_timeo
= TX_TIMEOUT_JIFFIES
;
2770 net
->ethtool_ops
= &hw_ethtool_ops
;
2773 status
= hw_cdc_bind (dev
, udev
);
2779 strcpy (net
->name
, "eth%d");
2782 /* maybe the remote can't receive an Ethernet MTU */
2783 if (net
->mtu
> (dev
->hard_mtu
- net
->hard_header_len
)){
2784 net
->mtu
= dev
->hard_mtu
- net
->hard_header_len
;
2787 if (status
>= 0 && dev
->status
){
2788 status
= init_status (dev
, udev
);
2795 dev
->rx_urb_size
= dev
->ncm_ctx
->rx_max_ntb
;
2796 }else if (!dev
->rx_urb_size
){
2797 dev
->rx_urb_size
= dev
->hard_mtu
;
2800 dev
->maxpacket
= usb_maxpacket (dev
->udev
, dev
->out
, 1);
2802 SET_NETDEV_DEV(net
, &udev
->dev
);
2803 status
= register_netdev (net
);
2808 if (netif_msg_probe (dev
)){
2809 devinfo (dev
, "register '%s', %s",
2814 // ok, it's ready to go.
2815 usb_set_intfdata (udev
, dev
);
2817 /*activate the download tlp feature*/
2818 if (0 < hw_send_tlp_download_request(udev
)){
2819 devdbg(dev
, "%s: The tlp is activated", __FUNCTION__
);
2820 dev
->hw_tlp_download_is_actived
= 1;//activated successfully
2822 dev
->hw_tlp_download_is_actived
= 0;//activated failed
2825 netif_device_attach (net
);
2827 //kernel_thread(hw_check_conn_status, (void *)net, 0);
2829 /*set the carrier off as default*/
2830 netif_carrier_off(net
);
2831 if (HW_JUNGO_BCDDEVICE_VALUE
!= dev
->udev
->descriptor
.bcdDevice
2832 && BINTERFACESUBCLASS
!= udev
->cur_altsetting
->desc
.bInterfaceSubClass
) {
2834 INIT_DELAYED_WORK(&dev
->status_work
, hw_cdc_check_status_work
);
2835 schedule_delayed_work(&dev
->status_work
, 10*HZ
);
2837 //hw_check_conn_status(udev);
2843 hw_cdc_unbind (dev
, udev
);
2850 EXPORT_SYMBOL_GPL(hw_cdc_probe
);
2852 /*-------------------------------------------------------------------------*/
2855 * suspend the whole driver as soon as the first interface is suspended
2856 * resume only when the last interface is resumed
2859 int hw_suspend (struct usb_interface
*intf
, pm_message_t message
)
2861 struct hw_cdc_net
*dev
= usb_get_intfdata(intf
);
2863 if (!dev
->suspend_count
++) {
2865 * accelerate emptying of the rx and queues, to avoid
2866 * having everything error out.
2868 netif_device_detach (dev
->net
);
2869 (void) unlink_urbs (dev
, &dev
->rxq
);
2870 (void) unlink_urbs (dev
, &dev
->txq
);
2872 * reattach so runtime management can use and
2875 netif_device_attach (dev
->net
);
2879 EXPORT_SYMBOL_GPL(hw_suspend
);
2881 int hw_resume (struct usb_interface
*intf
)
2883 struct hw_cdc_net
*dev
= usb_get_intfdata(intf
);
2885 if (!--dev
->suspend_count
){
2886 tasklet_schedule (&dev
->bh
);
2891 EXPORT_SYMBOL_GPL(hw_resume
);
/* Reset-resume is handled identically to a plain resume. */
static int hw_cdc_reset_resume(struct usb_interface *intf)
{
	return hw_resume(intf);
}
2898 int hw_send_tlp_download_request(struct usb_interface
*intf
)
2900 struct usb_device
*udev
= interface_to_usbdev(intf
);
2901 struct usb_host_interface
*interface
= intf
->cur_altsetting
;
2902 struct usbdevfs_ctrltransfer req
= {0};
2903 unsigned char buf
[256] = {0};
2905 req
.bRequestType
= 0xC0;
2906 req
.bRequest
= 0x02;//activating the download tlp feature request
2907 req
.wIndex
= interface
->desc
.bInterfaceNumber
;
2912 retval
= usb_control_msg(udev
, usb_rcvctrlpipe(udev
, 0), req
.bRequest
,
2913 req
.bRequestType
, req
.wValue
, req
.wIndex
,
2914 buf
, req
.wLength
, req
.timeout
);
2915 /*check the TLP feature is activated or not, response value 0x01 indicates success*/
2916 if (0 < retval
&& 0x01 == buf
[0]){
2922 ///////////////////////////////////////////////////////////////////////////////////////////////////////
2924 * probes control interface, claims data interface, collects the bulk
2925 * endpoints, activates data interface (if needed), maybe sets MTU.
2928 //int hw_generic_cdc_bind(struct hw_cdc_net *dev, struct usb_interface *intf)
2929 #define USB_DEVICE_HUAWEI_DATA 0xFF
2930 static int hw_cdc_bind(struct hw_cdc_net
*dev
, struct usb_interface
*intf
)
2932 u8
*buf
= intf
->cur_altsetting
->extra
;
2933 int len
= intf
->cur_altsetting
->extralen
;
2934 struct usb_interface_descriptor
*d
;
2935 struct hw_dev_state
*info
= (void *) &dev
->data
;
2937 struct usb_driver
*driver
= driver_of(intf
);
2939 struct ncm_ctx
*ctx
= NULL
;
2941 devdbg(dev
, "hw_cdc_bind enter\n");
2943 if (sizeof dev
->data
< sizeof *info
){
2947 dev
->ncm_ctx
= NULL
;
2948 dev
->is_ncm
= is_ncm_interface(intf
);
2952 devdbg(dev
, "this is ncm interface\n");
2953 dev
->ncm_ctx
= kzalloc(sizeof(struct ncm_ctx
), GFP_KERNEL
);
2954 if (dev
->ncm_ctx
== NULL
){
2960 spin_lock_init(&ctx
->tx_lock
);
2962 ctx
->tx_timer
.function
= ncm_tx_timer_cb
;
2963 ctx
->tx_timer
.data
= (unsigned long)ctx
;
2964 init_timer(&ctx
->tx_timer
);
2968 ctx
->tx_timeout_jiffies
= msecs_to_jiffies(ncm_tx_timeout
);
2970 ctx
->tx_timeout_jiffies
= 0;
2973 devdbg(dev
,"ctx->tx_timeout_jiffies:%ld",ctx
->tx_timeout_jiffies
);
2977 memset(info
, 0, sizeof *info
);
2978 info
->control
= intf
;
2980 if (buf
[1] != USB_DT_CS_INTERFACE
){
2985 case USB_CDC_HEADER_TYPE
:
2987 dev_dbg(&intf
->dev
, "extra CDC header\n");
2990 info
->header
= (void *) buf
;
2991 if (info
->header
->bLength
!= sizeof *info
->header
) {
2992 dev_dbg(&intf
->dev
, "CDC header len %u\n",
2993 info
->header
->bLength
);
2997 case USB_CDC_UNION_TYPE
:
2999 dev_dbg(&intf
->dev
, "extra CDC union\n");
3002 info
->u
= (void *) buf
;
3003 if (info
->u
->bLength
!= sizeof *info
->u
) {
3004 dev_dbg(&intf
->dev
, "CDC union len %u\n",
3009 /* we need a master/control interface (what we're
3010 * probed with) and a slave/data interface; union
3011 * descriptors sort this all out.
3013 info
->control
= usb_ifnum_to_if(dev
->udev
,
3014 info
->u
->bMasterInterface0
);
3015 info
->data
= usb_ifnum_to_if(dev
->udev
,
3016 info
->u
->bSlaveInterface0
);
3017 if (!info
->control
|| !info
->data
) {
3019 "master #%u/%p slave #%u/%p\n",
3020 info
->u
->bMasterInterface0
,
3022 info
->u
->bSlaveInterface0
,
3026 if (info
->control
!= intf
) {
3027 dev_dbg(&intf
->dev
, "bogus CDC Union\n");
3028 /* Ambit USB Cable Modem (and maybe others)
3029 * interchanges master and slave interface.
3031 if (info
->data
== intf
) {
3032 info
->data
= info
->control
;
3033 info
->control
= intf
;
3039 /*For Jungo solution, the NDIS device has no data interface, so needn't detect data interface*/
3040 if (HW_JUNGO_BCDDEVICE_VALUE
!= dev
->udev
->descriptor
.bcdDevice
3041 && BINTERFACESUBCLASS
!= intf
->cur_altsetting
->desc
.bInterfaceSubClass
) {
3042 /* a data interface altsetting does the real i/o */
3043 d
= &info
->data
->cur_altsetting
->desc
;
3044 //if (d->bInterfaceClass != USB_CLASS_CDC_DATA) { /*delete the standard CDC slave class detect*/
3045 if (d
->bInterfaceClass
!= USB_DEVICE_HUAWEI_DATA
3046 && d
->bInterfaceClass
!= USB_CLASS_CDC_DATA
) {
3047 /*Add to detect CDC slave class either Huawei defined or standard*/
3048 dev_dbg(&intf
->dev
, "slave class %u\n",
3049 d
->bInterfaceClass
);
3054 case USB_CDC_ETHERNET_TYPE
:
3056 dev_dbg(&intf
->dev
, "extra CDC ether\n");
3059 info
->ether
= (void *) buf
;
3060 if (info
->ether
->bLength
!= sizeof *info
->ether
) {
3061 dev_dbg(&intf
->dev
, "CDC ether len %u\n",
3062 info
->ether
->bLength
);
3065 dev
->hard_mtu
= le16_to_cpu(
3066 info
->ether
->wMaxSegmentSize
);
3067 /* because of Zaurus, we may be ignoring the host
3068 * side link address we were given.
3071 case USB_CDC_NCM_TYPE
:
3072 if (dev
->ncm_ctx
->ncm_desc
){
3073 dev_dbg(&intf
->dev
, "extra NCM descriptor\n");
3075 dev
->ncm_ctx
->ncm_desc
= (void *)buf
;
3080 len
-= buf
[0]; /* bLength */
3084 if (!info
->header
|| !info
->u
|| (!dev
->is_ncm
&&!info
->ether
) ||
3085 (dev
->is_ncm
&& !dev
->ncm_ctx
->ncm_desc
)) {
3086 dev_dbg(&intf
->dev
, "missing cdc %s%s%s%sdescriptor\n",
3087 info
->header
? "" : "header ",
3088 info
->u
? "" : "union ",
3089 info
->ether
? "" : "ether ",
3090 dev
->ncm_ctx
->ncm_desc
? "" : "ncm ");
3096 ctx
->control
= info
->control
;
3097 ctx
->data
= info
->data
;
3098 status
= cdc_ncm_config(ctx
);
3103 dev
->rx_urb_size
= ctx
->rx_max_ntb
;
3105 /* We must always have one spare SKB for the current-NTB (of which
3106 * usbnet has no account)
3108 ctx
->skb_pool_size
= TX_QLEN_NCM
;
3110 ctx
->skb_pool
= kzalloc(sizeof(struct sk_buff
*) * ctx
->skb_pool_size
,
3112 if (ctx
->skb_pool
== NULL
) {
3113 dev_dbg(&intf
->dev
, "failed allocating the SKB pool\n");
3117 for (i
= 0; i
< ctx
->skb_pool_size
; i
++) {
3118 ctx
->skb_pool
[i
] = alloc_skb(ctx
->tx_max_ntb
, GFP_KERNEL
);
3119 if (ctx
->skb_pool
[i
] == NULL
) {
3120 dev_dbg(&intf
->dev
, "failed allocating an SKB for the "
3126 ntb_clear(&ctx
->curr_ntb
);
3131 /*if the NDIS device is not Jungo solution, then assume that it has the data interface, and claim for it*/
3132 if (HW_JUNGO_BCDDEVICE_VALUE
!= dev
->udev
->descriptor
.bcdDevice
3133 && BINTERFACESUBCLASS
!= intf
->cur_altsetting
->desc
.bInterfaceSubClass
)
3135 /* claim data interface and set it up ... with side effects.
3136 * network traffic can't flow until an altsetting is enabled.
3139 /*Begin:add by h00122846 for ndis bind error at 20101106*/
3140 if(info
->data
->dev
.driver
!= NULL
)
3142 usb_driver_release_interface(driver
, info
->data
);
3144 /*End:add by h00122846 for ndis bind error at 20101106*/
3146 status
= usb_driver_claim_interface(driver
, info
->data
, dev
);
3152 status
= hw_get_endpoints(dev
, info
->data
);
3154 /* ensure immediate exit from hw_disconnect */
3158 /* status endpoint: optional for CDC Ethernet, */
3160 if (HW_JUNGO_BCDDEVICE_VALUE
== dev
->udev
->descriptor
.bcdDevice
3161 || BINTERFACESUBCLASS
== intf
->cur_altsetting
->desc
.bInterfaceSubClass
3162 || info
->control
->cur_altsetting
->desc
.bNumEndpoints
== 1)
3164 struct usb_endpoint_descriptor
*desc
;
3165 dev
->status
= &info
->control
->cur_altsetting
->endpoint
[0];
3166 desc
= &dev
->status
->desc
;
3167 if (((desc
->bmAttributes
& USB_ENDPOINT_XFERTYPE_MASK
) != USB_ENDPOINT_XFER_INT
)
3168 || ((desc
->bEndpointAddress
& USB_ENDPOINT_DIR_MASK
) != USB_DIR_IN
)
3169 || (le16_to_cpu(desc
->wMaxPacketSize
)
3170 < sizeof(struct usb_cdc_notification
))
3171 || !desc
->bInterval
) {
3172 printk(KERN_ERR
"fxz-%s:bad notification endpoint\n", __func__
);
3177 return hw_get_ethernet_addr(dev
);
3181 for ( i
= 0; i
< ctx
->skb_pool_size
&& ctx
->skb_pool
[i
]; i
++){
3182 dev_kfree_skb_any(ctx
->skb_pool
[i
]);
3184 kfree(ctx
->skb_pool
);
3187 /* ensure immediate exit from cdc_disconnect */
3188 usb_set_intfdata(info
->data
, NULL
);
3189 usb_driver_release_interface(driver_of(intf
), info
->data
);
3192 kfree(dev
->ncm_ctx
);
3197 devinfo(dev
, "bad CDC descriptors\n");
3201 void hw_cdc_unbind(struct hw_cdc_net
*dev
, struct usb_interface
*intf
)
3203 struct hw_dev_state
*info
= (void *) &dev
->data
;
3204 struct usb_driver
*driver
= driver_of(intf
);
3207 /* disconnect master --> disconnect slave */
3208 if (intf
== info
->control
&& info
->data
) {
3209 /* ensure immediate exit from usbnet_disconnect */
3210 usb_set_intfdata(info
->data
, NULL
);
3211 usb_driver_release_interface(driver
, info
->data
);
3215 /* and vice versa (just in case) */
3216 else if (intf
== info
->data
&& info
->control
) {
3217 /* ensure immediate exit from usbnet_disconnect */
3218 usb_set_intfdata(info
->control
, NULL
);
3219 usb_driver_release_interface(driver
, info
->control
);
3220 info
->control
= NULL
;
3222 if(dev
->is_ncm
&& dev
->ncm_ctx
){
3223 del_timer_sync(&dev
->ncm_ctx
->tx_timer
);
3225 ntb_free_dgram_list(&dev
->ncm_ctx
->curr_ntb
);
3226 for (i
= 0; i
< dev
->ncm_ctx
->skb_pool_size
; i
++){
3227 dev_kfree_skb_any(dev
->ncm_ctx
->skb_pool
[i
]);
3229 kfree(dev
->ncm_ctx
->skb_pool
);
3230 kfree(dev
->ncm_ctx
);
3231 dev
->ncm_ctx
= NULL
;
3236 EXPORT_SYMBOL_GPL(hw_cdc_unbind
);
3239 /*-------------------------------------------------------------------------
3241 * Communications Device Class, Ethernet Control model
3243 * Takes two interfaces. The DATA interface is inactive till an altsetting
3244 * is selected. Configuration data includes class descriptors. There's
3245 * an optional status endpoint on the control interface.
3247 * This should interop with whatever the 2.4 "CDCEther.c" driver
3248 * (by Brad Hards) talked with, with more functionality.
3250 *-------------------------------------------------------------------------*/
3252 static void dumpspeed(struct hw_cdc_net
*dev
, __le32
*speeds
)
3254 if (netif_msg_timer(dev
)){
3255 devinfo(dev
, "link speeds: %u kbps up, %u kbps down",
3256 __le32_to_cpu(speeds
[0]) / 1000,
3257 __le32_to_cpu(speeds
[1]) / 1000);
3261 static inline int hw_get_ethernet_addr(struct hw_cdc_net
*dev
)
3264 dev
->net
->dev_addr
[0] = 0x00;
3265 dev
->net
->dev_addr
[1] = 0x1e;
3267 dev
->net
->dev_addr
[2] = 0x10;
3268 dev
->net
->dev_addr
[3] = 0x1f;
3269 dev
->net
->dev_addr
[4] = 0x00;
3270 dev
->net
->dev_addr
[5] = 0x01;/*change 0x04 into 0x01 20100129*/
3276 enum {WRITE_REQUEST
= 0x21, READ_RESPONSE
= 0xa1};
3278 #define HW_CDC_FAIL -1
3279 /*-------------------------------------------------------------------------*/
3280 /*The ioctl is called to send the qmi request to the device
3281 * or get the qmi response from the device*/
3282 static int hw_cdc_ioctl (struct usb_interface
*intf
, unsigned int code
,
3285 struct usb_device
*udev
= interface_to_usbdev(intf
);
3286 struct hw_cdc_net
*hwnet
= (struct hw_cdc_net
*)dev_get_drvdata(&intf
->dev
);
3287 struct usb_host_interface
*interface
= intf
->cur_altsetting
;
3288 struct usbdevfs_ctrltransfer
*req
= (struct usbdevfs_ctrltransfer
*)buf
;
3291 if (HW_JUNGO_BCDDEVICE_VALUE
!= hwnet
->udev
->descriptor
.bcdDevice
3292 && BINTERFACESUBCLASS
!= intf
->cur_altsetting
->desc
.bInterfaceSubClass
){
3293 if (1 == hwnet
->qmi_sync
) {
3294 deverr(hwnet
, "%s: The ndis port is busy.", __FUNCTION__
);
3299 if (USBDEVFS_CONTROL
!= code
|| NULL
== req
){
3300 deverr(hwnet
, "%s: The request is not supported.", __FUNCTION__
);
3304 if (0 < req
->wLength
){
3305 pbuf
= (char *)kmalloc(req
->wLength
+ 1, GFP_KERNEL
);
3307 deverr(hwnet
, "%s: Kmalloc the buffer failed.", __FUNCTION__
);
3310 memset(pbuf
, 0, req
->wLength
);
3313 switch (req
->bRequestType
)
3317 if (NULL
!= req
->data
&& 0 < req
->wLength
){
3318 if (copy_from_user(pbuf
, req
->data
, req
->wLength
)){
3319 deverr(hwnet
, "usbnet_cdc_ioctl: copy_from_user failed");
3327 pbuf
[req
->wLength
] = 0;
3328 ret
= usb_control_msg(udev
, usb_sndctrlpipe(udev
, 0), req
->bRequest
,
3329 req
->bRequestType
, req
->wValue
, interface
->desc
.bInterfaceNumber
,
3330 pbuf
, req
->wLength
, req
->timeout
);
3335 if (NULL
== req
->data
|| 0 >= req
->wLength
|| NULL
== pbuf
){
3336 deverr(hwnet
, "%s: The buffer is null, can not read the response.",
3340 ret
= usb_control_msg(udev
,
3341 usb_rcvctrlpipe(udev
, 0),
3345 interface
->desc
.bInterfaceNumber
,
3351 if (HW_JUNGO_BCDDEVICE_VALUE
!= hwnet
->udev
->descriptor
.bcdDevice
3352 && BINTERFACESUBCLASS
!= intf
->cur_altsetting
->desc
.bInterfaceSubClass
)
3354 /*check the connection indication*/
3355 if (0x04 == pbuf
[6] && 0x22 == pbuf
[9] && 0x00 == pbuf
[10]){
3356 if (0x02 == pbuf
[16]){
3358 netif_carrier_on(hwnet
->net
);
3359 devinfo(dev
, "CDC: network connection: connected\n");
3363 netif_carrier_off(hwnet
->net
);
3364 devinfo(dev
, "CDC: network connection: disconnected\n");
3369 if (copy_to_user(req
->data
, pbuf
, req
->wLength
)){
3370 deverr(hwnet
, "%s: copy_from_user failed", __FUNCTION__
);
3395 /* delete by lKF36757 2011/12/26,prevent hilink load hw_cdc_driver.ko*/
3397 *#define HUAWEI_ETHER_INTERFACE \
3398 * .bInterfaceClass = USB_CLASS_COMM, \
3399 * .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, \
3400 * .bInterfaceProtocol = USB_CDC_PROTO_NONE
3404 #define HUAWEI_NDIS_INTERFACE \
3405 .bInterfaceClass = USB_CLASS_COMM, \
3406 .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, \
3407 .bInterfaceProtocol = 0xff
3409 #define HUAWEI_NCM_INTERFACE \
3410 .bInterfaceClass = USB_CLASS_COMM, \
3411 .bInterfaceSubClass = 0x0d, \
3412 .bInterfaceProtocol = 0xff
3414 #define HUAWEI_NCM_INTERFACE2 \
3415 .bInterfaceClass = USB_CLASS_COMM, \
3416 .bInterfaceSubClass = 0x0d, \
3417 .bInterfaceProtocol = 0x00
3420 /*Add for PID optimized fangxz 20091105*/
3421 #define HUAWEI_NDIS_OPTIMIZED_INTERFACE \
3422 .bInterfaceClass = 0xFF, \
3423 .bInterfaceSubClass = 0x01, \
3424 .bInterfaceProtocol = 0x09
3426 /*Add for PID optimized lxz 20120508*/
3427 #define HUAWEI_NDIS_OPTIMIZED_INTERFACE_JUNGO \
3428 .bInterfaceClass = 0xFF, \
3429 .bInterfaceSubClass = 0x02, \
3430 .bInterfaceProtocol = 0x09
3432 /*Add for PID optimized marui 20100628*/
3433 #define HUAWEI_NDIS_OPTIMIZED_INTERFACE_VDF \
3434 .bInterfaceClass = 0xFF, \
3435 .bInterfaceSubClass = 0x01, \
3436 .bInterfaceProtocol = 0x39
3438 /*Add for PID optimized lxz 20120508*/
3439 #define HUAWEI_NDIS_OPTIMIZED_INTERFACE_VDF_JUNGO \
3440 .bInterfaceClass = 0xFF, \
3441 .bInterfaceSubClass = 0x02, \
3442 .bInterfaceProtocol = 0x39
3444 /*Add for PID optimized lxz 20120508*/
3445 #define HUAWEI_NDIS_SINGLE_INTERFACE \
3446 .bInterfaceClass = 0xFF, \
3447 .bInterfaceSubClass = 0x01, \
3448 .bInterfaceProtocol = 0x07
3450 /*Add for PID optimized marui 20100811*/
3451 #define HUAWEI_NDIS_SINGLE_INTERFACE_JUNGO \
3452 .bInterfaceClass = 0xFF, \
3453 .bInterfaceSubClass = 0x02, \
3454 .bInterfaceProtocol = 0x07
3456 /*Add for PID optimized lxz 20120508*/
3457 #define HUAWEI_NDIS_SINGLE_INTERFACE_VDF \
3458 .bInterfaceClass = 0xFF, \
3459 .bInterfaceSubClass = 0x01, \
3460 .bInterfaceProtocol = 0x37
3462 /*Add for PID optimized marui 20100811*/
3463 #define HUAWEI_NDIS_SINGLE_INTERFACE_VDF_JUNGO \
3464 .bInterfaceClass = 0xFF, \
3465 .bInterfaceSubClass = 0x02, \
3466 .bInterfaceProtocol = 0x37
3468 /*Add for PID optimized lxz 20120508*/
3469 #define HUAWEI_NCM_OPTIMIZED_INTERFACE \
3470 .bInterfaceClass = 0xFF, \
3471 .bInterfaceSubClass = 0x01, \
3472 .bInterfaceProtocol = 0x16
3474 /*Add for PID optimized liaojianping 20100811*/
3475 #define HUAWEI_NCM_OPTIMIZED_INTERFACE_JUNGO \
3476 .bInterfaceClass = 0xFF, \
3477 .bInterfaceSubClass = 0x02, \
3478 .bInterfaceProtocol = 0x16
3480 /*Add for PID optimized lxz 20120508*/
3481 #define HUAWEI_NCM_OPTIMIZED_INTERFACE_VDF \
3482 .bInterfaceClass = 0xFF, \
3483 .bInterfaceSubClass = 0x01, \
3484 .bInterfaceProtocol = 0x46
3486 #define HUAWEI_NCM_OPTIMIZED_INTERFACE_VDF_JUNGO \
3487 .bInterfaceClass = 0xFF, \
3488 .bInterfaceSubClass = 0x02, \
3489 .bInterfaceProtocol = 0x46
3491 /*Add for PID optimized xiaruihu 20110825*/
3492 #define HUAWEI_INTERFACE_NDIS_NO_3G_JUNGO \
3493 .bInterfaceClass = 0xFF, \
3494 .bInterfaceSubClass = 0x02, \
3495 .bInterfaceProtocol = 0x11
3497 #define HUAWEI_INTERFACE_NDIS_NO_3G_QUALCOMM \
3498 .bInterfaceClass = 0xFF, \
3499 .bInterfaceSubClass = 0x01, \
3500 .bInterfaceProtocol = 0x11
3502 /*Add for PID optimized xiaruihu 20111008*/
3503 #define HUAWEI_INTERFACE_NDIS_HW_QUALCOMM \
3504 .bInterfaceClass = 0xFF, \
3505 .bInterfaceSubClass = 0x01, \
3506 .bInterfaceProtocol = 0x67
3508 #define HUAWEI_INTERFACE_NDIS_CONTROL_QUALCOMM \
3509 .bInterfaceClass = 0xFF, \
3510 .bInterfaceSubClass = 0x01, \
3511 .bInterfaceProtocol = 0x69
3513 #define HUAWEI_INTERFACE_NDIS_NCM_QUALCOMM \
3514 .bInterfaceClass = 0xFF, \
3515 .bInterfaceSubClass = 0x01, \
3516 .bInterfaceProtocol = 0x76
3518 #define HUAWEI_INTERFACE_NDIS_HW_JUNGO \
3519 .bInterfaceClass = 0xFF, \
3520 .bInterfaceSubClass = 0x02, \
3521 .bInterfaceProtocol = 0x67
3523 #define HUAWEI_INTERFACE_NDIS_CONTROL_JUNGO \
3524 .bInterfaceClass = 0xFF, \
3525 .bInterfaceSubClass = 0x02, \
3526 .bInterfaceProtocol = 0x69
3528 #define HUAWEI_INTERFACE_NDIS_NCM_JUNGO \
3529 .bInterfaceClass = 0xFF, \
3530 .bInterfaceSubClass = 0x02, \
3531 .bInterfaceProtocol = 0x76
3534 static const struct usb_device_id hw_products
[] = {
3535 /*ɾ³ý¶ÔPRODUCT IDµÄ±È½Ï£¬Ä¬ÈÏÄܹ»Ö§³ÖËùÓÐHUAWEI_ETHER_INTERFACE ½Ó¿ÚÀàÐ͵ÄNDISÉ豸*/
3536 /* delete by lKF36757 2011/12/26,prevent hilink load hw_cdc_driver.ko*/
3539 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
3540 | USB_DEVICE_ID_MATCH_VENDOR,
3542 HUAWEI_ETHER_INTERFACE,
3545 .match_flags
= USB_DEVICE_ID_MATCH_INT_INFO
3546 | USB_DEVICE_ID_MATCH_VENDOR
,
3548 HUAWEI_NDIS_INTERFACE
,
3550 /*Add for PID optimized fangxz 20091105*/
3552 .match_flags
= USB_DEVICE_ID_MATCH_INT_INFO
3553 | USB_DEVICE_ID_MATCH_VENDOR
,
3555 HUAWEI_NDIS_OPTIMIZED_INTERFACE
,
3557 /*Add for VDF PID optimized marui 20100628*/
3559 .match_flags
= USB_DEVICE_ID_MATCH_INT_INFO
3560 | USB_DEVICE_ID_MATCH_VENDOR
,
3562 HUAWEI_NDIS_OPTIMIZED_INTERFACE_VDF
,
3564 /*Add for PID optimized marui 20100811*/
3566 .match_flags
= USB_DEVICE_ID_MATCH_INT_INFO
3567 | USB_DEVICE_ID_MATCH_VENDOR
,
3569 HUAWEI_NDIS_OPTIMIZED_INTERFACE_JUNGO
,
3571 /*Add for VDF PID optimized marui 20100811*/
3573 .match_flags
= USB_DEVICE_ID_MATCH_INT_INFO
3574 | USB_DEVICE_ID_MATCH_VENDOR
,
3576 HUAWEI_NDIS_OPTIMIZED_INTERFACE_VDF_JUNGO
,
3578 /*Add for NCM PID optimized lxz 20120508*/
3580 .match_flags
= USB_DEVICE_ID_MATCH_INT_INFO
3581 | USB_DEVICE_ID_MATCH_VENDOR
,
3583 HUAWEI_NCM_OPTIMIZED_INTERFACE
,
3585 /*Add for NCM PID optimized liaojianping 20100911*/
3587 .match_flags
= USB_DEVICE_ID_MATCH_INT_INFO
3588 | USB_DEVICE_ID_MATCH_VENDOR
,
3590 HUAWEI_NCM_OPTIMIZED_INTERFACE_JUNGO
,
3592 /*Add for VDF NCM PID optimized lxz 20120508*/
3594 .match_flags
= USB_DEVICE_ID_MATCH_INT_INFO
3595 | USB_DEVICE_ID_MATCH_VENDOR
,
3597 HUAWEI_NCM_OPTIMIZED_INTERFACE_VDF
,
3599 /*Add for VDF NCM PID optimized liaojianping 20100911*/
3601 .match_flags
= USB_DEVICE_ID_MATCH_INT_INFO
3602 | USB_DEVICE_ID_MATCH_VENDOR
,
3604 HUAWEI_NCM_OPTIMIZED_INTERFACE_VDF_JUNGO
,
3606 /*Add for ncm liaojianping 20100911*/
3608 .match_flags
= USB_DEVICE_ID_MATCH_INT_INFO
3609 | USB_DEVICE_ID_MATCH_VENDOR
,
3611 HUAWEI_NCM_INTERFACE
,
3613 /*Add for VDF NCM PID optimized liaojianping 20100911*/
3615 .match_flags
= USB_DEVICE_ID_MATCH_INT_INFO
3616 | USB_DEVICE_ID_MATCH_VENDOR
,
3618 HUAWEI_NCM_INTERFACE2
,
3620 /*Add for PID optimized xiaruihu 20110825*/
3622 .match_flags
= USB_DEVICE_ID_MATCH_INT_INFO
3623 | USB_DEVICE_ID_MATCH_VENDOR
,
3625 HUAWEI_INTERFACE_NDIS_NO_3G_JUNGO
3628 .match_flags
= USB_DEVICE_ID_MATCH_INT_INFO
3629 | USB_DEVICE_ID_MATCH_VENDOR
,
3631 HUAWEI_INTERFACE_NDIS_NO_3G_QUALCOMM
3633 /*Add for PID optimized xiaruihu 20111008*/
3635 .match_flags
= USB_DEVICE_ID_MATCH_INT_INFO
3636 | USB_DEVICE_ID_MATCH_VENDOR
,
3638 HUAWEI_INTERFACE_NDIS_HW_QUALCOMM
3641 .match_flags
= USB_DEVICE_ID_MATCH_INT_INFO
3642 | USB_DEVICE_ID_MATCH_VENDOR
,
3644 HUAWEI_INTERFACE_NDIS_HW_JUNGO
3647 .match_flags
= USB_DEVICE_ID_MATCH_INT_INFO
3648 | USB_DEVICE_ID_MATCH_VENDOR
,
3650 HUAWEI_INTERFACE_NDIS_CONTROL_QUALCOMM
3653 .match_flags
= USB_DEVICE_ID_MATCH_INT_INFO
3654 | USB_DEVICE_ID_MATCH_VENDOR
,
3656 HUAWEI_INTERFACE_NDIS_CONTROL_JUNGO
3659 .match_flags
= USB_DEVICE_ID_MATCH_INT_INFO
3660 | USB_DEVICE_ID_MATCH_VENDOR
,
3662 HUAWEI_INTERFACE_NDIS_NCM_QUALCOMM
3665 .match_flags
= USB_DEVICE_ID_MATCH_INT_INFO
3666 | USB_DEVICE_ID_MATCH_VENDOR
,
3668 HUAWEI_INTERFACE_NDIS_NCM_JUNGO
3671 .match_flags
= USB_DEVICE_ID_MATCH_INT_INFO
3672 | USB_DEVICE_ID_MATCH_VENDOR
,
3674 HUAWEI_NDIS_SINGLE_INTERFACE
3677 .match_flags
= USB_DEVICE_ID_MATCH_INT_INFO
3678 | USB_DEVICE_ID_MATCH_VENDOR
,
3680 HUAWEI_NDIS_SINGLE_INTERFACE_JUNGO
3683 .match_flags
= USB_DEVICE_ID_MATCH_INT_INFO
3684 | USB_DEVICE_ID_MATCH_VENDOR
,
3686 HUAWEI_NDIS_SINGLE_INTERFACE_VDF
3689 .match_flags
= USB_DEVICE_ID_MATCH_INT_INFO
3690 | USB_DEVICE_ID_MATCH_VENDOR
,
3692 HUAWEI_NDIS_SINGLE_INTERFACE_VDF_JUNGO
3696 MODULE_DEVICE_TABLE(usb
, hw_products
);
3698 static int hw_cdc_reset_resume(struct usb_interface
*intf
);
3699 static struct usb_driver hw_ether_driver
= {
3700 .name
= "huawei_ether",
3701 .id_table
= hw_products
,
3702 .probe
= hw_cdc_probe
,
3703 .disconnect
= hw_disconnect
,
3704 #if !(LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36))
3705 .unlocked_ioctl
= hw_cdc_ioctl
,
3707 .ioctl
= hw_cdc_ioctl
,
3709 .suspend
= hw_suspend
,
3710 .resume
= hw_resume
,
3711 .reset_resume
= hw_cdc_reset_resume
,
3715 static void hw_cdc_status(struct hw_cdc_net
*dev
, struct urb
*urb
)
3717 struct usb_cdc_notification
*event
;
3719 if (urb
->actual_length
< sizeof *event
){
3723 /* SPEED_CHANGE can get split into two 8-byte packets */
3724 if (test_and_clear_bit(EVENT_STS_SPLIT
, &dev
->flags
)) {
3725 devdbg(dev
, "The speed is changed by status event");
3726 dumpspeed(dev
, (__le32
*) urb
->transfer_buffer
);
3730 event
= urb
->transfer_buffer
;
3731 switch (event
->bNotificationType
) {
3732 case USB_CDC_NOTIFY_NETWORK_CONNECTION
:
3734 if (netif_msg_timer(dev
)){
3735 devdbg(dev
, "CDC: carrier %s",
3736 event
->wValue
? "on" : "off");
3739 netif_carrier_on(dev
->net
);
3740 devdbg(dev
, "CDC: network connection: connected\n");
3742 netif_carrier_off(dev
->net
);
3743 devdbg(dev
, "CDC: network connection: disconnected\n");
3747 case USB_CDC_NOTIFY_SPEED_CHANGE
: /* tx/rx rates */
3748 if (netif_msg_timer(dev
)){
3749 devdbg(dev
, "CDC: speed change (len %d)",
3750 urb
->actual_length
);
3752 if (urb
->actual_length
!= (sizeof *event
+ 8)){
3753 set_bit(EVENT_STS_SPLIT
, &dev
->flags
);
3755 dumpspeed(dev
, (__le32
*) &event
[1]);
3759 case USB_CDC_NOTIFY_RESPONSE_AVAILABLE
:
3765 devdbg(dev
, "%s: CDC: unexpected notification %02x!", __FUNCTION__
,
3766 event
->bNotificationType
);
3772 static int __init
hw_cdc_init(void)
3774 BUG_ON((sizeof(((struct hw_cdc_net
*)0)->data
)
3775 < sizeof(struct hw_dev_state
)));
3777 return usb_register(&hw_ether_driver
);
3779 fs_initcall(hw_cdc_init
);
3781 static int hw_send_qmi_request(struct usb_interface
*intf
,
3782 unsigned char *snd_req
, int snd_len
,
3783 unsigned char *read_resp
, int resp_len
);
3784 static int hw_send_qmi_request_no_resp(struct usb_interface
*intf
,
3785 unsigned char *snd_req
, int snd_len
,
3786 unsigned char *read_resp
, int resp_len
);
//int hw_check_conn_status(struct usb_interface *intf)
/*
 * Delayed-work handler: queries the device's connection status over QMI
 * and, when the device reports status byte 0x02 at offset 23 of the
 * response, asserts the network carrier via netif_carrier_on().
 *
 * NOTE(review): several structural lines appear to be missing from this
 * extraction — the function's braces, the declarations of 'ret' and
 * 'repeat', and the 'if' conditions guarding the error printks.  The
 * statements below are kept verbatim; confirm the control flow against
 * the original driver source before building.
 */
static void hw_cdc_check_status_work(struct work_struct *work)
	//struct hw_cdc_net *net = usb_get_intfdata(intf);
	//usb_device *udev = interface_to_usbdev(intf);
	/* Recover the owning device from the embedded delayed work item. */
	struct hw_cdc_net *dev = container_of(work, struct hw_cdc_net,
			status_work.work);
	/* Scratch buffer for QMI responses; all transfers here use 56 bytes. */
	unsigned char resp_buf[56] = {0};
	/* Request to obtain a client ID (see "Get client ID failed" below);
	 * the allocated ID is returned in resp_buf[23]. */
	unsigned char client_id_req[0x10] = {0x01, 0x0f, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x06, 0x22, 0x00,
			0x04, 0x00, 0x01, 0x01, 0x00, 0x01};
	/* Request to release that client ID again; bytes 0x0f/0x10 are
	 * overwritten below before it is sent.
	 * NOTE(review): the tail of this initializer (the last two of the
	 * 0x11 bytes) is missing from this extraction. */
	unsigned char rel_client_id_req[0x11] = {0x01, 0x10, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x23, 0x00,
			0x05, 0x00, 0x01, 0x02, 0x00,
	/* Connection-status query; byte 5 is patched with the allocated
	 * client ID before sending. */
	unsigned char status_req[13] = {0x01, 0x0c, 0x00, 0x00, 0x01,
			0x00, 0x00, 0x02, 0x00,
			0x22, 0x00, 0x00, 0x00};
	/* Instance setup request, sent first with no response expected. */
	unsigned char set_instance_req[0x10] = {0x01, 0x0f, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x06, 0x20, 0x00,
			0x04, 0x00, 0x01, 0x01, 0x00, 0x00};

	hw_send_qmi_request_no_resp(dev->intf, set_instance_req, 0x10,
			resp_buf, 56);

	/* Allocate a client ID; the ID comes back in resp_buf[23]. */
	ret = hw_send_qmi_request(dev->intf, client_id_req, 0x10, resp_buf, 56);
	/* NOTE(review): the failure test guarding this printk is missing. */
	printk(KERN_ERR "%s: Get client ID failed\n", __FUNCTION__);

	status_req[5] = resp_buf[23];	/* address the query to our client ID */
	memset(resp_buf, 0, 56 * sizeof (unsigned char));

	//for (repeat = 0; repeat < 3; repeat ++)
	/* Poll the connection status up to three times. */
	for (repeat = 0; repeat < 3; repeat++)
		ret = hw_send_qmi_request(dev->intf, status_req, 13, resp_buf, 56);
		/* NOTE(review): the error/branch conditions inside this loop
		 * are missing from this extraction. */
		printk(KERN_ERR "%s: Get connection status failed\n", __FUNCTION__);
		if (0x02 == resp_buf[23]){	/* 0x02 => link is up */
			printk(KERN_ERR "%s: carrier on\n", __FUNCTION__);
			netif_carrier_on(dev->net);
		printk(KERN_ERR "%s: carrier off\n", __FUNCTION__);
		//netif_carrier_off(dev->net);

	/* Release the client ID allocated above (0x02 release opcode plus
	 * the ID we stashed in status_req[5]). */
	rel_client_id_req[0x0f] = 0x02;
	rel_client_id_req[0x10] = status_req[5];
	memset(resp_buf, 0, 56 * sizeof (unsigned char));
	ret = hw_send_qmi_request_no_resp(dev->intf, rel_client_id_req, 0x11,
			resp_buf, 56);

	/* One-shot check: make sure the delayed work does not re-run. */
	cancel_delayed_work(&dev->status_work);
	//memset(resp_buf, 0, 56 * sizeof (unsigned char));
/*
 * Send a raw QMI request on the interface's default control endpoint
 * without reading a response.  The send is attempted up to three times.
 * read_resp/resp_len are accepted for signature symmetry with
 * hw_send_qmi_request() but are not used by the visible code.
 *
 * NOTE(review): the declarations of 'ret' and 'index', the failure test
 * guarding the printk, and the function's return statements are missing
 * from this extraction; confirm against the original source.
 */
static int hw_send_qmi_request_no_resp(struct usb_interface *intf,
		unsigned char *snd_req, int snd_len,
		unsigned char *read_resp, int resp_len)
	struct usb_device *udev = interface_to_usbdev(intf);
	for (index = 0; index < 3; index++)
		/* Class-specific OUT transfer (bRequestType 0x21, bRequest
		 * 0x00) addressed to this interface, 5 second timeout. */
		ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x00,
				0x21, 0x00, intf->cur_altsetting->desc.bInterfaceNumber,
				snd_req, snd_len, 5000);
		printk(KERN_ERR "%s: send the qmi request failed\n", __FUNCTION__);
/*
 * Send a raw QMI request on the interface's default control endpoint and
 * read back the response into read_resp, matching it against the request
 * by transaction/message bytes.  On a status-indication response whose
 * byte 16 is 0x02 the carrier is asserted on the attached net device.
 *
 * Response dispatch (grounded in the visible comparisons):
 *   read_resp[4] == 0x00 : matched when resp[6]==0x01 and bytes 5/8/9
 *                          echo the request;
 *   read_resp[4] == 0x01 : matched when resp[6]==0x02 and bytes 5/9/10
 *                          echo the request (connection-status response);
 *   read_resp[4] == 0x04 : indication — bytes 9/10 echo the request and
 *                          resp[16]==0x02 means link up -> carrier on.
 *
 * NOTE(review): the declarations of 'ret' and 'index', the retry-loop
 * structure, the failure tests guarding the printks, and the return
 * statements are missing from this extraction; the statements below are
 * kept verbatim.  Confirm control flow and the return convention against
 * the original driver source.
 */
static int hw_send_qmi_request(struct usb_interface *intf,
		unsigned char *snd_req, int snd_len,
		unsigned char *read_resp, int resp_len)
	struct usb_device *udev = interface_to_usbdev(intf);
	struct hw_cdc_net *net = usb_get_intfdata(intf);

	/* Class-specific OUT transfer (0x21/0x00), 5 second timeout. */
	ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x00,
			0x21, 0x00, intf->cur_altsetting->desc.bInterfaceNumber,
			snd_req, snd_len, 5000);
	printk(KERN_ERR "%s: send the qmi request failed\n", __FUNCTION__);

	/* Class-specific IN transfer (0xA1/0x01), 1 second timeout. */
	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x01,
			0xA1, 0x00, intf->cur_altsetting->desc.bInterfaceNumber,
			read_resp, resp_len, 1000);
	printk(KERN_ERR "%s: %d Get response failed\n", __FUNCTION__, index);

	if (0x00 == read_resp[4]){
		/* Response to a request sent on channel 0. */
		if (0x01 == read_resp[6] && snd_req[5] == read_resp[5]
				&& snd_req[8] == read_resp[8] && snd_req[9] == read_resp[9]) {
	} else if (0x01 == read_resp[4]) {
		/* Connection-status response matching our transaction. */
		if (0x02 == read_resp[6] && snd_req[5] == read_resp[5]
				&& snd_req[9] == read_resp[9] && snd_req[10] == read_resp[10]) {
			printk(KERN_ERR "%s: get the conn status req=%02x resp\n",
					__FUNCTION__, snd_req[9]);
	} else if (0x04 == read_resp[4]){
		/* Unsolicited indication: byte 16 == 0x02 means link up. */
		if (snd_req[9] == read_resp[9] && snd_req[10] == read_resp[10]
				&& 0x02 == read_resp[16]) {
			printk(KERN_ERR "%s: get the conn status ind= carrier on\n",
			netif_carrier_on(net->net);
3939 static void __exit
hw_cdc_exit(void)
3941 usb_deregister(&hw_ether_driver
);
3943 module_exit(hw_cdc_exit
);
3946 MODULE_AUTHOR(DRIVER_AUTHOR
);
3947 MODULE_DESCRIPTION(DRIVER_DESC
);
3948 MODULE_VERSION(DRIVER_VERSION
);
3949 MODULE_LICENSE("GPL");