huawei_ether: write connection status info to log
[tomato.git] / release / src-rt / linux / linux-2.6 / drivers / net / usb / huawei_ether.c
blob 1f4d3c747cb9140a9fcde2f86d17b2ef5589aa85
1 /*
2 * CDC Ethernet driver for the networking peripherals of Huawei data card devices
3 * This driver is based on usbnet.c and cdc_ether.c
4 * Copyright (C) 2009 by Franko Fang (Huawei Technologies Co., Ltd.)
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will support Huawei data card devices for Linux networking,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 #include <linux/module.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/ethtool.h>
28 #include <linux/workqueue.h>
29 #include <linux/mii.h>
30 #include <linux/usb.h>
31 #include <linux/sched.h>
32 #include <linux/ctype.h>
33 #include <linux/usb/cdc.h>
34 #include <linux/usbdevice_fs.h>
36 #include <linux/version.h>
37 /////////////////////////////////////////////////////////////////////////////////////////////////
38 #define DRIVER_VERSION "v2.07.00.00"
39 #define DRIVER_AUTHOR "Franko Fang <huananhu@huawei.com>"
40 #define DRIVER_DESC "Huawei Ethernet driver for 3G data card Ethernet devices"
41 //////////////////////////////////////////////////////////////////////////////////////////////////////
42 #define RX_MAX_QUEUE_MEMORY (60 * 1518)
43 #define RX_QLEN(dev) ( ((dev)->udev->speed == USB_SPEED_HIGH) ? \
44 (RX_MAX_QUEUE_MEMORY / (dev)->rx_urb_size) : 4)
45 #define TX_QLEN(dev) (((dev)->udev->speed == USB_SPEED_HIGH) ? \
46 (RX_MAX_QUEUE_MEMORY / (dev)->hard_mtu) : 4)
48 // reawaken network queue this soon after stopping; else watchdog barks
49 #define TX_TIMEOUT_JIFFIES (5 * HZ)
51 // throttle rx/tx briefly after some faults, so khubd might disconnect()
52 // us (it polls at HZ/4 usually) before we report too many false errors.
53 #define THROTTLE_JIFFIES (HZ / 8)
55 // between wakeups
56 #define UNLINK_TIMEOUT_MS 3
57 //////////////////////////////////////////////////////////////////////////////////////////////
58 // randomly generated ethernet address
59 static u8 node_id [ETH_ALEN];
61 static const char driver_name [] = "hw_cdc_net";
63 /* use ethtool to change the level for any given device */
64 static int msg_level = -1;
65 module_param (msg_level, int, 0);
66 MODULE_PARM_DESC (msg_level, "Override default message level");
67 //////////////////////////////////////////////////////////////////////////////////////////
68 #define HW_TLP_MASK_SYNC 0xF800
69 #define HW_TLP_MASK_LENGTH 0x07FF
70 #define HW_TLP_BITS_SYNC 0xF800
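/* NOTE (added, hedged): taken together, these masks imply that every TLP
 * frame begins with a 16-bit header whose upper 5 bits must equal the sync
 * pattern and whose lower 11 bits carry the payload length; e.g. a
 * 1514-byte Ethernet frame would be framed with header
 * 0xF800 | 1514 = 0xFDEA, followed by 1514 payload bytes. */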
71 #pragma pack(push, 1)
72 struct hw_cdc_tlp
73 {
74 	unsigned short pktlength;
75 	unsigned char payload;
76 };
77 #define HW_TLP_HDR_LENGTH sizeof(unsigned short)
78 #pragma pack(pop)
80 typedef enum __HW_TLP_BUF_STATE {
81 HW_TLP_BUF_STATE_IDLE = 0,
82 HW_TLP_BUF_STATE_PARTIAL_FILL,
83 HW_TLP_BUF_STATE_PARTIAL_HDR,
84 HW_TLP_BUF_STATE_HDR_ONLY,
85 HW_TLP_BUF_STATE_ERROR
86 }HW_TLP_BUF_STATE;
88 struct hw_cdc_tlp_tmp{
89 void *buffer;
90 unsigned short pktlength;
91 	unsigned short bytesneeded;
92 };
93 /*max ethernet pkt size 1514*/
94 #define HW_USB_RECEIVE_BUFFER_SIZE 1600L
95 /* for the Thin-Layer Protocol (TLP) */
96 #define HW_USB_MRECEIVE_BUFFER_SIZE 4096L
97 /*for TLP*/
98 #define HW_USB_MRECEIVE_MAX_BUFFER_SIZE (1024 * 16)
100 #define HW_JUNGO_BCDDEVICE_VALUE 0x0102
101 #define BINTERFACESUBCLASS 0x02
102 ///////////////////////////////////////////////////////////////////////////////////////////
103 #define EVENT_TX_HALT 0
104 #define EVENT_RX_HALT 1
105 #define EVENT_RX_MEMORY 2
106 #define EVENT_STS_SPLIT 3
107 #define EVENT_LINK_RESET 4
110 #define NCM_TX_DEFAULT_TIMEOUT_MS 2
112 static int ncm_prefer_32 = 1;
113 //module_param(ncm_prefer_32, bool, S_IRUGO);
114 module_param(ncm_prefer_32, int, S_IRUGO);
116 static int ncm_prefer_crc = 0;
117 //module_param(ncm_prefer_crc, bool, S_IRUGO);
118 module_param(ncm_prefer_crc, int, S_IRUGO);
120 static unsigned long ncm_tx_timeout = NCM_TX_DEFAULT_TIMEOUT_MS;
121 module_param(ncm_tx_timeout, ulong, S_IRUGO);
123 static unsigned int ncm_read_buf_count = 4;
124 module_param(ncm_read_buf_count, uint, S_IRUGO);
126 static unsigned short ncm_read_size_in1k = 4;
127 module_param(ncm_read_size_in1k, short , S_IRUGO);
129 static int rt_debug = 0;
130 //module_param(rt_debug, bool, S_IRUGO|S_IWUSR);
131 module_param(rt_debug, int, S_IRUGO | S_IWUSR);
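/* NOTE (added, hedged): the NCM knobs above are read-only (S_IRUGO) module
 * parameters, so they must be given at load time; the module name below is
 * assumed from driver_name:
 *
 *	insmod hw_cdc_net.ko ncm_read_buf_count=8 ncm_read_size_in1k=16 rt_debug=1
 */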
134 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
135 //#include <linux/unaligned/access_ok.h>
136 #else
137 static inline u16 get_unaligned_le16(const void *p)
138 {
139 	return le16_to_cpup((__le16 *)p);
140 }
142 static inline u32 get_unaligned_le32(const void *p)
143 {
144 	return le32_to_cpup((__le32 *)p);
145 }
147 static inline void put_unaligned_le16(u16 val, void *p)
148 {
149 	*((__le16 *)p) = cpu_to_le16(val);
150 }
152 static inline void put_unaligned_le32(u32 val, void *p)
153 {
154 	*((__le32 *)p) = cpu_to_le32(val);
155 }
156 #endif
158 /* Add for DTS2011050903736 lxz 20110520 start*/
159 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)
160 #define LINUX_VERSION37_LATER 1
161 #else
162 #define LINUX_VERSION37_LATER 0
163 #endif
164 /* Add for DTS2011050903736 lxz 20110520 end*/
168 /* kernels > 2.6.36: some systems cannot find ncm.h but can find cdc.h */
169 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)
170 #include <linux/usb/ncm.h>
171 #else
173 #define USB_CDC_NCM_TYPE 0x1a
175 /* NCM Functional Descriptor */
176 /* changed usb_cdc_ncm_desc -> usb_cdc_ncm_desc_hw to prevent redefinition against cdc.h, 11-05 */
177 struct usb_cdc_ncm_desc_hw {
178 __u8 bLength;
179 __u8 bDescriptorType;
180 __u8 bDescriptorSubType;
181 __le16 bcdNcmVersion;
182 __u8 bmNetworkCapabilities;
183 } __attribute__ ((packed));
185 #ifdef NCM_NCAP_ETH_FILTER
186 #undef NCM_NCAP_ETH_FILTER
187 #endif
188 #ifdef NCM_NCAP_NET_ADDRESS
189 #undef NCM_NCAP_NET_ADDRESS
190 #endif
191 #ifdef NCM_NCAP_ENCAP_COMM
192 #undef NCM_NCAP_ENCAP_COMM
193 #endif
194 #ifdef NCM_NCAP_MAX_DGRAM
195 #undef NCM_NCAP_MAX_DGRAM
196 #endif
197 #ifdef NCM_NCAP_CRC_MODE
198 #undef NCM_NCAP_CRC_MODE
199 #endif
201 #define NCM_NCAP_ETH_FILTER (1 << 0)
202 #define NCM_NCAP_NET_ADDRESS (1 << 1)
203 #define NCM_NCAP_ENCAP_COMM (1 << 2)
204 #define NCM_NCAP_MAX_DGRAM (1 << 3)
205 #define NCM_NCAP_CRC_MODE (1 << 4)
207 #ifdef USB_CDC_GET_NTB_PARAMETERS
208 #undef USB_CDC_GET_NTB_PARAMETERS
209 #endif
210 #ifdef USB_CDC_GET_NET_ADDRESS
211 #undef USB_CDC_GET_NET_ADDRESS
212 #endif
213 #ifdef USB_CDC_SET_NET_ADDRESS
214 #undef USB_CDC_SET_NET_ADDRESS
215 #endif
216 #ifdef USB_CDC_GET_NTB_FORMAT
217 #undef USB_CDC_GET_NTB_FORMAT
218 #endif
219 #ifdef USB_CDC_SET_NTB_FORMAT
220 #undef USB_CDC_SET_NTB_FORMAT
221 #endif
222 #ifdef USB_CDC_GET_NTB_INPUT_SIZE
223 #undef USB_CDC_GET_NTB_INPUT_SIZE
224 #endif
225 #ifdef USB_CDC_SET_NTB_INPUT_SIZE
226 #undef USB_CDC_SET_NTB_INPUT_SIZE
227 #endif
228 #ifdef USB_CDC_GET_MAX_DATAGRAM_SIZE
229 #undef USB_CDC_GET_MAX_DATAGRAM_SIZE
230 #endif
231 #ifdef USB_CDC_SET_MAX_DATAGRAM_SIZE
232 #undef USB_CDC_SET_MAX_DATAGRAM_SIZE
233 #endif
234 #ifdef USB_CDC_GET_CRC_MODE
235 #undef USB_CDC_GET_CRC_MODE
236 #endif
237 #ifdef USB_CDC_SET_CRC_MODE
238 #undef USB_CDC_SET_CRC_MODE
239 #endif
241 #define USB_CDC_GET_NTB_PARAMETERS 0x80
242 #define USB_CDC_GET_NET_ADDRESS 0x81
243 #define USB_CDC_SET_NET_ADDRESS 0x82
244 #define USB_CDC_GET_NTB_FORMAT 0x83
245 #define USB_CDC_SET_NTB_FORMAT 0x84
246 #define USB_CDC_GET_NTB_INPUT_SIZE 0x85
247 #define USB_CDC_SET_NTB_INPUT_SIZE 0x86
248 #define USB_CDC_GET_MAX_DATAGRAM_SIZE 0x87
249 #define USB_CDC_SET_MAX_DATAGRAM_SIZE 0x88
250 #define USB_CDC_GET_CRC_MODE 0x89
251 #define USB_CDC_SET_CRC_MODE 0x8a
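/* NOTE (added, hedged): the request codes 0x80..0x8a above correspond to the
 * class-specific control requests of the CDC NCM 1.0 specification (6.2);
 * later kernels carry the same values in <linux/usb/cdc.h>, which is why
 * they are #undef'd and redefined locally here. */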
253 /*
254  * Class Specific structures and constants
255  *
256  * CDC NCM parameter structure, CDC NCM subclass 6.2.1
257  */
259 struct usb_cdc_ncm_ntb_parameter_hw {
260 __le16 wLength;
261 __le16 bmNtbFormatSupported;
262 __le32 dwNtbInMaxSize;
263 __le16 wNdpInDivisor;
264 __le16 wNdpInPayloadRemainder;
265 __le16 wNdpInAlignment;
266 __le16 wPadding1;
267 __le32 dwNtbOutMaxSize;
268 __le16 wNdpOutDivisor;
269 __le16 wNdpOutPayloadRemainder;
270 __le16 wNdpOutAlignment;
271 __le16 wPadding2;
272 } __attribute__ ((packed));
274 /*
275  * CDC NCM transfer headers, CDC NCM subclass 3.2
276  */
277 #ifdef NCM_NTH16_SIGN
278 #undef NCM_NTH16_SIGN
279 #endif
280 #ifdef NCM_NTH32_SIGN
281 #undef NCM_NTH32_SIGN
282 #endif
284 #define NCM_NTH16_SIGN 0x484D434E /* NCMH */
285 #define NCM_NTH32_SIGN 0x686D636E /* ncmh */
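/* NOTE (added): the signatures are ASCII tags stored little-endian:
 * 0x484D434E reads as the bytes 'N','C','M','H' in memory, and
 * 0x686D636E as 'n','c','m','h'. */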
287 /* changed usb_cdc_ncm_nth16 -> usb_cdc_ncm_nth16_hw to prevent redefinition against cdc.h */
288 struct usb_cdc_ncm_nth16_hw {
289 __le32 dwSignature;
290 __le16 wHeaderLength;
291 __le16 wSequence;
292 __le16 wBlockLength;
293 __le16 wFpIndex;
294 } __attribute__ ((packed));
296 /* changed usb_cdc_ncm_nth32 -> usb_cdc_ncm_nth32_hw to prevent redefinition against cdc.h */
297 struct usb_cdc_ncm_nth32_hw {
298 __le32 dwSignature;
299 __le16 wHeaderLength;
300 __le16 wSequence;
301 __le32 dwBlockLength;
302 __le32 dwFpIndex;
303 } __attribute__ ((packed));
305 /*
306  * CDC NCM datagram pointers, CDC NCM subclass 3.3
307  */
308 #ifdef NCM_NDP16_CRC_SIGN
309 #undef NCM_NDP16_CRC_SIGN
310 #endif
311 #ifdef NCM_NDP16_NOCRC_SIGN
312 #undef NCM_NDP16_NOCRC_SIGN
313 #endif
314 #ifdef NCM_NDP32_CRC_SIGN
315 #undef NCM_NDP32_CRC_SIGN
316 #endif
317 #ifdef NCM_NDP32_NOCRC_SIGN
318 #undef NCM_NDP32_NOCRC_SIGN
319 #endif
321 #define NCM_NDP16_CRC_SIGN 0x314D434E /* NCM1 */
322 #define NCM_NDP16_NOCRC_SIGN 0x304D434E /* NCM0 */
323 #define NCM_NDP32_CRC_SIGN 0x316D636E /* ncm1 */
324 #define NCM_NDP32_NOCRC_SIGN 0x306D636E /* ncm0 */
326 /* changed usb_cdc_ncm_ndp16 -> usb_cdc_ncm_ndp16_hw to prevent redefinition against cdc.h */
327 struct usb_cdc_ncm_ndp16_hw {
328 __le32 dwSignature;
329 __le16 wLength;
330 __le16 wNextFpIndex;
331 __u8 data[0];
332 } __attribute__ ((packed));
334 /* changed usb_cdc_ncm_ndp32 -> usb_cdc_ncm_ndp32_hw to prevent redefinition against cdc.h */
335 struct usb_cdc_ncm_ndp32_hw {
336 __le32 dwSignature;
337 __le16 wLength;
338 __le16 wReserved6;
339 __le32 dwNextFpIndex;
340 __le32 dwReserved12;
341 __u8 data[0];
342 } __attribute__ ((packed));
344 /*
345  * Here are options for the NCM Datagram Pointer table (NDP) parser.
346  * There are 2 different formats in the spec (ch. 3): NDP16 and NDP32;
347  * in NDP16, offset and size fields are one 16-bit word wide,
348  * in NDP32 they are two 16-bit words wide. The signatures also differ.
349  * To keep the parser code the same, put the differences in the structure,
350  * and switch pointers to the structures when the format is changed.
351  */
353 /* changed ndp_parser_opts -> ndp_parser_opts_hw to prevent redefinition */
354 struct ndp_parser_opts_hw {
355 u32 nth_sign;
356 u32 ndp_sign;
357 unsigned nth_size;
358 unsigned ndp_size;
359 unsigned ndplen_align;
360 /* sizes in u16 units */
361 unsigned dgram_item_len; /* index or length */
362 unsigned block_length;
363 unsigned fp_index;
364 unsigned reserved1;
365 unsigned reserved2;
366 	unsigned next_fp_index;
367 };
369 #ifdef INIT_NDP16_OPTS
370 #undef INIT_NDP16_OPTS
371 #endif
372 #ifdef INIT_NDP32_OPTS
373 #undef INIT_NDP32_OPTS
374 #endif
376 #define INIT_NDP16_OPTS { \
377 .nth_sign = NCM_NTH16_SIGN, \
378 .ndp_sign = NCM_NDP16_NOCRC_SIGN, \
379 .nth_size = sizeof(struct usb_cdc_ncm_nth16_hw), \
380 .ndp_size = sizeof(struct usb_cdc_ncm_ndp16_hw), \
381 .ndplen_align = 4, \
382 .dgram_item_len = 1, \
383 .block_length = 1, \
384 .fp_index = 1, \
385 .reserved1 = 0, \
386 .reserved2 = 0, \
387 	.next_fp_index = 1, \
388 }
390 #define INIT_NDP32_OPTS { \
391 .nth_sign = NCM_NTH32_SIGN, \
392 .ndp_sign = NCM_NDP32_NOCRC_SIGN, \
393 .nth_size = sizeof(struct usb_cdc_ncm_nth32_hw), \
394 .ndp_size = sizeof(struct usb_cdc_ncm_ndp32_hw), \
395 .ndplen_align = 8, \
396 .dgram_item_len = 2, \
397 .block_length = 2, \
398 .fp_index = 2, \
399 .reserved1 = 1, \
400 .reserved2 = 2, \
401 	.next_fp_index = 2, \
402 }
404 static inline void put_ncm(__le16 **p, unsigned size, unsigned val)
405 {
406 	switch (size) {
407 	case 1:
408 		put_unaligned_le16((u16)val, *p);
409 		break;
410 	case 2:
411 		put_unaligned_le32((u32)val, *p);
413 		break;
414 	default:
415 		BUG();
416 	}
418 	*p += size;
419 }
421 static inline unsigned get_ncm(__le16 **p, unsigned size)
422 {
423 	unsigned tmp;
425 	switch (size) {
426 	case 1:
427 		tmp = get_unaligned_le16(*p);
428 		break;
429 	case 2:
430 		tmp = get_unaligned_le32(*p);
431 		break;
432 	default:
433 		BUG();
434 	}
436 	*p += size;
437 	return tmp;
438 }
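/* NOTE (added, hedged): a usage sketch of the 16/32-bit parser unification.
 * All field widths in ndp_parser_opts_hw are counted in 16-bit words, so
 * with either options table below:
 *
 *	__le16 *p = ...;                                   // cursor into an NDP
 *	unsigned idx = get_ncm(&p, opts->dgram_item_len);  // 1 word in NDP16, 2 in NDP32
 *	unsigned len = get_ncm(&p, opts->dgram_item_len);
 *
 * reads one datagram (index, length) pair and advances the cursor by the
 * right amount in both formats. */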
440 #ifdef NCM_CONTROL_TIMEOUT
441 #undef NCM_CONTROL_TIMEOUT
442 #endif
444 #define NCM_CONTROL_TIMEOUT (5 * 1000)
445 /*#endif*/
447 /* 'u' must be of unsigned type */
448 #define IS_POWER2(u) (((u) > 0) && !((u) & ((u) - 1)))
450 /* 'p' must designate a variable of type __le16 * (in all get/put_ncm_leXX) */
451 #define get_ncm_le16(p) \
452 ({ __le16 val = get_unaligned_le16(p); p += 1; val; })
454 #define get_ncm_le32(p) \
455 ({ __le32 val = get_unaligned_le32(p); p += 2; val; })
457 #define put_ncm_le16(val, p) \
458 ({ put_unaligned_le16((val), p); p += 1; })
460 #define put_ncm_le32(val, p) \
461 ({ put_unaligned_le32((val), p); p += 2; })
463 #define NCM_NDP_MIN_ALIGNMENT 4
465 #ifdef NCM_NTB_MIN_IN_SIZE
466 #undef NCM_NTB_MIN_IN_SIZE
467 #endif
468 #define NCM_NTB_MIN_IN_SIZE 2048
470 #ifdef NCM_NTB_MIN_OUT_SIZE
471 #undef NCM_NTB_MIN_OUT_SIZE
472 #endif
474 #define NCM_NDP16_ENTRY_LEN 4
476 /* NTB16 must include: NTB16 header, NDP16 header, datagram pointer entry,
477  * terminating (NULL) datagram entry
478  */
479 #define NCM_NTB_MIN_OUT_SIZE (sizeof(struct usb_cdc_ncm_nth16_hw) \
480 + sizeof(struct usb_cdc_ncm_ndp16_hw) + 2 * NCM_NDP16_ENTRY_LEN)
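/* NOTE (added): with the structures above this evaluates to
 * 12 (NTH16) + 8 (NDP16 header) + 2 * 4 (one datagram entry plus the
 * terminating NULL entry) = 28 bytes, the smallest valid NTB16. */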
482 #ifndef max
483 #define max(_a, _b) (((_a) > (_b)) ? (_a) : (_b))
484 #endif
486 #ifndef min
487 #define min(_a, _b) (((_a) < (_b)) ? (_a) : (_b))
488 #endif
490 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)
491 #define NCM_NTB_HARD_MAX_IN_SIZE ((u32)(max(16,(int)ncm_read_size_in1k) * 1024))
492 #else
493 #define NCM_NTB_HARD_MAX_IN_SIZE ((u32)(max(2,(int)ncm_read_size_in1k) * 1024))
494 #endif
496 #define RX_QLEN_NCM ncm_read_buf_count
497 #define TX_QLEN_NCM 4
499 /* These are actually defined in usbnet.c and we need to redefine these here in
500  * order to calculate the size of the SKB pool
501  */
504 static struct ndp_parser_opts_hw ndp16_opts = INIT_NDP16_OPTS;
505 static struct ndp_parser_opts_hw ndp32_opts = INIT_NDP32_OPTS;
507 struct ndp_entry {
508 struct list_head list;
509 unsigned idx;
510 	unsigned len;
511 };
513 struct ntb {
514 /* Maximum possible length of this NTB */
515 unsigned max_len;
516 /* The current offset of the NDP */
517 unsigned ndp_off;
518 /* The current length of the NDP */
519 unsigned ndp_len;
520 /* End of the datagrams section */
521 unsigned dgrams_end;
522 	/* Entries list (datagram index/length pairs) */
523 struct list_head entries;
524 /* Number of datagrams in this NTB */
525 unsigned ndgrams;
526 /* The SKB with the actual NTB data */
527 	struct sk_buff *skb;
528 };
530 #define NTB_LEN(n) ((n)->ndp_off + (n)->ndp_len)
531 #define NTB_IS_EMPTY(n) ((n)->ndgrams == 0)
533 struct ncm_ctx {
534 struct usb_cdc_ncm_desc_hw *ncm_desc;
535 //struct usbnet *unet;
536 struct hw_cdc_net *ndev;
537 struct usb_interface *control;
538 struct usb_interface *data;
540 #define NTB_FORMAT_SUPPORTED_16BIT 0x0001
541 #define NTB_FORMAT_SUPPORTED_32BIT 0x0002
542 u16 formats;
543 u32 rx_max_ntb;
544 u32 tx_max_ntb;
545 u16 tx_divisor;
546 u16 tx_remainder;
547 u16 tx_align;
549 #define NCM_BIT_MODE_16 0
550 #define NCM_BIT_MODE_32 1
551 u8 bit_mode;
552 #define NCM_CRC_MODE_NO 0
553 #define NCM_CRC_MODE_YES 1
554 u8 crc_mode;
556 struct ndp_parser_opts_hw popts;
558 struct ntb curr_ntb;
559 spinlock_t tx_lock;
560 struct sk_buff **skb_pool;
561 unsigned skb_pool_size;
562 struct timer_list tx_timer;
563 /* The maximum amount of jiffies that a datagram can be held (in the
564 	 * current-NTB) before it must be sent on the bus
565 	 */
566 	unsigned long tx_timeout_jiffies;
567 #ifdef CONFIG_CDC_ENCAP_COMMAND
568 struct cdc_encap *cdc_encap_ctx;
569 #endif
570 };
573 struct hw_cdc_net{
574 /* housekeeping */
575 struct usb_device *udev;
576 struct usb_interface *intf;
577 const char *driver_name;
578 const char *driver_desc;
579 void *driver_priv;
580 wait_queue_head_t *wait;
581 struct mutex phy_mutex;
582 unsigned char suspend_count;
584 /* i/o info: pipes etc */
585 unsigned in, out;
586 struct usb_host_endpoint *status;
587 unsigned maxpacket;
588 struct timer_list delay;
590 /* protocol/interface state */
591 struct net_device *net;
592 struct net_device_stats stats;
593 int msg_enable;
594 unsigned long data [5];
595 u32 xid;
596 u32 hard_mtu; /* count any extra framing */
597 size_t rx_urb_size; /* size for rx urbs */
598 struct mii_if_info mii;
600 /* various kinds of pending driver work */
601 struct sk_buff_head rxq;
602 struct sk_buff_head txq;
603 struct sk_buff_head done;
604 struct urb *interrupt;
605 struct tasklet_struct bh;
607 struct work_struct kevent;
608 	struct delayed_work status_work; /* fangxiaozhi: delayed work for status checking */
609 int qmi_sync;
610 unsigned long flags;
612 /*The state and buffer for the data of TLP*/
613 HW_TLP_BUF_STATE hw_tlp_buffer_state;
614 struct hw_cdc_tlp_tmp hw_tlp_tmp_buf;
615 	/* indicates whether the download (downlink) TLP feature is active */
616 int hw_tlp_download_is_actived;
618 /*Add for ncm */
619 int is_ncm;
620 	struct ncm_ctx *ncm_ctx;
621 };
624 static inline struct usb_driver *driver_of(struct usb_interface *intf)
625 {
626 	return to_usb_driver(intf->dev.driver);
627 }
630 /* Drivers that reuse some of the standard USB CDC infrastructure
631 * (notably, using multiple interfaces according to the CDC
632 * union descriptor) get some helper code.
634 struct hw_dev_state {
635 struct usb_cdc_header_desc *header;
636 struct usb_cdc_union_desc *u;
637 struct usb_cdc_ether_desc *ether;
638 struct usb_interface *control;
639 	struct usb_interface *data;
640 };
643 /* we record the state for each of our queued skbs */
644 enum skb_state {
645 illegal = 0,
646 tx_start, tx_done,
647 	rx_start, rx_done, rx_cleanup
648 };
650 struct skb_data { /* skb->cb is one of these */
651 struct urb *urb;
652 struct hw_cdc_net *dev;
653 enum skb_state state;
654 	size_t length;
655 };
656 ////////////////////////////////////////////////////////////////////////////////////////////////////////////////
657 #define devdbg(hw_cdc_net, fmt, arg...) \
658 ((void)(rt_debug && printk(KERN_ERR "Hw_cdc_driver######: " fmt "\n" , ## arg)))
661 #define deverr(hw_cdc_net, fmt, arg...) \
662 printk(KERN_ERR "%s: " fmt "\n" , (hw_cdc_net)->net->name , ## arg)
663 #define devwarn(hw_cdc_net, fmt, arg...) \
664 printk(KERN_WARNING "%s: " fmt "\n" , (hw_cdc_net)->net->name , ## arg)
666 #define devinfo(hw_cdc_net, fmt, arg...) \
667 	printk(KERN_INFO "%s: " fmt "\n" , (hw_cdc_net)->net->name , ## arg)
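/* NOTE (added, hedged): devdbg() is always compiled in but prints only while
 * the rt_debug module parameter is non-zero; as rt_debug is registered with
 * S_IRUGO | S_IWUSR it can usually be toggled at runtime via
 * /sys/module/<module-name>/parameters/rt_debug (exact path depends on the
 * final module name). */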
670 ////////////////////////////////////////////////////////////////////////////////
671 static void hw_cdc_status(struct hw_cdc_net *dev, struct urb *urb);
672 static inline int hw_get_ethernet_addr(struct hw_cdc_net *dev);
673 static int hw_cdc_bind(struct hw_cdc_net *dev, struct usb_interface *intf);
674 void hw_cdc_unbind(struct hw_cdc_net *dev, struct usb_interface *intf);
675 int cdc_ncm_rx_fixup(struct hw_cdc_net *dev, struct sk_buff *skb);
676 struct sk_buff * cdc_ncm_tx_fixup(struct hw_cdc_net *dev, struct sk_buff *skb,
677 gfp_t mem_flags);
678 ///////////////////////////
679 int hw_get_endpoints(struct hw_cdc_net *, struct usb_interface *);
680 void hw_skb_return (struct hw_cdc_net *, struct sk_buff *);
681 void hw_unlink_rx_urbs(struct hw_cdc_net *);
682 void hw_defer_kevent (struct hw_cdc_net *, int );
683 int hw_get_settings (struct net_device *, struct ethtool_cmd *);
684 int hw_set_settings (struct net_device *, struct ethtool_cmd *);
685 u32 hw_get_link (struct net_device *);
686 int hw_nway_reset(struct net_device *);
687 void hw_get_drvinfo (struct net_device *, struct ethtool_drvinfo *);
688 u32 hw_get_msglevel (struct net_device *);
689 void hw_set_msglevel (struct net_device *, u32 );
690 void hw_disconnect (struct usb_interface *);
691 int hw_cdc_probe (struct usb_interface *, const struct usb_device_id *);
692 int hw_resume (struct usb_interface *);
693 int hw_suspend (struct usb_interface *, pm_message_t );
694 //////////////////////////
696 /*Begin : fangxiaozhi added for work*/
697 static void hw_cdc_check_status_work(struct work_struct *work);
699 struct delayed_work *option_suspend_wq;
702 /*End : fangxiaozhi added for work*/
710 /* handles CDC Ethernet and many other network "bulk data" interfaces */
711 int hw_get_endpoints(struct hw_cdc_net *dev, struct usb_interface *intf)
712 {
713 	int tmp;
714 	struct usb_host_interface *alt = NULL;
715 	struct usb_host_endpoint *in = NULL, *out = NULL;
716 	struct usb_host_endpoint *status = NULL;
718 	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
719 		unsigned ep;
721 		//in = out = status = NULL;
722 		in = NULL;
723 		out = NULL;
724 		status = NULL;
725 		alt = intf->altsetting + tmp;
727 		/* take the first altsetting with in-bulk + out-bulk;
728 		 * remember any status endpoint, just in case;
729 		 * ignore other endpoints and altsettings.
730 		 */
731 		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
733 			struct usb_host_endpoint *e;
734 			int intr = 0;
736 			e = alt->endpoint + ep;
737 			switch (e->desc.bmAttributes) {
738 			case USB_ENDPOINT_XFER_INT:
739 				if (!usb_endpoint_dir_in(&e->desc)){
740 					continue;
741 				}
742 				intr = 1;
743 				/* FALLTHROUGH */
744 			case USB_ENDPOINT_XFER_BULK:
745 				break;
746 			default:
747 				continue;
748 			}
749 			if (usb_endpoint_dir_in(&e->desc)) {
750 				if (!intr && !in){
751 					in = e;
752 				}else if (intr && !status){
753 					status = e;
754 				}
755 			} else {
756 				if (!out){
757 					out = e;
758 				}
759 			}
760 		}
761 		if (in && out){
762 			break;
763 		}
764 	}
765 	if (!alt || !in || !out){
766 		return -EINVAL;
767 	}
768 	if (alt->desc.bAlternateSetting != 0) {
769 		tmp = usb_set_interface (dev->udev, alt->desc.bInterfaceNumber,
770 				alt->desc.bAlternateSetting);
771 		if (tmp < 0){
772 			return tmp;
773 		}
774 	}
776 	dev->in = usb_rcvbulkpipe (dev->udev,
777 			in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
778 	dev->out = usb_sndbulkpipe (dev->udev,
779 			out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
780 	dev->status = status;
781 	return 0;
782 }
783 EXPORT_SYMBOL_GPL(hw_get_endpoints);
785 static void intr_complete (struct urb *urb);
787 static int init_status (struct hw_cdc_net *dev, struct usb_interface *intf)
788 {
789 	char *buf = NULL;
790 	unsigned pipe = 0;
791 	unsigned maxp;
792 	unsigned period;
795 	pipe = usb_rcvintpipe (dev->udev,
796 			dev->status->desc.bEndpointAddress
797 			& USB_ENDPOINT_NUMBER_MASK);
798 	maxp = usb_maxpacket (dev->udev, pipe, 0);
800 	/* avoid 1 msec chatter: min 8 msec poll rate */
801 	period = max ((int) dev->status->desc.bInterval,
802 			(dev->udev->speed == USB_SPEED_HIGH) ? 7 : 3);
804 	buf = kmalloc (maxp, GFP_KERNEL);
805 	if (buf) {
806 		dev->interrupt = usb_alloc_urb (0, GFP_KERNEL);
807 		if (!dev->interrupt) {
808 			kfree (buf);
809 			return -ENOMEM;
810 		} else {
811 			usb_fill_int_urb(dev->interrupt, dev->udev, pipe,
812 					buf, maxp, intr_complete, dev, period);
813 			dev_dbg(&intf->dev,
814 				"status ep%din, %d bytes period %d\n",
815 				usb_pipeendpoint(pipe), maxp, period);
816 		}
817 	}
818 	return 0;
819 }
822 /* Passes this packet up the stack, updating its accounting.
823 * Some link protocols batch packets, so their rx_fixup paths
824 * can return clones as well as just modify the original skb.
826 void hw_skb_return (struct hw_cdc_net *dev, struct sk_buff *skb)
828 int status;
829 u32 sn;
831 if(skb->len > 128)
833 sn = be32_to_cpu(*(u32 *)(skb->data + 0x26));
834 devdbg(dev,"hw_skb_return,len:%d receive sn:%x, time:%ld-%ld",
835 skb->len,sn,current_kernel_time().tv_sec,current_kernel_time().tv_nsec);
837 else
839 sn = be32_to_cpu(*(u32 *)(skb->data + 0x2a));
840 devdbg(dev,"hw_skb_return,len:%d receive ack sn:%x, time:%ld-%ld",
841 skb->len,sn,current_kernel_time().tv_sec,current_kernel_time().tv_nsec);
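	/* NOTE (added, hedged): assuming an IPv4/TCP frame with no IP options,
	 * offset 0x26 = 14 (Ethernet) + 20 (IP) + 4 is the TCP sequence number
	 * and 0x2a = 14 + 20 + 8 is the TCP acknowledgment number; these are
	 * the connection-status values written to the log above. */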
844 skb->protocol = eth_type_trans (skb, dev->net);
845 dev->stats.rx_packets++;
846 dev->stats.rx_bytes += skb->len;
848 if (netif_msg_rx_status (dev)){
849 devdbg (dev, "< rx, len %zu, type 0x%x",
850 skb->len + sizeof (struct ethhdr), skb->protocol);
852 memset (skb->cb, 0, sizeof (struct skb_data));
853 status = netif_rx (skb);
854 if (status != NET_RX_SUCCESS && netif_msg_rx_err (dev)){
855 devdbg (dev, "netif_rx status %d", status);
858 EXPORT_SYMBOL_GPL(hw_skb_return);
860 // unlink pending rx/tx; completion handlers do all other cleanup
862 static int unlink_urbs (struct hw_cdc_net *dev, struct sk_buff_head *q)
864 unsigned long flags;
865 struct sk_buff *skb, *skbnext;
866 int count = 0;
868 spin_lock_irqsave (&q->lock, flags);
869 for (skb = q->next; skb != (struct sk_buff *) q; skb = skbnext) {
870 struct skb_data *entry;
871 struct urb *urb;
872 int retval;
874 entry = (struct skb_data *) skb->cb;
875 urb = entry->urb;
876 skbnext = skb->next;
878 // during some PM-driven resume scenarios,
879 // these (async) unlinks complete immediately
880 retval = usb_unlink_urb (urb);
881 if (retval != -EINPROGRESS && retval != 0){
882 devdbg (dev, "unlink urb err, %d", retval);
884 else
886 count++;
889 spin_unlock_irqrestore (&q->lock, flags);
890 return count;
894 // Flush all pending rx urbs
895 // minidrivers may need to do this when the MTU changes
897 void hw_unlink_rx_urbs(struct hw_cdc_net *dev)
898 {
899 	if (netif_running(dev->net)) {
900 		(void) unlink_urbs (dev, &dev->rxq);
901 		tasklet_schedule(&dev->bh);
902 	}
903 }
904 EXPORT_SYMBOL_GPL(hw_unlink_rx_urbs);
907 /*-------------------------------------------------------------------------
909 * Network Device Driver (peer link to "Host Device", from USB host)
911 *-------------------------------------------------------------------------*/
913 static int hw_change_mtu (struct net_device *net, int new_mtu)
915 struct hw_cdc_net *dev = netdev_priv(net);
916 int ll_mtu = new_mtu + net->hard_header_len;
917 int old_hard_mtu = dev->hard_mtu;
918 int old_rx_urb_size = dev->rx_urb_size;
921 if (new_mtu <= 0){
922 return -EINVAL;
924 // no second zero-length packet read wanted after mtu-sized packets
925 if ((ll_mtu % dev->maxpacket) == 0){
926 return -EDOM;
928 net->mtu = new_mtu;
930 dev->hard_mtu = net->mtu + net->hard_header_len;
931 if (dev->rx_urb_size == old_hard_mtu && !dev->is_ncm) {
932 dev->rx_urb_size = dev->hard_mtu;
933 if (dev->rx_urb_size > old_rx_urb_size)
935 hw_unlink_rx_urbs(dev);
939 devdbg(dev,"change mtu :%d, urb_size:%u",new_mtu,(u32)dev->rx_urb_size);
941 return 0;
944 /*-------------------------------------------------------------------------*/
945 //#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30)
946 static struct net_device_stats *hw_get_stats (struct net_device *net)
947 {
948 	struct hw_cdc_net *dev = netdev_priv(net);
949 	return &dev->stats;
950 }
951 //#endif
952 /*-------------------------------------------------------------------------*/
954 static void tx_defer_bh(struct hw_cdc_net *dev,
955 		struct sk_buff *skb,
956 		struct sk_buff_head *list)
957 {
958 	unsigned long flags;
960 	spin_lock_irqsave(&list->lock, flags);
961 	__skb_unlink(skb, list);
962 	spin_unlock(&list->lock);
963 	spin_lock(&dev->done.lock);
964 	__skb_queue_tail(&dev->done, skb);
965 	if (1 <= dev->done.qlen){
966 		tasklet_schedule(&dev->bh);
967 	}
968 	spin_unlock_irqrestore(&dev->done.lock, flags);
969 }
970 ////////////////////////////////////////////
971 static HW_TLP_BUF_STATE submit_skb(struct hw_cdc_net *dev,
972 unsigned char *data,
973 unsigned int len)
975 struct sk_buff *skb;
976 struct skb_data * entry;
978 unsigned long flags;
980 if (len > dev->rx_urb_size){
981 devdbg(dev, "The package length is too large\n");
982 return HW_TLP_BUF_STATE_ERROR;
985 if ((skb = alloc_skb (len + NET_IP_ALIGN, GFP_ATOMIC)) == NULL) {
986 return HW_TLP_BUF_STATE_ERROR;
988 skb_reserve (skb, NET_IP_ALIGN);
991 entry = (struct skb_data *) skb->cb;
992 entry->urb = NULL;
993 entry->dev = dev;
994 entry->state = rx_done;
995 entry->length = skb->len;
997 memcpy(skb->data, data, len);
998 skb->len = len;
1000 spin_lock_irqsave(&dev->done.lock, flags);
1001 __skb_queue_tail(&dev->done, skb);
1002 if (1 <= dev->done.qlen){
1003 tasklet_schedule(&dev->bh);
1005 spin_unlock_irqrestore(&dev->done.lock, flags);
1006 return HW_TLP_BUF_STATE_IDLE;
1008 static void reset_tlp_tmp_buf(struct hw_cdc_net *dev)
1009 {
1010 	dev->hw_tlp_tmp_buf.bytesneeded = 0;
1011 	dev->hw_tlp_tmp_buf.pktlength = 0;
1012 }
1013 static void rx_tlp_parse(struct hw_cdc_net *dev, struct sk_buff *skb)
1015 struct hw_cdc_tlp *tlp = NULL;
1016 int remain_bytes = (int)skb->len;
1017 unsigned short pktlen = 0;
1018 unsigned char *cur_ptr = skb->data;
1019 unsigned char *payload_ptr = NULL;
1020 unsigned char *buf_start = skb->data;
1021 unsigned char *buf_end = buf_start + skb->len;
1022 unsigned char *ptr = NULL;
1024 	/* decode the TLP stream into Ethernet packets */
1025 while (remain_bytes > 0){
1026 switch (dev->hw_tlp_buffer_state){
1027 case HW_TLP_BUF_STATE_IDLE:
1029 if (HW_TLP_HDR_LENGTH < remain_bytes ){
1030 tlp = (struct hw_cdc_tlp *)cur_ptr;
1031 pktlen = (tlp->pktlength & HW_TLP_MASK_LENGTH);
1032 payload_ptr = (unsigned char *)&(tlp->payload);
1034 //validate the tlp packet header
1035 if (HW_TLP_BITS_SYNC != (tlp->pktlength & HW_TLP_MASK_SYNC)){
1036 devdbg(dev, "The pktlength is error");
1037 dev->hw_tlp_buffer_state = HW_TLP_BUF_STATE_ERROR;
1038 break;
1040 				/* the received buffer holds a whole Ethernet packet */
1041 if ( (payload_ptr + pktlen) <= buf_end){
1042 /*Get the ether packet from the TLP packet, and put it into the done queue*/
1043 submit_skb(dev, payload_ptr, pktlen);
1044 cur_ptr = payload_ptr + pktlen;
1045 remain_bytes = buf_end - cur_ptr;
1046 				}else{ /* only part of the Ethernet packet is present */
1047 if (pktlen > dev->rx_urb_size){
1048 devdbg(dev, "The pktlen is invalid");
1049 dev->hw_tlp_buffer_state = HW_TLP_BUF_STATE_ERROR;
1050 break;
1052 dev->hw_tlp_tmp_buf.bytesneeded = (payload_ptr + pktlen) - buf_end;
1053 dev->hw_tlp_tmp_buf.pktlength = buf_end - payload_ptr;
1054 memcpy(dev->hw_tlp_tmp_buf.buffer, payload_ptr,
1055 dev->hw_tlp_tmp_buf.pktlength);
1056 dev->hw_tlp_buffer_state = HW_TLP_BUF_STATE_PARTIAL_FILL;
1057 remain_bytes = 0;
1060 else if (HW_TLP_HDR_LENGTH == remain_bytes){
1061 memcpy(dev->hw_tlp_tmp_buf.buffer, cur_ptr, remain_bytes);
1062 dev->hw_tlp_tmp_buf.bytesneeded = 0;
1063 dev->hw_tlp_tmp_buf.pktlength = remain_bytes;
1064 dev->hw_tlp_buffer_state = HW_TLP_BUF_STATE_HDR_ONLY;
1065 remain_bytes = 0;
1067 else if (remain_bytes > 0){
1068 memcpy(dev->hw_tlp_tmp_buf.buffer, cur_ptr, remain_bytes);
1069 dev->hw_tlp_tmp_buf.bytesneeded = HW_TLP_HDR_LENGTH - remain_bytes;
1070 dev->hw_tlp_tmp_buf.pktlength = remain_bytes;
1071 dev->hw_tlp_buffer_state = HW_TLP_BUF_STATE_PARTIAL_HDR;
1072 remain_bytes = 0;
1074 else{
1075 dev->hw_tlp_buffer_state = HW_TLP_BUF_STATE_ERROR;
1077 break;
1079 case HW_TLP_BUF_STATE_HDR_ONLY:
1081 tlp->pktlength = *((unsigned short*)dev->hw_tlp_tmp_buf.buffer);
1082 pktlen = (tlp->pktlength & HW_TLP_MASK_LENGTH);
1083 payload_ptr = cur_ptr;
1084 reset_tlp_tmp_buf(dev);
1085 /*validate the tlp packet header*/
1086 if (HW_TLP_BITS_SYNC != (tlp->pktlength & HW_TLP_MASK_SYNC)){
1087 devdbg(dev, "The pktlength is error");
1088 dev->hw_tlp_buffer_state = HW_TLP_BUF_STATE_ERROR;
1089 break;
1091 if ( (payload_ptr + pktlen) <= buf_end){
1092 submit_skb(dev, payload_ptr, pktlen);
1093 cur_ptr = payload_ptr + pktlen;
1094 remain_bytes = buf_end - cur_ptr;
1095 dev->hw_tlp_buffer_state = HW_TLP_BUF_STATE_IDLE;
1096 }else{
1097 if (pktlen > dev->rx_urb_size){
1098 dev->hw_tlp_buffer_state = HW_TLP_BUF_STATE_ERROR;
1099 break;
1101 dev->hw_tlp_tmp_buf.bytesneeded = (payload_ptr + pktlen) - buf_end;
1102 dev->hw_tlp_tmp_buf.pktlength = buf_end - payload_ptr;
1103 memcpy(dev->hw_tlp_tmp_buf.buffer, payload_ptr,
1104 dev->hw_tlp_tmp_buf.pktlength);
1105 dev->hw_tlp_buffer_state = HW_TLP_BUF_STATE_PARTIAL_FILL;
1106 remain_bytes = 0;
1108 break;
1110 case HW_TLP_BUF_STATE_PARTIAL_HDR:
1112 memcpy(dev->hw_tlp_tmp_buf.buffer + dev->hw_tlp_tmp_buf.pktlength,
1113 cur_ptr, dev->hw_tlp_tmp_buf.bytesneeded);
1114 cur_ptr += dev->hw_tlp_tmp_buf.bytesneeded;
1115 dev->hw_tlp_buffer_state = HW_TLP_BUF_STATE_HDR_ONLY;
1116 remain_bytes -= dev->hw_tlp_tmp_buf.bytesneeded;
1117 break;
1119 case HW_TLP_BUF_STATE_PARTIAL_FILL:
1121 if (remain_bytes < dev->hw_tlp_tmp_buf.bytesneeded){
1122 memcpy(dev->hw_tlp_tmp_buf.buffer + dev->hw_tlp_tmp_buf.pktlength,
1123 cur_ptr, remain_bytes);
1124 dev->hw_tlp_tmp_buf.pktlength += remain_bytes;
1125 dev->hw_tlp_tmp_buf.bytesneeded -= remain_bytes;
1126 dev->hw_tlp_buffer_state = HW_TLP_BUF_STATE_PARTIAL_FILL;
1127 cur_ptr += remain_bytes;
1128 remain_bytes = 0;
1129 }else{
1130 unsigned short tmplen = dev->hw_tlp_tmp_buf.bytesneeded
1131 + dev->hw_tlp_tmp_buf.pktlength;
1132 if (HW_USB_RECEIVE_BUFFER_SIZE < tmplen){
1133 devdbg(dev, "The tlp length is larger than 1600");
1134 					ptr = (unsigned char *)kmalloc(dev->hw_tlp_tmp_buf.bytesneeded
1135 						+ dev->hw_tlp_tmp_buf.pktlength, GFP_ATOMIC); /* URB completion context: must not sleep */
1136 if (NULL != ptr){
1137 memcpy(ptr, dev->hw_tlp_tmp_buf.buffer,
1138 dev->hw_tlp_tmp_buf.pktlength);
1139 memcpy(ptr + dev->hw_tlp_tmp_buf.pktlength, cur_ptr,
1140 dev->hw_tlp_tmp_buf.bytesneeded);
1141 submit_skb(dev, ptr, tmplen);
1142 kfree(ptr);
1145 }else{
1146 memcpy(dev->hw_tlp_tmp_buf.buffer + dev->hw_tlp_tmp_buf.pktlength,
1147 cur_ptr, dev->hw_tlp_tmp_buf.bytesneeded);
1148 submit_skb(dev, dev->hw_tlp_tmp_buf.buffer, tmplen);
1150 remain_bytes -= dev->hw_tlp_tmp_buf.bytesneeded;
1151 cur_ptr += dev->hw_tlp_tmp_buf.bytesneeded;
1152 dev->hw_tlp_buffer_state = HW_TLP_BUF_STATE_IDLE;
1153 reset_tlp_tmp_buf(dev);
1155 break;
1157 case HW_TLP_BUF_STATE_ERROR:
1158 default:
1160 remain_bytes = 0;
1161 reset_tlp_tmp_buf(dev);
1162 dev->hw_tlp_buffer_state = HW_TLP_BUF_STATE_IDLE;
1163 break;
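		/* NOTE (added, hedged): summary of the reassembly above: IDLE
		 * consumes complete TLP frames in place; PARTIAL_HDR/HDR_ONLY
		 * buffer a split 2-byte header; PARTIAL_FILL accumulates a split
		 * payload until bytesneeded reaches zero; ERROR drops the rest of
		 * the URB and resets the machine to IDLE. */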
1169 static void rx_defer_bh(struct hw_cdc_net *dev,
1170 struct sk_buff *skb,
1171 struct sk_buff_head *list)
1173 unsigned long flags;
1174 spin_lock_irqsave(&list->lock, flags);
1175 __skb_unlink(skb, list);
1176 spin_unlock_irqrestore(&list->lock, flags);
1178 /*deal with the download tlp feature*/
1179 if (1 == dev->hw_tlp_download_is_actived){
1180 rx_tlp_parse(dev, skb);
1181 dev_kfree_skb_any(skb);
1182 }else{
1183 spin_lock_irqsave(&dev->done.lock, flags);
1184 __skb_queue_tail(&dev->done, skb);
1185 if (1 <= dev->done.qlen){
1186 tasklet_schedule(&dev->bh);
1188 spin_unlock_irqrestore(&dev->done.lock, flags);
1191 ////////////////////////
1193 /* some work can't be done in tasklets, so we use keventd
1195 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
1196  * but tasklet_schedule() doesn't. hope the failure is rare.
1197  */
1198 void hw_defer_kevent (struct hw_cdc_net *dev, int work)
1199 {
1200 	set_bit (work, &dev->flags);
1201 	if (!schedule_work (&dev->kevent)){
1202 		deverr (dev, "kevent %d may have been dropped", work);
1203 	}
1204 	else{
1205 		devdbg (dev, "kevent %d scheduled", work);
1206 	}
1207 }
1208 EXPORT_SYMBOL_GPL(hw_defer_kevent);
1210 /*-------------------------------------------------------------------------*/
1215 static void rx_complete (struct urb *urb);
1216 static void rx_submit (struct hw_cdc_net *dev, struct urb *urb, gfp_t flags)
1218 struct sk_buff *skb;
1219 struct skb_data *entry;
1220 int retval = 0;
1221 unsigned long lockflags;
1222 size_t size = dev->rx_urb_size;
1225 if ((skb = alloc_skb (size + NET_IP_ALIGN, flags)) == NULL) {
1226 deverr (dev, "no rx skb");
1227 hw_defer_kevent (dev, EVENT_RX_MEMORY);
1228 usb_free_urb (urb);
1229 return;
1231 skb_reserve (skb, NET_IP_ALIGN);
1233 entry = (struct skb_data *) skb->cb;
1234 entry->urb = urb;
1235 entry->dev = dev;
1236 entry->state = rx_start;
1237 entry->length = 0;
1240 usb_fill_bulk_urb (urb, dev->udev, dev->in,
1241 skb->data, size, rx_complete, skb);
1243 spin_lock_irqsave (&dev->rxq.lock, lockflags);
1246 if (netif_running (dev->net)
1247 && netif_device_present (dev->net)
1248 && !test_bit (EVENT_RX_HALT, &dev->flags)) {
1249 switch (retval = usb_submit_urb (urb, GFP_ATOMIC)) {
1251 case 0://submit successfully
1252 __skb_queue_tail (&dev->rxq, skb);
1253 break;
1254 case -EPIPE:
1255 hw_defer_kevent (dev, EVENT_RX_HALT);
1256 break;
1257 case -ENOMEM:
1258 hw_defer_kevent (dev, EVENT_RX_MEMORY);
1259 break;
1260 case -ENODEV:
1261 if (netif_msg_ifdown (dev)){
1262 devdbg (dev, "device gone");
1264 netif_device_detach (dev->net);
1265 break;
1266 default:
1267 if (netif_msg_rx_err (dev)){
1268 devdbg (dev, "rx submit, %d", retval);
1270 tasklet_schedule (&dev->bh);
1271 break;
1273 } else {
1274 if (netif_msg_ifdown (dev)){
1275 devdbg (dev, "rx: stopped");
1277 retval = -ENOLINK;
1279 spin_unlock_irqrestore (&dev->rxq.lock, lockflags);
1281 devdbg (dev, "usb_submit_urb status:%x, time:%ld-%ld",
1282 retval,current_kernel_time().tv_sec,current_kernel_time().tv_nsec);
1284 if (retval) {
1286 dev_kfree_skb_any (skb);
1287 usb_free_urb (urb);
1291 /*-------------------------------------------------------------------------*/
1293 static inline void rx_process (struct hw_cdc_net *dev, struct sk_buff *skb)
1294 {
1296 	if (dev->is_ncm)
1297 	{
1298 		if(!cdc_ncm_rx_fixup(dev, skb)){
1299 			goto error;
1300 		}
1301 	}
1302 	if (skb->len){
1303 		hw_skb_return (dev, skb);
1304 	}
1305 	else {
1306 		if (netif_msg_rx_err (dev)){
1307 			devdbg (dev, "drop");
1308 		}
1309 error:
1310 		dev->stats.rx_errors++;
1311 		skb_queue_tail (&dev->done, skb);
1312 	}
1313 }
1315 /*-------------------------------------------------------------------------*/
1316 static void rx_complete (struct urb *urb)
1318 struct sk_buff *skb = (struct sk_buff *) urb->context;
1319 struct skb_data *entry = (struct skb_data *) skb->cb;
1320 struct hw_cdc_net *dev = entry->dev;
1321 int urb_status = urb->status;
1324 devdbg (dev, "rx_complete,urb:%p,rx length %d, time %ld-%ld",
1325 urb, urb->actual_length,current_kernel_time().tv_sec,
1326 current_kernel_time().tv_nsec);
1327 skb_put (skb, urb->actual_length);
1328 entry->state = rx_done;
1329 entry->urb = NULL;
1331 switch (urb_status) {
1332 /* success */
1333 case 0:
1334 if (skb->len < dev->net->hard_header_len) {
1335 entry->state = rx_cleanup;
1336 dev->stats.rx_errors++;
1337 dev->stats.rx_length_errors++;
1338 if (netif_msg_rx_err (dev)){
1339 devdbg (dev, "rx length %d", skb->len);
1342 break;
1344 /* stalls need manual reset. this is rare ... except that
1345 * when going through USB 2.0 TTs, unplug appears this way.
1346 * we avoid the highspeed version of the ETIMEOUT/EILSEQ
1347 * storm, recovering as needed.
1349 case -EPIPE:
1350 dev->stats.rx_errors++;
1351 hw_defer_kevent (dev, EVENT_RX_HALT);
1352 // FALLTHROUGH
1354 /* software-driven interface shutdown */
1355 case -ECONNRESET: /* async unlink */
1356 case -ESHUTDOWN: /* hardware gone */
1357 if (netif_msg_ifdown (dev)){
1358 devdbg (dev, "rx shutdown, code %d", urb_status);
1360 goto block;
1362 /* we get controller i/o faults during khubd disconnect() delays.
1363 * throttle down resubmits, to avoid log floods; just temporarily,
1364 * so we still recover when the fault isn't a khubd delay.
1366 case -EPROTO:
1367 case -ETIME:
1368 case -EILSEQ:
1369 dev->stats.rx_errors++;
1370 if (!timer_pending (&dev->delay)) {
1371 mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES);
1372 if (netif_msg_link (dev)){
1373 devdbg (dev, "rx throttle %d", urb_status);
1376 block:
1377 entry->state = rx_cleanup;
1378 entry->urb = urb;
1379 urb = NULL;
1380 break;
1382 /* data overrun ... flush fifo? */
1383 case -EOVERFLOW:
1384 dev->stats.rx_over_errors++;
1385 // FALLTHROUGH
1387 default:
1388 entry->state = rx_cleanup;
1389 dev->stats.rx_errors++;
1390 if (netif_msg_rx_err (dev)){
1391 devdbg (dev, "rx status %d", urb_status);
1393 break;
1396 rx_defer_bh(dev, skb, &dev->rxq);
1398 if (urb) {
1399 if (netif_running (dev->net)
1400 && !test_bit (EVENT_RX_HALT, &dev->flags)) {
1401 rx_submit (dev, urb, GFP_ATOMIC);
1402 return;
1404 usb_free_urb (urb);
1406 if (netif_msg_rx_err (dev)){
1407 devdbg (dev, "no read resubmitted");
1410 static void intr_complete (struct urb *urb)
1412 struct hw_cdc_net *dev = urb->context;
1413 int status = urb->status;
1414 switch (status) {
1415 /* success */
1416 case 0:
1417 hw_cdc_status(dev, urb);
1418 break;
1420 /* software-driven interface shutdown */
1421 case -ENOENT: /* urb killed */
1422 case -ESHUTDOWN: /* hardware gone */
1423 if (netif_msg_ifdown (dev)){
1424 devdbg (dev, "intr shutdown, code %d", status);
1426 return;
1428 /* NOTE: not throttling like RX/TX, since this endpoint
1429 * already polls infrequently
1431 default:
1432 devdbg (dev, "intr status %d", status);
1433 break;
1436 if (!netif_running (dev->net)){
1437 return;
1440 memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
1441 status = usb_submit_urb (urb, GFP_ATOMIC);
1442 if (status != 0 && netif_msg_timer (dev)){
1443 deverr(dev, "intr resubmit --> %d", status);
1447 /*-------------------------------------------------------------------------*/
1452 /*-------------------------------------------------------------------------*/
1454 // precondition: never called in_interrupt
1456 static int hw_stop (struct net_device *net)
1458 struct hw_cdc_net *dev = netdev_priv(net);
1459 int temp;
1460 DECLARE_WAIT_QUEUE_HEAD_ONSTACK (unlink_wakeup);
1461 DECLARE_WAITQUEUE (wait, current);
1463 netif_stop_queue (net);
1465 if (netif_msg_ifdown (dev)){
1466 devinfo (dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld",
1467 dev->stats.rx_packets, dev->stats.tx_packets,
1468 dev->stats.rx_errors, dev->stats.tx_errors
1472 // ensure there are no more active urbs
1473 add_wait_queue (&unlink_wakeup, &wait);
1474 dev->wait = &unlink_wakeup;
1475 temp = unlink_urbs (dev, &dev->txq) + unlink_urbs (dev, &dev->rxq);
1477 // maybe wait for deletions to finish.
1478 while (!skb_queue_empty(&dev->rxq)
1479 && !skb_queue_empty(&dev->txq)
1480 && !skb_queue_empty(&dev->done)) {
1481 msleep(UNLINK_TIMEOUT_MS);
1482 if (netif_msg_ifdown (dev)){
1483 devdbg (dev, "waited for %d urb completions", temp);
1486 dev->wait = NULL;
1487 remove_wait_queue (&unlink_wakeup, &wait);
1489 /*cleanup the data for TLP*/
1490 dev->hw_tlp_buffer_state = HW_TLP_BUF_STATE_IDLE;
1491 if (NULL != dev->hw_tlp_tmp_buf.buffer){
1492 kfree(dev->hw_tlp_tmp_buf.buffer);
1493 dev->hw_tlp_tmp_buf.buffer = NULL;
1495 dev->hw_tlp_tmp_buf.pktlength = 0;
1496 dev->hw_tlp_tmp_buf.bytesneeded = 0;
1498 usb_kill_urb(dev->interrupt);
1500 /* deferred work (task, timer, softirq) must also stop.
1501 * can't flush_scheduled_work() until we drop rtnl (later),
1502 * else workers could deadlock; so make workers a NOP.
1504 dev->flags = 0;
1505 del_timer_sync (&dev->delay);
1506 tasklet_kill (&dev->bh);
1507 usb_autopm_put_interface(dev->intf);
1509 return 0;
1512 /*-------------------------------------------------------------------------*/
1514 // posts reads, and enables write queuing
1516 // precondition: never called in_interrupt
1518 static int hw_open (struct net_device *net)
1520 struct hw_cdc_net *dev = netdev_priv(net);
1521 int retval;
1522 if ((retval = usb_autopm_get_interface(dev->intf)) < 0) {
1523 if (netif_msg_ifup (dev)){
1524 devinfo (dev,
1525 "resumption fail (%d) hw_cdc_net usb-%s-%s, %s",
1526 retval,
1527 dev->udev->bus->bus_name, dev->udev->devpath,
1528 dev->driver_desc);
1530 goto done_nopm;
1533 	/* initialize the TLP state */
1534 dev->hw_tlp_buffer_state = HW_TLP_BUF_STATE_IDLE;
1535 dev->hw_tlp_tmp_buf.buffer = kmalloc(HW_USB_RECEIVE_BUFFER_SIZE, GFP_KERNEL);
1536 if (NULL != dev->hw_tlp_tmp_buf.buffer){
1537 memset(dev->hw_tlp_tmp_buf.buffer, 0, HW_USB_RECEIVE_BUFFER_SIZE);
1539 dev->hw_tlp_tmp_buf.pktlength = 0;
1540 dev->hw_tlp_tmp_buf.bytesneeded = 0;
1543 /* start any status interrupt transfer */
1544 if (dev->interrupt) {
1545 retval = usb_submit_urb (dev->interrupt, GFP_KERNEL);
1546 if (retval < 0) {
1547 if (netif_msg_ifup (dev)){
1548 deverr (dev, "intr submit %d", retval);
1550 goto done;
1554 netif_start_queue (net);
1556 // delay posting reads until we're fully open
1557 tasklet_schedule (&dev->bh);
1558 return retval;
1559 done:
1560 usb_autopm_put_interface(dev->intf);
1561 done_nopm:
1562 return retval;
1565 /*-------------------------------------------------------------------------*/
1567 /* ethtool methods; minidrivers may need to add some more, but
1568 * they'll probably want to use this base set.
1571 int hw_get_settings (struct net_device *net, struct ethtool_cmd *cmd)
1573 struct hw_cdc_net *dev = netdev_priv(net);
1575 if (!dev->mii.mdio_read){
1576 return -EOPNOTSUPP;
1579 return mii_ethtool_gset(&dev->mii, cmd);
1581 EXPORT_SYMBOL_GPL(hw_get_settings);
1583 int hw_set_settings (struct net_device *net, struct ethtool_cmd *cmd)
1585 struct hw_cdc_net *dev = netdev_priv(net);
1586 int retval;
1588 if (!dev->mii.mdio_write){
1589 return -EOPNOTSUPP;
1592 retval = mii_ethtool_sset(&dev->mii, cmd);
1594 return retval;
1597 EXPORT_SYMBOL_GPL(hw_set_settings);
1599 u32 hw_get_link (struct net_device *net)
1601 struct hw_cdc_net *dev = netdev_priv(net);
1603 /* if the device has mii operations, use those */
1604 if (dev->mii.mdio_read){
1605 return mii_link_ok(&dev->mii);
1608 /* Otherwise, say we're up (to avoid breaking scripts) */
1609 return 1;
1611 EXPORT_SYMBOL_GPL(hw_get_link);
1613 int hw_nway_reset(struct net_device *net)
1615 struct hw_cdc_net *dev = netdev_priv(net);
1617 if (!dev->mii.mdio_write){
1618 return -EOPNOTSUPP;
1621 return mii_nway_restart(&dev->mii);
1623 EXPORT_SYMBOL_GPL(hw_nway_reset);
1625 void hw_get_drvinfo (struct net_device *net, struct ethtool_drvinfo *info)
1627 struct hw_cdc_net *dev = netdev_priv(net);
1629 strncpy (info->driver, dev->driver_name, sizeof info->driver);
1630 strncpy (info->version, DRIVER_VERSION, sizeof info->version);
1631 strncpy (info->fw_version, dev->driver_desc,
1632 sizeof info->fw_version);
1633 usb_make_path (dev->udev, info->bus_info, sizeof info->bus_info);
1635 EXPORT_SYMBOL_GPL(hw_get_drvinfo);
1637 u32 hw_get_msglevel (struct net_device *net)
1639 struct hw_cdc_net *dev = netdev_priv(net);
1641 return dev->msg_enable;
1643 EXPORT_SYMBOL_GPL(hw_get_msglevel);
1645 void hw_set_msglevel (struct net_device *net, u32 level)
1647 struct hw_cdc_net *dev = netdev_priv(net);
1649 dev->msg_enable = level;
1651 EXPORT_SYMBOL_GPL(hw_set_msglevel);
1653 /* drivers may override default ethtool_ops in their bind() routine */
1654 static struct ethtool_ops hw_ethtool_ops = {
1655 .get_settings = hw_get_settings,
1656 .set_settings = hw_set_settings,
1657 .get_link = hw_get_link,
1658 .nway_reset = hw_nway_reset,
1659 .get_drvinfo = hw_get_drvinfo,
1660 .get_msglevel = hw_get_msglevel,
1661 	.set_msglevel = hw_set_msglevel,
1662 };
1664 /*-------------------------------------------------------------------------*/
1666 /* work that cannot be done in interrupt context uses keventd.
1668 * NOTE: with 2.5 we could do more of this using completion callbacks,
1669  * especially now that control transfers can be queued.
1670  */
1671 static void
1672 kevent (struct work_struct *work)
1674 struct hw_cdc_net *dev =
1675 container_of(work, struct hw_cdc_net, kevent);
1676 int status;
1678 /* usb_clear_halt() needs a thread context */
1679 if (test_bit (EVENT_TX_HALT, &dev->flags)) {
1680 unlink_urbs (dev, &dev->txq);
1681 status = usb_clear_halt (dev->udev, dev->out);
1682 if (status < 0
1683 && status != -EPIPE
1684 && status != -ESHUTDOWN) {
1685 if (netif_msg_tx_err (dev)){
1686 deverr (dev, "can't clear tx halt, status %d",
1687 status);
1689 } else {
1690 clear_bit (EVENT_TX_HALT, &dev->flags);
1691 if (status != -ESHUTDOWN){
1692 netif_wake_queue (dev->net);
1696 if (test_bit (EVENT_RX_HALT, &dev->flags)) {
1697 unlink_urbs (dev, &dev->rxq);
1698 status = usb_clear_halt (dev->udev, dev->in);
1699 if (status < 0
1700 && status != -EPIPE
1701 && status != -ESHUTDOWN) {
1702 if (netif_msg_rx_err (dev)){
1703 deverr (dev, "can't clear rx halt, status %d",
1704 status);
1706 } else {
1707 clear_bit (EVENT_RX_HALT, &dev->flags);
1708 tasklet_schedule (&dev->bh);
1712 /* tasklet could resubmit itself forever if memory is tight */
1713 if (test_bit (EVENT_RX_MEMORY, &dev->flags)) {
1714 struct urb *urb = NULL;
1716 if (netif_running (dev->net)){
1717 urb = usb_alloc_urb (0, GFP_KERNEL);
1718 }else{
1719 clear_bit (EVENT_RX_MEMORY, &dev->flags);
1721 if (urb != NULL) {
1722 clear_bit (EVENT_RX_MEMORY, &dev->flags);
1723 rx_submit (dev, urb, GFP_KERNEL);
1724 tasklet_schedule (&dev->bh);
1728 if (test_bit (EVENT_LINK_RESET, &dev->flags)) {
1729 clear_bit (EVENT_LINK_RESET, &dev->flags);
1732 if (dev->flags){
1733 devdbg (dev, "kevent done, flags = 0x%lx",
1734 dev->flags);
1738 /*-------------------------------------------------------------------------*/
1740 static void tx_complete (struct urb *urb)
1742 struct sk_buff *skb = (struct sk_buff *) urb->context;
1743 struct skb_data *entry = (struct skb_data *) skb->cb;
1744 struct hw_cdc_net *dev = entry->dev;
1746 devdbg(dev,"tx_complete,status:%d,len:%d, *********time:%ld-%ld",
1747 urb->status,(int)entry->length,
1748 current_kernel_time().tv_sec,
1749 current_kernel_time().tv_nsec);
1751 if (urb->status == 0) {
1752 dev->stats.tx_packets++;
1753 dev->stats.tx_bytes += entry->length;
1754 } else {
1755 dev->stats.tx_errors++;
1757 switch (urb->status) {
1758 case -EPIPE:
1759 hw_defer_kevent (dev, EVENT_TX_HALT);
1760 break;
1762 /* software-driven interface shutdown */
1763 case -ECONNRESET: // async unlink
1764 case -ESHUTDOWN: // hardware gone
1765 break;
1767 // like rx, tx gets controller i/o faults during khubd delays
1768 // and so it uses the same throttling mechanism.
1769 case -EPROTO:
1770 case -ETIME:
1771 case -EILSEQ:
1772 if (!timer_pending (&dev->delay)) {
1773 mod_timer (&dev->delay,
1774 jiffies + THROTTLE_JIFFIES);
1775 if (netif_msg_link (dev)){
1776 devdbg (dev, "tx throttle %d",
1777 urb->status);
1780 netif_stop_queue (dev->net);
1781 break;
1782 default:
1783 if (netif_msg_tx_err (dev)){
1784 devdbg (dev, "tx err %d", entry->urb->status);
1786 break;
1790 urb->dev = NULL;
1791 entry->state = tx_done;
1792 tx_defer_bh(dev, skb, &dev->txq);
1795 /*-------------------------------------------------------------------------*/
1797 static void hw_tx_timeout (struct net_device *net)
1799 struct hw_cdc_net *dev = netdev_priv(net);
1801 unlink_urbs (dev, &dev->txq);
1802 tasklet_schedule (&dev->bh);
1804 // FIXME: device recovery -- reset?
1807 /*-------------------------------------------------------------------------*/
1809 static int hw_start_xmit (struct sk_buff *skb, struct net_device *net)
1811 struct hw_cdc_net *dev = netdev_priv(net);
1812 int length;
1813 int retval = NET_XMIT_SUCCESS;
1814 struct urb *urb = NULL;
1815 struct skb_data *entry;
1816 unsigned long flags;
1818 if (dev->is_ncm ) {
1819 skb = cdc_ncm_tx_fixup (dev, skb, GFP_ATOMIC);
1820 if (!skb) {
1821 if (netif_msg_tx_err (dev)){
1822 devdbg (dev, "can't tx_fixup skb");
1824 goto drop;
1828 length = skb->len;
1830 if (!(urb = usb_alloc_urb (0, GFP_ATOMIC))) {
1831 if (netif_msg_tx_err (dev)){
1832 devdbg (dev, "no urb");
1834 goto drop;
1837 entry = (struct skb_data *) skb->cb;
1838 entry->urb = urb;
1839 entry->dev = dev;
1840 entry->state = tx_start;
1841 entry->length = length;
1843 usb_fill_bulk_urb (urb, dev->udev, dev->out,
1844 skb->data, skb->len, tx_complete, skb);
1846 /* don't assume the hardware handles USB_ZERO_PACKET
1847 * NOTE: strictly conforming cdc-ether devices should expect
1848 	 * the ZLP here, but ignore the one-byte packet.
1849 	 */
1850 if ((length % dev->maxpacket) == 0) {
1851 urb->transfer_buffer_length++;
1852 if (skb_tailroom(skb)) {
1853 skb->data[skb->len] = 0;
1854 __skb_put(skb, 1);
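		/* NOTE (added, hedged): rather than relying on URB_ZERO_PACKET,
		 * max-packet-multiple frames are padded by one byte, e.g. a
		 * 1024-byte frame on a 512-byte bulk endpoint is sent as 1025
		 * bytes, so the device never waits for a separate ZLP. */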
1858 devdbg(dev,"hw_start_xmit ,usb_submit_urb,len:%d, time:%ld-%ld",
1859 skb->len,current_kernel_time().tv_sec,current_kernel_time().tv_nsec);
1861 spin_lock_irqsave (&dev->txq.lock, flags);
1863 switch ((retval = usb_submit_urb (urb, GFP_ATOMIC))) {
1864 case -EPIPE:
1865 netif_stop_queue (net);
1866 hw_defer_kevent (dev, EVENT_TX_HALT);
1867 break;
1868 default:
1869 if (netif_msg_tx_err (dev)){
1870 devdbg (dev, "tx: submit urb err %d", retval);
1872 break;
1873 case 0:
1874 net->trans_start = jiffies;
1875 __skb_queue_tail (&dev->txq, skb);
1876 if (dev->txq.qlen >= TX_QLEN (dev)){
1877 netif_stop_queue (net);
1880 spin_unlock_irqrestore (&dev->txq.lock, flags);
1882 if (retval) {
1883 if (netif_msg_tx_err (dev)){
1884 devdbg (dev, "drop, code %d", retval);
1886 drop:
1887 retval = NET_XMIT_SUCCESS;
1888 dev->stats.tx_dropped++;
1889 if (skb){
1890 dev_kfree_skb_any (skb);
1892 usb_free_urb (urb);
1893 } else if (netif_msg_tx_queued (dev)) {
1894 devdbg (dev, "> tx, len %d, type 0x%x",
1895 length, skb->protocol);
1897 return retval;
1901 /*-------------------------------------------------------------------------*/
1903 // tasklet (work deferred from completions, in_irq) or timer
1905 static void hw_bh (unsigned long param)
1907 struct hw_cdc_net *dev = (struct hw_cdc_net *) param;
1908 struct sk_buff *skb;
1909 struct skb_data *entry;
1911 while ((skb = skb_dequeue (&dev->done))) {
1912 entry = (struct skb_data *) skb->cb;
1913 switch (entry->state) {
1914 case rx_done:
1915 entry->state = rx_cleanup;
1916 rx_process (dev, skb);
1917 continue;
1918 case tx_done:
1919 case rx_cleanup:
1920 usb_free_urb (entry->urb);
1921 dev_kfree_skb (skb);
1922 continue;
1923 default:
1924 devdbg (dev, "bogus skb state %d", entry->state);
1928 // waiting for all pending urbs to complete?
1929 if (dev->wait) {
1930 if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
1931 wake_up (dev->wait);
1934 // or are we maybe short a few urbs?
1935 } else if (netif_running (dev->net)
1936 && netif_device_present (dev->net)
1937 && !timer_pending (&dev->delay)
1938 && !test_bit (EVENT_RX_HALT, &dev->flags)) {
1939 int temp = dev->rxq.qlen;
1940 int qlen = dev->is_ncm ? RX_QLEN_NCM : RX_QLEN (dev);
1943 if (temp < qlen) {
1944 struct urb *urb;
1945 int i;
1947 // don't refill the queue all at once
1948 for (i = 0; i < 10 && dev->rxq.qlen < qlen; i++) {
1949 urb = usb_alloc_urb (0, GFP_ATOMIC);
1950 if (urb != NULL){
1951 rx_submit (dev, urb, GFP_ATOMIC);
1954 if (temp != dev->rxq.qlen && netif_msg_link (dev)){
1955 devdbg (dev, "rxqlen %d --> %d",
1956 temp, dev->rxq.qlen);
1958 if (dev->rxq.qlen < qlen){
1959 tasklet_schedule (&dev->bh);
1962 if (dev->txq.qlen < (dev->is_ncm ? TX_QLEN_NCM :TX_QLEN (dev))){
1963 netif_wake_queue (dev->net);
1969 /*-------------------------------------------------------------------------
1971 * USB Device Driver support
1973 *-------------------------------------------------------------------------*/
1975 // precondition: never called in_interrupt
1977 void hw_disconnect (struct usb_interface *intf)
1979 struct hw_cdc_net *dev;
1980 struct usb_device *xdev;
1981 struct net_device *net;
1983 dev = usb_get_intfdata(intf);
1984 usb_set_intfdata(intf, NULL);
1985 if (!dev){
1986 return;
1989 xdev = interface_to_usbdev (intf);
1991 if (netif_msg_probe (dev)){
1992 devinfo (dev, "unregister '%s' usb-%s-%s, %s",
1993 intf->dev.driver->name,
1994 xdev->bus->bus_name, xdev->devpath,
1995 dev->driver_desc);
1998 	/* Synchronously cancel any delayed work that may have been scheduled; if that
1999 	 * work is already running, wait for it to finish before disconnect() continues. */
2000 cancel_work_sync(&dev->kevent);
2002 net = dev->net;
2003 unregister_netdev (net);
2005 /* we don't hold rtnl here ... */
2006 flush_scheduled_work ();
2008 hw_cdc_unbind(dev, intf);
2010 free_netdev(net);
2011 usb_put_dev (xdev);
2013 EXPORT_SYMBOL_GPL(hw_disconnect);
2016 /*-------------------------------------------------------------------------*/
2017 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)
2018 static int hw_eth_mac_addr(struct net_device *dev, void *p)
2019 {
2020 	dev->dev_addr[0] = 0x00;
2021 	dev->dev_addr[1] = 0x1e;
2022 	dev->dev_addr[2] = 0x10;
2023 	dev->dev_addr[3] = 0x1f;
2024 	dev->dev_addr[4] = 0x00;
2025 	dev->dev_addr[5] = 0x01;
2027 	return 0;
2028 }
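/* NOTE (added, hedged): this handler ignores the address requested in 'p'
 * and always programs 00:1e:10:1f:00:01; the 00:1E:10 prefix appears to be
 * a Huawei-assigned OUI. */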
2029 static const struct net_device_ops hw_netdev_ops = {
2030 .ndo_open = hw_open,
2031 .ndo_stop = hw_stop,
2032 .ndo_start_xmit = hw_start_xmit,
2033 .ndo_tx_timeout = hw_tx_timeout,
2034 .ndo_change_mtu = hw_change_mtu,
2035 .ndo_set_mac_address = hw_eth_mac_addr,
2036 .ndo_validate_addr = eth_validate_addr,
2037 	.ndo_get_stats = hw_get_stats, /* used for traffic statistics */
2038 };
2039 #endif
2041 int hw_send_tlp_download_request(struct usb_interface *intf);
2042 // precondition: never called in_interrupt
2043 int hw_check_conn_status(struct usb_interface *intf);
2046 static int is_ncm_interface(struct usb_interface *intf)
2048 u8 bif_class;
2049 u8 bif_subclass;
2050 u8 bif_protocol;
2051 bif_class = intf->cur_altsetting->desc.bInterfaceClass;
2052 bif_subclass = intf->cur_altsetting->desc.bInterfaceSubClass;
2053 bif_protocol = intf->cur_altsetting->desc.bInterfaceProtocol;
2055 if(( bif_class == 0x02 && bif_subclass == 0x0d)
2056 ||( bif_class == 0xff && bif_subclass == 0x02 && bif_protocol == 0x16)
2057 ||( bif_class == 0xff && bif_subclass == 0x02 && bif_protocol == 0x46)
2058 ||( bif_class == 0xff && bif_subclass == 0x02 && bif_protocol == 0x76)
2060 return 1;
2062 return 0;
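/* NCM link setup, as implemented below: read the device's NTB parameter
 * block via GET_NTB_PARAMETERS, validate the alignment/size constraints,
 * clamp dwNtbInMaxSize to NCM_NTB_HARD_MAX_IN_SIZE (informing the device
 * via SET_NTB_INPUT_SIZE), then select 16/32-bit NTB format and optional
 * CRC mode, and cache the matching parser options in the context. */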
2067 static int cdc_ncm_config(struct ncm_ctx *ctx)
2069 int err;
2070 struct usb_device *udev = ctx->ndev->udev;
2071 u8 net_caps;
2072 u8 control_if;
2073 unsigned int tx_pipe;
2074 unsigned int rx_pipe;
2075 struct usb_cdc_ncm_ntb_parameter_hw *ntb_params;
2076 u8 *b;
2078 #define NCM_MAX_CONTROL_MSG sizeof (*ntb_params)
2080 b = kmalloc(NCM_MAX_CONTROL_MSG, GFP_KERNEL);
2081 if (unlikely(b == NULL)){
2082 return -ENOMEM;
2085 net_caps = ctx->ncm_desc->bmNetworkCapabilities;
2086 control_if = ctx->control->cur_altsetting->desc.bInterfaceNumber;
2087 tx_pipe = usb_sndctrlpipe(udev, 0);
2088 rx_pipe = usb_rcvctrlpipe(udev, 0);
2090 err = usb_control_msg(udev, rx_pipe, USB_CDC_GET_NTB_PARAMETERS,
2091 USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN, 0,
2092 control_if, b, sizeof(*ntb_params), NCM_CONTROL_TIMEOUT);
2093 if (err < 0) {
2094 dev_dbg(&udev->dev, "cannot read NTB params\n");
2095 goto exit;
2097 if (err < sizeof(*ntb_params)) {
2098 dev_dbg(&udev->dev, "the read NTB params block is too short\n");
2099 err = -EINVAL;
2100 goto exit;
2103 ntb_params = (void *)b;
2104 ctx->formats = le16_to_cpu(ntb_params->bmNtbFormatSupported);
2105 ctx->rx_max_ntb = le32_to_cpu(ntb_params->dwNtbInMaxSize);
2106 ctx->tx_max_ntb = le32_to_cpu(ntb_params->dwNtbOutMaxSize);
2107 ctx->tx_divisor = le16_to_cpu(ntb_params->wNdpOutDivisor);
2108 ctx->tx_remainder = le16_to_cpu(ntb_params->wNdpOutPayloadRemainder);
2109 ctx->tx_align = le16_to_cpu(ntb_params->wNdpOutAlignment);
2111 devdbg(ctx->ndev,"rx_max_ntb:%d,tx_max_ntb:%d,tx_align:%d",
2112 ctx->rx_max_ntb,ctx->tx_max_ntb,ctx->tx_align);
2114 if (unlikely(!(ctx->formats & NTB_FORMAT_SUPPORTED_16BIT))) {
2115 deverr(ctx->ndev, "device does not support 16-bit mode\n");
2116 err = -EINVAL;
2117 goto exit;
2120 if (unlikely(ctx->tx_align < NCM_NDP_MIN_ALIGNMENT)) {
2121 deverr(ctx->ndev, "wNdpOutAlignment (%u) must be at least "
2122 "%u\n", ctx->tx_align, NCM_NDP_MIN_ALIGNMENT);
2123 err = -EINVAL;
2124 goto exit;
2127 if (unlikely(!IS_POWER2(ctx->tx_align))) {
2128 deverr(ctx->ndev, "wNdpOutAlignment (%u) must be a power of "
2129 "2\n", ctx->tx_align);
2130 err = -EINVAL;
2131 goto exit;
2134 if (unlikely(ctx->rx_max_ntb < NCM_NTB_MIN_IN_SIZE)) {
2135 deverr(ctx->ndev, "dwNtbInMaxSize (%u) must be at least "
2136 "%u\n", ctx->rx_max_ntb, NCM_NTB_MIN_IN_SIZE);
2137 err = -EINVAL;
2138 goto exit;
2141 if (ctx->rx_max_ntb > (u32)NCM_NTB_HARD_MAX_IN_SIZE) {
2142 devdbg(ctx->ndev, "dwNtbInMaxSize (%u) must be at most %u "
2143 ", setting the device to %u\n",
2144 ctx->rx_max_ntb, NCM_NTB_HARD_MAX_IN_SIZE,
2145 NCM_NTB_HARD_MAX_IN_SIZE);
2146 ctx->rx_max_ntb = NCM_NTB_HARD_MAX_IN_SIZE;
2147 put_unaligned_le32(ctx->rx_max_ntb, b);
2148 err = usb_control_msg(udev, tx_pipe,
2149 USB_CDC_SET_NTB_INPUT_SIZE,
2150 USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
2151 0, control_if, b, 4,
2152 NCM_CONTROL_TIMEOUT);
2153 if (err < 0) {
2154 deverr(ctx->ndev, "failed setting NTB input size\n");
2155 goto exit;
2161 if (unlikely(ctx->tx_max_ntb < NCM_NTB_MIN_OUT_SIZE)) {
2162 deverr(ctx->ndev, "dwNtbOutMaxSize (%u) must be at least "
2163 "%u\n", ctx->tx_max_ntb, (u32)NCM_NTB_MIN_OUT_SIZE);
2164 err = -EINVAL;
2165 goto exit;
2168 ctx->bit_mode = NCM_BIT_MODE_16;
2169 if (ncm_prefer_32) {
2170 if (ctx->formats & NTB_FORMAT_SUPPORTED_32BIT) {
2171 ctx->bit_mode = NCM_BIT_MODE_32;
2173 else {
2174 devinfo(ctx->ndev, "device does not support 32-bit "
2175 "mode, using 16-bit mode\n");
2179 /* The spec defines USB_CDC_SET_NTB_FORMAT as an optional feature.
2180 * The test for 32-bit support is effectively a test of whether the device
2181 * implements this request. */
2183 if (ctx->formats & NTB_FORMAT_SUPPORTED_32BIT) {
2184 err = usb_control_msg(udev, tx_pipe,
2185 USB_CDC_SET_NTB_FORMAT,
2186 USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
2187 ctx->bit_mode, control_if, NULL, 0,
2188 NCM_CONTROL_TIMEOUT);
2189 if (err < 0) {
2190 deverr(ctx->ndev, "failed setting bit-mode\n");
2191 goto exit;
2195 ctx->crc_mode = NCM_CRC_MODE_NO;
2196 if (ncm_prefer_crc && (net_caps & NCM_NCAP_CRC_MODE)) {
2197 ctx->crc_mode = NCM_CRC_MODE_YES;
2198 err = usb_control_msg(udev, tx_pipe,
2199 USB_CDC_SET_CRC_MODE,
2200 USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
2201 NCM_CRC_MODE_YES, control_if, NULL, 0,
2202 NCM_CONTROL_TIMEOUT);
2203 if (err < 0) {
2204 deverr(ctx->ndev, "failed setting crc-mode\n");
2205 goto exit;
2209 switch (ctx->bit_mode)
2211 case NCM_BIT_MODE_16:
2212 memcpy(&ctx->popts, &ndp16_opts,
2213 sizeof (struct ndp_parser_opts_hw));
2214 if (ctx->crc_mode == NCM_CRC_MODE_YES){
2215 ctx->popts.ndp_sign = NCM_NDP16_CRC_SIGN;
2217 break;
2218 case NCM_BIT_MODE_32:
2219 memcpy(&ctx->popts, &ndp32_opts,
2220 sizeof (struct ndp_parser_opts_hw));
2221 if (ctx->crc_mode == NCM_CRC_MODE_YES){
2222 ctx->popts.ndp_sign = NCM_NDP32_CRC_SIGN;
2224 break;
2227 exit:
2228 kfree(b);
2229 return err;
2230 #undef NCM_MAX_CONTROL_MSG
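/* Layout sketch of a received NTB in 16-bit mode, for reference when
 * reading the parser below (field widths double in 32-bit mode; this is
 * reconstructed from the parser options in this file, not quoted from
 * the CDC NCM spec):
 *
 *   NTH16:  dwSignature  wHeaderLength  wSequence  wBlockLength  wFpIndex
 *   ...datagrams (Ethernet frames), each padded to its alignment...
 *   NDP16:  dwSignature  wLength  wNextFpIndex (reserved)
 *           (wDatagramIndex, wDatagramLength) pairs, ending with (0, 0)
 *
 * cdc_ncm_rx_fixup() walks the NDP entry list and hands each datagram
 * to the network stack. */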
2233 /* TODO: add crc support */
2234 int cdc_ncm_rx_fixup(struct hw_cdc_net *dev, struct sk_buff *skb)
2236 #define NCM_BITS(ctx) (((ctx)->bit_mode == NCM_BIT_MODE_16) ? 16 : 32)
2237 /* Minimal NDP has a header and two entries (each entry has 2 items). */
2238 #define MIN_NDP_LEN(ndp_hdr_size, item_len) ((ndp_hdr_size) + \
2239 2 * 2 * (sizeof(__le16) * (item_len)))
2240 struct ncm_ctx *ctx = dev->ncm_ctx;
2241 struct usb_device *udev = dev->udev;
2242 struct ndp_parser_opts_hw *popts = &ctx->popts;
2243 struct sk_buff *skb2;
2244 unsigned skb_len = skb->len;
2245 __le16 *p = (void *)skb->data;
2246 __le32 idx;
2247 __le16 ndp_len;
2248 unsigned dgram_item_len = popts->dgram_item_len;
2249 unsigned curr_dgram_idx;
2250 unsigned curr_dgram_len;
2251 unsigned next_dgram_idx;
2252 unsigned next_dgram_len;
2254 u32 rx_len;
2255 u32 rep_len;
2256 rx_len = skb->len;
2260 if (unlikely(skb_len < popts->nth_size)) {
2261 dev_dbg(&udev->dev, "skb len (%u) is shorter than NTH%u len "
2262 "(%u)\n", skb_len, NCM_BITS(ctx), popts->nth_size);
2263 goto error;
2266 if (get_ncm_le32(p) != popts->nth_sign) {
2267 dev_dbg(&udev->dev, "corrupt NTH%u signature\n", NCM_BITS(ctx));
2268 goto error;
2271 if (get_ncm_le16(p) != popts->nth_size) {
2272 dev_dbg(&udev->dev, "wrong NTH%u len\n", NCM_BITS(ctx));
2273 goto error;
2276 /* skip sequence num */
2277 p += 1;
2279 if (unlikely(get_ncm(&p, popts->block_length) > skb_len)) {
2280 dev_dbg(&udev->dev, "bogus NTH%u block length\n",
2281 NCM_BITS(ctx));
2282 goto error;
2285 idx = get_ncm(&p, popts->fp_index);
2286 if (unlikely(idx > skb_len)) {
2287 dev_dbg(&udev->dev, "NTH%u fp_index (%u) bigger than skb len "
2288 "(%u)\n", NCM_BITS(ctx), idx, skb_len);
2289 goto error;
2292 p = (void *)(skb->data + idx);
2294 if (get_ncm_le32(p) != popts->ndp_sign) {
2295 dev_dbg(&udev->dev, "corrupt NDP%u signature\n", NCM_BITS(ctx));
2296 goto error;
2299 ndp_len = get_ncm_le16(p);
2300 if (((ndp_len + popts->nth_size) > skb_len)
2301 || (ndp_len < (MIN_NDP_LEN(popts->ndp_size, dgram_item_len)))) {
2302 dev_dbg(&udev->dev, "bogus NDP%u len (%u)\n", NCM_BITS(ctx),
2303 ndp_len);
2304 goto error;
2307 p += popts->reserved1;
2308 /* next_fp_index is defined as reserved in the spec */
2309 p += popts->next_fp_index;
2310 p += popts->reserved2;
2312 curr_dgram_idx = get_ncm(&p, dgram_item_len);
2313 curr_dgram_len = get_ncm(&p, dgram_item_len);
2314 next_dgram_idx = get_ncm(&p, dgram_item_len);
2315 next_dgram_len = get_ncm(&p, dgram_item_len);
2318 /* Parse all the datagrams in the NTB except for the last one. Pass
2319 * all the parsed datagrams to the networking stack directly
2321 rep_len = 0;
2322 while (next_dgram_idx && next_dgram_len) {
2323 if (unlikely((curr_dgram_idx + curr_dgram_len) > skb_len)){
2324 goto error;
2326 skb2 = skb_clone(skb, GFP_ATOMIC);
2327 if (unlikely(!skb2)){
2328 goto error;
2331 if (unlikely(!skb_pull(skb2, curr_dgram_idx))){
2332 goto error2;
2334 skb_trim(skb2, curr_dgram_len);
2336 rep_len += skb2->len;
2337 hw_skb_return(dev, skb2);
2339 curr_dgram_idx = next_dgram_idx;
2340 curr_dgram_len = next_dgram_len;
2341 next_dgram_idx = get_ncm(&p, dgram_item_len);
2342 next_dgram_len = get_ncm(&p, dgram_item_len);
2345 /* Update 'skb' to represent the last datagram in the NTB and forward
2346 * it to usbnet which in turn will push it up to the networking stack.
2348 if (unlikely((curr_dgram_idx + curr_dgram_len) > skb_len)){
2349 goto error;
2351 if (unlikely(!skb_pull(skb, curr_dgram_idx))){
2352 goto error;
2354 skb_trim(skb, curr_dgram_len);
2355 rep_len += skb->len;
2357 return 1;
2358 error2:
2359 dev_kfree_skb(skb2);
2360 error:
2361 devdbg(dev,"cdc_ncm_rx_fixup error\n");
2362 return 0;
2363 #undef NCM_BITS
2364 #undef MIN_NDP_LEN
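/* ndp_dgram_pad() returns the padding needed so that the next datagram
 * offset satisfies (dgram_off + pad) % tx_divisor == tx_remainder.
 * Worked example with illustrative values (not from a real device):
 * tx_divisor = 4, tx_remainder = 2, dgram_off = 15 gives rem = 3 > 2,
 * so pad = (2 + 4) - 3 = 3 and the datagram starts at offset 18. */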
2367 static inline unsigned ndp_dgram_pad(struct ncm_ctx *ctx, unsigned dgram_off)
2369 unsigned rem = dgram_off % ctx->tx_divisor;
2370 unsigned tmp = ctx->tx_remainder;
2371 if (rem > ctx->tx_remainder){
2372 tmp += ctx->tx_divisor;
2374 return tmp - rem;
2377 static inline void ntb_clear(struct ntb *n)
2379 n->ndgrams = 0;
2380 n->skb = NULL;
2381 INIT_LIST_HEAD(&n->entries);
2384 static inline int ntb_init(struct ncm_ctx *ctx, struct ntb *n, unsigned size)
2386 struct ndp_parser_opts_hw *popts = &ctx->popts;
2387 unsigned dgrams_end;
2389 n->max_len = size;
2390 dgrams_end = popts->nth_size;
2392 n->ndp_off = ALIGN(dgrams_end, ctx->tx_align);
2393 n->ndp_len = popts->ndp_size + 2 * 2 * popts->dgram_item_len;
2394 n->dgrams_end = dgrams_end;
2396 if (NTB_LEN(n)> n->max_len){
2397 return -EINVAL;
2400 ntb_clear(n);
2401 return 0;
2404 static inline int ntb_add_dgram(struct ncm_ctx *ctx, struct ntb *n,
2405 unsigned dgram_len, u8 *data, gfp_t flags)
2407 struct ndp_parser_opts_hw *popts = &ctx->popts;
2408 unsigned new_ndp_off;
2409 unsigned new_ndp_len;
2410 unsigned new_dgrams_end;
2411 unsigned dgram_off;
2412 struct ndp_entry *entry;
2414 dgram_off = n->dgrams_end + ndp_dgram_pad(ctx, n->dgrams_end);
2415 new_dgrams_end = dgram_off + dgram_len;
2417 new_ndp_off = ALIGN(new_dgrams_end, ctx->tx_align);
2418 new_ndp_len = n->ndp_len + 2 * 2 * popts->dgram_item_len;
2420 if ((new_ndp_off + new_ndp_len) > n->max_len){
2421 return -EINVAL;
2424 /* TODO: optimize to use a kernel lookaside cache (kmem_cache) */
2425 entry = kmalloc(sizeof(*entry), flags);
2426 if (unlikely(entry == NULL)){
2427 return -ENOMEM;
2430 entry->idx = dgram_off;
2431 entry->len = dgram_len;
2432 list_add_tail(&entry->list, &n->entries);
2434 memcpy(n->skb->data + dgram_off, data, dgram_len);
2436 n->ndgrams++;
2438 n->ndp_off = new_ndp_off;
2439 n->ndp_len = new_ndp_len;
2440 n->dgrams_end = new_dgrams_end;
2442 return 0;
2446 static inline void ntb_free_dgram_list(struct ntb *n)
2448 struct list_head *p;
2449 struct list_head *tmp;
2451 list_for_each_safe(p, tmp, &n->entries) {
2452 struct ndp_entry *e = list_entry(p, struct ndp_entry, list);
2453 list_del(p);
2454 kfree(e);
2458 static struct sk_buff *ntb_finalize(struct ncm_ctx *ctx, struct ntb *n)
2460 struct ndp_parser_opts_hw *popts = &ctx->popts;
2461 __le16 *p = (void *)n->skb->data;
2462 struct ndp_entry *entry;
2463 struct sk_buff *skb;
2465 put_ncm_le32(popts->nth_sign, p);
2466 put_ncm_le16(popts->nth_size, p);
2468 /* TODO: add sequence numbers */
2469 put_ncm_le16(0, p);
2471 put_ncm(&p, popts->block_length, NTB_LEN(n));
2472 put_ncm(&p, popts->fp_index, n->ndp_off);
2474 p = (void *)(n->skb->data + n->ndp_off);
2475 memset(p, 0, popts->ndp_size);
2477 put_ncm_le32(popts->ndp_sign, p);
2478 put_ncm_le16(n->ndp_len, p);
2480 p += popts->reserved1;
2481 p += popts->next_fp_index;
2482 p += popts->reserved2;
2484 list_for_each_entry(entry, &n->entries, list) {
2485 put_ncm(&p, popts->dgram_item_len, entry->idx);
2486 put_ncm(&p, popts->dgram_item_len, entry->len);
2489 put_ncm(&p, popts->dgram_item_len, 0);
2490 put_ncm(&p, popts->dgram_item_len, 0);
2492 ntb_free_dgram_list(n);
2493 __skb_put(n->skb, NTB_LEN(n));
2495 skb = n->skb;
2496 ntb_clear(n);
2498 return skb;
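/* The TX SKB pool below relies on reference counting: every entry is
 * allocated once in hw_cdc_bind(), ncm_get_skb() hands one out via
 * skb_get() (refcount 1 -> 2) while its NTB is in flight, and the free
 * on TX completion drops the count back to 1, at which point
 * skb_shared() reports the entry as available again. */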
2502 static inline struct sk_buff *ncm_get_skb(struct ncm_ctx *ctx)
2504 struct sk_buff *skb = NULL;
2505 unsigned i;
2507 /* 'skb_shared' will return 0 for an SKB after this SKB was
2508 * deallocated by usbnet
2510 for (i = 0; i < ctx->skb_pool_size && skb_shared(ctx->skb_pool[i]);
2511 i++);
2513 if (likely(i < ctx->skb_pool_size)){
2514 skb = skb_get(ctx->skb_pool[i]);
2517 if (likely(skb != NULL)){
2518 __skb_trim(skb, 0);
2521 return skb;
2525 /* Must be run with tx_lock held */
2526 static inline int ncm_init_curr_ntb(struct ncm_ctx *ctx)
2528 struct usb_device *udev = ctx->ndev->udev;
2529 int err;
2531 err = ntb_init(ctx, &ctx->curr_ntb, ctx->tx_max_ntb);
2532 if (unlikely(err < 0)) {
2533 dev_dbg(&udev->dev, "error initializing current-NTB with size "
2534 "%u\n", ctx->tx_max_ntb);
2535 return err;
2538 ctx->curr_ntb.skb = ncm_get_skb(ctx);
2539 if (unlikely(ctx->curr_ntb.skb == NULL)) {
2540 dev_dbg(&udev->dev, "failed getting an SKB from the pool\n");
2541 return -ENOMEM;
2544 return 0;
2548 static inline void ncm_uninit_curr_ntb(struct ncm_ctx *ctx)
2550 dev_kfree_skb_any(ctx->curr_ntb.skb);
2551 ntb_clear(&ctx->curr_ntb);
2555 /* if 'skb' is NULL (timer context), we will finish the current ntb and
2556 * return it to usbnet
2558 struct sk_buff * cdc_ncm_tx_fixup(struct hw_cdc_net *dev, struct sk_buff *skb,
2559 gfp_t mem_flags)
2561 struct ncm_ctx *ctx = dev->ncm_ctx;
2562 struct ntb *curr_ntb = &ctx->curr_ntb;
2563 struct sk_buff *skb2 = NULL;
2564 int err = 0;
2565 unsigned long flags;
2566 unsigned ndgrams = 0;
2567 unsigned is_skb_added = 0;
2568 unsigned is_curr_ntb_new = 0;
2569 u32 sn;
2571 spin_lock_irqsave(&ctx->tx_lock, flags);
2573 if (skb == NULL) {
2574 /* Timer context */
2575 if (NTB_IS_EMPTY(curr_ntb)) {
2576 /* we have nothing to send */
2577 goto exit;
2579 ndgrams = curr_ntb->ndgrams;
2580 skb2 = ntb_finalize(ctx, curr_ntb);
2581 goto exit;
2584 /* non-timer context */
2585 if (NTB_IS_EMPTY(curr_ntb)) {
2586 err = ncm_init_curr_ntb(ctx);
2587 if (unlikely(err < 0)){
2588 goto exit;
2590 is_curr_ntb_new = 1;
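/* Debug peek at the frame being aggregated: offsets 0x26 and 0x2a match
 * the TCP sequence and acknowledgement numbers of an Ethernet(14) +
 * IPv4(20) + TCP frame (14 + 20 + 4 = 0x26, 14 + 20 + 8 = 0x2a), and
 * frames shorter than 128 bytes are treated as pure ACKs. This reading
 * of the hard-coded offsets is inferred, not documented. */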
2594 if(skb->len < 128)
2596 sn = be32_to_cpu(*(u32 *)(skb->data + 0x2a));
2597 devdbg(dev, "get pc ACK SN:%x time:%ld-%ld",
2598 sn,current_kernel_time().tv_sec,current_kernel_time().tv_nsec);
2600 else
2602 sn = be32_to_cpu(*(u32 *)(skb->data + 0x26));
2603 devdbg(dev, "get pc PACKETS SN:%x, time:%ld-%ld",
2604 sn,current_kernel_time().tv_sec,current_kernel_time().tv_nsec);
2607 err = ntb_add_dgram(ctx, curr_ntb, skb->len, skb->data, GFP_ATOMIC);
2608 switch (err) {
2609 case 0:
2610 /* The datagram was successfully added to the current-NTB */
2611 is_skb_added = 1;
2612 if(!ctx->tx_timeout_jiffies)
2614 ndgrams = curr_ntb->ndgrams;
2615 skb2 = ntb_finalize(ctx, curr_ntb);
2617 break;
2618 case -EINVAL:
2619 /* not enough space in current-NTB */
2620 ndgrams = curr_ntb->ndgrams;
2621 /* finalize the current-NTB */
2622 skb2 = ntb_finalize(ctx, curr_ntb);
2623 /* setup a new current-NTB */
2624 err = ncm_init_curr_ntb(ctx);
2625 if (unlikely(err < 0)){
2626 break;
2629 is_curr_ntb_new = 1;
2631 err = ntb_add_dgram(ctx, curr_ntb, skb->len, skb->data,
2632 GFP_ATOMIC);
2633 if (unlikely(err < 0)) {
2634 ncm_uninit_curr_ntb(ctx);
2635 break;
2638 is_skb_added = 1;
2639 break;
2640 default:
2641 if (is_curr_ntb_new){
2642 ncm_uninit_curr_ntb(ctx);
2644 break;
2647 exit:
2648 if (err){
2649 devdbg(dev, "tx fixup failed (err %d)\n", err);
2652 if (skb){
2653 dev_kfree_skb_any(skb);
2656 /* When NULL is returned, usbnet increments the net device's drop count.
2657 * If 'skb' was actually added to the current-NTB it was not dropped,
2658 * so decrement the drop count in advance to compensate. */
2660 if (skb2 == NULL && (is_skb_added || skb == NULL))
2662 if(is_skb_added){
2663 dev->stats.tx_dropped--;
2666 /* If a finished NTB is returned to usbnet, it adds 1 to the packet
2667 * count; the remaining (ndgrams - 1) datagrams carried in that NTB
2668 * are accounted for here. */
2670 if (skb2 != NULL){
2671 dev->stats.tx_packets += ndgrams - 1;
2674 /* reschedule the timer if the first datagram was successfully added
2675 * to a newly allocated current-NTB */
2677 if (is_curr_ntb_new && is_skb_added && ctx->tx_timeout_jiffies){
2678 mod_timer(&ctx->tx_timer, jiffies + ctx->tx_timeout_jiffies);
2681 spin_unlock_irqrestore(&ctx->tx_lock, flags);
2683 return skb2;
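/* The TX timer flushes a partially filled NTB: passing a NULL skb to
 * hw_start_xmit() drives cdc_ncm_tx_fixup() down its "timer context"
 * path above, finalizing whatever has been aggregated so far instead of
 * waiting for the NTB to fill. */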
2686 static void ncm_tx_timer_cb(unsigned long param)
2688 struct ncm_ctx *ctx = (void *)param;
2689 if (!netif_queue_stopped(ctx->ndev->net)){
2690 hw_start_xmit(NULL, ctx->ndev->net);
2697 hw_cdc_probe (struct usb_interface *udev, const struct usb_device_id *prod)
2699 struct hw_cdc_net *dev;
2700 struct net_device *net;
2701 struct usb_host_interface *interface;
2702 struct usb_device *xdev;
2703 int status;
2704 const char *name;
2705 // DECLARE_MAC_BUF(mac);
2707 name = udev->dev.driver->name;
2708 xdev = interface_to_usbdev (udev);
2709 interface = udev->cur_altsetting;
2711 usb_get_dev (xdev);
2713 status = -ENOMEM;
2715 // set up our own records
2716 net = alloc_etherdev(sizeof(*dev));
2717 if (!net) {
2718 dbg ("can't kmalloc dev");
2719 goto out;
2722 dev = netdev_priv(net);
2723 dev->udev = xdev;
2724 dev->intf = udev;
2725 /* Add for DTS2011050903736 lxz 20110520 start*/
2726 /* linux kernel > 2.6.37: PowerManager needs disable_depth ==0 */
2727 #ifdef CONFIG_PM_RUNTIME
2728 if(LINUX_VERSION37_LATER)
2730 dev->intf->dev.power.disable_depth = 0;
2732 #endif
2733 /* Add for DTS2011050903736 lxz 20110520 end*/
2735 dev->driver_name = name;
2736 dev->driver_desc = "Huawei Ethernet Device";
2737 dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV
2738 | NETIF_MSG_PROBE | NETIF_MSG_LINK);
2739 skb_queue_head_init (&dev->rxq);
2740 skb_queue_head_init (&dev->txq);
2741 skb_queue_head_init (&dev->done);
2742 dev->bh.func = hw_bh;
2743 dev->bh.data = (unsigned long) dev;
2744 INIT_WORK (&dev->kevent, kevent);
2745 dev->delay.function = hw_bh;
2746 dev->delay.data = (unsigned long) dev;
2747 init_timer (&dev->delay);
2748 mutex_init (&dev->phy_mutex);
2750 dev->net = net;
2751 //strcpy (net->name, "eth%d");
2752 memcpy (net->dev_addr, node_id, sizeof node_id);
2754 /* rx and tx sides can use different message sizes;
2755 * bind() should set rx_urb_size in that case.
2757 dev->hard_mtu = net->mtu + net->hard_header_len;
2759 #if !(LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30))
2760 net->netdev_ops = &hw_netdev_ops;
2761 #else
2762 net->change_mtu = hw_change_mtu;
2763 net->get_stats = hw_get_stats;
2764 net->hard_start_xmit = hw_start_xmit;
2765 net->open = hw_open;
2766 net->stop = hw_stop;
2767 net->tx_timeout = hw_tx_timeout;
2768 #endif
2769 net->watchdog_timeo = TX_TIMEOUT_JIFFIES;
2770 net->ethtool_ops = &hw_ethtool_ops;
2773 status = hw_cdc_bind (dev, udev);
2774 if (status < 0){
2775 goto out1;
2779 strcpy (net->name, "eth%d");
2782 /* maybe the remote can't receive an Ethernet MTU */
2783 if (net->mtu > (dev->hard_mtu - net->hard_header_len)){
2784 net->mtu = dev->hard_mtu - net->hard_header_len;
2787 if (status >= 0 && dev->status){
2788 status = init_status (dev, udev);
2790 if (status < 0){
2791 goto out3;
2794 if (dev->is_ncm){
2795 dev->rx_urb_size = dev->ncm_ctx->rx_max_ntb;
2796 }else if (!dev->rx_urb_size){
2797 dev->rx_urb_size = dev->hard_mtu;
2800 dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1);
2802 SET_NETDEV_DEV(net, &udev->dev);
2803 status = register_netdev (net);
2804 if (status){
2805 goto out3;
2808 if (netif_msg_probe (dev)){
2809 devinfo (dev, "register '%s', %s",
2810 dev->driver_name,
2811 dev->driver_desc);
2814 // ok, it's ready to go.
2815 usb_set_intfdata (udev, dev);
2817 /*activate the download tlp feature*/
2818 if (0 < hw_send_tlp_download_request(udev)){
2819 devdbg(dev, "%s: The tlp is activated", __FUNCTION__);
2820 dev->hw_tlp_download_is_actived = 1;//activated successfully
2821 }else{
2822 dev->hw_tlp_download_is_actived = 0;//activation failed
2825 netif_device_attach (net);
2827 //kernel_thread(hw_check_conn_status, (void *)net, 0);
2829 /*set the carrier off as default*/
2830 netif_carrier_off(net);
2831 if (HW_JUNGO_BCDDEVICE_VALUE != dev->udev->descriptor.bcdDevice
2832 && BINTERFACESUBCLASS != udev->cur_altsetting->desc.bInterfaceSubClass) {
2833 dev->qmi_sync = 0;
2834 INIT_DELAYED_WORK(&dev->status_work, hw_cdc_check_status_work);
2835 schedule_delayed_work(&dev->status_work, 10*HZ);
2837 //hw_check_conn_status(udev);
2840 return 0;
2842 out3:
2843 hw_cdc_unbind (dev, udev);
2844 out1:
2845 free_netdev(net);
2846 out:
2847 usb_put_dev(xdev);
2848 return status;
2850 EXPORT_SYMBOL_GPL(hw_cdc_probe);
2852 /*-------------------------------------------------------------------------*/
2855 * suspend the whole driver as soon as the first interface is suspended
2856 * resume only when the last interface is resumed
2859 int hw_suspend (struct usb_interface *intf, pm_message_t message)
2861 struct hw_cdc_net *dev = usb_get_intfdata(intf);
2863 if (!dev->suspend_count++) {
2865 * accelerate emptying of the rx and tx queues, to avoid
2866 * having everything error out.
2868 netif_device_detach (dev->net);
2869 (void) unlink_urbs (dev, &dev->rxq);
2870 (void) unlink_urbs (dev, &dev->txq);
2872 * reattach so runtime management can use and
2873 * wake the device
2875 netif_device_attach (dev->net);
2877 return 0;
2879 EXPORT_SYMBOL_GPL(hw_suspend);
2881 int hw_resume (struct usb_interface *intf)
2883 struct hw_cdc_net *dev = usb_get_intfdata(intf);
2885 if (!--dev->suspend_count){
2886 tasklet_schedule (&dev->bh);
2889 return 0;
2891 EXPORT_SYMBOL_GPL(hw_resume);
2893 static int hw_cdc_reset_resume(struct usb_interface *intf)
2895 return hw_resume (intf);
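/* Vendor-specific control read asking the firmware to enable TLP
 * (multi-packet) download aggregation: bmRequestType 0xC0 is
 * device-to-host | vendor | device, bRequest 0x02, wValue 1, and the
 * device answers with one status byte (0x01 = activated). These values
 * are Huawei-specific, taken from the code below rather than from any
 * public specification. */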
2898 int hw_send_tlp_download_request(struct usb_interface *intf)
2900 struct usb_device *udev = interface_to_usbdev(intf);
2901 struct usb_host_interface *interface = intf->cur_altsetting;
2902 struct usbdevfs_ctrltransfer req = {0};
2903 unsigned char buf[256] = {0};
2904 int retval = 0;
2905 req.bRequestType = 0xC0;
2906 req.bRequest = 0x02;//activating the download tlp feature request
2907 req.wIndex = interface->desc.bInterfaceNumber;
2908 req.wValue = 1;
2909 req.wLength = 1;
2910 //req.data = buf;
2911 req.timeout = 1000;
2912 retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), req.bRequest,
2913 req.bRequestType, req.wValue, req.wIndex,
2914 buf, req.wLength, req.timeout);
2915 /*check whether the TLP feature was activated; a response value of 0x01 indicates success*/
2916 if (0 < retval && 0x01 == buf[0]){
2917 return retval;
2918 }else{
2919 return 0;
2922 ///////////////////////////////////////////////////////////////////////////////////////////////////////
2924 * probes control interface, claims data interface, collects the bulk
2925 * endpoints, activates data interface (if needed), maybe sets MTU.
2926 * all pure cdc
2928 //int hw_generic_cdc_bind(struct hw_cdc_net *dev, struct usb_interface *intf)
2929 #define USB_DEVICE_HUAWEI_DATA 0xFF
2930 static int hw_cdc_bind(struct hw_cdc_net *dev, struct usb_interface *intf)
2932 u8 *buf = intf->cur_altsetting->extra;
2933 int len = intf->cur_altsetting->extralen;
2934 struct usb_interface_descriptor *d;
2935 struct hw_dev_state *info = (void *) &dev->data;
2936 int status;
2937 struct usb_driver *driver = driver_of(intf);
2938 int i;
2939 struct ncm_ctx *ctx = NULL;
2941 devdbg(dev, "hw_cdc_bind enter\n");
2943 if (sizeof dev->data < sizeof *info){
2944 return -EDOM;
2947 dev->ncm_ctx = NULL;
2948 dev->is_ncm = is_ncm_interface(intf);
2950 if(dev->is_ncm)
2952 devdbg(dev, "this is ncm interface\n");
2953 dev->ncm_ctx = kzalloc(sizeof(struct ncm_ctx), GFP_KERNEL);
2954 if (dev->ncm_ctx == NULL){
2955 return -ENOMEM;
2957 ctx = dev->ncm_ctx;
2958 ctx->ndev = dev;
2960 spin_lock_init(&ctx->tx_lock);
2962 ctx->tx_timer.function = ncm_tx_timer_cb;
2963 ctx->tx_timer.data = (unsigned long)ctx;
2964 init_timer(&ctx->tx_timer);
2967 if(ncm_tx_timeout){
2968 ctx->tx_timeout_jiffies = msecs_to_jiffies(ncm_tx_timeout);
2969 }else{
2970 ctx->tx_timeout_jiffies = 0;
2973 devdbg(dev,"ctx->tx_timeout_jiffies:%ld",ctx->tx_timeout_jiffies);
2977 memset(info, 0, sizeof *info);
2978 info->control = intf;
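/* Walk the class-specific (CS_INTERFACE) functional descriptors appended
 * to the interface descriptor: buf[0] is bLength, buf[1] bDescriptorType
 * and buf[2] bDescriptorSubType, so each pass either records a
 * header/union/ether/NCM descriptor or skips ahead by bLength. */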
2979 while (len > 3) {
2980 if (buf [1] != USB_DT_CS_INTERFACE){
2981 goto next_desc;
2984 switch (buf [2]) {
2985 case USB_CDC_HEADER_TYPE:
2986 if (info->header) {
2987 dev_dbg(&intf->dev, "extra CDC header\n");
2988 goto bad_desc;
2990 info->header = (void *) buf;
2991 if (info->header->bLength != sizeof *info->header) {
2992 dev_dbg(&intf->dev, "CDC header len %u\n",
2993 info->header->bLength);
2994 goto bad_desc;
2996 break;
2997 case USB_CDC_UNION_TYPE:
2998 if (info->u) {
2999 dev_dbg(&intf->dev, "extra CDC union\n");
3000 goto bad_desc;
3002 info->u = (void *) buf;
3003 if (info->u->bLength != sizeof *info->u) {
3004 dev_dbg(&intf->dev, "CDC union len %u\n",
3005 info->u->bLength);
3006 goto bad_desc;
3009 /* we need a master/control interface (what we're
3010 * probed with) and a slave/data interface; union
3011 * descriptors sort this all out.
3013 info->control = usb_ifnum_to_if(dev->udev,
3014 info->u->bMasterInterface0);
3015 info->data = usb_ifnum_to_if(dev->udev,
3016 info->u->bSlaveInterface0);
3017 if (!info->control || !info->data) {
3018 dev_dbg(&intf->dev,
3019 "master #%u/%p slave #%u/%p\n",
3020 info->u->bMasterInterface0,
3021 info->control,
3022 info->u->bSlaveInterface0,
3023 info->data);
3024 goto bad_desc;
3026 if (info->control != intf) {
3027 dev_dbg(&intf->dev, "bogus CDC Union\n");
3028 /* Ambit USB Cable Modem (and maybe others)
3029 * interchanges master and slave interface.
3031 if (info->data == intf) {
3032 info->data = info->control;
3033 info->control = intf;
3034 } else{
3035 goto bad_desc;
3039 /*For the Jungo solution the NDIS device has no data interface, so there is no need to detect one*/
3040 if (HW_JUNGO_BCDDEVICE_VALUE != dev->udev->descriptor.bcdDevice
3041 && BINTERFACESUBCLASS != intf->cur_altsetting->desc.bInterfaceSubClass) {
3042 /* a data interface altsetting does the real i/o */
3043 d = &info->data->cur_altsetting->desc;
3044 //if (d->bInterfaceClass != USB_CLASS_CDC_DATA) { /*delete the standard CDC slave class detect*/
3045 if (d->bInterfaceClass != USB_DEVICE_HUAWEI_DATA
3046 && d->bInterfaceClass != USB_CLASS_CDC_DATA) {
3047 /*Add to detect CDC slave class either Huawei defined or standard*/
3048 dev_dbg(&intf->dev, "slave class %u\n",
3049 d->bInterfaceClass);
3050 goto bad_desc;
3053 break;
3054 case USB_CDC_ETHERNET_TYPE:
3055 if (info->ether) {
3056 dev_dbg(&intf->dev, "extra CDC ether\n");
3057 goto bad_desc;
3059 info->ether = (void *) buf;
3060 if (info->ether->bLength != sizeof *info->ether) {
3061 dev_dbg(&intf->dev, "CDC ether len %u\n",
3062 info->ether->bLength);
3063 goto bad_desc;
3065 dev->hard_mtu = le16_to_cpu(
3066 info->ether->wMaxSegmentSize);
3067 /* because of Zaurus, we may be ignoring the host
3068 * side link address we were given.
3070 break;
3071 case USB_CDC_NCM_TYPE:
3072 if (dev->ncm_ctx->ncm_desc){
3073 dev_dbg(&intf->dev, "extra NCM descriptor\n");
3074 }else{
3075 dev->ncm_ctx->ncm_desc = (void *)buf;
3077 break;
3079 next_desc:
3080 len -= buf [0]; /* bLength */
3081 buf += buf [0];
3084 if (!info->header || !info->u || (!dev->is_ncm &&!info->ether) ||
3085 (dev->is_ncm && !dev->ncm_ctx->ncm_desc)) {
3086 dev_dbg(&intf->dev, "missing cdc %s%s%s%sdescriptor\n",
3087 info->header ? "" : "header ",
3088 info->u ? "" : "union ",
3089 info->ether ? "" : "ether ",
3090 dev->ncm_ctx->ncm_desc ? "" : "ncm ");
3091 goto bad_desc;
3093 if(dev->is_ncm)
3095 ctx = dev->ncm_ctx;
3096 ctx->control = info->control;
3097 ctx->data = info->data;
3098 status = cdc_ncm_config(ctx);
3099 if (status < 0){
3100 goto error2;
3103 dev->rx_urb_size = ctx->rx_max_ntb;
3105 /* We must always have one spare SKB for the current-NTB (which
3106 * usbnet does not account for) */
3108 ctx->skb_pool_size = TX_QLEN_NCM;
3110 ctx->skb_pool = kzalloc(sizeof(struct sk_buff *) * ctx->skb_pool_size,
3111 GFP_KERNEL);
3112 if (ctx->skb_pool == NULL) {
3113 dev_dbg(&intf->dev, "failed allocating the SKB pool\n");
3114 goto error2;
3117 for (i = 0; i < ctx->skb_pool_size; i++) {
3118 ctx->skb_pool[i] = alloc_skb(ctx->tx_max_ntb, GFP_KERNEL);
3119 if (ctx->skb_pool[i] == NULL) {
3120 dev_dbg(&intf->dev, "failed allocating an SKB for the "
3121 "SKB pool\n");
3122 goto error3;
3126 ntb_clear(&ctx->curr_ntb);
3131 /*if the NDIS device is not a Jungo solution, assume it has a data interface and claim it*/
3132 if (HW_JUNGO_BCDDEVICE_VALUE != dev->udev->descriptor.bcdDevice
3133 && BINTERFACESUBCLASS != intf->cur_altsetting->desc.bInterfaceSubClass)
3135 /* claim data interface and set it up ... with side effects.
3136 * network traffic can't flow until an altsetting is enabled.
3139 /*Begin:add by h00122846 for ndis bind error at 20101106*/
3140 if(info->data->dev.driver != NULL)
3142 usb_driver_release_interface(driver, info->data);
3144 /*End:add by h00122846 for ndis bind error at 20101106*/
3146 status = usb_driver_claim_interface(driver, info->data, dev);
3147 if (status < 0){
3148 return status;
3152 status = hw_get_endpoints(dev, info->data);
3153 if (status < 0) {
3154 /* ensure immediate exit from hw_disconnect */
3155 goto error3;
3158 /* status endpoint: optional for CDC Ethernet, */
3159 dev->status = NULL;
3160 if (HW_JUNGO_BCDDEVICE_VALUE == dev->udev->descriptor.bcdDevice
3161 || BINTERFACESUBCLASS == intf->cur_altsetting->desc.bInterfaceSubClass
3162 || info->control->cur_altsetting->desc.bNumEndpoints == 1)
3164 struct usb_endpoint_descriptor *desc;
3165 dev->status = &info->control->cur_altsetting->endpoint [0];
3166 desc = &dev->status->desc;
3167 if (((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) != USB_ENDPOINT_XFER_INT)
3168 || ((desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) != USB_DIR_IN)
3169 || (le16_to_cpu(desc->wMaxPacketSize)
3170 < sizeof(struct usb_cdc_notification))
3171 || !desc->bInterval) {
3172 printk(KERN_ERR"fxz-%s:bad notification endpoint\n", __func__);
3173 dev->status = NULL;
3177 return hw_get_ethernet_addr(dev);
3179 error3:
3180 if(dev->is_ncm){
3181 for ( i = 0; i < ctx->skb_pool_size && ctx->skb_pool[i]; i++){
3182 dev_kfree_skb_any(ctx->skb_pool[i]);
3184 kfree(ctx->skb_pool);
3186 error2:
3187 /* ensure immediate exit from cdc_disconnect */
3188 usb_set_intfdata(info->data, NULL);
3189 usb_driver_release_interface(driver_of(intf), info->data);
3191 if(dev->ncm_ctx){
3192 kfree(dev->ncm_ctx);
3194 return status;
3196 bad_desc:
3197 devinfo(dev, "bad CDC descriptors\n");
3198 return -ENODEV;
3201 void hw_cdc_unbind(struct hw_cdc_net *dev, struct usb_interface *intf)
3203 struct hw_dev_state *info = (void *) &dev->data;
3204 struct usb_driver *driver = driver_of(intf);
3205 int i;
3207 /* disconnect master --> disconnect slave */
3208 if (intf == info->control && info->data) {
3209 /* ensure immediate exit from usbnet_disconnect */
3210 usb_set_intfdata(info->data, NULL);
3211 usb_driver_release_interface(driver, info->data);
3212 info->data = NULL;
3215 /* and vice versa (just in case) */
3216 else if (intf == info->data && info->control) {
3217 /* ensure immediate exit from usbnet_disconnect */
3218 usb_set_intfdata(info->control, NULL);
3219 usb_driver_release_interface(driver, info->control);
3220 info->control = NULL;
3222 if(dev->is_ncm && dev->ncm_ctx){
3223 del_timer_sync(&dev->ncm_ctx->tx_timer);
3225 ntb_free_dgram_list(&dev->ncm_ctx->curr_ntb);
3226 for (i = 0; i < dev->ncm_ctx->skb_pool_size; i++){
3227 dev_kfree_skb_any(dev->ncm_ctx->skb_pool[i]);
3229 kfree(dev->ncm_ctx->skb_pool);
3230 kfree(dev->ncm_ctx);
3231 dev->ncm_ctx = NULL;
3236 EXPORT_SYMBOL_GPL(hw_cdc_unbind);
3239 /*-------------------------------------------------------------------------
3241 * Communications Device Class, Ethernet Control model
3243 * Takes two interfaces. The DATA interface is inactive till an altsetting
3244 * is selected. Configuration data includes class descriptors. There's
3245 * an optional status endpoint on the control interface.
3247 * This should interop with whatever the 2.4 "CDCEther.c" driver
3248 * (by Brad Hards) talked with, with more functionality.
3250 *-------------------------------------------------------------------------*/
3252 static void dumpspeed(struct hw_cdc_net *dev, __le32 *speeds)
3254 if (netif_msg_timer(dev)){
3255 devinfo(dev, "link speeds: %u kbps up, %u kbps down",
3256 __le32_to_cpu(speeds[0]) / 1000,
3257 __le32_to_cpu(speeds[1]) / 1000);
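/* hw_get_ethernet_addr() below (like hw_eth_mac_addr() earlier) assigns
 * the fixed address 00:1e:10:1f:00:01 instead of parsing iMACAddress
 * from the CDC ether descriptor; 00:1E:10 appears to be a Huawei OUI,
 * and a constant address presumably keeps host-side configuration
 * stable across replugs of the same modem. */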
3261 static inline int hw_get_ethernet_addr(struct hw_cdc_net *dev)
3264 dev->net->dev_addr[0] = 0x00;
3265 dev->net->dev_addr[1] = 0x1e;
3267 dev->net->dev_addr[2] = 0x10;
3268 dev->net->dev_addr[3] = 0x1f;
3269 dev->net->dev_addr[4] = 0x00;
3270 dev->net->dev_addr[5] = 0x01;/*change 0x04 into 0x01 20100129*/
3272 return 0;
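/* bmRequestType values for the QMI passthrough below: WRITE_REQUEST
 * (0x21 = host-to-device | class | interface) is the type used with CDC
 * SEND_ENCAPSULATED_COMMAND, and READ_RESPONSE (0xa1 = device-to-host |
 * class | interface) the type used with GET_ENCAPSULATED_RESPONSE. */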
3276 enum {WRITE_REQUEST = 0x21, READ_RESPONSE = 0xa1};
3277 #define HW_CDC_OK 0
3278 #define HW_CDC_FAIL -1
3279 /*-------------------------------------------------------------------------*/
3280 /*The ioctl is called to send the qmi request to the device
3281 * or get the qmi response from the device*/
3282 static int hw_cdc_ioctl (struct usb_interface *intf, unsigned int code,
3283 void *buf)
3285 struct usb_device *udev = interface_to_usbdev(intf);
3286 struct hw_cdc_net *hwnet = (struct hw_cdc_net *)dev_get_drvdata(&intf->dev);
3287 struct usb_host_interface *interface = intf->cur_altsetting;
3288 struct usbdevfs_ctrltransfer *req = (struct usbdevfs_ctrltransfer *)buf;
3289 char *pbuf = NULL;
3290 int ret = -1;
3291 if (HW_JUNGO_BCDDEVICE_VALUE != hwnet->udev->descriptor.bcdDevice
3292 && BINTERFACESUBCLASS != intf->cur_altsetting->desc.bInterfaceSubClass){
3293 if (1 == hwnet->qmi_sync) {
3294 deverr(hwnet, "%s: The ndis port is busy.", __FUNCTION__);
3295 return HW_CDC_FAIL;
3299 if (USBDEVFS_CONTROL != code || NULL == req){
3300 deverr(hwnet, "%s: The request is not supported.", __FUNCTION__);
3301 return HW_CDC_FAIL;
3304 if (0 < req->wLength){
3305 pbuf = (char *)kmalloc(req->wLength + 1, GFP_KERNEL);
3306 if (NULL == pbuf){
3307 deverr(hwnet, "%s: Kmalloc the buffer failed.", __FUNCTION__);
3308 return HW_CDC_FAIL;
3310 memset(pbuf, 0, req->wLength);
3313 switch (req->bRequestType)
3315 case WRITE_REQUEST:
3317 if (NULL != req->data && 0 < req->wLength){
3318 if (copy_from_user(pbuf, req->data, req->wLength)){
3319 deverr(hwnet, "usbnet_cdc_ioctl: copy_from_user failed");
3320 goto op_error;
3323 }else{
3324 pbuf = NULL;
3325 req->wLength = 0;
3327 pbuf[req->wLength] = 0;
3328 ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), req->bRequest,
3329 req->bRequestType, req->wValue, interface->desc.bInterfaceNumber,
3330 pbuf, req->wLength, req->timeout);
3331 break;
3333 case READ_RESPONSE:
3335 if (NULL == req->data || 0 >= req->wLength || NULL == pbuf){
3336 deverr(hwnet, "%s: The buffer is null, cannot read the response.",
3337 __FUNCTION__);
3338 goto op_error;
3340 ret = usb_control_msg(udev,
3341 usb_rcvctrlpipe(udev, 0),
3342 req->bRequest,
3343 req->bRequestType,
3344 req->wValue,
3345 interface->desc.bInterfaceNumber,
3346 pbuf,
3347 req->wLength,
3348 req->timeout);
3350 if (0 < ret){
3351 if (HW_JUNGO_BCDDEVICE_VALUE != hwnet->udev->descriptor.bcdDevice
3352 && BINTERFACESUBCLASS != intf->cur_altsetting->desc.bInterfaceSubClass)
3354 /*check the connection indication*/
3355 if (0x04 == pbuf[6] && 0x22 == pbuf[9] && 0x00 == pbuf[10]){
3356 if (0x02 == pbuf[16]){
3357 if (hwnet){
3358 netif_carrier_on(hwnet->net);
3359 devinfo(hwnet, "CDC: network connection: connected\n");
3361 }else{
3362 if (hwnet){
3363 netif_carrier_off(hwnet->net);
3364 devinfo(hwnet, "CDC: network connection: disconnected\n");
3369 if (copy_to_user(req->data, pbuf, req->wLength)){
3370 deverr(hwnet, "%s: copy_to_user failed", __FUNCTION__);
3371 goto op_error;
3374 break;
3376 default:
3377 break;
3380 if (NULL != pbuf){
3381 kfree(pbuf);
3382 pbuf = NULL;
3385 return HW_CDC_OK;
3387 op_error:
3388 if (NULL != pbuf){
3389 kfree(pbuf);
3390 pbuf = NULL;
3392 return HW_CDC_FAIL;
3395 /* deleted by lKF36757 2011/12/26 to prevent HiLink devices from loading hw_cdc_driver.ko */
3397 *#define HUAWEI_ETHER_INTERFACE \
3398 * .bInterfaceClass = USB_CLASS_COMM, \
3399 * .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, \
3400 * .bInterfaceProtocol = USB_CDC_PROTO_NONE
3404 #define HUAWEI_NDIS_INTERFACE \
3405 .bInterfaceClass = USB_CLASS_COMM, \
3406 .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, \
3407 .bInterfaceProtocol = 0xff
3409 #define HUAWEI_NCM_INTERFACE \
3410 .bInterfaceClass = USB_CLASS_COMM, \
3411 .bInterfaceSubClass = 0x0d, \
3412 .bInterfaceProtocol = 0xff
3414 #define HUAWEI_NCM_INTERFACE2 \
3415 .bInterfaceClass = USB_CLASS_COMM, \
3416 .bInterfaceSubClass = 0x0d, \
3417 .bInterfaceProtocol = 0x00
3420 /*Add for PID optimized fangxz 20091105*/
3421 #define HUAWEI_NDIS_OPTIMIZED_INTERFACE \
3422 .bInterfaceClass = 0xFF, \
3423 .bInterfaceSubClass = 0x01, \
3424 .bInterfaceProtocol = 0x09
3426 /*Add for PID optimized lxz 20120508*/
3427 #define HUAWEI_NDIS_OPTIMIZED_INTERFACE_JUNGO \
3428 .bInterfaceClass = 0xFF, \
3429 .bInterfaceSubClass = 0x02, \
3430 .bInterfaceProtocol = 0x09
3432 /*Add for PID optimized marui 20100628*/
3433 #define HUAWEI_NDIS_OPTIMIZED_INTERFACE_VDF \
3434 .bInterfaceClass = 0xFF, \
3435 .bInterfaceSubClass = 0x01, \
3436 .bInterfaceProtocol = 0x39
3438 /*Add for PID optimized lxz 20120508*/
3439 #define HUAWEI_NDIS_OPTIMIZED_INTERFACE_VDF_JUNGO \
3440 .bInterfaceClass = 0xFF, \
3441 .bInterfaceSubClass = 0x02, \
3442 .bInterfaceProtocol = 0x39
3444 /*Add for PID optimized lxz 20120508*/
3445 #define HUAWEI_NDIS_SINGLE_INTERFACE \
3446 .bInterfaceClass = 0xFF, \
3447 .bInterfaceSubClass = 0x01, \
3448 .bInterfaceProtocol = 0x07
3450 /*Add for PID optimized marui 20100811*/
3451 #define HUAWEI_NDIS_SINGLE_INTERFACE_JUNGO \
3452 .bInterfaceClass = 0xFF, \
3453 .bInterfaceSubClass = 0x02, \
3454 .bInterfaceProtocol = 0x07
3456 /*Add for PID optimized lxz 20120508*/
3457 #define HUAWEI_NDIS_SINGLE_INTERFACE_VDF \
3458 .bInterfaceClass = 0xFF, \
3459 .bInterfaceSubClass = 0x01, \
3460 .bInterfaceProtocol = 0x37
3462 /*Add for PID optimized marui 20100811*/
3463 #define HUAWEI_NDIS_SINGLE_INTERFACE_VDF_JUNGO \
3464 .bInterfaceClass = 0xFF, \
3465 .bInterfaceSubClass = 0x02, \
3466 .bInterfaceProtocol = 0x37
3468 /*Add for PID optimized lxz 20120508*/
3469 #define HUAWEI_NCM_OPTIMIZED_INTERFACE \
3470 .bInterfaceClass = 0xFF, \
3471 .bInterfaceSubClass = 0x01, \
3472 .bInterfaceProtocol = 0x16
3474 /*Add for PID optimized liaojianping 20100811*/
3475 #define HUAWEI_NCM_OPTIMIZED_INTERFACE_JUNGO \
3476 .bInterfaceClass = 0xFF, \
3477 .bInterfaceSubClass = 0x02, \
3478 .bInterfaceProtocol = 0x16
3480 /*Add for PID optimized lxz 20120508*/
3481 #define HUAWEI_NCM_OPTIMIZED_INTERFACE_VDF \
3482 .bInterfaceClass = 0xFF, \
3483 .bInterfaceSubClass = 0x01, \
3484 .bInterfaceProtocol = 0x46
3486 #define HUAWEI_NCM_OPTIMIZED_INTERFACE_VDF_JUNGO \
3487 .bInterfaceClass = 0xFF, \
3488 .bInterfaceSubClass = 0x02, \
3489 .bInterfaceProtocol = 0x46
3491 /*Add for PID optimized xiaruihu 20110825*/
3492 #define HUAWEI_INTERFACE_NDIS_NO_3G_JUNGO \
3493 .bInterfaceClass = 0xFF, \
3494 .bInterfaceSubClass = 0x02, \
3495 .bInterfaceProtocol = 0x11
3497 #define HUAWEI_INTERFACE_NDIS_NO_3G_QUALCOMM \
3498 .bInterfaceClass = 0xFF, \
3499 .bInterfaceSubClass = 0x01, \
3500 .bInterfaceProtocol = 0x11
3502 /*Add for PID optimized xiaruihu 20111008*/
3503 #define HUAWEI_INTERFACE_NDIS_HW_QUALCOMM \
3504 .bInterfaceClass = 0xFF, \
3505 .bInterfaceSubClass = 0x01, \
3506 .bInterfaceProtocol = 0x67
3508 #define HUAWEI_INTERFACE_NDIS_CONTROL_QUALCOMM \
3509 .bInterfaceClass = 0xFF, \
3510 .bInterfaceSubClass = 0x01, \
3511 .bInterfaceProtocol = 0x69
3513 #define HUAWEI_INTERFACE_NDIS_NCM_QUALCOMM \
3514 .bInterfaceClass = 0xFF, \
3515 .bInterfaceSubClass = 0x01, \
3516 .bInterfaceProtocol = 0x76
3518 #define HUAWEI_INTERFACE_NDIS_HW_JUNGO \
3519 .bInterfaceClass = 0xFF, \
3520 .bInterfaceSubClass = 0x02, \
3521 .bInterfaceProtocol = 0x67
3523 #define HUAWEI_INTERFACE_NDIS_CONTROL_JUNGO \
3524 .bInterfaceClass = 0xFF, \
3525 .bInterfaceSubClass = 0x02, \
3526 .bInterfaceProtocol = 0x69
3528 #define HUAWEI_INTERFACE_NDIS_NCM_JUNGO \
3529 .bInterfaceClass = 0xFF, \
3530 .bInterfaceSubClass = 0x02, \
3531 .bInterfaceProtocol = 0x76
3534 static const struct usb_device_id hw_products [] = {
3535 /*The PRODUCT ID comparison was removed; by default all NDIS devices with a HUAWEI_ETHER_INTERFACE interface type are supported*/
3536 /* deleted by lKF36757 2011/12/26 to prevent HiLink devices from loading hw_cdc_driver.ko */
3539 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
3540 | USB_DEVICE_ID_MATCH_VENDOR,
3541 .idVendor = 0x12d1,
3542 HUAWEI_ETHER_INTERFACE,
3543 },*/
3545 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
3546 | USB_DEVICE_ID_MATCH_VENDOR,
3547 .idVendor = 0x12d1,
3548 HUAWEI_NDIS_INTERFACE,
3550 /*Add for PID optimized fangxz 20091105*/
3552 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
3553 | USB_DEVICE_ID_MATCH_VENDOR,
3554 .idVendor = 0x12d1,
3555 HUAWEI_NDIS_OPTIMIZED_INTERFACE,
3557 /*Add for VDF PID optimized marui 20100628*/
3559 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
3560 | USB_DEVICE_ID_MATCH_VENDOR,
3561 .idVendor = 0x12d1,
3562 HUAWEI_NDIS_OPTIMIZED_INTERFACE_VDF,
3564 /*Add for PID optimized marui 20100811*/
3566 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
3567 | USB_DEVICE_ID_MATCH_VENDOR,
3568 .idVendor = 0x12d1,
3569 HUAWEI_NDIS_OPTIMIZED_INTERFACE_JUNGO,
3571 /*Add for VDF PID optimized marui 20100811*/
3573 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
3574 | USB_DEVICE_ID_MATCH_VENDOR,
3575 .idVendor = 0x12d1,
3576 HUAWEI_NDIS_OPTIMIZED_INTERFACE_VDF_JUNGO,
3578 /*Add for NCM PID optimized lxz 20120508*/
3580 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
3581 | USB_DEVICE_ID_MATCH_VENDOR,
3582 .idVendor = 0x12d1,
3583 HUAWEI_NCM_OPTIMIZED_INTERFACE,
3585 /*Add for NCM PID optimized liaojianping 20100911*/
3587 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
3588 | USB_DEVICE_ID_MATCH_VENDOR,
3589 .idVendor = 0x12d1,
3590 HUAWEI_NCM_OPTIMIZED_INTERFACE_JUNGO,
3592 /*Add for VDF NCM PID optimized lxz 20120508*/
3594 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
3595 | USB_DEVICE_ID_MATCH_VENDOR,
3596 .idVendor = 0x12d1,
3597 HUAWEI_NCM_OPTIMIZED_INTERFACE_VDF,
3599 /*Add for VDF NCM PID optimized liaojianping 20100911*/
3601 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
3602 | USB_DEVICE_ID_MATCH_VENDOR,
3603 .idVendor = 0x12d1,
3604 HUAWEI_NCM_OPTIMIZED_INTERFACE_VDF_JUNGO,
3606 /*Add for ncm liaojianping 20100911*/
3608 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
3609 | USB_DEVICE_ID_MATCH_VENDOR,
3610 .idVendor = 0x12d1,
3611 HUAWEI_NCM_INTERFACE,
3613 /*Add for VDF NCM PID optimized liaojianping 20100911*/
3615 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
3616 | USB_DEVICE_ID_MATCH_VENDOR,
3617 .idVendor = 0x12d1,
3618 HUAWEI_NCM_INTERFACE2,
3620 /*Add for PID optimized xiaruihu 20110825*/
3622 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
3623 | USB_DEVICE_ID_MATCH_VENDOR,
3624 .idVendor = 0x12d1,
3625 HUAWEI_INTERFACE_NDIS_NO_3G_JUNGO
3628 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
3629 | USB_DEVICE_ID_MATCH_VENDOR,
3630 .idVendor = 0x12d1,
3631 HUAWEI_INTERFACE_NDIS_NO_3G_QUALCOMM
3633 /*Add for PID optimized xiaruihu 20111008*/
3635 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
3636 | USB_DEVICE_ID_MATCH_VENDOR,
3637 .idVendor = 0x12d1,
3638 HUAWEI_INTERFACE_NDIS_HW_QUALCOMM
3641 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
3642 | USB_DEVICE_ID_MATCH_VENDOR,
3643 .idVendor = 0x12d1,
3644 HUAWEI_INTERFACE_NDIS_HW_JUNGO
3647 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
3648 | USB_DEVICE_ID_MATCH_VENDOR,
3649 .idVendor = 0x12d1,
3650 HUAWEI_INTERFACE_NDIS_CONTROL_QUALCOMM
3653 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
3654 | USB_DEVICE_ID_MATCH_VENDOR,
3655 .idVendor = 0x12d1,
3656 HUAWEI_INTERFACE_NDIS_CONTROL_JUNGO
3659 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
3660 | USB_DEVICE_ID_MATCH_VENDOR,
3661 .idVendor = 0x12d1,
3662 HUAWEI_INTERFACE_NDIS_NCM_QUALCOMM
3665 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
3666 | USB_DEVICE_ID_MATCH_VENDOR,
3667 .idVendor = 0x12d1,
3668 HUAWEI_INTERFACE_NDIS_NCM_JUNGO
3671 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
3672 | USB_DEVICE_ID_MATCH_VENDOR,
3673 .idVendor = 0x12d1,
3674 HUAWEI_NDIS_SINGLE_INTERFACE
3677 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
3678 | USB_DEVICE_ID_MATCH_VENDOR,
3679 .idVendor = 0x12d1,
3680 HUAWEI_NDIS_SINGLE_INTERFACE_JUNGO
3683 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
3684 | USB_DEVICE_ID_MATCH_VENDOR,
3685 .idVendor = 0x12d1,
3686 HUAWEI_NDIS_SINGLE_INTERFACE_VDF
3689 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
3690 | USB_DEVICE_ID_MATCH_VENDOR,
3691 .idVendor = 0x12d1,
3692 HUAWEI_NDIS_SINGLE_INTERFACE_VDF_JUNGO
3694 { }, // END
3696 MODULE_DEVICE_TABLE(usb, hw_products);
3698 static int hw_cdc_reset_resume(struct usb_interface *intf);
3699 static struct usb_driver hw_ether_driver = {
3700 .name = "huawei_ether",
3701 .id_table = hw_products,
3702 .probe = hw_cdc_probe,
3703 .disconnect = hw_disconnect,
3704 #if !(LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36))
3705 .unlocked_ioctl = hw_cdc_ioctl,
3706 #else
3707 .ioctl = hw_cdc_ioctl,
3708 #endif
3709 .suspend = hw_suspend,
3710 .resume = hw_resume,
3711 .reset_resume = hw_cdc_reset_resume,
3715 static void hw_cdc_status(struct hw_cdc_net *dev, struct urb *urb)
3717 struct usb_cdc_notification *event;
3719 if (urb->actual_length < sizeof *event){
3720 return;
3723 /* SPEED_CHANGE can get split into two 8-byte packets */
3724 if (test_and_clear_bit(EVENT_STS_SPLIT, &dev->flags)) {
3725 devdbg(dev, "The speed is changed by status event");
3726 dumpspeed(dev, (__le32 *) urb->transfer_buffer);
3727 return;
3730 event = urb->transfer_buffer;
3731 switch (event->bNotificationType) {
3732 case USB_CDC_NOTIFY_NETWORK_CONNECTION:
3734 if (netif_msg_timer(dev)){
3735 devdbg(dev, "CDC: carrier %s",
3736 event->wValue ? "on" : "off");
3738 if (event->wValue){
3739 netif_carrier_on(dev->net);
3740 devdbg(dev, "CDC: network connection: connected\n");
3741 }else{
3742 netif_carrier_off(dev->net);
3743 devdbg(dev, "CDC: network connection: disconnected\n");
3746 break;
3747 case USB_CDC_NOTIFY_SPEED_CHANGE: /* tx/rx rates */
3748 if (netif_msg_timer(dev)){
3749 devdbg(dev, "CDC: speed change (len %d)",
3750 urb->actual_length);
3752 if (urb->actual_length != (sizeof *event + 8)){
3753 set_bit(EVENT_STS_SPLIT, &dev->flags);
3754 }else{
3755 dumpspeed(dev, (__le32 *) &event[1]);
3757 break;
3759 case USB_CDC_NOTIFY_RESPONSE_AVAILABLE:
3761 break;
3764 default:
3765 devdbg(dev, "%s: CDC: unexpected notification %02x!", __FUNCTION__,
3766 event->bNotificationType);
3767 break;
3772 static int __init hw_cdc_init(void)
3774 BUG_ON((sizeof(((struct hw_cdc_net *)0)->data)
3775 < sizeof(struct hw_dev_state)));
3777 return usb_register(&hw_ether_driver);
3779 fs_initcall(hw_cdc_init);
3781 static int hw_send_qmi_request(struct usb_interface *intf,
3782 unsigned char *snd_req, int snd_len,
3783 unsigned char *read_resp, int resp_len);
3784 static int hw_send_qmi_request_no_resp(struct usb_interface *intf,
3785 unsigned char *snd_req, int snd_len,
3786 unsigned char *read_resp, int resp_len);
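/* The byte arrays in hw_cdc_check_status_work() below are raw QMI
 * messages sent over the control channel: byte 0 (0x01) is the QMUX
 * interface type, bytes 1-2 the QMUX length, byte 4 the service
 * (0x00 = CTL, 0x01 = WDS) and byte 5 the client id; the embedded
 * message ids select "get client id" (CTL 0x0022), "release client id"
 * (CTL 0x0023) and "get packet service status" (WDS 0x0022) requests.
 * This interpretation follows the QMI framing used by qmi_wwan/libqmi
 * rather than any vendor documentation. */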
3789 //int hw_check_conn_status(struct usb_interface *intf)
3790 static void hw_cdc_check_status_work(struct work_struct *work)
3793 //struct hw_cdc_net *net = usb_get_intfdata(intf);
3794 //usb_device *udev = interface_to_usbdev(intf);
3795 struct hw_cdc_net *dev = container_of(work, struct hw_cdc_net, status_work.work);
3797 int ret;
3798 int repeat = 0;
3799 unsigned char resp_buf[56] = {0};
3800 unsigned char client_id_req[0x10] = {0x01, 0x0f, 0x00, 0x00, 0x00,
3801 0x00, 0x00, 0x06, 0x22, 0x00,
3802 0x04, 0x00, 0x01, 0x01, 0x00, 0x01};
3803 unsigned char rel_client_id_req[0x11] = {0x01, 0x10, 0x00, 0x00, 0x00,
3804 0x00, 0x00, 0x00, 0x23,0x00,
3805 0x05, 0x00, 0x01, 0x02, 0x00,
3806 0x01, 0x00};
3807 unsigned char status_req[13] = {0x01, 0x0c, 0x00, 0x00, 0x01,
3808 0x00, 0x00, 0x02, 0x00,
3809 0x22, 0x00, 0x00, 0x00};
3810 unsigned char set_instance_req[0x10] = {0x01, 0x0f, 0x00, 0x00, 0x00,
3811 0x00, 0x00, 0x06, 0x20, 0x00,
3812 0x04, 0x00, 0x01, 0x01, 0x00, 0x00};
3813 dev->qmi_sync = 1;
3815 hw_send_qmi_request_no_resp(dev->intf, set_instance_req, 0x10, resp_buf, 56);
3817 ret = hw_send_qmi_request(dev->intf, client_id_req, 0x10, resp_buf, 56);
3818 if (0 == ret){
3819 printk(KERN_ERR"%s: Get client ID failed\n", __FUNCTION__);
3820 goto failed;
3822 status_req[5] = resp_buf[23];
3823 memset(resp_buf, 0, 56 * sizeof (unsigned char));
3825 //for (repeat = 0; repeat < 3; repeat ++)
3826 for (repeat = 0; repeat < 3; repeat++)
3828 ret = hw_send_qmi_request(dev->intf, status_req, 13, resp_buf, 56);
3829 if (0 == ret){
3830 printk(KERN_ERR"%s: Get connection status failed\n", __FUNCTION__);
3831 continue;
3834 if (0x02 == resp_buf[23]){
3835 printk(KERN_ERR"%s: carrier on\n", __FUNCTION__);
3836 netif_carrier_on(dev->net);
3837 break;
3838 } else {
3840 printk(KERN_ERR"%s: carrier off\n", __FUNCTION__);
3841 //netif_carrier_off(dev->net);
3844 failed:
3845 rel_client_id_req[0x0f] = 0x02;
3846 rel_client_id_req[0x10] = status_req[5];
3847 memset(resp_buf, 0, 56 * sizeof (unsigned char));
3849 ret = hw_send_qmi_request_no_resp(dev->intf, rel_client_id_req, 0x11, resp_buf, 56);
3851 dev->qmi_sync = 0;
3852 cancel_delayed_work(&dev->status_work);
3853 //memset(resp_buf, 0, 56 * sizeof (unsigned char));
3854 return;
3857 static int hw_send_qmi_request_no_resp(struct usb_interface *intf,
3858 unsigned char *snd_req, int snd_len,
3859 unsigned char *read_resp, int resp_len)
3861 int ret;
3862 int index = 0;
3863 struct usb_device *udev = interface_to_usbdev(intf);
3864 for (index = 0; index < 3; index++)
3866 ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x00,
3867 0x21, 0x00, intf->cur_altsetting->desc.bInterfaceNumber,
3868 snd_req, snd_len, 5000);
3869 if (ret < 0){
3870 printk(KERN_ERR"%s: send the qmi request failed\n", __FUNCTION__);
3871 continue;
3873 else {
3874 break;
3877 return ret;
3880 static int hw_send_qmi_request(struct usb_interface *intf,
3881 unsigned char *snd_req, int snd_len,
3882 unsigned char *read_resp, int resp_len)
3884 int ret;
3885 int index = 0;
3886 struct usb_device *udev = interface_to_usbdev(intf);
3887 struct hw_cdc_net *net = usb_get_intfdata(intf);
3889 ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x00,
3890 0x21, 0x00, intf->cur_altsetting->desc.bInterfaceNumber,
3891 snd_req, snd_len, 5000);
3893 if (ret < 0){
3894 printk(KERN_ERR"%s: send the qmi request failed\n", __FUNCTION__);
3895 return ret;
3898 while(index < 10){
3899 ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x01,
3900 0xA1, 0x00, intf->cur_altsetting->desc.bInterfaceNumber,
3901 read_resp, resp_len, 1000);
3902 if (ret <= 0){
3903 printk(KERN_ERR"%s: %d Get response failed\n", __FUNCTION__, index);
3904 msleep(10);
3905 } else {
3906 if (0x00 == read_resp[4]){
3907 if (0x01 == read_resp[6] && snd_req[5] == read_resp[5]
3908 && snd_req[8] == read_resp[8] && snd_req[9] == read_resp[9]) {
3909 ret = 1;
3910 break;
3912 } else if (0x01 == read_resp[4]) {
3913 if (0x02 == read_resp[6] && snd_req[5] == read_resp[5]
3914 && snd_req[9] == read_resp[9] && snd_req[10] == read_resp[10]) {
3915 printk(KERN_ERR"%s: get the conn status req=%02x resp\n",
3916 __FUNCTION__, snd_req[9]);
3917 ret = 1;
3918 break;
3920 } else if (0x04 == read_resp[4]){
3921 if (snd_req[9] == read_resp[9] && snd_req[10] == read_resp[10]
3922 && 0x02 == read_resp[16]) {
3923 printk(KERN_ERR"%s: get the conn status ind= carrier on\n",
3924 __FUNCTION__);
3925 netif_carrier_on(net->net);
3929 //index ++;
3930 index++;
3931 continue;
3934 if (index >= 10){
3935 ret = 0;
3937 return ret;
3939 static void __exit hw_cdc_exit(void)
3941 usb_deregister(&hw_ether_driver);
3943 module_exit(hw_cdc_exit);
3946 MODULE_AUTHOR(DRIVER_AUTHOR);
3947 MODULE_DESCRIPTION(DRIVER_DESC);
3948 MODULE_VERSION(DRIVER_VERSION);
3949 MODULE_LICENSE("GPL");