lan78xx: relocate mdix setting to phy driver
drivers/net/usb/lan78xx.c
/*
 * Copyright (C) 2015 Microchip Technology
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/version.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/usb.h>
#include <linux/crc32.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/mdio.h>
#include <net/ip6_checksum.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/microchipphy.h>
#include "lan78xx.h"
#define DRIVER_AUTHOR	"WOOJUNG HUH <woojung.huh@microchip.com>"
#define DRIVER_DESC	"LAN78XX USB 3.0 Gigabit Ethernet Devices"
#define DRIVER_NAME	"lan78xx"
#define DRIVER_VERSION	"1.0.5"

#define TX_TIMEOUT_JIFFIES		(5 * HZ)
#define THROTTLE_JIFFIES		(HZ / 8)
#define UNLINK_TIMEOUT_MS		3

#define RX_MAX_QUEUE_MEMORY		(60 * 1518)

#define SS_USB_PKT_SIZE			(1024)
#define HS_USB_PKT_SIZE			(512)
#define FS_USB_PKT_SIZE			(64)

#define MAX_RX_FIFO_SIZE		(12 * 1024)
#define MAX_TX_FIFO_SIZE		(12 * 1024)
#define DEFAULT_BURST_CAP_SIZE		(MAX_TX_FIFO_SIZE)
#define DEFAULT_BULK_IN_DELAY		(0x0800)
#define MAX_SINGLE_PACKET_SIZE		(9000)
#define DEFAULT_TX_CSUM_ENABLE		(true)
#define DEFAULT_RX_CSUM_ENABLE		(true)
#define DEFAULT_TSO_CSUM_ENABLE		(true)
#define DEFAULT_VLAN_FILTER_ENABLE	(true)
#define TX_OVERHEAD			(8)
#define RXW_PADDING			2

#define LAN78XX_USB_VENDOR_ID		(0x0424)
#define LAN7800_USB_PRODUCT_ID		(0x7800)
#define LAN7850_USB_PRODUCT_ID		(0x7850)
#define LAN78XX_EEPROM_MAGIC		(0x78A5)
#define LAN78XX_OTP_MAGIC		(0x78F3)

#define MII_READ			1
#define MII_WRITE			0

#define EEPROM_INDICATOR		(0xA5)
#define EEPROM_MAC_OFFSET		(0x01)
#define MAX_EEPROM_SIZE			512
#define OTP_INDICATOR_1			(0xF3)
#define OTP_INDICATOR_2			(0xF7)

#define WAKE_ALL			(WAKE_PHY | WAKE_UCAST | \
					 WAKE_MCAST | WAKE_BCAST | \
					 WAKE_ARP | WAKE_MAGIC)

/* USB related defines */
#define BULK_IN_PIPE			1
#define BULK_OUT_PIPE			2

/* default autosuspend delay (mSec)*/
#define DEFAULT_AUTOSUSPEND_DELAY	(10 * 1000)

/* statistic update interval (mSec) */
#define STAT_UPDATE_TIMER		(1 * 1000)

/* defines interrupts from interrupt EP */
#define MAX_INT_EP			(32)
#define INT_EP_INTEP			(31)
#define INT_EP_OTP_WR_DONE		(28)
#define INT_EP_EEE_TX_LPI_START		(26)
#define INT_EP_EEE_TX_LPI_STOP		(25)
#define INT_EP_EEE_RX_LPI		(24)
#define INT_EP_MAC_RESET_TIMEOUT	(23)
#define INT_EP_RDFO			(22)
#define INT_EP_TXE			(21)
#define INT_EP_USB_STATUS		(20)
#define INT_EP_TX_DIS			(19)
#define INT_EP_RX_DIS			(18)
#define INT_EP_PHY			(17)
#define INT_EP_DP			(16)
#define INT_EP_MAC_ERR			(15)
#define INT_EP_TDFU			(14)
#define INT_EP_TDFO			(13)
#define INT_EP_UTX			(12)
#define INT_EP_GPIO_11			(11)
#define INT_EP_GPIO_10			(10)
#define INT_EP_GPIO_9			(9)
#define INT_EP_GPIO_8			(8)
#define INT_EP_GPIO_7			(7)
#define INT_EP_GPIO_6			(6)
#define INT_EP_GPIO_5			(5)
#define INT_EP_GPIO_4			(4)
#define INT_EP_GPIO_3			(3)
#define INT_EP_GPIO_2			(2)
#define INT_EP_GPIO_1			(1)
#define INT_EP_GPIO_0			(0)
static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"Rx Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Bytes Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Bytes Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};
struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};
struct lan78xx_statstage64 {
	u64 rx_fcs_errors;
	u64 rx_alignment_errors;
	u64 rx_fragment_errors;
	u64 rx_jabber_errors;
	u64 rx_undersize_frame_errors;
	u64 rx_oversize_frame_errors;
	u64 rx_dropped_frames;
	u64 rx_unicast_byte_count;
	u64 rx_broadcast_byte_count;
	u64 rx_multicast_byte_count;
	u64 rx_unicast_frames;
	u64 rx_broadcast_frames;
	u64 rx_multicast_frames;
	u64 rx_pause_frames;
	u64 rx_64_byte_frames;
	u64 rx_65_127_byte_frames;
	u64 rx_128_255_byte_frames;
	u64 rx_256_511_bytes_frames;
	u64 rx_512_1023_byte_frames;
	u64 rx_1024_1518_byte_frames;
	u64 rx_greater_1518_byte_frames;
	u64 eee_rx_lpi_transitions;
	u64 eee_rx_lpi_time;
	u64 tx_fcs_errors;
	u64 tx_excess_deferral_errors;
	u64 tx_carrier_errors;
	u64 tx_bad_byte_count;
	u64 tx_single_collisions;
	u64 tx_multiple_collisions;
	u64 tx_excessive_collision;
	u64 tx_late_collisions;
	u64 tx_unicast_byte_count;
	u64 tx_broadcast_byte_count;
	u64 tx_multicast_byte_count;
	u64 tx_unicast_frames;
	u64 tx_broadcast_frames;
	u64 tx_multicast_frames;
	u64 tx_pause_frames;
	u64 tx_64_byte_frames;
	u64 tx_65_127_byte_frames;
	u64 tx_128_255_byte_frames;
	u64 tx_256_511_bytes_frames;
	u64 tx_512_1023_byte_frames;
	u64 tx_1024_1518_byte_frames;
	u64 tx_greater_1518_byte_frames;
	u64 eee_tx_lpi_transitions;
	u64 eee_tx_lpi_time;
};
struct lan78xx_net;

struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 rfe_ctl;
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast;
	struct work_struct set_vlan;
	u32 wol;
};
enum skb_state {
	illegal = 0,
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};

struct skb_data { /* skb->cb is one of these */
	struct urb *urb;
	struct lan78xx_net *dev;
	enum skb_state state;
	size_t length;
	int num_of_packet;
};
struct usb_context {
	struct usb_ctrlrequest req;
	struct lan78xx_net *dev;
};

#define EVENT_TX_HALT			0
#define EVENT_RX_HALT			1
#define EVENT_RX_MEMORY			2
#define EVENT_STS_SPLIT			3
#define EVENT_LINK_RESET		4
#define EVENT_RX_PAUSED			5
#define EVENT_DEV_WAKING		6
#define EVENT_DEV_ASLEEP		7
#define EVENT_DEV_OPEN			8
#define EVENT_STAT_UPDATE		9
struct statstage {
	struct mutex access_lock; /* for stats access */
	struct lan78xx_statstage saved;
	struct lan78xx_statstage rollover_count;
	struct lan78xx_statstage rollover_max;
	struct lan78xx_statstage64 curr_stat;
};

struct irq_domain_data {
	struct irq_domain *irqdomain;
	unsigned int phyirq;
	struct irq_chip *irqchip;
	irq_flow_handler_t irq_handler;
	u32 irqenable;
	struct mutex irq_lock; /* for irq bus access */
};
struct lan78xx_net {
	struct net_device *net;
	struct usb_device *udev;
	struct usb_interface *intf;
	void *driver_priv;

	int rx_qlen;
	int tx_qlen;
	struct sk_buff_head rxq;
	struct sk_buff_head txq;
	struct sk_buff_head done;
	struct sk_buff_head rxq_pause;
	struct sk_buff_head txq_pend;

	struct tasklet_struct bh;
	struct delayed_work wq;

	struct usb_host_endpoint *ep_blkin;
	struct usb_host_endpoint *ep_blkout;
	struct usb_host_endpoint *ep_intr;

	int msg_enable;

	struct urb *urb_intr;
	struct usb_anchor deferred;

	struct mutex phy_mutex; /* for phy access */
	unsigned pipe_in, pipe_out, pipe_intr;

	u32 hard_mtu;		/* count any extra framing */
	size_t rx_urb_size;	/* size for rx urbs */

	unsigned long flags;

	wait_queue_head_t *wait;
	unsigned char suspend_count;

	unsigned maxpacket;
	struct timer_list delay;
	struct timer_list stat_monitor;

	unsigned long data[5];

	int link_on;
	u8 mdix_ctrl;

	u32 chipid;
	u32 chiprev;
	struct mii_bus *mdiobus;

	int fc_autoneg;
	u8 fc_request_control;

	int delta;
	struct statstage stats;

	struct irq_domain_data domain_data;
};

/* use ethtool to change the level for any given device */
static int msg_level = -1;
module_param(msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Override default message level");
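
/* Register access is done with USB vendor-specific control transfers. The
 * 4-byte transfer buffer is kmalloc'd rather than placed on the stack
 * because the USB core requires DMA-able memory for transfer buffers.
 */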
static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
{
	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_READ_REGISTER,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
	if (likely(ret >= 0)) {
		le32_to_cpus(buf);
		*data = *buf;
	} else {
		netdev_warn(dev->net,
			    "Failed to read register index 0x%08x. ret = %d",
			    index, ret);
	}

	kfree(buf);

	return ret;
}
static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
{
	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;

	*buf = data;
	cpu_to_le32s(buf);

	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_WRITE_REGISTER,
			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
	if (unlikely(ret < 0)) {
		netdev_warn(dev->net,
			    "Failed to write register index 0x%08x. ret = %d",
			    index, ret);
	}

	kfree(buf);

	return ret;
}
static int lan78xx_read_stats(struct lan78xx_net *dev,
			      struct lan78xx_statstage *data)
{
	int ret = 0;
	int i;
	struct lan78xx_statstage *stats;
	u32 *src;
	u32 *dst;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev,
			      usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_GET_STATS,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0,
			      0,
			      (void *)stats,
			      sizeof(*stats),
			      USB_CTRL_SET_TIMEOUT);
	if (likely(ret >= 0)) {
		src = (u32 *)stats;
		dst = (u32 *)data;
		for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
			le32_to_cpus(&src[i]);
			dst[i] = src[i];
		}
	} else {
		netdev_warn(dev->net,
			    "Failed to read stat ret = 0x%x", ret);
	}

	kfree(stats);

	return ret;
}
#define check_counter_rollover(struct1, dev_stats, member) {	\
	if (struct1->member < dev_stats.saved.member)		\
		dev_stats.rollover_count.member++;		\
	}
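
/* The hardware statistics counters are 32 bits wide and wrap silently.
 * A snapshot of the previous readout is kept in dev->stats.saved, so a
 * value that decreased since the last readout is taken as a rollover and
 * counted per member in dev->stats.rollover_count.
 */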
static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
					struct lan78xx_statstage *stats)
{
	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
	check_counter_rollover(stats, dev->stats, rx_pause_frames);
	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
	check_counter_rollover(stats, dev->stats, tx_single_collisions);
	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
	check_counter_rollover(stats, dev->stats, tx_late_collisions);
	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
	check_counter_rollover(stats, dev->stats, tx_pause_frames);
	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);

	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
}
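
/* Fold the current 32-bit hardware counters and the accumulated rollover
 * counts into the 64-bit values reported to ethtool:
 * curr = raw + rollovers * (max + 1).
 */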
static void lan78xx_update_stats(struct lan78xx_net *dev)
{
	u32 *p, *count, *max;
	u64 *data;
	int i;
	struct lan78xx_statstage lan78xx_stats;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	p = (u32 *)&lan78xx_stats;
	count = (u32 *)&dev->stats.rollover_count;
	max = (u32 *)&dev->stats.rollover_max;
	data = (u64 *)&dev->stats.curr_stat;

	mutex_lock(&dev->stats.access_lock);

	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
		lan78xx_check_stat_rollover(dev, &lan78xx_stats);

	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));

	mutex_unlock(&dev->stats.access_lock);

	usb_autopm_put_interface(dev->intf);
}
/* Loop until the read is completed, with timeout; called with phy_mutex held */
static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, MII_ACC, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & MII_ACC_MII_BUSY_))
			return 0;
	} while (!time_after(jiffies, start_time + HZ));

	return -EIO;
}
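
/* Build a MII_ACC register value: PHY address and register index in their
 * respective fields, the read/write direction bit, and the BUSY bit that
 * kicks off the transaction.
 */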
static inline u32 mii_access(int id, int index, int read)
{
	u32 ret;

	ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
	ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
	if (read)
		ret |= MII_ACC_MII_READ_;
	else
		ret |= MII_ACC_MII_WRITE_;
	ret |= MII_ACC_MII_BUSY_;

	return ret;
}

static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & E2P_CMD_EPC_BUSY_) ||
		    (val & E2P_CMD_EPC_TIMEOUT_))
			break;
		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
		netdev_warn(dev->net, "EEPROM read operation timeout");
		return -EIO;
	}

	return 0;
}
static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & E2P_CMD_EPC_BUSY_))
			return 0;

		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	netdev_warn(dev->net, "EEPROM is busy");
	return -EIO;
}
static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				   u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;
	int retval;

	/* depends on chip, some EEPROM pins are muxed with LED function.
	 * disable & restore LED function to access EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	if (retval)
		return retval;

	for (i = 0; i < length; i++) {
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (unlikely(ret < 0)) {
			retval = -EIO;
			goto exit;
		}

		retval = lan78xx_wait_eeprom(dev);
		if (retval < 0)
			goto exit;

		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
		if (unlikely(ret < 0)) {
			retval = -EIO;
			goto exit;
		}

		data[i] = val & 0xFF;
		offset++;
	}

	retval = 0;
exit:
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);

	return retval;
}
static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
			       u32 length, u8 *data)
{
	u8 sig;
	int ret;

	ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
	if ((ret == 0) && (sig == EEPROM_INDICATOR))
		ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
	else
		ret = -EINVAL;

	return ret;
}
static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;
	int retval;

	/* depends on chip, some EEPROM pins are muxed with LED function.
	 * disable & restore LED function to access EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	if (retval)
		goto exit;

	/* Issue write/erase enable command */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (unlikely(ret < 0)) {
		retval = -EIO;
		goto exit;
	}

	retval = lan78xx_wait_eeprom(dev);
	if (retval < 0)
		goto exit;

	for (i = 0; i < length; i++) {
		/* Fill data register */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		/* Send "write" command */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		retval = lan78xx_wait_eeprom(dev);
		if (retval < 0)
			goto exit;

		offset++;
	}

	retval = 0;
exit:
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);

	return retval;
}
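
/* OTP access sequence: power up the OTP block first (clear OTP_PWR_DN and
 * poll until the bit reads back clear), then for each byte program the
 * address registers, start a command via OTP_CMD_GO and poll OTP_STATUS
 * until the block is no longer busy.
 */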
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));

		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);

		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}
static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
				 u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN completion");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	/* set to BYTE program mode */
	ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));
		ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
		ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "Timeout on OTP_STATUS completion");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);
	}

	return 0;
}
static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
			    u32 length, u8 *data)
{
	u8 sig;
	int ret;

	ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);

	if (ret == 0) {
		if (sig == OTP_INDICATOR_2)
			offset += 0x100;
		else if (sig != OTP_INDICATOR_1)
			ret = -EINVAL;
		if (!ret)
			ret = lan78xx_read_raw_otp(dev, offset, length, data);
	}

	return ret;
}
static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
{
	int i, ret;

	for (i = 0; i < 100; i++) {
		u32 dp_sel;

		ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
		if (unlikely(ret < 0))
			return -EIO;

		if (dp_sel & DP_SEL_DPRDY_)
			return 0;

		usleep_range(40, 100);
	}

	netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");

	return -EIO;
}
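
/* Internal RAM blocks such as the VLAN/multicast hash filter are reached
 * through the indirect dataport: select the RAM with DP_SEL, then for each
 * word write DP_ADDR/DP_DATA, issue DP_CMD_WRITE_ and wait for
 * DP_SEL_DPRDY_ before the next access.
 */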
static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
				  u32 addr, u32 length, u32 *buf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 dp_sel;
	int i, ret;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return 0;

	mutex_lock(&pdata->dataport_mutex);

	ret = lan78xx_dataport_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);

	dp_sel &= ~DP_SEL_RSEL_MASK_;
	dp_sel |= ram_select;
	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);

		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);

		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);

		ret = lan78xx_dataport_wait_not_busy(dev);
		if (ret < 0)
			goto done;
	}

done:
	mutex_unlock(&pdata->dataport_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}
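
/* A perfect-filter (MAF) entry is two registers: the low word holds the
 * first four bytes of the MAC address, the high word holds the last two
 * bytes plus the valid and destination-address-type flags.
 */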
static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
				    int index, u8 addr[ETH_ALEN])
{
	u32 temp;

	if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
		temp = addr[3];
		temp = addr[2] | (temp << 8);
		temp = addr[1] | (temp << 8);
		temp = addr[0] | (temp << 8);
		pdata->pfilter_table[index][1] = temp;
		temp = addr[5];
		temp = addr[4] | (temp << 8);
		temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
		pdata->pfilter_table[index][0] = temp;
	}
}

/* returns hash bit number for given MAC address */
static inline u32 lan78xx_hash(char addr[ETH_ALEN])
{
	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
}

static void lan78xx_deferred_multicast_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_multicast);
	struct lan78xx_net *dev = pdata->dev;
	int i;
	int ret;

	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
		  pdata->rfe_ctl);

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);

	for (i = 1; i < NUM_OF_MAF; i++) {
		ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
		ret = lan78xx_write_reg(dev, MAF_LO(i),
					pdata->pfilter_table[i][1]);
		ret = lan78xx_write_reg(dev, MAF_HI(i),
					pdata->pfilter_table[i][0]);
	}

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
}
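
/* Receive filtering policy: broadcast is always accepted; the first 32
 * multicast addresses go into perfect-filter slots (slot 0 is reserved for
 * the interface's own address) and any remainder falls back to the 512-bit
 * multicast hash. The actual register writes are deferred to a work item
 * because this callback runs under a spinlock.
 */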
static void lan78xx_set_multicast(struct net_device *netdev)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);

	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
		pdata->mchash_table[i] = 0;
	/* pfilter_table[0] has own HW address */
	for (i = 1; i < NUM_OF_MAF; i++) {
		pdata->pfilter_table[i][0] =
		pdata->pfilter_table[i][1] = 0;
	}

	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;

	if (dev->net->flags & IFF_PROMISC) {
		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
	} else {
		if (dev->net->flags & IFF_ALLMULTI) {
			netif_dbg(dev, drv, dev->net,
				  "receive all multicast enabled");
			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
		}
	}

	if (netdev_mc_count(dev->net)) {
		struct netdev_hw_addr *ha;
		int i;

		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");

		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;

		i = 1;
		netdev_for_each_mc_addr(ha, netdev) {
			/* set first 32 into Perfect Filter */
			if (i < 33) {
				lan78xx_set_addr_filter(pdata, i, ha->addr);
			} else {
				u32 bitnum = lan78xx_hash(ha->addr);

				pdata->mchash_table[bitnum / 32] |=
						(1 << (bitnum % 32));
				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
			}
			i++;
		}
	}

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_multicast);
}
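
/* The FCT_FLOW values program the FIFO on/off thresholds for generating
 * pause frames; 0x817 (SuperSpeed) and 0x211 (high speed) are presumably
 * tuned to how quickly the FIFO drains at each USB link rate.
 */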
static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
				      u16 lcladv, u16 rmtadv)
{
	u32 flow = 0, fct_flow = 0;
	int ret;
	u8 cap;

	if (dev->fc_autoneg)
		cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	else
		cap = dev->fc_request_control;

	if (cap & FLOW_CTRL_TX)
		flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);

	if (cap & FLOW_CTRL_RX)
		flow |= FLOW_CR_RX_FCEN_;

	if (dev->udev->speed == USB_SPEED_SUPER)
		fct_flow = 0x817;
	else if (dev->udev->speed == USB_SPEED_HIGH)
		fct_flow = 0x211;

	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));

	ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);

	/* threshold value should be set before enabling flow */
	ret = lan78xx_write_reg(dev, FLOW, flow);

	return 0;
}
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ladv, radv, ret;
	u32 buf;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))
		return -EIO;

	phy_read_status(phydev);

	if (!phydev->link && dev->link_on) {
		dev->link_on = false;

		/* reset MAC */
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		if (unlikely(ret < 0))
			return -EIO;
		buf |= MAC_CR_RST_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
		if (unlikely(ret < 0))
			return -EIO;

		del_timer(&dev->stat_monitor);
	} else if (phydev->link && !dev->link_on) {
		dev->link_on = true;

		phy_ethtool_ksettings_get(phydev, &ecmd);

		if (dev->udev->speed == USB_SPEED_SUPER) {
			if (ecmd.base.speed == 1000) {
				/* disable U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				/* enable U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			} else {
				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			}
		}

		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
						 radv);

		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}
	}

	return ret;
}
/* some work can't be done in tasklets, so we use keventd
 *
 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
 * but tasklet_schedule() doesn't.  hope the failure is rare.
 */
static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
{
	set_bit(work, &dev->flags);
	if (!schedule_delayed_work(&dev->wq, 0))
		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
}

static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
{
	u32 intdata;

	if (urb->actual_length != 4) {
		netdev_warn(dev->net,
			    "unexpected urb length %d", urb->actual_length);
		return;
	}

	memcpy(&intdata, urb->transfer_buffer, 4);
	le32_to_cpus(&intdata);

	if (intdata & INT_ENP_PHY_INT) {
		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
		lan78xx_defer_kevent(dev, EVENT_LINK_RESET);

		if (dev->domain_data.phyirq > 0)
			generic_handle_irq(dev->domain_data.phyirq);
	} else
		netdev_warn(dev->net,
			    "unexpected interrupt: 0x%08x\n", intdata);
}
static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}

static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	ee->magic = LAN78XX_EEPROM_MAGIC;

	return lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
}

static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	/* Allow entire eeprom update only */
	if ((ee->magic == LAN78XX_EEPROM_MAGIC) &&
	    (ee->offset == 0) &&
	    (ee->len == 512) &&
	    (data[0] == EEPROM_INDICATOR))
		return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
	else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
		 (ee->offset == 0) &&
		 (ee->len == 512) &&
		 (data[0] == OTP_INDICATOR_1))
		return lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);

	return -EINVAL;
}
static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
				u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
}

static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(lan78xx_gstrings);
	else
		return -EOPNOTSUPP;
}

static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	lan78xx_update_stats(dev);

	mutex_lock(&dev->stats.access_lock);
	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
	mutex_unlock(&dev->stats.access_lock);
}
static void lan78xx_get_wol(struct net_device *netdev,
			    struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;
	u32 buf;
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (unlikely(ret < 0)) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		if (buf & USB_CFG_RMT_WKP_) {
			wol->supported = WAKE_ALL;
			wol->wolopts = pdata->wol;
		} else {
			wol->supported = 0;
			wol->wolopts = 0;
		}
	}

	usb_autopm_put_interface(dev->intf);
}

static int lan78xx_set_wol(struct net_device *netdev,
			   struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	pdata->wol = 0;
	if (wol->wolopts & WAKE_UCAST)
		pdata->wol |= WAKE_UCAST;
	if (wol->wolopts & WAKE_MCAST)
		pdata->wol |= WAKE_MCAST;
	if (wol->wolopts & WAKE_BCAST)
		pdata->wol |= WAKE_BCAST;
	if (wol->wolopts & WAKE_MAGIC)
		pdata->wol |= WAKE_MAGIC;
	if (wol->wolopts & WAKE_PHY)
		pdata->wol |= WAKE_PHY;
	if (wol->wolopts & WAKE_ARP)
		pdata->wol |= WAKE_ARP;

	device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);

	phy_ethtool_set_wol(netdev->phydev, wol);

	usb_autopm_put_interface(dev->intf);

	return ret;
}
static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = phy_ethtool_get_eee(phydev, edata);
	if (ret < 0)
		goto exit;

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (buf & MAC_CR_EEE_EN_) {
		edata->eee_enabled = true;
		edata->eee_active = !!(edata->advertised &
				       edata->lp_advertised);
		edata->tx_lpi_enabled = true;
		/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
		edata->tx_lpi_timer = buf;
	} else {
		edata->eee_enabled = false;
		edata->eee_active = false;
		edata->tx_lpi_enabled = false;
		edata->tx_lpi_timer = 0;
	}

	ret = 0;
exit:
	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	if (edata->eee_enabled) {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf |= MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);

		phy_ethtool_set_eee(net->phydev, edata);

		buf = (u32)edata->tx_lpi_timer;
		ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
	} else {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf &= ~MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
	}

	usb_autopm_put_interface(dev->intf);

	return 0;
}
static u32 lan78xx_get_link(struct net_device *net)
{
	phy_read_status(net->phydev);

	return net->phydev->link;
}

static void lan78xx_get_drvinfo(struct net_device *net,
				struct ethtool_drvinfo *info)
{
	struct lan78xx_net *dev = netdev_priv(net);

	strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
	strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
}

static u32 lan78xx_get_msglevel(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return dev->msg_enable;
}

static void lan78xx_set_msglevel(struct net_device *net, u32 level)
{
	struct lan78xx_net *dev = netdev_priv(net);

	dev->msg_enable = level;
}

static int lan78xx_get_link_ksettings(struct net_device *net,
				      struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = phy_ethtool_ksettings_get(phydev, cmd);

	usb_autopm_put_interface(dev->intf);

	return ret;
}
static int lan78xx_set_link_ksettings(struct net_device *net,
				      const struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret = 0;
	int temp;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	/* change speed & duplex */
	ret = phy_ethtool_ksettings_set(phydev, cmd);

	if (!cmd->base.autoneg) {
		/* force link down */
		temp = phy_read(phydev, MII_BMCR);
		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
		mdelay(1);
		phy_write(phydev, MII_BMCR, temp);
	}

	usb_autopm_put_interface(dev->intf);

	return ret;
}
static void lan78xx_get_pause(struct net_device *net,
			      struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_link_ksettings ecmd;

	phy_ethtool_ksettings_get(phydev, &ecmd);

	pause->autoneg = dev->fc_autoneg;

	if (dev->fc_request_control & FLOW_CTRL_TX)
		pause->tx_pause = 1;

	if (dev->fc_request_control & FLOW_CTRL_RX)
		pause->rx_pause = 1;
}

static int lan78xx_set_pause(struct net_device *net,
			     struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ret;

	phy_ethtool_ksettings_get(phydev, &ecmd);

	if (pause->autoneg && !ecmd.base.autoneg) {
		ret = -EINVAL;
		goto exit;
	}

	dev->fc_request_control = 0;
	if (pause->rx_pause)
		dev->fc_request_control |= FLOW_CTRL_RX;

	if (pause->tx_pause)
		dev->fc_request_control |= FLOW_CTRL_TX;

	if (ecmd.base.autoneg) {
		u32 mii_adv;
		u32 advertising;

		ethtool_convert_link_mode_to_legacy_u32(
			&advertising, ecmd.link_modes.advertising);

		advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
		mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
		advertising |= mii_adv_to_ethtool_adv_t(mii_adv);

		ethtool_convert_legacy_u32_to_link_mode(
			ecmd.link_modes.advertising, advertising);

		phy_ethtool_ksettings_set(phydev, &ecmd);
	}

	dev->fc_autoneg = pause->autoneg;

	ret = 0;
exit:
	return ret;
}
static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link	= lan78xx_get_link,
	.nway_reset	= phy_ethtool_nway_reset,
	.get_drvinfo	= lan78xx_get_drvinfo,
	.get_msglevel	= lan78xx_get_msglevel,
	.set_msglevel	= lan78xx_set_msglevel,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom	= lan78xx_ethtool_get_eeprom,
	.set_eeprom	= lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.get_strings	= lan78xx_get_strings,
	.get_wol	= lan78xx_get_wol,
	.set_wol	= lan78xx_set_wol,
	.get_eee	= lan78xx_get_eee,
	.set_eee	= lan78xx_set_eee,
	.get_pauseparam	= lan78xx_get_pause,
	.set_pauseparam	= lan78xx_set_pause,
	.get_link_ksettings = lan78xx_get_link_ksettings,
	.set_link_ksettings = lan78xx_set_link_ksettings,
};
static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
{
	if (!netif_running(netdev))
		return -EINVAL;

	return phy_mii_ioctl(netdev->phydev, rq, cmd);
}
static void lan78xx_init_mac_address(struct lan78xx_net *dev)
{
	u32 addr_lo, addr_hi;
	int ret;
	u8 addr[6];

	ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
	ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);

	addr[0] = addr_lo & 0xFF;
	addr[1] = (addr_lo >> 8) & 0xFF;
	addr[2] = (addr_lo >> 16) & 0xFF;
	addr[3] = (addr_lo >> 24) & 0xFF;
	addr[4] = addr_hi & 0xFF;
	addr[5] = (addr_hi >> 8) & 0xFF;

	if (!is_valid_ether_addr(addr)) {
		/* reading mac address from EEPROM or OTP */
		if ((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
					 addr) == 0) ||
		    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
				      addr) == 0)) {
			if (is_valid_ether_addr(addr)) {
				/* eeprom values are valid so use them */
				netif_dbg(dev, ifup, dev->net,
					  "MAC address read from EEPROM");
			} else {
				/* generate random MAC */
				random_ether_addr(addr);
				netif_dbg(dev, ifup, dev->net,
					  "MAC address set to random addr");
			}

			addr_lo = addr[0] | (addr[1] << 8) |
				  (addr[2] << 16) | (addr[3] << 24);
			addr_hi = addr[4] | (addr[5] << 8);

			ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
			ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
		} else {
			/* generate random MAC */
			random_ether_addr(addr);
			netif_dbg(dev, ifup, dev->net,
				  "MAC address set to random addr");
		}
	}

	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);

	ether_addr_copy(dev->net->dev_addr, addr);
}
/* MDIO read and write wrappers for phylib */
static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* set the address, index & direction (read from PHY) */
	addr = mii_access(phy_id, idx, MII_READ);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, MII_DATA, &val);

	ret = (int)(val & 0xFFFF);

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);
	return ret;
}

static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
				 u16 regval)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	val = (u32)regval;
	ret = lan78xx_write_reg(dev, MII_DATA, val);

	/* set the address, index & direction (write to PHY) */
	addr = mii_access(phy_id, idx, MII_WRITE);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);
	return 0;
}
static int lan78xx_mdio_init(struct lan78xx_net *dev)
{
	int ret;

	dev->mdiobus = mdiobus_alloc();
	if (!dev->mdiobus) {
		netdev_err(dev->net, "can't allocate MDIO bus\n");
		return -ENOMEM;
	}

	dev->mdiobus->priv = (void *)dev;
	dev->mdiobus->read = lan78xx_mdiobus_read;
	dev->mdiobus->write = lan78xx_mdiobus_write;
	dev->mdiobus->name = "lan78xx-mdiobus";

	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
		 dev->udev->bus->busnum, dev->udev->devnum);

	switch (dev->chipid) {
	case ID_REV_CHIP_ID_7800_:
	case ID_REV_CHIP_ID_7850_:
		/* set to internal PHY id */
		dev->mdiobus->phy_mask = ~(1 << 1);
		break;
	}

	ret = mdiobus_register(dev->mdiobus);
	if (ret) {
		netdev_err(dev->net, "can't register MDIO bus\n");
		goto exit1;
	}

	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
	return 0;
exit1:
	mdiobus_free(dev->mdiobus);
	return ret;
}

static void lan78xx_remove_mdio(struct lan78xx_net *dev)
{
	mdiobus_unregister(dev->mdiobus);
	mdiobus_free(dev->mdiobus);
}
static void lan78xx_link_status_change(struct net_device *net)
{
	struct phy_device *phydev = net->phydev;
	int ret, temp;

	/* In forced 100 full/half mode the chip may fail to set the speed
	 * correctly when the cable is switched between a long (~50+ m) and
	 * a short one. As a workaround, set the speed to 10 before setting
	 * it to 100 in forced 100 full/half mode.
	 */
	if (!phydev->autoneg && (phydev->speed == 100)) {
		/* disable phy interrupt */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);

		temp = phy_read(phydev, MII_BMCR);
		temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
		phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
		temp |= BMCR_SPEED100;
		phy_write(phydev, MII_BMCR, temp); /* set to 100 later */

		/* clear pending interrupt generated while workaround */
		temp = phy_read(phydev, LAN88XX_INT_STS);

		/* enable phy interrupt back */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
	}
}
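
/* The bits of the device's interrupt endpoint status word are exposed as a
 * small IRQ domain, so that the PHY interrupt (INT_EP_PHY) can be handed to
 * phylib as an ordinary Linux interrupt.
 */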
static int irq_map(struct irq_domain *d, unsigned int irq,
		   irq_hw_number_t hwirq)
{
	struct irq_domain_data *data = d->host_data;

	irq_set_chip_data(irq, data);
	irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
	irq_set_noprobe(irq);

	return 0;
}

static void irq_unmap(struct irq_domain *d, unsigned int irq)
{
	irq_set_chip_and_handler(irq, NULL, NULL);
	irq_set_chip_data(irq, NULL);
}

static const struct irq_domain_ops chip_domain_ops = {
	.map	= irq_map,
	.unmap	= irq_unmap,
};
static void lan78xx_irq_mask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
}

static void lan78xx_irq_unmask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable |= BIT(irqd_to_hwirq(irqd));
}

static void lan78xx_irq_bus_lock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	mutex_lock(&data->irq_lock);
}

static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
	struct lan78xx_net *dev =
			container_of(data, struct lan78xx_net, domain_data);
	u32 buf;
	int ret;

	/* call register access here because irq_bus_lock & irq_bus_sync_unlock
	 * are the only two callbacks executed in non-atomic context.
	 */
	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	if (buf != data->irqenable)
		ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);

	mutex_unlock(&data->irq_lock);
}

static struct irq_chip lan78xx_irqchip = {
	.name			= "lan78xx-irqs",
	.irq_mask		= lan78xx_irq_mask,
	.irq_unmask		= lan78xx_irq_unmask,
	.irq_bus_lock		= lan78xx_irq_bus_lock,
	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
};
static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
{
	struct device_node *of_node;
	struct irq_domain *irqdomain;
	unsigned int irqmap = 0;
	u32 buf;
	int ret = 0;

	of_node = dev->udev->dev.parent->of_node;

	mutex_init(&dev->domain_data.irq_lock);

	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	dev->domain_data.irqenable = buf;

	dev->domain_data.irqchip = &lan78xx_irqchip;
	dev->domain_data.irq_handler = handle_simple_irq;

	irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
					  &chip_domain_ops, &dev->domain_data);
	if (irqdomain) {
		/* create mapping for PHY interrupt */
		irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
		if (!irqmap) {
			irq_domain_remove(irqdomain);

			irqdomain = NULL;
			ret = -EINVAL;
		}
	} else {
		ret = -EINVAL;
	}

	dev->domain_data.irqdomain = irqdomain;
	dev->domain_data.phyirq = irqmap;

	return ret;
}

static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
{
	if (dev->domain_data.phyirq > 0) {
		irq_dispose_mapping(dev->domain_data.phyirq);

		if (dev->domain_data.irqdomain)
			irq_domain_remove(dev->domain_data.irqdomain);
	}
	dev->domain_data.phyirq = 0;
	dev->domain_data.irqdomain = NULL;
}
static int lan78xx_phy_init(struct lan78xx_net *dev)
{
	int ret;
	u32 mii_adv;
	struct phy_device *phydev = dev->net->phydev;

	phydev = phy_find_first(dev->mdiobus);
	if (!phydev) {
		netdev_err(dev->net, "no PHY found\n");
		return -EIO;
	}

	/* if phyirq is not set, use polling mode in phylib */
	if (dev->domain_data.phyirq > 0)
		phydev->irq = dev->domain_data.phyirq;
	else
		phydev->irq = 0;
	netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);

	/* set to AUTOMDIX */
	phydev->mdix = ETH_TP_MDI_AUTO;

	ret = phy_connect_direct(dev->net, phydev,
				 lan78xx_link_status_change,
				 PHY_INTERFACE_MODE_GMII);
	if (ret) {
		netdev_err(dev->net, "can't attach PHY to %s\n",
			   dev->mdiobus->id);
		return -EIO;
	}

	/* MAC doesn't support 1000T Half */
	phydev->supported &= ~SUPPORTED_1000baseT_Half;

	/* support both flow controls */
	dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
	phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
	mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
	phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv);

	genphy_config_aneg(phydev);

	dev->fc_autoneg = phydev->autoneg;

	phy_start(phydev);

	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");

	return 0;
}
static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
{
	int ret = 0;
	u32 buf;
	bool rxenabled;

	ret = lan78xx_read_reg(dev, MAC_RX, &buf);

	rxenabled = ((buf & MAC_RX_RXEN_) != 0);

	if (rxenabled) {
		buf &= ~MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);
	}

	/* add 4 to size for FCS */
	buf &= ~MAC_RX_MAX_SIZE_MASK_;
	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);

	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	if (rxenabled) {
		buf |= MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);
	}

	return 0;
}
static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned long flags;
	int count = 0;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data *entry;
		struct urb *urb;
		int ret;

		skb_queue_walk(q, skb) {
			entry = (struct skb_data *)skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/* Take a reference on the URB so it cannot be freed during
		 * usb_unlink_urb, which could otherwise trigger a
		 * use-after-free inside usb_unlink_urb, since unlinking
		 * always races with the .complete handler (including
		 * defer_bh).
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		/* during some PM-driven resume scenarios,
		 * these (async) unlinks complete immediately
		 */
		ret = usb_unlink_urb(urb);
		if (ret != -EINPROGRESS && ret != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return count;
}
static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ll_mtu = new_mtu + netdev->hard_header_len;
	int old_hard_mtu = dev->hard_mtu;
	int old_rx_urb_size = dev->rx_urb_size;
	int ret;

	/* no second zero-length packet read wanted after mtu-sized packets */
	if ((ll_mtu % dev->maxpacket) == 0)
		return -EDOM;

	ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);

	netdev->mtu = new_mtu;

	dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
	if (dev->rx_urb_size == old_hard_mtu) {
		dev->rx_urb_size = dev->hard_mtu;
		if (dev->rx_urb_size > old_rx_urb_size) {
			if (netif_running(dev->net)) {
				unlink_urbs(dev, &dev->rxq);
				tasklet_schedule(&dev->bh);
			}
		}
	}

	return 0;
}
2091 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2093 struct lan78xx_net *dev = netdev_priv(netdev);
2094 struct sockaddr *addr = p;
2095 u32 addr_lo, addr_hi;
2096 int ret;
2098 if (netif_running(netdev))
2099 return -EBUSY;
2101 if (!is_valid_ether_addr(addr->sa_data))
2102 return -EADDRNOTAVAIL;
2104 ether_addr_copy(netdev->dev_addr, addr->sa_data);
2106 addr_lo = netdev->dev_addr[0] |
2107 netdev->dev_addr[1] << 8 |
2108 netdev->dev_addr[2] << 16 |
2109 netdev->dev_addr[3] << 24;
2110 addr_hi = netdev->dev_addr[4] |
2111 netdev->dev_addr[5] << 8;
2113 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2114 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2116 return 0;
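For reference, the RX_ADDRL/RX_ADDRH packing above is plain little-endian byte order: octet 0 of the MAC address occupies the least significant byte of the low word and octets 4-5 the low half of the high word. A standalone sketch; pack_mac() and the sample address are illustrative only:

#include <assert.h>
#include <stdint.h>

static void pack_mac(const uint8_t mac[6], uint32_t *lo, uint32_t *hi)
{
        /* mirrors the shifts used for RX_ADDRL / RX_ADDRH above */
        *lo = mac[0] | mac[1] << 8 | mac[2] << 16 | (uint32_t)mac[3] << 24;
        *hi = mac[4] | mac[5] << 8;
}

int main(void)
{
        const uint8_t mac[6] = { 0x00, 0x80, 0x0F, 0x12, 0x34, 0x56 };
        uint32_t lo, hi;

        pack_mac(mac, &lo, &hi);
        assert(lo == 0x120F8000 && hi == 0x5634);
        return 0;
}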
2119 /* Enable or disable Rx checksum offload engine */
2120 static int lan78xx_set_features(struct net_device *netdev,
2121 netdev_features_t features)
2123 struct lan78xx_net *dev = netdev_priv(netdev);
2124 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2125 unsigned long flags;
2126 int ret;
2128 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2130 if (features & NETIF_F_RXCSUM) {
2131 pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2132 pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2133 } else {
2134 pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2135 pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2138 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2139 pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2140 else
2141 pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2143 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2145 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2147 return 0;
2150 static void lan78xx_deferred_vlan_write(struct work_struct *param)
2152 struct lan78xx_priv *pdata =
2153 container_of(param, struct lan78xx_priv, set_vlan);
2154 struct lan78xx_net *dev = pdata->dev;
2156 lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2157 DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
2160 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2161 __be16 proto, u16 vid)
2163 struct lan78xx_net *dev = netdev_priv(netdev);
2164 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2165 u16 vid_bit_index;
2166 u16 vid_dword_index;
2168 vid_dword_index = (vid >> 5) & 0x7F;
2169 vid_bit_index = vid & 0x1F;
2171 pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2173 /* defer register writes to a sleepable context */
2174 schedule_work(&pdata->set_vlan);
2176 return 0;
2179 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2180 __be16 proto, u16 vid)
2182 struct lan78xx_net *dev = netdev_priv(netdev);
2183 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2184 u16 vid_bit_index;
2185 u16 vid_dword_index;
2187 vid_dword_index = (vid >> 5) & 0x7F;
2188 vid_bit_index = vid & 0x1F;
2190 pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2192 /* defer register writes to a sleepable context */
2193 schedule_work(&pdata->set_vlan);
2195 return 0;
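Both VLAN paths above index a 4096-bit table stored as 128 32-bit words: bits 5-11 of the VID select the word (hence the 0x7F mask) and bits 0-4 select the bit within it. A standalone sketch of the same arithmetic; vid_set() and this local vlan_table are illustrative stand-ins for pdata->vlan_table:

#include <assert.h>
#include <stdint.h>

static uint32_t vlan_table[128];

static void vid_set(uint16_t vid, int on)
{
        uint16_t word = (vid >> 5) & 0x7F;
        uint16_t bit  = vid & 0x1F;

        if (on)
                vlan_table[word] |= 1u << bit;
        else
                vlan_table[word] &= ~(1u << bit);
}

int main(void)
{
        vid_set(100, 1);
        assert(vlan_table[3] & (1u << 4)); /* 100 = 3 * 32 + 4 */
        vid_set(100, 0);
        assert(vlan_table[3] == 0);
        return 0;
}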
2198 static void lan78xx_init_ltm(struct lan78xx_net *dev)
2200 int ret;
2201 u32 buf;
2202 u32 regs[6] = { 0 };
2204 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
2205 if (buf & USB_CFG1_LTM_ENABLE_) {
2206 u8 temp[2];
2207 /* Get values from EEPROM first */
2208 if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
2209 if (temp[0] == 24) {
2210 ret = lan78xx_read_raw_eeprom(dev,
2211 temp[1] * 2,
2212 24,
2213 (u8 *)regs);
2214 if (ret < 0)
2215 return;
2217 } else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
2218 if (temp[0] == 24) {
2219 ret = lan78xx_read_raw_otp(dev,
2220 temp[1] * 2,
2221 24,
2222 (u8 *)regs);
2223 if (ret < 0)
2224 return;
2229 lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
2230 lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
2231 lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
2232 lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
2233 lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
2234 lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
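The EEPROM/OTP probing above expects a two-byte LTM descriptor at offset 0x3F: byte 0 must be 24 (six u32 registers, four bytes each) and byte 1 is a word offset that is doubled to locate the register block. A small host-side sketch of that parse, with parse_ltm_desc() and struct ltm_desc as illustrative names:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct ltm_desc {
        size_t byte_off; /* where the six register values start */
        size_t len;      /* must be 24 to be considered valid */
};

static int parse_ltm_desc(const uint8_t hdr[2], struct ltm_desc *out)
{
        if (hdr[0] != 24) /* 6 registers x 4 bytes */
                return -1;
        out->len = hdr[0];
        out->byte_off = (size_t)hdr[1] * 2; /* word offset -> byte offset */
        return 0;
}

int main(void)
{
        const uint8_t hdr[2] = { 24, 0x10 }; /* hypothetical EEPROM bytes */
        struct ltm_desc d;

        assert(parse_ltm_desc(hdr, &d) == 0 && d.byte_off == 0x20);
        return 0;
}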
2237 static int lan78xx_reset(struct lan78xx_net *dev)
2239 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2240 u32 buf;
2241 int ret = 0;
2242 unsigned long timeout;
2244 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2245 buf |= HW_CFG_LRST_;
2246 ret = lan78xx_write_reg(dev, HW_CFG, buf);
2248 timeout = jiffies + HZ;
2249 do {
2250 mdelay(1);
2251 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2252 if (time_after(jiffies, timeout)) {
2253 netdev_warn(dev->net,
2254 "timeout on completion of LiteReset");
2255 return -EIO;
2257 } while (buf & HW_CFG_LRST_);
2259 lan78xx_init_mac_address(dev);
2261 /* save DEVID for later usage */
2262 ret = lan78xx_read_reg(dev, ID_REV, &buf);
2263 dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
2264 dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
2266 /* Respond to the IN token with a NAK */
2267 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2268 buf |= USB_CFG_BIR_;
2269 ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2271 /* Init LTM */
2272 lan78xx_init_ltm(dev);
2274 dev->net->hard_header_len += TX_OVERHEAD;
2275 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
2277 if (dev->udev->speed == USB_SPEED_SUPER) {
2278 buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
2279 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2280 dev->rx_qlen = 4;
2281 dev->tx_qlen = 4;
2282 } else if (dev->udev->speed == USB_SPEED_HIGH) {
2283 buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
2284 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2285 dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
2286 dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
2287 } else {
2288 buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
2289 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2290 dev->rx_qlen = 4;
2293 ret = lan78xx_write_reg(dev, BURST_CAP, buf);
2294 ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
2296 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2297 buf |= HW_CFG_MEF_;
2298 ret = lan78xx_write_reg(dev, HW_CFG, buf);
2300 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2301 buf |= USB_CFG_BCE_;
2302 ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2304 /* set FIFO sizes */
2305 buf = (MAX_RX_FIFO_SIZE - 512) / 512;
2306 ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
2308 buf = (MAX_TX_FIFO_SIZE - 512) / 512;
2309 ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
2311 ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
2312 ret = lan78xx_write_reg(dev, FLOW, 0);
2313 ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
2315 /* Don't need rfe_ctl_lock during initialisation */
2316 ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
2317 pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
2318 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2320 /* Enable or disable checksum offload engines */
2321 lan78xx_set_features(dev->net, dev->net->features);
2323 lan78xx_set_multicast(dev->net);
2325 /* reset PHY */
2326 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2327 buf |= PMT_CTL_PHY_RST_;
2328 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
2330 timeout = jiffies + HZ;
2331 do {
2332 mdelay(1);
2333 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2334 if (time_after(jiffies, timeout)) {
2335 netdev_warn(dev->net, "timeout waiting for PHY Reset");
2336 return -EIO;
2338 } while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
2340 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
2341 buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
2342 ret = lan78xx_write_reg(dev, MAC_CR, buf);
2344 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
2345 buf |= MAC_TX_TXEN_;
2346 ret = lan78xx_write_reg(dev, MAC_TX, buf);
2348 ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
2349 buf |= FCT_TX_CTL_EN_;
2350 ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
2352 ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);
2354 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2355 buf |= MAC_RX_RXEN_;
2356 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2358 ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
2359 buf |= FCT_RX_CTL_EN_;
2360 ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);
2362 return 0;
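The BURST_CAP value programmed above is the 12 KiB default burst size expressed in bulk-endpoint packets, so the divisor tracks the negotiated USB speed. A standalone check of the three cases; the enum names here are illustrative shorthand for the *_USB_PKT_SIZE and DEFAULT_BURST_CAP_SIZE defines:

#include <assert.h>

enum { SS_PKT = 1024, HS_PKT = 512, FS_PKT = 64, BURST = 12 * 1024 };

int main(void)
{
        assert(BURST / SS_PKT == 12);  /* SuperSpeed: 12 packets per burst */
        assert(BURST / HS_PKT == 24);  /* high speed: 24 packets per burst */
        assert(BURST / FS_PKT == 192); /* full speed: 192 packets per burst */
        return 0;
}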
2365 static void lan78xx_init_stats(struct lan78xx_net *dev)
2367 u32 *p;
2368 int i;
2370 /* initialize for stats update
2371 * some counters are 20 bits wide and some are 32 bits
2372 */
2373 p = (u32 *)&dev->stats.rollover_max;
2374 for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
2375 p[i] = 0xFFFFF;
2377 dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
2378 dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
2379 dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
2380 dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
2381 dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
2382 dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
2383 dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
2384 dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
2385 dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
2386 dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
2388 lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
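The rollover_max values above record each hardware counter's width (0xFFFFF for the 20-bit counters, 0xFFFFFFFF for the byte-count and LPI-time counters) so the periodic update can compute wrap-safe deltas. A minimal sketch, assuming the update path takes the difference modulo that maximum; counter_delta() is illustrative, not a driver function:

#include <assert.h>
#include <stdint.h>

static uint32_t counter_delta(uint32_t now, uint32_t prev, uint32_t max)
{
        /* unsigned subtraction, then mask to the counter width */
        return (now - prev) & max;
}

int main(void)
{
        /* a 20-bit counter that wrapped from 0xFFFFE to 0x00003 */
        assert(counter_delta(0x00003, 0xFFFFE, 0xFFFFF) == 5);
        return 0;
}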
2391 static int lan78xx_open(struct net_device *net)
2393 struct lan78xx_net *dev = netdev_priv(net);
2394 int ret;
2396 ret = usb_autopm_get_interface(dev->intf);
2397 if (ret < 0)
2398 goto out;
2400 ret = lan78xx_reset(dev);
2401 if (ret < 0)
2402 goto done;
2404 ret = lan78xx_phy_init(dev);
2405 if (ret < 0)
2406 goto done;
2408 /* for Link Check */
2409 if (dev->urb_intr) {
2410 ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
2411 if (ret < 0) {
2412 netif_err(dev, ifup, dev->net,
2413 "intr submit %d\n", ret);
2414 goto done;
2418 lan78xx_init_stats(dev);
2420 set_bit(EVENT_DEV_OPEN, &dev->flags);
2422 netif_start_queue(net);
2424 dev->link_on = false;
2426 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
2427 done:
2428 usb_autopm_put_interface(dev->intf);
2430 out:
2431 return ret;
2434 static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
2436 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
2437 DECLARE_WAITQUEUE(wait, current);
2438 int temp;
2440 /* ensure there are no more active urbs */
2441 add_wait_queue(&unlink_wakeup, &wait);
2442 set_current_state(TASK_UNINTERRUPTIBLE);
2443 dev->wait = &unlink_wakeup;
2444 temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
2446 /* maybe wait for deletions to finish. */
2447 while (!skb_queue_empty(&dev->rxq) &&
2448 !skb_queue_empty(&dev->txq) &&
2449 !skb_queue_empty(&dev->done)) {
2450 schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
2451 set_current_state(TASK_UNINTERRUPTIBLE);
2452 netif_dbg(dev, ifdown, dev->net,
2453 "waited for %d urb completions\n", temp);
2455 set_current_state(TASK_RUNNING);
2456 dev->wait = NULL;
2457 remove_wait_queue(&unlink_wakeup, &wait);
2460 static int lan78xx_stop(struct net_device *net)
2462 struct lan78xx_net *dev = netdev_priv(net);
2464 if (timer_pending(&dev->stat_monitor))
2465 del_timer_sync(&dev->stat_monitor);
2467 phy_stop(net->phydev);
2468 phy_disconnect(net->phydev);
2469 net->phydev = NULL;
2471 clear_bit(EVENT_DEV_OPEN, &dev->flags);
2472 netif_stop_queue(net);
2474 netif_info(dev, ifdown, dev->net,
2475 "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
2476 net->stats.rx_packets, net->stats.tx_packets,
2477 net->stats.rx_errors, net->stats.tx_errors);
2479 lan78xx_terminate_urbs(dev);
2481 usb_kill_urb(dev->urb_intr);
2483 skb_queue_purge(&dev->rxq_pause);
2485 /* deferred work (task, timer, softirq) must also stop.
2486 * can't flush_scheduled_work() until we drop rtnl (later),
2487 * else workers could deadlock; so make workers a NOP.
2488 */
2489 dev->flags = 0;
2490 cancel_delayed_work_sync(&dev->wq);
2491 tasklet_kill(&dev->bh);
2493 usb_autopm_put_interface(dev->intf);
2495 return 0;
2498 static int lan78xx_linearize(struct sk_buff *skb)
2500 return skb_linearize(skb);
2503 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2504 struct sk_buff *skb, gfp_t flags)
2506 u32 tx_cmd_a, tx_cmd_b;
2508 if (skb_headroom(skb) < TX_OVERHEAD) {
2509 struct sk_buff *skb2;
2511 skb2 = skb_copy_expand(skb, TX_OVERHEAD, 0, flags);
2512 dev_kfree_skb_any(skb);
2513 skb = skb2;
2514 if (!skb)
2515 return NULL;
2518 if (lan78xx_linearize(skb) < 0)
2519 return NULL;
2521 tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2523 if (skb->ip_summed == CHECKSUM_PARTIAL)
2524 tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2526 tx_cmd_b = 0;
2527 if (skb_is_gso(skb)) {
2528 u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2530 tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2532 tx_cmd_a |= TX_CMD_A_LSO_;
2535 if (skb_vlan_tag_present(skb)) {
2536 tx_cmd_a |= TX_CMD_A_IVTG_;
2537 tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
2540 skb_push(skb, 4);
2541 cpu_to_le32s(&tx_cmd_b);
2542 memcpy(skb->data, &tx_cmd_b, 4);
2544 skb_push(skb, 4);
2545 cpu_to_le32s(&tx_cmd_a);
2546 memcpy(skb->data, &tx_cmd_a, 4);
2548 return skb;
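lan78xx_tx_prep() ends by pushing two little-endian command words in front of the payload: tx_cmd_b first, then tx_cmd_a, so command word A sits at the very start of the wire frame. A standalone sketch of the resulting 8-byte header; put_le32() and build_tx_header() are illustrative stand-ins for the cpu_to_le32s()/memcpy() pairs above:

#include <assert.h>
#include <stdint.h>

static void put_le32(uint8_t *p, uint32_t v)
{
        p[0] = v & 0xff;
        p[1] = (v >> 8) & 0xff;
        p[2] = (v >> 16) & 0xff;
        p[3] = (v >> 24) & 0xff;
}

static void build_tx_header(uint8_t hdr[8], uint32_t tx_cmd_a, uint32_t tx_cmd_b)
{
        put_le32(hdr, tx_cmd_a);     /* frame begins with command word A */
        put_le32(hdr + 4, tx_cmd_b); /* command word B follows */
}

int main(void)
{
        uint8_t hdr[8];

        build_tx_header(hdr, 60, 0); /* a bare 60-byte frame, no flag bits */
        assert(hdr[0] == 60 && hdr[4] == 0);
        return 0;
}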
2551 static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
2552 struct sk_buff_head *list, enum skb_state state)
2554 unsigned long flags;
2555 enum skb_state old_state;
2556 struct skb_data *entry = (struct skb_data *)skb->cb;
2558 spin_lock_irqsave(&list->lock, flags);
2559 old_state = entry->state;
2560 entry->state = state;
2562 __skb_unlink(skb, list);
2563 spin_unlock(&list->lock);
2564 spin_lock(&dev->done.lock);
2566 __skb_queue_tail(&dev->done, skb);
2567 if (skb_queue_len(&dev->done) == 1)
2568 tasklet_schedule(&dev->bh);
2569 spin_unlock_irqrestore(&dev->done.lock, flags);
2571 return old_state;
2574 static void tx_complete(struct urb *urb)
2576 struct sk_buff *skb = (struct sk_buff *)urb->context;
2577 struct skb_data *entry = (struct skb_data *)skb->cb;
2578 struct lan78xx_net *dev = entry->dev;
2580 if (urb->status == 0) {
2581 dev->net->stats.tx_packets += entry->num_of_packet;
2582 dev->net->stats.tx_bytes += entry->length;
2583 } else {
2584 dev->net->stats.tx_errors++;
2586 switch (urb->status) {
2587 case -EPIPE:
2588 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
2589 break;
2591 /* software-driven interface shutdown */
2592 case -ECONNRESET:
2593 case -ESHUTDOWN:
2594 break;
2596 case -EPROTO:
2597 case -ETIME:
2598 case -EILSEQ:
2599 netif_stop_queue(dev->net);
2600 break;
2601 default:
2602 netif_dbg(dev, tx_err, dev->net,
2603 "tx err %d\n", entry->urb->status);
2604 break;
2608 usb_autopm_put_interface_async(dev->intf);
2610 defer_bh(dev, skb, &dev->txq, tx_done);
2613 static void lan78xx_queue_skb(struct sk_buff_head *list,
2614 struct sk_buff *newsk, enum skb_state state)
2616 struct skb_data *entry = (struct skb_data *)newsk->cb;
2618 __skb_queue_tail(list, newsk);
2619 entry->state = state;
2622 static netdev_tx_t
2623 lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2625 struct lan78xx_net *dev = netdev_priv(net);
2626 struct sk_buff *skb2 = NULL;
2628 if (skb) {
2629 skb_tx_timestamp(skb);
2630 skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2633 if (skb2) {
2634 skb_queue_tail(&dev->txq_pend, skb2);
2636 /* throttle the TX path at speeds slower than SuperSpeed USB */
2637 if ((dev->udev->speed < USB_SPEED_SUPER) &&
2638 (skb_queue_len(&dev->txq_pend) > 10))
2639 netif_stop_queue(net);
2640 } else {
2641 netif_dbg(dev, tx_err, dev->net,
2642 "lan78xx_tx_prep return NULL\n");
2643 dev->net->stats.tx_errors++;
2644 dev->net->stats.tx_dropped++;
2647 tasklet_schedule(&dev->bh);
2649 return NETDEV_TX_OK;
2652 static int
2653 lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
2655 int tmp;
2656 struct usb_host_interface *alt = NULL;
2657 struct usb_host_endpoint *in = NULL, *out = NULL;
2658 struct usb_host_endpoint *status = NULL;
2660 for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
2661 unsigned ep;
2663 in = NULL;
2664 out = NULL;
2665 status = NULL;
2666 alt = intf->altsetting + tmp;
2668 for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
2669 struct usb_host_endpoint *e;
2670 int intr = 0;
2672 e = alt->endpoint + ep;
2673 switch (e->desc.bmAttributes) {
2674 case USB_ENDPOINT_XFER_INT:
2675 if (!usb_endpoint_dir_in(&e->desc))
2676 continue;
2677 intr = 1;
2678 /* FALLTHROUGH */
2679 case USB_ENDPOINT_XFER_BULK:
2680 break;
2681 default:
2682 continue;
2684 if (usb_endpoint_dir_in(&e->desc)) {
2685 if (!intr && !in)
2686 in = e;
2687 else if (intr && !status)
2688 status = e;
2689 } else {
2690 if (!out)
2691 out = e;
2694 if (in && out)
2695 break;
2697 if (!alt || !in || !out)
2698 return -EINVAL;
2700 dev->pipe_in = usb_rcvbulkpipe(dev->udev,
2701 in->desc.bEndpointAddress &
2702 USB_ENDPOINT_NUMBER_MASK);
2703 dev->pipe_out = usb_sndbulkpipe(dev->udev,
2704 out->desc.bEndpointAddress &
2705 USB_ENDPOINT_NUMBER_MASK);
2706 dev->ep_intr = status;
2708 return 0;
2711 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2713 struct lan78xx_priv *pdata = NULL;
2714 int ret;
2715 int i;
2717 ret = lan78xx_get_endpoints(dev, intf);
2719 dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2721 pdata = (struct lan78xx_priv *)(dev->data[0]);
2722 if (!pdata) {
2723 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2724 return -ENOMEM;
2727 pdata->dev = dev;
2729 spin_lock_init(&pdata->rfe_ctl_lock);
2730 mutex_init(&pdata->dataport_mutex);
2732 INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2734 for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2735 pdata->vlan_table[i] = 0;
2737 INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2739 dev->net->features = 0;
2741 if (DEFAULT_TX_CSUM_ENABLE)
2742 dev->net->features |= NETIF_F_HW_CSUM;
2744 if (DEFAULT_RX_CSUM_ENABLE)
2745 dev->net->features |= NETIF_F_RXCSUM;
2747 if (DEFAULT_TSO_CSUM_ENABLE)
2748 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2750 dev->net->hw_features = dev->net->features;
2752 ret = lan78xx_setup_irq_domain(dev);
2753 if (ret < 0) {
2754 netdev_warn(dev->net,
2755 "lan78xx_setup_irq_domain() failed : %d", ret);
2756 kfree(pdata);
2757 return ret;
2760 /* Init all registers */
2761 ret = lan78xx_reset(dev);
2763 lan78xx_mdio_init(dev);
2765 dev->net->flags |= IFF_MULTICAST;
2767 pdata->wol = WAKE_MAGIC;
2769 return 0;
2772 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2774 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2776 lan78xx_remove_irq_domain(dev);
2778 lan78xx_remove_mdio(dev);
2780 if (pdata) {
2781 netif_dbg(dev, ifdown, dev->net, "free pdata");
2782 kfree(pdata);
2783 pdata = NULL;
2784 dev->data[0] = 0;
2788 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
2789 struct sk_buff *skb,
2790 u32 rx_cmd_a, u32 rx_cmd_b)
2792 if (!(dev->net->features & NETIF_F_RXCSUM) ||
2793 unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
2794 skb->ip_summed = CHECKSUM_NONE;
2795 } else {
2796 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
2797 skb->ip_summed = CHECKSUM_COMPLETE;
2801 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
2803 int status;
2805 if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
2806 skb_queue_tail(&dev->rxq_pause, skb);
2807 return;
2810 dev->net->stats.rx_packets++;
2811 dev->net->stats.rx_bytes += skb->len;
2813 skb->protocol = eth_type_trans(skb, dev->net);
2815 netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
2816 skb->len + sizeof(struct ethhdr), skb->protocol);
2817 memset(skb->cb, 0, sizeof(struct skb_data));
2819 if (skb_defer_rx_timestamp(skb))
2820 return;
2822 status = netif_rx(skb);
2823 if (status != NET_RX_SUCCESS)
2824 netif_dbg(dev, rx_err, dev->net,
2825 "netif_rx status %d\n", status);
2828 static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
2830 if (skb->len < dev->net->hard_header_len)
2831 return 0;
2833 while (skb->len > 0) {
2834 u32 rx_cmd_a, rx_cmd_b, align_count, size;
2835 u16 rx_cmd_c;
2836 struct sk_buff *skb2;
2837 unsigned char *packet;
2839 memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
2840 le32_to_cpus(&rx_cmd_a);
2841 skb_pull(skb, sizeof(rx_cmd_a));
2843 memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
2844 le32_to_cpus(&rx_cmd_b);
2845 skb_pull(skb, sizeof(rx_cmd_b));
2847 memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
2848 le16_to_cpus(&rx_cmd_c);
2849 skb_pull(skb, sizeof(rx_cmd_c));
2851 packet = skb->data;
2853 /* get the packet length */
2854 size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
2855 align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
2857 if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
2858 netif_dbg(dev, rx_err, dev->net,
2859 "Error rx_cmd_a=0x%08x", rx_cmd_a);
2860 } else {
2861 /* last frame in this batch */
2862 if (skb->len == size) {
2863 lan78xx_rx_csum_offload(dev, skb,
2864 rx_cmd_a, rx_cmd_b);
2866 skb_trim(skb, skb->len - 4); /* remove fcs */
2867 skb->truesize = size + sizeof(struct sk_buff);
2869 return 1;
2872 skb2 = skb_clone(skb, GFP_ATOMIC);
2873 if (unlikely(!skb2)) {
2874 netdev_warn(dev->net, "Error allocating skb");
2875 return 0;
2878 skb2->len = size;
2879 skb2->data = packet;
2880 skb_set_tail_pointer(skb2, size);
2882 lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
2884 skb_trim(skb2, skb2->len - 4); /* remove fcs */
2885 skb2->truesize = size + sizeof(struct sk_buff);
2887 lan78xx_skb_return(dev, skb2);
2890 skb_pull(skb, size);
2892 /* padding bytes before the next frame starts */
2893 if (skb->len)
2894 skb_pull(skb, align_count);
2897 return 1;
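Each frame in an RX batch above is preceded by a 4+4+2 byte command header, and the next header begins only after pad bytes that bring (size + RXW_PADDING) up to a 32-bit boundary (RXW_PADDING is 2). A standalone check of the alignment formula; rx_align_pad() is an illustrative name:

#include <assert.h>

static unsigned int rx_align_pad(unsigned int size)
{
        /* mirrors align_count = (4 - ((size + RXW_PADDING) % 4)) % 4 */
        return (4 - ((size + 2) % 4)) % 4;
}

int main(void)
{
        assert(rx_align_pad(60) == 2); /* 62 -> pad 2 to reach 64 */
        assert(rx_align_pad(61) == 1); /* 63 -> pad 1 */
        assert(rx_align_pad(62) == 0); /* 64 is already aligned */
        return 0;
}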
2900 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
2902 if (!lan78xx_rx(dev, skb)) {
2903 dev->net->stats.rx_errors++;
2904 goto done;
2907 if (skb->len) {
2908 lan78xx_skb_return(dev, skb);
2909 return;
2912 netif_dbg(dev, rx_err, dev->net, "drop\n");
2913 dev->net->stats.rx_errors++;
2914 done:
2915 skb_queue_tail(&dev->done, skb);
2918 static void rx_complete(struct urb *urb);
2920 static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
2922 struct sk_buff *skb;
2923 struct skb_data *entry;
2924 unsigned long lockflags;
2925 size_t size = dev->rx_urb_size;
2926 int ret = 0;
2928 skb = netdev_alloc_skb_ip_align(dev->net, size);
2929 if (!skb) {
2930 usb_free_urb(urb);
2931 return -ENOMEM;
2934 entry = (struct skb_data *)skb->cb;
2935 entry->urb = urb;
2936 entry->dev = dev;
2937 entry->length = 0;
2939 usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
2940 skb->data, size, rx_complete, skb);
2942 spin_lock_irqsave(&dev->rxq.lock, lockflags);
2944 if (netif_device_present(dev->net) &&
2945 netif_running(dev->net) &&
2946 !test_bit(EVENT_RX_HALT, &dev->flags) &&
2947 !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
2948 ret = usb_submit_urb(urb, GFP_ATOMIC);
2949 switch (ret) {
2950 case 0:
2951 lan78xx_queue_skb(&dev->rxq, skb, rx_start);
2952 break;
2953 case -EPIPE:
2954 lan78xx_defer_kevent(dev, EVENT_RX_HALT);
2955 break;
2956 case -ENODEV:
2957 netif_dbg(dev, ifdown, dev->net, "device gone\n");
2958 netif_device_detach(dev->net);
2959 break;
2960 case -EHOSTUNREACH:
2961 ret = -ENOLINK;
2962 break;
2963 default:
2964 netif_dbg(dev, rx_err, dev->net,
2965 "rx submit, %d\n", ret);
2966 tasklet_schedule(&dev->bh);
2968 } else {
2969 netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
2970 ret = -ENOLINK;
2972 spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
2973 if (ret) {
2974 dev_kfree_skb_any(skb);
2975 usb_free_urb(urb);
2977 return ret;
2980 static void rx_complete(struct urb *urb)
2982 struct sk_buff *skb = (struct sk_buff *)urb->context;
2983 struct skb_data *entry = (struct skb_data *)skb->cb;
2984 struct lan78xx_net *dev = entry->dev;
2985 int urb_status = urb->status;
2986 enum skb_state state;
2988 skb_put(skb, urb->actual_length);
2989 state = rx_done;
2990 entry->urb = NULL;
2992 switch (urb_status) {
2993 case 0:
2994 if (skb->len < dev->net->hard_header_len) {
2995 state = rx_cleanup;
2996 dev->net->stats.rx_errors++;
2997 dev->net->stats.rx_length_errors++;
2998 netif_dbg(dev, rx_err, dev->net,
2999 "rx length %d\n", skb->len);
3001 usb_mark_last_busy(dev->udev);
3002 break;
3003 case -EPIPE:
3004 dev->net->stats.rx_errors++;
3005 lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3006 /* FALLTHROUGH */
3007 case -ECONNRESET: /* async unlink */
3008 case -ESHUTDOWN: /* hardware gone */
3009 netif_dbg(dev, ifdown, dev->net,
3010 "rx shutdown, code %d\n", urb_status);
3011 state = rx_cleanup;
3012 entry->urb = urb;
3013 urb = NULL;
3014 break;
3015 case -EPROTO:
3016 case -ETIME:
3017 case -EILSEQ:
3018 dev->net->stats.rx_errors++;
3019 state = rx_cleanup;
3020 entry->urb = urb;
3021 urb = NULL;
3022 break;
3024 /* data overrun ... flush fifo? */
3025 case -EOVERFLOW:
3026 dev->net->stats.rx_over_errors++;
3027 /* FALLTHROUGH */
3029 default:
3030 state = rx_cleanup;
3031 dev->net->stats.rx_errors++;
3032 netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
3033 break;
3036 state = defer_bh(dev, skb, &dev->rxq, state);
3038 if (urb) {
3039 if (netif_running(dev->net) &&
3040 !test_bit(EVENT_RX_HALT, &dev->flags) &&
3041 state != unlink_start) {
3042 rx_submit(dev, urb, GFP_ATOMIC);
3043 return;
3045 usb_free_urb(urb);
3047 netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
3050 static void lan78xx_tx_bh(struct lan78xx_net *dev)
3052 int length;
3053 struct urb *urb = NULL;
3054 struct skb_data *entry;
3055 unsigned long flags;
3056 struct sk_buff_head *tqp = &dev->txq_pend;
3057 struct sk_buff *skb, *skb2;
3058 int ret;
3059 int count, pos;
3060 int skb_totallen, pkt_cnt;
3062 skb_totallen = 0;
3063 pkt_cnt = 0;
3064 count = 0;
3065 length = 0;
3066 for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
3067 if (skb_is_gso(skb)) {
3068 if (pkt_cnt) {
3069 /* handle previous packets first */
3070 break;
3072 count = 1;
3073 length = skb->len - TX_OVERHEAD;
3074 skb2 = skb_dequeue(tqp);
3075 goto gso_skb;
3078 if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
3079 break;
3080 skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
3081 pkt_cnt++;
3084 /* copy to a single skb */
3085 skb = alloc_skb(skb_totallen, GFP_ATOMIC);
3086 if (!skb)
3087 goto drop;
3089 skb_put(skb, skb_totallen);
3091 for (count = pos = 0; count < pkt_cnt; count++) {
3092 skb2 = skb_dequeue(tqp);
3093 if (skb2) {
3094 length += (skb2->len - TX_OVERHEAD);
3095 memcpy(skb->data + pos, skb2->data, skb2->len);
3096 pos += roundup(skb2->len, sizeof(u32));
3097 dev_kfree_skb(skb2);
3101 gso_skb:
3102 urb = usb_alloc_urb(0, GFP_ATOMIC);
3103 if (!urb)
3104 goto drop;
3106 entry = (struct skb_data *)skb->cb;
3107 entry->urb = urb;
3108 entry->dev = dev;
3109 entry->length = length;
3110 entry->num_of_packet = count;
3112 spin_lock_irqsave(&dev->txq.lock, flags);
3113 ret = usb_autopm_get_interface_async(dev->intf);
3114 if (ret < 0) {
3115 spin_unlock_irqrestore(&dev->txq.lock, flags);
3116 goto drop;
3119 usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
3120 skb->data, skb->len, tx_complete, skb);
3122 if (length % dev->maxpacket == 0) {
3123 /* send USB_ZERO_PACKET */
3124 urb->transfer_flags |= URB_ZERO_PACKET;
3127 #ifdef CONFIG_PM
3128 /* if this triggers, the device is still asleep */
3129 if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3130 /* transmission will be done in resume */
3131 usb_anchor_urb(urb, &dev->deferred);
3132 /* no point in processing more packets */
3133 netif_stop_queue(dev->net);
3134 usb_put_urb(urb);
3135 spin_unlock_irqrestore(&dev->txq.lock, flags);
3136 netdev_dbg(dev->net, "Delaying transmission for resumption\n");
3137 return;
3139 #endif
3141 ret = usb_submit_urb(urb, GFP_ATOMIC);
3142 switch (ret) {
3143 case 0:
3144 netif_trans_update(dev->net);
3145 lan78xx_queue_skb(&dev->txq, skb, tx_start);
3146 if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
3147 netif_stop_queue(dev->net);
3148 break;
3149 case -EPIPE:
3150 netif_stop_queue(dev->net);
3151 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3152 usb_autopm_put_interface_async(dev->intf);
3153 break;
3154 default:
3155 usb_autopm_put_interface_async(dev->intf);
3156 netif_dbg(dev, tx_err, dev->net,
3157 "tx: submit urb err %d\n", ret);
3158 break;
3161 spin_unlock_irqrestore(&dev->txq.lock, flags);
3163 if (ret) {
3164 netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
3165 drop:
3166 dev->net->stats.tx_dropped++;
3167 if (skb)
3168 dev_kfree_skb_any(skb);
3169 usb_free_urb(urb);
3170 } else
3171 netif_dbg(dev, tx_queued, dev->net,
3172 "> tx, len %d, type 0x%x\n", length, skb->protocol);
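 tx, len %d">
The aggregation loop above packs pending frames into one URB buffer back to back, advancing the copy offset by roundup(skb2->len, sizeof(u32)) so that every frame starts 32-bit aligned. A standalone sketch of that packing rule; roundup4() is illustrative:

#include <assert.h>

static unsigned int roundup4(unsigned int x)
{
        return (x + 3) & ~3u;
}

int main(void)
{
        unsigned int pos = 0;

        pos += roundup4(1514); /* a full-size frame pads out to 1516 */
        assert(pos == 1516);
        pos += roundup4(60);   /* a minimal frame is already aligned */
        assert(pos == 1576);
        return 0;
}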
3175 static void lan78xx_rx_bh(struct lan78xx_net *dev)
3177 struct urb *urb;
3178 int i;
3180 if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3181 for (i = 0; i < 10; i++) {
3182 if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3183 break;
3184 urb = usb_alloc_urb(0, GFP_ATOMIC);
3185 if (urb)
3186 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3187 return;
3190 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3191 tasklet_schedule(&dev->bh);
3193 if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3194 netif_wake_queue(dev->net);
3197 static void lan78xx_bh(unsigned long param)
3199 struct lan78xx_net *dev = (struct lan78xx_net *)param;
3200 struct sk_buff *skb;
3201 struct skb_data *entry;
3203 while ((skb = skb_dequeue(&dev->done))) {
3204 entry = (struct skb_data *)(skb->cb);
3205 switch (entry->state) {
3206 case rx_done:
3207 entry->state = rx_cleanup;
3208 rx_process(dev, skb);
3209 continue;
3210 case tx_done:
3211 usb_free_urb(entry->urb);
3212 dev_kfree_skb(skb);
3213 continue;
3214 case rx_cleanup:
3215 usb_free_urb(entry->urb);
3216 dev_kfree_skb(skb);
3217 continue;
3218 default:
3219 netdev_dbg(dev->net, "skb state %d\n", entry->state);
3220 return;
3224 if (netif_device_present(dev->net) && netif_running(dev->net)) {
3225 /* reset update timer delta */
3226 if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
3227 dev->delta = 1;
3228 mod_timer(&dev->stat_monitor,
3229 jiffies + STAT_UPDATE_TIMER);
3232 if (!skb_queue_empty(&dev->txq_pend))
3233 lan78xx_tx_bh(dev);
3235 if (!timer_pending(&dev->delay) &&
3236 !test_bit(EVENT_RX_HALT, &dev->flags))
3237 lan78xx_rx_bh(dev);
3241 static void lan78xx_delayedwork(struct work_struct *work)
3243 int status;
3244 struct lan78xx_net *dev;
3246 dev = container_of(work, struct lan78xx_net, wq.work);
3248 if (test_bit(EVENT_TX_HALT, &dev->flags)) {
3249 unlink_urbs(dev, &dev->txq);
3250 status = usb_autopm_get_interface(dev->intf);
3251 if (status < 0)
3252 goto fail_pipe;
3253 status = usb_clear_halt(dev->udev, dev->pipe_out);
3254 usb_autopm_put_interface(dev->intf);
3255 if (status < 0 &&
3256 status != -EPIPE &&
3257 status != -ESHUTDOWN) {
3258 if (netif_msg_tx_err(dev))
3259 fail_pipe:
3260 netdev_err(dev->net,
3261 "can't clear tx halt, status %d\n",
3262 status);
3263 } else {
3264 clear_bit(EVENT_TX_HALT, &dev->flags);
3265 if (status != -ESHUTDOWN)
3266 netif_wake_queue(dev->net);
3269 if (test_bit(EVENT_RX_HALT, &dev->flags)) {
3270 unlink_urbs(dev, &dev->rxq);
3271 status = usb_autopm_get_interface(dev->intf);
3272 if (status < 0)
3273 goto fail_halt;
3274 status = usb_clear_halt(dev->udev, dev->pipe_in);
3275 usb_autopm_put_interface(dev->intf);
3276 if (status < 0 &&
3277 status != -EPIPE &&
3278 status != -ESHUTDOWN) {
3279 if (netif_msg_rx_err(dev))
3280 fail_halt:
3281 netdev_err(dev->net,
3282 "can't clear rx halt, status %d\n",
3283 status);
3284 } else {
3285 clear_bit(EVENT_RX_HALT, &dev->flags);
3286 tasklet_schedule(&dev->bh);
3290 if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
3291 int ret = 0;
3293 clear_bit(EVENT_LINK_RESET, &dev->flags);
3294 status = usb_autopm_get_interface(dev->intf);
3295 if (status < 0)
3296 goto skip_reset;
3297 if (lan78xx_link_reset(dev) < 0) {
3298 usb_autopm_put_interface(dev->intf);
3299 skip_reset:
3300 netdev_info(dev->net, "link reset failed (%d)\n",
3301 ret);
3302 } else {
3303 usb_autopm_put_interface(dev->intf);
3307 if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
3308 lan78xx_update_stats(dev);
3310 clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3312 mod_timer(&dev->stat_monitor,
3313 jiffies + (STAT_UPDATE_TIMER * dev->delta));
3315 dev->delta = min((dev->delta * 2), 50);
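The delta handling above implements exponential backoff for the statistics timer: each deferred update doubles the interval, capped at 50 * STAT_UPDATE_TIMER (50 s), and lan78xx_bh() snaps delta back to 1 while traffic is flowing. A standalone sketch; next_delta() is an illustrative name:

#include <assert.h>

static int next_delta(int delta)
{
        delta *= 2;
        return delta < 50 ? delta : 50; /* min(delta * 2, 50) */
}

int main(void)
{
        int d = 1, steps = 0;

        while (d < 50) {
                d = next_delta(d);
                steps++;
        }
        assert(d == 50 && steps == 6); /* 2, 4, 8, 16, 32, 50 */
        return 0;
}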
3319 static void intr_complete(struct urb *urb)
3321 struct lan78xx_net *dev = urb->context;
3322 int status = urb->status;
3324 switch (status) {
3325 /* success */
3326 case 0:
3327 lan78xx_status(dev, urb);
3328 break;
3330 /* software-driven interface shutdown */
3331 case -ENOENT: /* urb killed */
3332 case -ESHUTDOWN: /* hardware gone */
3333 netif_dbg(dev, ifdown, dev->net,
3334 "intr shutdown, code %d\n", status);
3335 return;
3337 /* NOTE: not throttling like RX/TX, since this endpoint
3338 * already polls infrequently
3339 */
3340 default:
3341 netdev_dbg(dev->net, "intr status %d\n", status);
3342 break;
3345 if (!netif_running(dev->net))
3346 return;
3348 memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
3349 status = usb_submit_urb(urb, GFP_ATOMIC);
3350 if (status != 0)
3351 netif_err(dev, timer, dev->net,
3352 "intr resubmit --> %d\n", status);
3355 static void lan78xx_disconnect(struct usb_interface *intf)
3357 struct lan78xx_net *dev;
3358 struct usb_device *udev;
3359 struct net_device *net;
3361 dev = usb_get_intfdata(intf);
3362 usb_set_intfdata(intf, NULL);
3363 if (!dev)
3364 return;
3366 udev = interface_to_usbdev(intf);
3368 net = dev->net;
3369 unregister_netdev(net);
3371 cancel_delayed_work_sync(&dev->wq);
3373 usb_scuttle_anchored_urbs(&dev->deferred);
3375 lan78xx_unbind(dev, intf);
3377 usb_kill_urb(dev->urb_intr);
3378 usb_free_urb(dev->urb_intr);
3380 free_netdev(net);
3381 usb_put_dev(udev);
3384 static void lan78xx_tx_timeout(struct net_device *net)
3386 struct lan78xx_net *dev = netdev_priv(net);
3388 unlink_urbs(dev, &dev->txq);
3389 tasklet_schedule(&dev->bh);
3392 static const struct net_device_ops lan78xx_netdev_ops = {
3393 .ndo_open = lan78xx_open,
3394 .ndo_stop = lan78xx_stop,
3395 .ndo_start_xmit = lan78xx_start_xmit,
3396 .ndo_tx_timeout = lan78xx_tx_timeout,
3397 .ndo_change_mtu = lan78xx_change_mtu,
3398 .ndo_set_mac_address = lan78xx_set_mac_addr,
3399 .ndo_validate_addr = eth_validate_addr,
3400 .ndo_do_ioctl = lan78xx_ioctl,
3401 .ndo_set_rx_mode = lan78xx_set_multicast,
3402 .ndo_set_features = lan78xx_set_features,
3403 .ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid,
3404 .ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid,
3407 static void lan78xx_stat_monitor(unsigned long param)
3409 struct lan78xx_net *dev;
3411 dev = (struct lan78xx_net *)param;
3413 lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
3416 static int lan78xx_probe(struct usb_interface *intf,
3417 const struct usb_device_id *id)
3419 struct lan78xx_net *dev;
3420 struct net_device *netdev;
3421 struct usb_device *udev;
3422 int ret;
3423 unsigned maxp;
3424 unsigned period;
3425 u8 *buf = NULL;
3427 udev = interface_to_usbdev(intf);
3428 udev = usb_get_dev(udev);
3430 ret = -ENOMEM;
3431 netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3432 if (!netdev) {
3433 dev_err(&intf->dev, "Error: OOM\n");
3434 goto out1;
3437 /* netdev_printk() needs this */
3438 SET_NETDEV_DEV(netdev, &intf->dev);
3440 dev = netdev_priv(netdev);
3441 dev->udev = udev;
3442 dev->intf = intf;
3443 dev->net = netdev;
3444 dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3445 | NETIF_MSG_PROBE | NETIF_MSG_LINK);
3447 skb_queue_head_init(&dev->rxq);
3448 skb_queue_head_init(&dev->txq);
3449 skb_queue_head_init(&dev->done);
3450 skb_queue_head_init(&dev->rxq_pause);
3451 skb_queue_head_init(&dev->txq_pend);
3452 mutex_init(&dev->phy_mutex);
3454 tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
3455 INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3456 init_usb_anchor(&dev->deferred);
3458 netdev->netdev_ops = &lan78xx_netdev_ops;
3459 netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3460 netdev->ethtool_ops = &lan78xx_ethtool_ops;
3462 dev->stat_monitor.function = lan78xx_stat_monitor;
3463 dev->stat_monitor.data = (unsigned long)dev;
3464 dev->delta = 1;
3465 init_timer(&dev->stat_monitor);
3467 mutex_init(&dev->stats.access_lock);
3469 ret = lan78xx_bind(dev, intf);
3470 if (ret < 0)
3471 goto out2;
3472 strcpy(netdev->name, "eth%d");
3474 if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3475 netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3477 /* MTU range: 68 - 9000 */
3478 netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
3480 dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
3481 dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
3482 dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
3484 dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3485 dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3487 dev->pipe_intr = usb_rcvintpipe(dev->udev,
3488 dev->ep_intr->desc.bEndpointAddress &
3489 USB_ENDPOINT_NUMBER_MASK);
3490 period = dev->ep_intr->desc.bInterval;
3492 maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3493 buf = kmalloc(maxp, GFP_KERNEL);
3494 if (buf) {
3495 dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3496 if (!dev->urb_intr) {
3497 kfree(buf);
3498 goto out3;
3499 } else {
3500 usb_fill_int_urb(dev->urb_intr, dev->udev,
3501 dev->pipe_intr, buf, maxp,
3502 intr_complete, dev, period);
3506 dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3508 /* driver requires remote-wakeup capability during autosuspend. */
3509 intf->needs_remote_wakeup = 1;
3511 ret = register_netdev(netdev);
3512 if (ret != 0) {
3513 netif_err(dev, probe, netdev, "couldn't register the device\n");
3514 goto out2;
3517 usb_set_intfdata(intf, dev);
3519 ret = device_set_wakeup_enable(&udev->dev, true);
3521 /* The default autosuspend delay of 2 sec has more overhead than benefit.
3522 * Set it to 10 sec as the default.
3523 */
3524 pm_runtime_set_autosuspend_delay(&udev->dev,
3525 DEFAULT_AUTOSUSPEND_DELAY);
3527 return 0;
3529 out3:
3530 lan78xx_unbind(dev, intf);
3531 out2:
3532 free_netdev(netdev);
3533 out1:
3534 usb_put_dev(udev);
3536 return ret;
3539 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3541 const u16 crc16poly = 0x8005;
3542 int i;
3543 u16 bit, crc, msb;
3544 u8 data;
3546 crc = 0xFFFF;
3547 for (i = 0; i < len; i++) {
3548 data = *buf++;
3549 for (bit = 0; bit < 8; bit++) {
3550 msb = crc >> 15;
3551 crc <<= 1;
3553 if (msb ^ (u16)(data & 1)) {
3554 crc ^= crc16poly;
3555 crc |= (u16)0x0001U;
3557 data >>= 1;
3561 return crc;
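For checking wake patterns on the host, the bit-serial CRC above (polynomial 0x8005, data consumed LSB first, initial value 0xFFFF) can be transcribed directly. The harness below is an editorial sketch: crc16() is a transcription of lan78xx_wakeframe_crc16(), and the sample pattern is the IPv4 multicast OUI used by lan78xx_set_suspend():

#include <stdint.h>
#include <stdio.h>

static uint16_t crc16(const uint8_t *buf, int len)
{
        uint16_t crc = 0xFFFF;

        for (int i = 0; i < len; i++) {
                uint8_t data = buf[i];

                for (int bit = 0; bit < 8; bit++) {
                        uint16_t msb = crc >> 15;

                        crc <<= 1;
                        if (msb ^ (data & 1)) {
                                crc ^= 0x8005;
                                crc |= 1;
                        }
                        data >>= 1;
                }
        }
        return crc;
}

int main(void)
{
        const uint8_t ipv4_mc[3] = { 0x01, 0x00, 0x5E }; /* multicast OUI */

        printf("wakeframe crc16 = 0x%04x\n", crc16(ipv4_mc, 3));
        return 0;
}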
3564 static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
3566 u32 buf;
3567 int ret;
3568 int mask_index;
3569 u16 crc;
3570 u32 temp_wucsr;
3571 u32 temp_pmt_ctl;
3572 const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
3573 const u8 ipv6_multicast[3] = { 0x33, 0x33 };
3574 const u8 arp_type[2] = { 0x08, 0x06 };
3576 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3577 buf &= ~MAC_TX_TXEN_;
3578 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3579 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3580 buf &= ~MAC_RX_RXEN_;
3581 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3583 ret = lan78xx_write_reg(dev, WUCSR, 0);
3584 ret = lan78xx_write_reg(dev, WUCSR2, 0);
3585 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3587 temp_wucsr = 0;
3589 temp_pmt_ctl = 0;
3590 ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
3591 temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
3592 temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
3594 for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
3595 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
3597 mask_index = 0;
3598 if (wol & WAKE_PHY) {
3599 temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
3601 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3602 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3603 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3605 if (wol & WAKE_MAGIC) {
3606 temp_wucsr |= WUCSR_MPEN_;
3608 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3609 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3610 temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
3612 if (wol & WAKE_BCAST) {
3613 temp_wucsr |= WUCSR_BCST_EN_;
3615 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3616 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3617 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3619 if (wol & WAKE_MCAST) {
3620 temp_wucsr |= WUCSR_WAKE_EN_;
3622 /* set WUF_CFG & WUF_MASK for IPv4 Multicast */
3623 crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
3624 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3625 WUF_CFGX_EN_ |
3626 WUF_CFGX_TYPE_MCAST_ |
3627 (0 << WUF_CFGX_OFFSET_SHIFT_) |
3628 (crc & WUF_CFGX_CRC16_MASK_));
3630 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
3631 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3632 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3633 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3634 mask_index++;
3636 /* for IPv6 Multicast */
3637 crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
3638 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3639 WUF_CFGX_EN_ |
3640 WUF_CFGX_TYPE_MCAST_ |
3641 (0 << WUF_CFGX_OFFSET_SHIFT_) |
3642 (crc & WUF_CFGX_CRC16_MASK_));
3644 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
3645 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3646 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3647 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3648 mask_index++;
3650 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3651 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3652 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3654 if (wol & WAKE_UCAST) {
3655 temp_wucsr |= WUCSR_PFDA_EN_;
3657 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3658 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3659 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3661 if (wol & WAKE_ARP) {
3662 temp_wucsr |= WUCSR_WAKE_EN_;
3664 /* set WUF_CFG & WUF_MASK
3665 * for packet type (offset 12,13) = ARP (0x0806)
3666 */
3667 crc = lan78xx_wakeframe_crc16(arp_type, 2);
3668 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3669 WUF_CFGX_EN_ |
3670 WUF_CFGX_TYPE_ALL_ |
3671 (0 << WUF_CFGX_OFFSET_SHIFT_) |
3672 (crc & WUF_CFGX_CRC16_MASK_));
3674 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
3675 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3676 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3677 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3678 mask_index++;
3680 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3681 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3682 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3685 ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
3687 /* when multiple WOL bits are set */
3688 if (hweight_long((unsigned long)wol) > 1) {
3689 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3690 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3691 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3693 ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
3695 /* clear WUPS */
3696 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3697 buf |= PMT_CTL_WUPS_MASK_;
3698 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3700 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3701 buf |= MAC_RX_RXEN_;
3702 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3704 return 0;
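The mask values programmed above are consistent with bit N of WUF_MASK0 selecting byte N of the frame for the CRC16 comparison: 0x7 covers the 3-byte 01:00:5E prefix, 0x3 the 2-byte 33:33 prefix, and 0x3000 bytes 12-13 (the EtherType, hence the ARP match). A small sketch of that correspondence, under that inferred interpretation; mask_for_bytes() is illustrative:

#include <assert.h>
#include <stdint.h>

static uint32_t mask_for_bytes(int first, int count)
{
        /* valid for first + count <= 32, i.e. within WUF_MASK0 */
        return ((1u << count) - 1) << first;
}

int main(void)
{
        assert(mask_for_bytes(0, 3) == 0x7);     /* 01:00:5E prefix */
        assert(mask_for_bytes(0, 2) == 0x3);     /* 33:33 prefix */
        assert(mask_for_bytes(12, 2) == 0x3000); /* EtherType 0x0806 */
        return 0;
}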
3707 static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
3709 struct lan78xx_net *dev = usb_get_intfdata(intf);
3710 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3711 u32 buf;
3712 int ret;
3713 int event;
3715 event = message.event;
3717 if (!dev->suspend_count++) {
3718 spin_lock_irq(&dev->txq.lock);
3719 /* don't autosuspend while transmitting */
3720 if ((skb_queue_len(&dev->txq) ||
3721 skb_queue_len(&dev->txq_pend)) &&
3722 PMSG_IS_AUTO(message)) {
3723 spin_unlock_irq(&dev->txq.lock);
3724 ret = -EBUSY;
3725 goto out;
3726 } else {
3727 set_bit(EVENT_DEV_ASLEEP, &dev->flags);
3728 spin_unlock_irq(&dev->txq.lock);
3731 /* stop TX & RX */
3732 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3733 buf &= ~MAC_TX_TXEN_;
3734 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3735 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3736 buf &= ~MAC_RX_RXEN_;
3737 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3739 /* empty out the rx and tx queues */
3740 netif_device_detach(dev->net);
3741 lan78xx_terminate_urbs(dev);
3742 usb_kill_urb(dev->urb_intr);
3744 /* reattach */
3745 netif_device_attach(dev->net);
3748 if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3749 del_timer(&dev->stat_monitor);
3751 if (PMSG_IS_AUTO(message)) {
3752 /* auto suspend (selective suspend) */
3753 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3754 buf &= ~MAC_TX_TXEN_;
3755 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3756 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3757 buf &= ~MAC_RX_RXEN_;
3758 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3760 ret = lan78xx_write_reg(dev, WUCSR, 0);
3761 ret = lan78xx_write_reg(dev, WUCSR2, 0);
3762 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3764 /* set goodframe wakeup */
3765 ret = lan78xx_read_reg(dev, WUCSR, &buf);
3767 buf |= WUCSR_RFE_WAKE_EN_;
3768 buf |= WUCSR_STORE_WAKE_;
3770 ret = lan78xx_write_reg(dev, WUCSR, buf);
3772 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3774 buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
3775 buf |= PMT_CTL_RES_CLR_WKP_STS_;
3777 buf |= PMT_CTL_PHY_WAKE_EN_;
3778 buf |= PMT_CTL_WOL_EN_;
3779 buf &= ~PMT_CTL_SUS_MODE_MASK_;
3780 buf |= PMT_CTL_SUS_MODE_3_;
3782 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3784 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3786 buf |= PMT_CTL_WUPS_MASK_;
3788 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3790 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3791 buf |= MAC_RX_RXEN_;
3792 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3793 } else {
3794 lan78xx_set_suspend(dev, pdata->wol);
3798 ret = 0;
3799 out:
3800 return ret;
3803 static int lan78xx_resume(struct usb_interface *intf)
3805 struct lan78xx_net *dev = usb_get_intfdata(intf);
3806 struct sk_buff *skb;
3807 struct urb *res;
3808 int ret;
3809 u32 buf;
3811 if (!timer_pending(&dev->stat_monitor)) {
3812 dev->delta = 1;
3813 mod_timer(&dev->stat_monitor,
3814 jiffies + STAT_UPDATE_TIMER);
3817 if (!--dev->suspend_count) {
3818 /* resume interrupt URBs */
3819 if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
3820 usb_submit_urb(dev->urb_intr, GFP_NOIO);
3822 spin_lock_irq(&dev->txq.lock);
3823 while ((res = usb_get_from_anchor(&dev->deferred))) {
3824 skb = (struct sk_buff *)res->context;
3825 ret = usb_submit_urb(res, GFP_ATOMIC);
3826 if (ret < 0) {
3827 dev_kfree_skb_any(skb);
3828 usb_free_urb(res);
3829 usb_autopm_put_interface_async(dev->intf);
3830 } else {
3831 netif_trans_update(dev->net);
3832 lan78xx_queue_skb(&dev->txq, skb, tx_start);
3836 clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
3837 spin_unlock_irq(&dev->txq.lock);
3839 if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
3840 if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
3841 netif_start_queue(dev->net);
3842 tasklet_schedule(&dev->bh);
3846 ret = lan78xx_write_reg(dev, WUCSR2, 0);
3847 ret = lan78xx_write_reg(dev, WUCSR, 0);
3848 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3850 ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
3851 WUCSR2_ARP_RCD_ |
3852 WUCSR2_IPV6_TCPSYN_RCD_ |
3853 WUCSR2_IPV4_TCPSYN_RCD_);
3855 ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
3856 WUCSR_EEE_RX_WAKE_ |
3857 WUCSR_PFDA_FR_ |
3858 WUCSR_RFE_WAKE_FR_ |
3859 WUCSR_WUFR_ |
3860 WUCSR_MPR_ |
3861 WUCSR_BCST_FR_);
3863 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3864 buf |= MAC_TX_TXEN_;
3865 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3867 return 0;
3870 static int lan78xx_reset_resume(struct usb_interface *intf)
3872 struct lan78xx_net *dev = usb_get_intfdata(intf);
3874 lan78xx_reset(dev);
3876 lan78xx_phy_init(dev);
3878 return lan78xx_resume(intf);
3881 static const struct usb_device_id products[] = {
3883 /* LAN7800 USB Gigabit Ethernet Device */
3884 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
3887 /* LAN7850 USB Gigabit Ethernet Device */
3888 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
3892 MODULE_DEVICE_TABLE(usb, products);
3894 static struct usb_driver lan78xx_driver = {
3895 .name = DRIVER_NAME,
3896 .id_table = products,
3897 .probe = lan78xx_probe,
3898 .disconnect = lan78xx_disconnect,
3899 .suspend = lan78xx_suspend,
3900 .resume = lan78xx_resume,
3901 .reset_resume = lan78xx_reset_resume,
3902 .supports_autosuspend = 1,
3903 .disable_hub_initiated_lpm = 1,
3906 module_usb_driver(lan78xx_driver);
3908 MODULE_AUTHOR(DRIVER_AUTHOR);
3909 MODULE_DESCRIPTION(DRIVER_DESC);
3910 MODULE_LICENSE("GPL");