/*
 * This code is derived from the VIA reference driver (copyright message
 * below) provided to Red Hat by VIA Networking Technologies, Inc. for
 * addition to the Linux kernel.
 *
 * The code has been merged into one source file, cleaned up to follow
 * Linux coding style, ported to the Linux 2.6 kernel tree and cleaned up
 * for 64bit hardware platforms.
 *
 * TODO
 *	rx_copybreak/alignment
 *
 * The changes are (c) Copyright 2004, Red Hat Inc. <alan@redhat.com>
 * Additional fixes and clean up: Francois Romieu
 *
 * This source has not been verified for use in safety critical systems.
 *
 * Please direct queries about the revamped driver to the linux-kernel
 * list.
 *
 * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
 * All rights reserved.
 *
 * This software may be redistributed and/or modified under
 * the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * Author: Chuang Liang-Shing, AJ Jiang
 *
 * MODULE_LICENSE("GPL");
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/wait.h>
#include <asm/uaccess.h>
#include <linux/proc_fs.h>
#include <linux/inetdevice.h>
#include <linux/reboot.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_arp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/crc-ccitt.h>
#include <linux/crc32.h>

#include "via-velocity.h"
static int velocity_nics = 0;
static int msglevel = MSG_LEVEL_INFO;

static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static struct ethtool_ops velocity_ethtool_ops;
MODULE_AUTHOR("VIA Networking Technologies, Inc.");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VIA Networking Velocity Family Gigabit Ethernet Adapter Driver");
#define VELOCITY_PARAM(N,D) \
	static int N[MAX_UNITS] = OPTION_DEFAULT;\
	module_param_array(N, int, NULL, 0); \
	MODULE_PARM_DESC(N, D);
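
/*
 * Note: each option declared with VELOCITY_PARAM() is a per-adapter
 * array, so values are passed comma separated, one entry per NIC, for
 * example (values purely illustrative):
 *
 *	modprobe via-velocity RxDescriptors=128,64 speed_duplex=0,2
 *
 * Entries that are left at OPTION_DEFAULT fall back to the matching
 * *_DEF value in velocity_set_int_opt()/velocity_set_bool_opt().
 */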
#define RX_DESC_MIN     64
#define RX_DESC_MAX     255
#define RX_DESC_DEF     64
VELOCITY_PARAM(RxDescriptors, "Number of receive descriptors");

#define TX_DESC_MIN     16
#define TX_DESC_MAX     256
#define TX_DESC_DEF     64
VELOCITY_PARAM(TxDescriptors, "Number of transmit descriptors");
#define VLAN_ID_MIN     0
#define VLAN_ID_MAX     4095
#define VLAN_ID_DEF     0
/* VID_setting[] is used for setting the VID of the NIC. */
VELOCITY_PARAM(VID_setting, "802.1Q VLAN ID");
#define RX_THRESH_MIN   0
#define RX_THRESH_MAX   3
#define RX_THRESH_DEF   0
/* rx_thresh[] is used for controlling the receive fifo threshold.
   0: the rxfifo threshold is 128 bytes.
   1: the rxfifo threshold is 512 bytes.
   2: the rxfifo threshold is 1024 bytes.
   3: the rxfifo threshold is store & forward.
*/
VELOCITY_PARAM(rx_thresh, "Receive fifo threshold");
#define DMA_LENGTH_MIN  0
#define DMA_LENGTH_MAX  7
#define DMA_LENGTH_DEF  0

/* DMA_length[] is used for controlling the DMA length.
   6: SF (flush till empty)
   7: SF (flush till empty)
*/
VELOCITY_PARAM(DMA_length, "DMA length");
#define TAGGING_DEF     0
/* enable_tagging[] is used for enabling 802.1Q VID tagging.
   0: disable VID setting (default).
   1: enable VID setting.
*/
VELOCITY_PARAM(enable_tagging, "Enable 802.1Q tagging");
#define IP_ALIG_DEF     0
/* IP_byte_align[] is used for IP header DWORD byte alignment.
   0: the IP header won't be DWORD byte aligned. (Default)
   1: the IP header will be DWORD byte aligned.
   In some environments the IP header must be DWORD byte aligned,
   or received packets will be dropped (e.g. IPVS).
*/
VELOCITY_PARAM(IP_byte_align, "Enable IP header dword aligned");
#define TX_CSUM_DEF     1
/* txcsum_offload[] is used for setting the checksum offload ability of NIC.
   (We only support RX checksum offload now)
   0: disable checksum offload.
   1: enable checksum offload. (Default)
*/
VELOCITY_PARAM(txcsum_offload, "Enable transmit packet checksum offload");
#define FLOW_CNTL_DEF   1
#define FLOW_CNTL_MIN   1
#define FLOW_CNTL_MAX   5

/* flow_control[] is used for setting the flow control ability of NIC.
   1: hardware default - AUTO (default). Use the hardware default value in ANAR.
   2: enable TX flow control.
   3: enable RX flow control.
   4: enable RX/TX flow control.
*/
VELOCITY_PARAM(flow_control, "Enable flow control ability");
#define MED_LNK_DEF 0
#define MED_LNK_MIN 0
#define MED_LNK_MAX 4
/* speed_duplex[] is used for setting the speed and duplex mode of NIC.
   0: autonegotiation for both speed and duplex mode
   1: 100Mbps half duplex mode
   2: 100Mbps full duplex mode
   3: 10Mbps half duplex mode
   4: 10Mbps full duplex mode

   If the EEPROM has been set to a forced mode, this option is ignored.
*/
VELOCITY_PARAM(speed_duplex, "Setting the speed and duplex mode");
#define VAL_PKT_LEN_DEF 0
/* ValPktLen[] is used for controlling the layer 2 length check on received frames.
   0: Receive frames with an invalid layer 2 length. (Default)
   1: Drop frames with an invalid layer 2 length.
*/
VELOCITY_PARAM(ValPktLen, "Receive or drop frames with an invalid 802.3 length");
#define WOL_OPT_DEF     0
#define WOL_OPT_MIN     0
#define WOL_OPT_MAX     7
/* wol_opts[] is used for controlling wake on lan behavior.
   0: Wake up if a magic packet is received. (Default)
   1: Wake up on link status change (on/off).
   2: Wake up if an arp packet is received.
   4: Wake up if any unicast packet is received.
   These values can be summed to enable more than one option.
*/
VELOCITY_PARAM(wol_opts, "Wake On Lan options");
#define INT_WORKS_DEF   20
#define INT_WORKS_MIN   10
#define INT_WORKS_MAX   64

VELOCITY_PARAM(int_works, "Number of packets handled per interrupt service");
static int rx_copybreak = 200;
module_param(rx_copybreak, int, 0644);
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
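
/*
 * Note: frames shorter than rx_copybreak bytes are copied into a freshly
 * allocated skb by velocity_rx_copy() so the large DMA-mapped receive
 * buffer can be handed straight back to the ring; larger frames are
 * passed up the stack in the original buffer instead.
 */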
static void velocity_init_info(struct pci_dev *pdev, struct velocity_info *vptr, struct velocity_info_tbl *info);
static int velocity_get_pci_info(struct velocity_info *, struct pci_dev *pdev);
static void velocity_print_info(struct velocity_info *vptr);
static int velocity_open(struct net_device *dev);
static int velocity_change_mtu(struct net_device *dev, int mtu);
static int velocity_xmit(struct sk_buff *skb, struct net_device *dev);
static int velocity_intr(int irq, void *dev_instance, struct pt_regs *regs);
static void velocity_set_multi(struct net_device *dev);
static struct net_device_stats *velocity_get_stats(struct net_device *dev);
static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int velocity_close(struct net_device *dev);
static int velocity_receive_frame(struct velocity_info *, int idx);
static int velocity_alloc_rx_buf(struct velocity_info *, int idx);
static void velocity_free_rd_ring(struct velocity_info *vptr);
static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_info *);
static int velocity_soft_reset(struct velocity_info *vptr);
static void mii_init(struct velocity_info *vptr, u32 mii_status);
static u32 velocity_get_link(struct net_device *dev);
static u32 velocity_get_opt_media_mode(struct velocity_info *vptr);
static void velocity_print_link_status(struct velocity_info *vptr);
static void safe_disable_mii_autopoll(struct mac_regs __iomem *regs);
static void velocity_shutdown(struct velocity_info *vptr);
static void enable_flow_control_ability(struct velocity_info *vptr);
static void enable_mii_autopoll(struct mac_regs __iomem *regs);
static int velocity_mii_read(struct mac_regs __iomem *, u8 byIdx, u16 *pdata);
static int velocity_mii_write(struct mac_regs __iomem *, u8 byMiiAddr, u16 data);
static u32 mii_check_media_mode(struct mac_regs __iomem *regs);
static u32 check_connection_type(struct mac_regs __iomem *regs);
static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status);

#ifdef CONFIG_PM

static int velocity_suspend(struct pci_dev *pdev, pm_message_t state);
static int velocity_resume(struct pci_dev *pdev);

static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr);

static struct notifier_block velocity_inetaddr_notifier = {
	.notifier_call	= velocity_netdev_event,
};

static DEFINE_SPINLOCK(velocity_dev_list_lock);
static LIST_HEAD(velocity_dev_list);

static void velocity_register_notifier(void)
{
	register_inetaddr_notifier(&velocity_inetaddr_notifier);
}

static void velocity_unregister_notifier(void)
{
	unregister_inetaddr_notifier(&velocity_inetaddr_notifier);
}
#else /* CONFIG_PM */

#define velocity_register_notifier()	do {} while (0)
#define velocity_unregister_notifier()	do {} while (0)

#endif /* !CONFIG_PM */
/*
 *	Internal board variants. At the moment we have only one.
 */

static struct velocity_info_tbl chip_info_table[] = {
	{CHIP_TYPE_VT6110, "VIA Networking Velocity Family Gigabit Ethernet Adapter", 256, 1, 0x00FFFFFFUL},
	{0, NULL}
};
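
/*
 * Note: per velocity_init_info() and velocity_found1(), the entry above
 * carries the chip type, a description string, the register window size
 * (io_size), the number of transmit queues and a mask of the option
 * flags the chip supports.
 */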
/*
 *	Describe the PCI device identifiers that we support in this
 *	device driver. Used for hotplug autoloading.
 */

static struct pci_device_id velocity_id_table[] __devinitdata = {
	{PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) chip_info_table},
	{0, }
};

MODULE_DEVICE_TABLE(pci, velocity_id_table);
/**
 *	get_chip_name	-	identifier to name
 *	@chip_id: chip identifier
 *
 *	Given a chip identifier return a suitable description. Returns
 *	a pointer to a static string valid while the driver is loaded.
 */

static char __devinit *get_chip_name(enum chip_type chip_id)
{
	int i;

	for (i = 0; chip_info_table[i].name != NULL; i++)
		if (chip_info_table[i].chip_id == chip_id)
			break;
	return chip_info_table[i].name;
/**
 *	velocity_remove1	-	device unplug
 *	@pdev: PCI device being removed
 *
 *	Device unload callback. Called on an unplug or on module
 *	unload for each active device that is present. Disconnects
 *	the device from the network layer and frees all the resources.
 */

static void __devexit velocity_remove1(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct velocity_info *vptr = dev->priv;
	unsigned long flags;

	spin_lock_irqsave(&velocity_dev_list_lock, flags);
	if (!list_empty(&velocity_dev_list))
		list_del(&vptr->list);
	spin_unlock_irqrestore(&velocity_dev_list_lock, flags);

	unregister_netdev(dev);
	iounmap(vptr->mac_regs);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
/**
 *	velocity_set_int_opt	-	parser for integer options
 *	@opt: pointer to option value
 *	@val: value the user requested (or -1 for default)
 *	@min: lowest value allowed
 *	@max: highest value allowed
 *	@def: default value
 *	@name: property name
 *
 *	Set an integer property in the module options. This function does
 *	all the verification and checking as well as reporting so that
 *	we don't duplicate code for each option.
 */

static void __devinit velocity_set_int_opt(int *opt, int val, int min, int max, int def, char *name, char *devname)
{
	if (val == -1)
		*opt = def;
	else if (val < min || val > max) {
		VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (%d-%d)\n",
					devname, name, min, max);
		*opt = def;
	} else {
		VELOCITY_PRT(MSG_LEVEL_INFO, KERN_INFO "%s: set value of parameter %s to %d\n",
					devname, name, val);
		*opt = val;
	}
}
/**
 *	velocity_set_bool_opt	-	parser for boolean options
 *	@opt: pointer to option value
 *	@val: value the user requested (or -1 for default)
 *	@def: default value (yes/no)
 *	@flag: numeric value to set for true.
 *	@name: property name
 *
 *	Set a boolean property in the module options. This function does
 *	all the verification and checking as well as reporting so that
 *	we don't duplicate code for each option.
 */

static void __devinit velocity_set_bool_opt(u32 *opt, int val, int def, u32 flag, char *name, char *devname)
{
	if (val == -1)
		*opt |= (def ? flag : 0);
	else if (val < 0 || val > 1) {
		printk(KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (0-1)\n",
			devname, name);
		*opt |= (def ? flag : 0);
	} else {
		printk(KERN_INFO "%s: set parameter %s to %s\n",
			devname, name, val ? "TRUE" : "FALSE");
		*opt |= (val ? flag : 0);
	}
}
/**
 *	velocity_get_options	-	set options on device
 *	@opts: option structure for the device
 *	@index: index of option to use in module options array
 *	@devname: device name
 *
 *	Turn the module and command options into a single structure
 *	for the current device.
 */

static void __devinit velocity_get_options(struct velocity_opt *opts, int index, char *devname)
{
	velocity_set_int_opt(&opts->rx_thresh, rx_thresh[index], RX_THRESH_MIN, RX_THRESH_MAX, RX_THRESH_DEF, "rx_thresh", devname);
	velocity_set_int_opt(&opts->DMA_length, DMA_length[index], DMA_LENGTH_MIN, DMA_LENGTH_MAX, DMA_LENGTH_DEF, "DMA_length", devname);
	velocity_set_int_opt(&opts->numrx, RxDescriptors[index], RX_DESC_MIN, RX_DESC_MAX, RX_DESC_DEF, "RxDescriptors", devname);
	velocity_set_int_opt(&opts->numtx, TxDescriptors[index], TX_DESC_MIN, TX_DESC_MAX, TX_DESC_DEF, "TxDescriptors", devname);
	velocity_set_int_opt(&opts->vid, VID_setting[index], VLAN_ID_MIN, VLAN_ID_MAX, VLAN_ID_DEF, "VID_setting", devname);
	velocity_set_bool_opt(&opts->flags, enable_tagging[index], TAGGING_DEF, VELOCITY_FLAGS_TAGGING, "enable_tagging", devname);
	velocity_set_bool_opt(&opts->flags, txcsum_offload[index], TX_CSUM_DEF, VELOCITY_FLAGS_TX_CSUM, "txcsum_offload", devname);
	velocity_set_int_opt(&opts->flow_cntl, flow_control[index], FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF, "flow_control", devname);
	velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname);
	velocity_set_bool_opt(&opts->flags, ValPktLen[index], VAL_PKT_LEN_DEF, VELOCITY_FLAGS_VAL_PKT_LEN, "ValPktLen", devname);
	velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname);
	velocity_set_int_opt((int *) &opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname);
	velocity_set_int_opt((int *) &opts->int_works, int_works[index], INT_WORKS_MIN, INT_WORKS_MAX, INT_WORKS_DEF, "Interrupt service works", devname);
	opts->numrx = (opts->numrx & ~3);
}
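
/*
 * Note: velocity_get_options() rounds the receive descriptor count down
 * to a multiple of four because the hardware only takes back receive
 * descriptors in blocks of four (see velocity_give_many_rx_descs()).
 */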
/**
 *	velocity_init_cam_filter	-	initialise CAM
 *	@vptr: velocity to program
 *
 *	Initialize the content addressable memory used for filters. Load
 *	appropriately according to the presence of VLAN.
 */

static void velocity_init_cam_filter(struct velocity_info *vptr)
{
	struct mac_regs __iomem *regs = vptr->mac_regs;

	/* Turn on MCFG_PQEN, turn off MCFG_RTGOPT */
	WORD_REG_BITS_SET(MCFG_PQEN, MCFG_RTGOPT, &regs->MCFG);
	WORD_REG_BITS_ON(MCFG_VIDFR, &regs->MCFG);

	/* Disable all CAMs */
	memset(vptr->vCAMmask, 0, sizeof(u8) * 8);
	memset(vptr->mCAMmask, 0, sizeof(u8) * 8);
	mac_set_cam_mask(regs, vptr->vCAMmask, VELOCITY_VLAN_ID_CAM);
	mac_set_cam_mask(regs, vptr->mCAMmask, VELOCITY_MULTICAST_CAM);

	/* Enable the first VCAM */
	if (vptr->flags & VELOCITY_FLAGS_TAGGING) {
		/* If the tagging option is enabled and the VLAN ID is not
		   zero, then turn on MCFG_RTGOPT as well */
		if (vptr->options.vid != 0)
			WORD_REG_BITS_ON(MCFG_RTGOPT, &regs->MCFG);

		mac_set_cam(regs, 0, (u8 *) &(vptr->options.vid), VELOCITY_VLAN_ID_CAM);
		vptr->vCAMmask[0] |= 1;
		mac_set_cam_mask(regs, vptr->vCAMmask, VELOCITY_VLAN_ID_CAM);
	} else {
		u16 temp = 0;

		mac_set_cam(regs, 0, (u8 *) &temp, VELOCITY_VLAN_ID_CAM);
		mac_set_cam_mask(regs, (u8 *) &temp, VELOCITY_VLAN_ID_CAM);
	}
}
/**
 *	velocity_rx_reset	-	handle a receive reset
 *	@vptr: velocity we are resetting
 *
 *	Reset the ownership and status for the receive ring side.
 *	Hand all the receive queue to the NIC.
 */

static void velocity_rx_reset(struct velocity_info *vptr)
{
	struct mac_regs __iomem *regs = vptr->mac_regs;
	int i;

	vptr->rd_dirty = vptr->rd_filled = vptr->rd_curr = 0;

	/*
	 *	Init state, all RD entries belong to the NIC
	 */
	for (i = 0; i < vptr->options.numrx; ++i)
		vptr->rd_ring[i].rdesc0.owner = OWNED_BY_NIC;

	writew(vptr->options.numrx, &regs->RBRDU);
	writel(vptr->rd_pool_dma, &regs->RDBaseLo);
	writew(0, &regs->RDIdx);
	writew(vptr->options.numrx - 1, &regs->RDCSize);
}
/**
 *	velocity_init_registers	-	initialise MAC registers
 *	@vptr: velocity to init
 *	@type: type of initialisation (hot or cold)
 *
 *	Initialise the MAC on a reset or on first set up on the
 *	hardware.
 */
530 static void velocity_init_registers(struct velocity_info
*vptr
,
531 enum velocity_init_type type
)
533 struct mac_regs __iomem
* regs
= vptr
->mac_regs
;
539 case VELOCITY_INIT_RESET
:
540 case VELOCITY_INIT_WOL
:
542 netif_stop_queue(vptr
->dev
);
		 *	Reset RX to make sure the RX descriptor pointer stays on a 4-aligned location
547 velocity_rx_reset(vptr
);
548 mac_rx_queue_run(regs
);
549 mac_rx_queue_wake(regs
);
551 mii_status
= velocity_get_opt_media_mode(vptr
);
552 if (velocity_set_media_mode(vptr
, mii_status
) != VELOCITY_LINK_CHANGE
) {
553 velocity_print_link_status(vptr
);
554 if (!(vptr
->mii_status
& VELOCITY_LINK_FAIL
))
555 netif_wake_queue(vptr
->dev
);
558 enable_flow_control_ability(vptr
);
561 writel(CR0_STOP
, ®s
->CR0Clr
);
562 writel((CR0_DPOLL
| CR0_TXON
| CR0_RXON
| CR0_STRT
),
567 case VELOCITY_INIT_COLD
:
572 velocity_soft_reset(vptr
);
575 mac_eeprom_reload(regs
);
576 for (i
= 0; i
< 6; i
++) {
577 writeb(vptr
->dev
->dev_addr
[i
], &(regs
->PAR
[i
]));
580 * clear Pre_ACPI bit.
582 BYTE_REG_BITS_OFF(CFGA_PACPI
, &(regs
->CFGA
));
583 mac_set_rx_thresh(regs
, vptr
->options
.rx_thresh
);
584 mac_set_dma_length(regs
, vptr
->options
.DMA_length
);
586 writeb(WOLCFG_SAM
| WOLCFG_SAB
, ®s
->WOLCFGSet
);
		 *	The back-off algorithm uses the original IEEE standard
590 BYTE_REG_BITS_SET(CFGB_OFSET
, (CFGB_CRANDOM
| CFGB_CAP
| CFGB_MBA
| CFGB_BAKOPT
), ®s
->CFGB
);
595 velocity_init_cam_filter(vptr
);
598 * Set packet filter: Receive directed and broadcast address
600 velocity_set_multi(vptr
->dev
);
603 * Enable MII auto-polling
605 enable_mii_autopoll(regs
);
607 vptr
->int_mask
= INT_MASK_DEF
;
609 writel(cpu_to_le32(vptr
->rd_pool_dma
), ®s
->RDBaseLo
);
610 writew(vptr
->options
.numrx
- 1, ®s
->RDCSize
);
611 mac_rx_queue_run(regs
);
612 mac_rx_queue_wake(regs
);
614 writew(vptr
->options
.numtx
- 1, ®s
->TDCSize
);
616 for (i
= 0; i
< vptr
->num_txq
; i
++) {
617 writel(cpu_to_le32(vptr
->td_pool_dma
[i
]), &(regs
->TDBaseLo
[i
]));
618 mac_tx_queue_run(regs
, i
);
621 init_flow_control_register(vptr
);
623 writel(CR0_STOP
, ®s
->CR0Clr
);
624 writel((CR0_DPOLL
| CR0_TXON
| CR0_RXON
| CR0_STRT
), ®s
->CR0Set
);
626 mii_status
= velocity_get_opt_media_mode(vptr
);
627 netif_stop_queue(vptr
->dev
);
629 mii_init(vptr
, mii_status
);
631 if (velocity_set_media_mode(vptr
, mii_status
) != VELOCITY_LINK_CHANGE
) {
632 velocity_print_link_status(vptr
);
633 if (!(vptr
->mii_status
& VELOCITY_LINK_FAIL
))
634 netif_wake_queue(vptr
->dev
);
637 enable_flow_control_ability(vptr
);
638 mac_hw_mibs_init(regs
);
639 mac_write_int_mask(vptr
->int_mask
, regs
);
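
/*
 * Note: velocity_init_registers() has two flavours. VELOCITY_INIT_RESET
 * and VELOCITY_INIT_WOL only restart the receive machinery and
 * renegotiate the media mode, while VELOCITY_INIT_COLD additionally soft
 * resets the chip, reloads the EEPROM and MAC address, reprograms the
 * CAM filters and rewrites the descriptor ring registers.
 */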
646 * velocity_soft_reset - soft reset
647 * @vptr: velocity to reset
649 * Kick off a soft reset of the velocity adapter and then poll
650 * until the reset sequence has completed before returning.
653 static int velocity_soft_reset(struct velocity_info
*vptr
)
655 struct mac_regs __iomem
* regs
= vptr
->mac_regs
;
658 writel(CR0_SFRST
, ®s
->CR0Set
);
660 for (i
= 0; i
< W_MAX_TIMEOUT
; i
++) {
662 if (!DWORD_REG_BITS_IS_ON(CR0_SFRST
, ®s
->CR0Set
))
666 if (i
== W_MAX_TIMEOUT
) {
667 writel(CR0_FORSRST
, ®s
->CR0Set
);
668 /* FIXME: PCI POSTING */
676 * velocity_found1 - set up discovered velocity card
678 * @ent: PCI device table entry that matched
680 * Configure a discovered adapter from scratch. Return a negative
681 * errno error code on failure paths.
684 static int __devinit
velocity_found1(struct pci_dev
*pdev
, const struct pci_device_id
*ent
)
686 static int first
= 1;
687 struct net_device
*dev
;
689 struct velocity_info_tbl
*info
= (struct velocity_info_tbl
*) ent
->driver_data
;
690 struct velocity_info
*vptr
;
691 struct mac_regs __iomem
* regs
;
694 if (velocity_nics
>= MAX_UNITS
) {
695 printk(KERN_NOTICE VELOCITY_NAME
": already found %d NICs.\n",
700 dev
= alloc_etherdev(sizeof(struct velocity_info
));
703 printk(KERN_ERR VELOCITY_NAME
": allocate net device failed.\n");
707 /* Chain it all together */
709 SET_MODULE_OWNER(dev
);
710 SET_NETDEV_DEV(dev
, &pdev
->dev
);
715 printk(KERN_INFO
"%s Ver. %s\n",
716 VELOCITY_FULL_DRV_NAM
, VELOCITY_VERSION
);
717 printk(KERN_INFO
"Copyright (c) 2002, 2003 VIA Networking Technologies, Inc.\n");
718 printk(KERN_INFO
"Copyright (c) 2004 Red Hat Inc.\n");
722 velocity_init_info(pdev
, vptr
, info
);
726 dev
->irq
= pdev
->irq
;
728 ret
= pci_enable_device(pdev
);
732 ret
= velocity_get_pci_info(vptr
, pdev
);
734 printk(KERN_ERR VELOCITY_NAME
": Failed to find PCI device.\n");
738 ret
= pci_request_regions(pdev
, VELOCITY_NAME
);
740 printk(KERN_ERR VELOCITY_NAME
": Failed to find PCI device.\n");
744 regs
= ioremap(vptr
->memaddr
, vptr
->io_size
);
747 goto err_release_res
;
750 vptr
->mac_regs
= regs
;
754 dev
->base_addr
= vptr
->ioaddr
;
756 for (i
= 0; i
< 6; i
++)
757 dev
->dev_addr
[i
] = readb(®s
->PAR
[i
]);
760 velocity_get_options(&vptr
->options
, velocity_nics
, dev
->name
);
	 * Mask out the options that cannot be set on this chip
766 vptr
->options
.flags
&= info
->flags
;
	 * Enable the chip's specified capabilities
772 vptr
->flags
= vptr
->options
.flags
| (info
->flags
& 0xFF000000UL
);
774 vptr
->wol_opts
= vptr
->options
.wol_opts
;
775 vptr
->flags
|= VELOCITY_FLAGS_WOL_ENABLED
;
777 vptr
->phy_id
= MII_GET_PHY_ID(vptr
->mac_regs
);
779 dev
->irq
= pdev
->irq
;
780 dev
->open
= velocity_open
;
781 dev
->hard_start_xmit
= velocity_xmit
;
782 dev
->stop
= velocity_close
;
783 dev
->get_stats
= velocity_get_stats
;
784 dev
->set_multicast_list
= velocity_set_multi
;
785 dev
->do_ioctl
= velocity_ioctl
;
786 dev
->ethtool_ops
= &velocity_ethtool_ops
;
787 dev
->change_mtu
= velocity_change_mtu
;
788 #ifdef VELOCITY_ZERO_COPY_SUPPORT
789 dev
->features
|= NETIF_F_SG
;
792 if (vptr
->flags
& VELOCITY_FLAGS_TX_CSUM
) {
793 dev
->features
|= NETIF_F_IP_CSUM
;
796 ret
= register_netdev(dev
);
800 if (velocity_get_link(dev
))
801 netif_carrier_off(dev
);
803 velocity_print_info(vptr
);
804 pci_set_drvdata(pdev
, dev
);
806 /* and leave the chip powered down */
808 pci_set_power_state(pdev
, PCI_D3hot
);
813 spin_lock_irqsave(&velocity_dev_list_lock
, flags
);
814 list_add(&vptr
->list
, &velocity_dev_list
);
815 spin_unlock_irqrestore(&velocity_dev_list_lock
, flags
);
825 pci_release_regions(pdev
);
827 pci_disable_device(pdev
);
834 * velocity_print_info - per driver data
837 * Print per driver data as the kernel driver finds Velocity
841 static void __devinit
velocity_print_info(struct velocity_info
*vptr
)
843 struct net_device
*dev
= vptr
->dev
;
845 printk(KERN_INFO
"%s: %s\n", dev
->name
, get_chip_name(vptr
->chip_id
));
846 printk(KERN_INFO
"%s: Ethernet Address: %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
848 dev
->dev_addr
[0], dev
->dev_addr
[1], dev
->dev_addr
[2],
849 dev
->dev_addr
[3], dev
->dev_addr
[4], dev
->dev_addr
[5]);
853 * velocity_init_info - init private data
855 * @vptr: Velocity info
 *	Set up the initial velocity_info struct for the device that has been
 *	discovered.
862 static void __devinit
velocity_init_info(struct pci_dev
*pdev
, struct velocity_info
*vptr
, struct velocity_info_tbl
*info
)
864 memset(vptr
, 0, sizeof(struct velocity_info
));
867 vptr
->chip_id
= info
->chip_id
;
868 vptr
->io_size
= info
->io_size
;
869 vptr
->num_txq
= info
->txqueue
;
870 vptr
->multicast_limit
= MCAM_SIZE
;
871 spin_lock_init(&vptr
->lock
);
872 INIT_LIST_HEAD(&vptr
->list
);
876 * velocity_get_pci_info - retrieve PCI info for device
877 * @vptr: velocity device
878 * @pdev: PCI device it matches
880 * Retrieve the PCI configuration space data that interests us from
881 * the kernel PCI layer
884 static int __devinit
velocity_get_pci_info(struct velocity_info
*vptr
, struct pci_dev
*pdev
)
887 if(pci_read_config_byte(pdev
, PCI_REVISION_ID
, &vptr
->rev_id
) < 0)
890 pci_set_master(pdev
);
892 vptr
->ioaddr
= pci_resource_start(pdev
, 0);
893 vptr
->memaddr
= pci_resource_start(pdev
, 1);
895 if(!(pci_resource_flags(pdev
, 0) & IORESOURCE_IO
))
897 printk(KERN_ERR
"%s: region #0 is not an I/O resource, aborting.\n",
902 if((pci_resource_flags(pdev
, 1) & IORESOURCE_IO
))
904 printk(KERN_ERR
"%s: region #1 is an I/O resource, aborting.\n",
909 if(pci_resource_len(pdev
, 1) < 256)
911 printk(KERN_ERR
"%s: region #1 is too small.\n",
921 * velocity_init_rings - set up DMA rings
922 * @vptr: Velocity to set up
924 * Allocate PCI mapped DMA rings for the receive and transmit layer
928 static int velocity_init_rings(struct velocity_info
*vptr
)
	 * Allocate all RD/TD rings in a single pool
940 psize
= vptr
->options
.numrx
* sizeof(struct rx_desc
) +
941 vptr
->options
.numtx
* sizeof(struct tx_desc
) * vptr
->num_txq
;
	 * pci_alloc_consistent() fulfills the requirement for 64 byte
	 * alignment.
947 pool
= pci_alloc_consistent(vptr
->pdev
, psize
, &pool_dma
);
950 printk(KERN_ERR
"%s : DMA memory allocation failed.\n",
955 memset(pool
, 0, psize
);
957 vptr
->rd_ring
= (struct rx_desc
*) pool
;
959 vptr
->rd_pool_dma
= pool_dma
;
961 tsize
= vptr
->options
.numtx
* PKT_BUF_SZ
* vptr
->num_txq
;
962 vptr
->tx_bufs
= pci_alloc_consistent(vptr
->pdev
, tsize
,
965 if (vptr
->tx_bufs
== NULL
) {
966 printk(KERN_ERR
"%s: DMA memory allocation failed.\n",
968 pci_free_consistent(vptr
->pdev
, psize
, pool
, pool_dma
);
972 memset(vptr
->tx_bufs
, 0, vptr
->options
.numtx
* PKT_BUF_SZ
* vptr
->num_txq
);
974 i
= vptr
->options
.numrx
* sizeof(struct rx_desc
);
977 for (i
= 0; i
< vptr
->num_txq
; i
++) {
978 int offset
= vptr
->options
.numtx
* sizeof(struct tx_desc
);
980 vptr
->td_pool_dma
[i
] = pool_dma
;
981 vptr
->td_rings
[i
] = (struct tx_desc
*) pool
;
989 * velocity_free_rings - free PCI ring pointers
990 * @vptr: Velocity to free from
992 * Clean up the PCI ring buffers allocated to this velocity.
995 static void velocity_free_rings(struct velocity_info
*vptr
)
999 size
= vptr
->options
.numrx
* sizeof(struct rx_desc
) +
1000 vptr
->options
.numtx
* sizeof(struct tx_desc
) * vptr
->num_txq
;
1002 pci_free_consistent(vptr
->pdev
, size
, vptr
->rd_ring
, vptr
->rd_pool_dma
);
1004 size
= vptr
->options
.numtx
* PKT_BUF_SZ
* vptr
->num_txq
;
1006 pci_free_consistent(vptr
->pdev
, size
, vptr
->tx_bufs
, vptr
->tx_bufs_dma
);
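
/*
 * Note: the hardware only accepts returned receive descriptors in groups
 * of four (see the programming guide reference below), so
 * velocity_give_many_rx_descs() holds back rd_filled & 3 "unusable"
 * descriptors and reports only multiples of four to the RBRDU register.
 */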
1009 static inline void velocity_give_many_rx_descs(struct velocity_info
*vptr
)
1011 struct mac_regs __iomem
*regs
= vptr
->mac_regs
;
1012 int avail
, dirty
, unusable
;
1015 * RD number must be equal to 4X per hardware spec
1016 * (programming guide rev 1.20, p.13)
1018 if (vptr
->rd_filled
< 4)
1023 unusable
= vptr
->rd_filled
& 0x0003;
1024 dirty
= vptr
->rd_dirty
- unusable
;
1025 for (avail
= vptr
->rd_filled
& 0xfffc; avail
; avail
--) {
1026 dirty
= (dirty
> 0) ? dirty
- 1 : vptr
->options
.numrx
- 1;
1027 vptr
->rd_ring
[dirty
].rdesc0
.owner
= OWNED_BY_NIC
;
1030 writew(vptr
->rd_filled
& 0xfffc, ®s
->RBRDU
);
1031 vptr
->rd_filled
= unusable
;
1034 static int velocity_rx_refill(struct velocity_info
*vptr
)
1036 int dirty
= vptr
->rd_dirty
, done
= 0, ret
= 0;
1039 struct rx_desc
*rd
= vptr
->rd_ring
+ dirty
;
1041 /* Fine for an all zero Rx desc at init time as well */
1042 if (rd
->rdesc0
.owner
== OWNED_BY_NIC
)
1045 if (!vptr
->rd_info
[dirty
].skb
) {
1046 ret
= velocity_alloc_rx_buf(vptr
, dirty
);
1051 dirty
= (dirty
< vptr
->options
.numrx
- 1) ? dirty
+ 1 : 0;
1052 } while (dirty
!= vptr
->rd_curr
);
1055 vptr
->rd_dirty
= dirty
;
1056 vptr
->rd_filled
+= done
;
1057 velocity_give_many_rx_descs(vptr
);
1064 * velocity_init_rd_ring - set up receive ring
1065 * @vptr: velocity to configure
1067 * Allocate and set up the receive buffers for each ring slot and
1068 * assign them to the network adapter.
1071 static int velocity_init_rd_ring(struct velocity_info
*vptr
)
1074 unsigned int rsize
= sizeof(struct velocity_rd_info
) *
1075 vptr
->options
.numrx
;
1077 vptr
->rd_info
= kmalloc(rsize
, GFP_KERNEL
);
1078 if(vptr
->rd_info
== NULL
)
1080 memset(vptr
->rd_info
, 0, rsize
);
1082 vptr
->rd_filled
= vptr
->rd_dirty
= vptr
->rd_curr
= 0;
1084 ret
= velocity_rx_refill(vptr
);
1086 VELOCITY_PRT(MSG_LEVEL_ERR
, KERN_ERR
1087 "%s: failed to allocate RX buffer.\n", vptr
->dev
->name
);
1088 velocity_free_rd_ring(vptr
);
1095 * velocity_free_rd_ring - free receive ring
1096 * @vptr: velocity to clean up
1098 * Free the receive buffers for each ring slot and any
1099 * attached socket buffers that need to go away.
1102 static void velocity_free_rd_ring(struct velocity_info
*vptr
)
1106 if (vptr
->rd_info
== NULL
)
1109 for (i
= 0; i
< vptr
->options
.numrx
; i
++) {
1110 struct velocity_rd_info
*rd_info
= &(vptr
->rd_info
[i
]);
1111 struct rx_desc
*rd
= vptr
->rd_ring
+ i
;
1113 memset(rd
, 0, sizeof(*rd
));
1117 pci_unmap_single(vptr
->pdev
, rd_info
->skb_dma
, vptr
->rx_buf_sz
,
1118 PCI_DMA_FROMDEVICE
);
1119 rd_info
->skb_dma
= (dma_addr_t
) NULL
;
1121 dev_kfree_skb(rd_info
->skb
);
1122 rd_info
->skb
= NULL
;
1125 kfree(vptr
->rd_info
);
1126 vptr
->rd_info
= NULL
;
1130 * velocity_init_td_ring - set up transmit ring
1133 * Set up the transmit ring and chain the ring pointers together.
1134 * Returns zero on success or a negative posix errno code for
1138 static int velocity_init_td_ring(struct velocity_info
*vptr
)
1143 struct velocity_td_info
*td_info
;
1144 unsigned int tsize
= sizeof(struct velocity_td_info
) *
1145 vptr
->options
.numtx
;
1147 /* Init the TD ring entries */
1148 for (j
= 0; j
< vptr
->num_txq
; j
++) {
1149 curr
= vptr
->td_pool_dma
[j
];
1151 vptr
->td_infos
[j
] = kmalloc(tsize
, GFP_KERNEL
);
1152 if(vptr
->td_infos
[j
] == NULL
)
1155 kfree(vptr
->td_infos
[j
]);
1158 memset(vptr
->td_infos
[j
], 0, tsize
);
1160 for (i
= 0; i
< vptr
->options
.numtx
; i
++, curr
+= sizeof(struct tx_desc
)) {
1161 td
= &(vptr
->td_rings
[j
][i
]);
1162 td_info
= &(vptr
->td_infos
[j
][i
]);
1163 td_info
->buf
= vptr
->tx_bufs
+
1164 (j
* vptr
->options
.numtx
+ i
) * PKT_BUF_SZ
;
1165 td_info
->buf_dma
= vptr
->tx_bufs_dma
+
1166 (j
* vptr
->options
.numtx
+ i
) * PKT_BUF_SZ
;
1168 vptr
->td_tail
[j
] = vptr
->td_curr
[j
] = vptr
->td_used
[j
] = 0;
1174 * FIXME: could we merge this with velocity_free_tx_buf ?
1177 static void velocity_free_td_ring_entry(struct velocity_info
*vptr
,
1180 struct velocity_td_info
* td_info
= &(vptr
->td_infos
[q
][n
]);
1183 if (td_info
== NULL
)
1187 for (i
= 0; i
< td_info
->nskb_dma
; i
++)
1189 if (td_info
->skb_dma
[i
]) {
1190 pci_unmap_single(vptr
->pdev
, td_info
->skb_dma
[i
],
1191 td_info
->skb
->len
, PCI_DMA_TODEVICE
);
1192 td_info
->skb_dma
[i
] = (dma_addr_t
) NULL
;
1195 dev_kfree_skb(td_info
->skb
);
1196 td_info
->skb
= NULL
;
1201 * velocity_free_td_ring - free td ring
1204 * Free up the transmit ring for this particular velocity adapter.
1205 * We free the ring contents but not the ring itself.
1208 static void velocity_free_td_ring(struct velocity_info
*vptr
)
1212 for (j
= 0; j
< vptr
->num_txq
; j
++) {
1213 if (vptr
->td_infos
[j
] == NULL
)
1215 for (i
= 0; i
< vptr
->options
.numtx
; i
++) {
1216 velocity_free_td_ring_entry(vptr
, j
, i
);
1219 kfree(vptr
->td_infos
[j
]);
1220 vptr
->td_infos
[j
] = NULL
;
1225 * velocity_rx_srv - service RX interrupt
1227 * @status: adapter status (unused)
1229 * Walk the receive ring of the velocity adapter and remove
1230 * any received packets from the receive queue. Hand the ring
1231 * slots back to the adapter for reuse.
1234 static int velocity_rx_srv(struct velocity_info
*vptr
, int status
)
1236 struct net_device_stats
*stats
= &vptr
->stats
;
1237 int rd_curr
= vptr
->rd_curr
;
1241 struct rx_desc
*rd
= vptr
->rd_ring
+ rd_curr
;
1243 if (!vptr
->rd_info
[rd_curr
].skb
)
1246 if (rd
->rdesc0
.owner
== OWNED_BY_NIC
)
		 * Don't drop frames with CE or RL errors even though RXOK is off
1254 if ((rd
->rdesc0
.RSR
& RSR_RXOK
) || (!(rd
->rdesc0
.RSR
& RSR_RXOK
) && (rd
->rdesc0
.RSR
& (RSR_CE
| RSR_RL
)))) {
1255 if (velocity_receive_frame(vptr
, rd_curr
) < 0)
1256 stats
->rx_dropped
++;
1258 if (rd
->rdesc0
.RSR
& RSR_CRC
)
1259 stats
->rx_crc_errors
++;
1260 if (rd
->rdesc0
.RSR
& RSR_FAE
)
1261 stats
->rx_frame_errors
++;
1263 stats
->rx_dropped
++;
1268 vptr
->dev
->last_rx
= jiffies
;
1271 if (rd_curr
>= vptr
->options
.numrx
)
1273 } while (++works
<= 15);
1275 vptr
->rd_curr
= rd_curr
;
1277 if (works
> 0 && velocity_rx_refill(vptr
) < 0) {
1278 VELOCITY_PRT(MSG_LEVEL_ERR
, KERN_ERR
1279 "%s: rx buf allocation failure\n", vptr
->dev
->name
);
1287 * velocity_rx_csum - checksum process
1288 * @rd: receive packet descriptor
1289 * @skb: network layer packet buffer
1291 * Process the status bits for the received packet and determine
1292 * if the checksum was computed and verified by the hardware
1295 static inline void velocity_rx_csum(struct rx_desc
*rd
, struct sk_buff
*skb
)
1297 skb
->ip_summed
= CHECKSUM_NONE
;
1299 if (rd
->rdesc1
.CSM
& CSM_IPKT
) {
1300 if (rd
->rdesc1
.CSM
& CSM_IPOK
) {
1301 if ((rd
->rdesc1
.CSM
& CSM_TCPKT
) ||
1302 (rd
->rdesc1
.CSM
& CSM_UDPKT
)) {
1303 if (!(rd
->rdesc1
.CSM
& CSM_TUPOK
)) {
1307 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1313 * velocity_rx_copy - in place Rx copy for small packets
1314 * @rx_skb: network layer packet buffer candidate
1315 * @pkt_size: received data size
1316 * @rd: receive packet descriptor
1317 * @dev: network device
1319 * Replace the current skb that is scheduled for Rx processing by a
 *	shorter, immediately allocated skb, if the received packet is small
1321 * enough. This function returns a negative value if the received
1322 * packet is too big or if memory is exhausted.
1324 static inline int velocity_rx_copy(struct sk_buff
**rx_skb
, int pkt_size
,
1325 struct velocity_info
*vptr
)
1329 if (pkt_size
< rx_copybreak
) {
1330 struct sk_buff
*new_skb
;
1332 new_skb
= dev_alloc_skb(pkt_size
+ 2);
1334 new_skb
->dev
= vptr
->dev
;
1335 new_skb
->ip_summed
= rx_skb
[0]->ip_summed
;
1337 if (vptr
->flags
& VELOCITY_FLAGS_IP_ALIGN
)
1338 skb_reserve(new_skb
, 2);
1340 memcpy(new_skb
->data
, rx_skb
[0]->data
, pkt_size
);
1350 * velocity_iph_realign - IP header alignment
1351 * @vptr: velocity we are handling
1352 * @skb: network layer packet buffer
1353 * @pkt_size: received data size
 *	Align the IP header on a 2 byte boundary. This behavior can be
1356 * configured by the user.
1358 static inline void velocity_iph_realign(struct velocity_info
*vptr
,
1359 struct sk_buff
*skb
, int pkt_size
)
1361 /* FIXME - memmove ? */
1362 if (vptr
->flags
& VELOCITY_FLAGS_IP_ALIGN
) {
1365 for (i
= pkt_size
; i
>= 0; i
--)
1366 *(skb
->data
+ i
+ 2) = *(skb
->data
+ i
);
1367 skb_reserve(skb
, 2);
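
/*
 * Note: velocity_iph_realign() above shifts the received data up by two
 * bytes so that the 14 byte Ethernet header leaves the IP header DWORD
 * aligned, which is what the IP_byte_align module option asks for.
 */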
1372 * velocity_receive_frame - received packet processor
1373 * @vptr: velocity we are handling
1376 * A packet has arrived. We process the packet and if appropriate
1377 * pass the frame up the network stack
1380 static int velocity_receive_frame(struct velocity_info
*vptr
, int idx
)
1382 void (*pci_action
)(struct pci_dev
*, dma_addr_t
, size_t, int);
1383 struct net_device_stats
*stats
= &vptr
->stats
;
1384 struct velocity_rd_info
*rd_info
= &(vptr
->rd_info
[idx
]);
1385 struct rx_desc
*rd
= &(vptr
->rd_ring
[idx
]);
1386 int pkt_len
= rd
->rdesc0
.len
;
1387 struct sk_buff
*skb
;
1389 if (rd
->rdesc0
.RSR
& (RSR_STP
| RSR_EDP
)) {
		VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame spans multiple RDs.\n", vptr->dev->name);
1391 stats
->rx_length_errors
++;
1395 if (rd
->rdesc0
.RSR
& RSR_MAR
)
1396 vptr
->stats
.multicast
++;
1399 skb
->dev
= vptr
->dev
;
1401 pci_dma_sync_single_for_cpu(vptr
->pdev
, rd_info
->skb_dma
,
1402 vptr
->rx_buf_sz
, PCI_DMA_FROMDEVICE
);
1405 * Drop frame not meeting IEEE 802.3
1408 if (vptr
->flags
& VELOCITY_FLAGS_VAL_PKT_LEN
) {
1409 if (rd
->rdesc0
.RSR
& RSR_RL
) {
1410 stats
->rx_length_errors
++;
1415 pci_action
= pci_dma_sync_single_for_device
;
1417 velocity_rx_csum(rd
, skb
);
1419 if (velocity_rx_copy(&skb
, pkt_len
, vptr
) < 0) {
1420 velocity_iph_realign(vptr
, skb
, pkt_len
);
1421 pci_action
= pci_unmap_single
;
1422 rd_info
->skb
= NULL
;
1425 pci_action(vptr
->pdev
, rd_info
->skb_dma
, vptr
->rx_buf_sz
,
1426 PCI_DMA_FROMDEVICE
);
1428 skb_put(skb
, pkt_len
- 4);
1429 skb
->protocol
= eth_type_trans(skb
, skb
->dev
);
1431 stats
->rx_bytes
+= pkt_len
;
1438 * velocity_alloc_rx_buf - allocate aligned receive buffer
1442 * Allocate a new full sized buffer for the reception of a frame and
1443 * map it into PCI space for the hardware to use. The hardware
1444 * requires *64* byte alignment of the buffer which makes life
1445 * less fun than would be ideal.
1448 static int velocity_alloc_rx_buf(struct velocity_info
*vptr
, int idx
)
1450 struct rx_desc
*rd
= &(vptr
->rd_ring
[idx
]);
1451 struct velocity_rd_info
*rd_info
= &(vptr
->rd_info
[idx
]);
1453 rd_info
->skb
= dev_alloc_skb(vptr
->rx_buf_sz
+ 64);
1454 if (rd_info
->skb
== NULL
)
	 *	Do the gymnastics to get the buffer head for data at
	 *	a 64 byte boundary.
1461 skb_reserve(rd_info
->skb
, (unsigned long) rd_info
->skb
->data
& 63);
1462 rd_info
->skb
->dev
= vptr
->dev
;
1463 rd_info
->skb_dma
= pci_map_single(vptr
->pdev
, rd_info
->skb
->data
, vptr
->rx_buf_sz
, PCI_DMA_FROMDEVICE
);
1466 * Fill in the descriptor to match
1469 *((u32
*) & (rd
->rdesc0
)) = 0;
1470 rd
->len
= cpu_to_le32(vptr
->rx_buf_sz
);
1472 rd
->pa_low
= cpu_to_le32(rd_info
->skb_dma
);
 *	velocity_tx_srv	-	transmit interrupt service
 *
 *	Scan the queues looking for transmitted packets that
 *	we can complete and clean up. Update any statistics as
 *	necessary.
1487 static int velocity_tx_srv(struct velocity_info
*vptr
, u32 status
)
1494 struct velocity_td_info
*tdinfo
;
1495 struct net_device_stats
*stats
= &vptr
->stats
;
1497 for (qnum
= 0; qnum
< vptr
->num_txq
; qnum
++) {
1498 for (idx
= vptr
->td_tail
[qnum
]; vptr
->td_used
[qnum
] > 0;
1499 idx
= (idx
+ 1) % vptr
->options
.numtx
) {
1504 td
= &(vptr
->td_rings
[qnum
][idx
]);
1505 tdinfo
= &(vptr
->td_infos
[qnum
][idx
]);
1507 if (td
->tdesc0
.owner
== OWNED_BY_NIC
)
1513 if (td
->tdesc0
.TSR
& TSR0_TERR
) {
1515 stats
->tx_dropped
++;
1516 if (td
->tdesc0
.TSR
& TSR0_CDH
)
1517 stats
->tx_heartbeat_errors
++;
1518 if (td
->tdesc0
.TSR
& TSR0_CRS
)
1519 stats
->tx_carrier_errors
++;
1520 if (td
->tdesc0
.TSR
& TSR0_ABT
)
1521 stats
->tx_aborted_errors
++;
1522 if (td
->tdesc0
.TSR
& TSR0_OWC
)
1523 stats
->tx_window_errors
++;
1525 stats
->tx_packets
++;
1526 stats
->tx_bytes
+= tdinfo
->skb
->len
;
1528 velocity_free_tx_buf(vptr
, tdinfo
);
1529 vptr
->td_used
[qnum
]--;
1531 vptr
->td_tail
[qnum
] = idx
;
1533 if (AVAIL_TD(vptr
, qnum
) < 1) {
1538 * Look to see if we should kick the transmit network
1539 * layer for more work.
1541 if (netif_queue_stopped(vptr
->dev
) && (full
== 0)
1542 && (!(vptr
->mii_status
& VELOCITY_LINK_FAIL
))) {
1543 netif_wake_queue(vptr
->dev
);
1549 * velocity_print_link_status - link status reporting
1550 * @vptr: velocity to report on
1552 * Turn the link status of the velocity card into a kernel log
1553 * description of the new link state, detailing speed and duplex
1557 static void velocity_print_link_status(struct velocity_info
*vptr
)
1560 if (vptr
->mii_status
& VELOCITY_LINK_FAIL
) {
1561 VELOCITY_PRT(MSG_LEVEL_INFO
, KERN_NOTICE
"%s: failed to detect cable link\n", vptr
->dev
->name
);
1562 } else if (vptr
->options
.spd_dpx
== SPD_DPX_AUTO
) {
		VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link auto-negotiation", vptr->dev->name);
1565 if (vptr
->mii_status
& VELOCITY_SPEED_1000
)
1566 VELOCITY_PRT(MSG_LEVEL_INFO
, " speed 1000M bps");
1567 else if (vptr
->mii_status
& VELOCITY_SPEED_100
)
1568 VELOCITY_PRT(MSG_LEVEL_INFO
, " speed 100M bps");
1570 VELOCITY_PRT(MSG_LEVEL_INFO
, " speed 10M bps");
1572 if (vptr
->mii_status
& VELOCITY_DUPLEX_FULL
)
1573 VELOCITY_PRT(MSG_LEVEL_INFO
, " full duplex\n");
1575 VELOCITY_PRT(MSG_LEVEL_INFO
, " half duplex\n");
1577 VELOCITY_PRT(MSG_LEVEL_INFO
, KERN_NOTICE
"%s: Link forced", vptr
->dev
->name
);
1578 switch (vptr
->options
.spd_dpx
) {
1579 case SPD_DPX_100_HALF
:
1580 VELOCITY_PRT(MSG_LEVEL_INFO
, " speed 100M bps half duplex\n");
1582 case SPD_DPX_100_FULL
:
1583 VELOCITY_PRT(MSG_LEVEL_INFO
, " speed 100M bps full duplex\n");
1585 case SPD_DPX_10_HALF
:
1586 VELOCITY_PRT(MSG_LEVEL_INFO
, " speed 10M bps half duplex\n");
1588 case SPD_DPX_10_FULL
:
1589 VELOCITY_PRT(MSG_LEVEL_INFO
, " speed 10M bps full duplex\n");
1598 * velocity_error - handle error from controller
1600 * @status: card status
1602 * Process an error report from the hardware and attempt to recover
1603 * the card itself. At the moment we cannot recover from some
1604 * theoretically impossible errors but this could be fixed using
1605 * the pci_device_failed logic to bounce the hardware
1609 static void velocity_error(struct velocity_info
*vptr
, int status
)
1612 if (status
& ISR_TXSTLI
) {
1613 struct mac_regs __iomem
* regs
= vptr
->mac_regs
;
		printk(KERN_ERR "TD structure error TDindex=%hx\n", readw(&regs->TDIdx[0]));
1616 BYTE_REG_BITS_ON(TXESR_TDSTR
, ®s
->TXESR
);
1617 writew(TRDCSR_RUN
, ®s
->TDCSRClr
);
1618 netif_stop_queue(vptr
->dev
);
1620 /* FIXME: port over the pci_device_failed code and use it
1624 if (status
& ISR_SRCI
) {
1625 struct mac_regs __iomem
* regs
= vptr
->mac_regs
;
1628 if (vptr
->options
.spd_dpx
== SPD_DPX_AUTO
) {
1629 vptr
->mii_status
= check_connection_type(regs
);
1632 * If it is a 3119, disable frame bursting in
1633 * halfduplex mode and enable it in fullduplex
1636 if (vptr
->rev_id
< REV_ID_VT3216_A0
) {
			if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1638 BYTE_REG_BITS_ON(TCR_TB2BDIS
, ®s
->TCR
);
1640 BYTE_REG_BITS_OFF(TCR_TB2BDIS
, ®s
->TCR
);
1643 * Only enable CD heart beat counter in 10HD mode
1645 if (!(vptr
->mii_status
& VELOCITY_DUPLEX_FULL
) && (vptr
->mii_status
& VELOCITY_SPEED_10
)) {
1646 BYTE_REG_BITS_OFF(TESTCFG_HBDIS
, ®s
->TESTCFG
);
1648 BYTE_REG_BITS_ON(TESTCFG_HBDIS
, ®s
->TESTCFG
);
1652 * Get link status from PHYSR0
1654 linked
= readb(®s
->PHYSR0
) & PHYSR0_LINKGD
;
1657 vptr
->mii_status
&= ~VELOCITY_LINK_FAIL
;
1658 netif_carrier_on(vptr
->dev
);
1660 vptr
->mii_status
|= VELOCITY_LINK_FAIL
;
1661 netif_carrier_off(vptr
->dev
);
1664 velocity_print_link_status(vptr
);
1665 enable_flow_control_ability(vptr
);
1668 * Re-enable auto-polling because SRCI will disable
1672 enable_mii_autopoll(regs
);
1674 if (vptr
->mii_status
& VELOCITY_LINK_FAIL
)
1675 netif_stop_queue(vptr
->dev
);
1677 netif_wake_queue(vptr
->dev
);
1680 if (status
& ISR_MIBFI
)
1681 velocity_update_hw_mibs(vptr
);
1682 if (status
& ISR_LSTEI
)
1683 mac_rx_queue_wake(vptr
->mac_regs
);
1687 * velocity_free_tx_buf - free transmit buffer
 *	Release a transmit buffer. If the buffer was preallocated then
1692 * recycle it, if not then unmap the buffer.
1695 static void velocity_free_tx_buf(struct velocity_info
*vptr
, struct velocity_td_info
*tdinfo
)
1697 struct sk_buff
*skb
= tdinfo
->skb
;
1701 * Don't unmap the pre-allocated tx_bufs
1703 if (tdinfo
->skb_dma
&& (tdinfo
->skb_dma
[0] != tdinfo
->buf_dma
)) {
1705 for (i
= 0; i
< tdinfo
->nskb_dma
; i
++) {
1706 #ifdef VELOCITY_ZERO_COPY_SUPPORT
1707 pci_unmap_single(vptr
->pdev
, tdinfo
->skb_dma
[i
], td
->tdesc1
.len
, PCI_DMA_TODEVICE
);
1709 pci_unmap_single(vptr
->pdev
, tdinfo
->skb_dma
[i
], skb
->len
, PCI_DMA_TODEVICE
);
1711 tdinfo
->skb_dma
[i
] = 0;
1714 dev_kfree_skb_irq(skb
);
1719 * velocity_open - interface activation callback
1720 * @dev: network layer device to open
1722 * Called when the network layer brings the interface up. Returns
1723 * a negative posix error code on failure, or zero on success.
1725 * All the ring allocation and set up is done on open for this
1726 * adapter to minimise memory usage when inactive
1729 static int velocity_open(struct net_device
*dev
)
1731 struct velocity_info
*vptr
= dev
->priv
;
1734 vptr
->rx_buf_sz
= (dev
->mtu
<= 1504 ? PKT_BUF_SZ
: dev
->mtu
+ 32);
1736 ret
= velocity_init_rings(vptr
);
1740 ret
= velocity_init_rd_ring(vptr
);
1742 goto err_free_desc_rings
;
1744 ret
= velocity_init_td_ring(vptr
);
1746 goto err_free_rd_ring
;
1748 /* Ensure chip is running */
1749 pci_set_power_state(vptr
->pdev
, PCI_D0
);
1751 velocity_init_registers(vptr
, VELOCITY_INIT_COLD
);
1753 ret
= request_irq(vptr
->pdev
->irq
, &velocity_intr
, IRQF_SHARED
,
1756 /* Power down the chip */
1757 pci_set_power_state(vptr
->pdev
, PCI_D3hot
);
1758 goto err_free_td_ring
;
1761 mac_enable_int(vptr
->mac_regs
);
1762 netif_start_queue(dev
);
1763 vptr
->flags
|= VELOCITY_FLAGS_OPENED
;
1768 velocity_free_td_ring(vptr
);
1770 velocity_free_rd_ring(vptr
);
1771 err_free_desc_rings
:
1772 velocity_free_rings(vptr
);
1777 * velocity_change_mtu - MTU change callback
1778 * @dev: network device
1779 * @new_mtu: desired MTU
1781 * Handle requests from the networking layer for MTU change on
1782 * this interface. It gets called on a change by the network layer.
1783 * Return zero for success or negative posix error code.
1786 static int velocity_change_mtu(struct net_device
*dev
, int new_mtu
)
1788 struct velocity_info
*vptr
= dev
->priv
;
1789 unsigned long flags
;
1790 int oldmtu
= dev
->mtu
;
1793 if ((new_mtu
< VELOCITY_MIN_MTU
) || new_mtu
> (VELOCITY_MAX_MTU
)) {
1794 VELOCITY_PRT(MSG_LEVEL_ERR
, KERN_NOTICE
"%s: Invalid MTU.\n",
1799 if (new_mtu
!= oldmtu
) {
1800 spin_lock_irqsave(&vptr
->lock
, flags
);
1802 netif_stop_queue(dev
);
1803 velocity_shutdown(vptr
);
1805 velocity_free_td_ring(vptr
);
1806 velocity_free_rd_ring(vptr
);
1810 vptr
->rx_buf_sz
= 9 * 1024;
1811 else if (new_mtu
> 4096)
1812 vptr
->rx_buf_sz
= 8192;
1814 vptr
->rx_buf_sz
= 4 * 1024;
1816 ret
= velocity_init_rd_ring(vptr
);
1820 ret
= velocity_init_td_ring(vptr
);
1824 velocity_init_registers(vptr
, VELOCITY_INIT_COLD
);
1826 mac_enable_int(vptr
->mac_regs
);
1827 netif_start_queue(dev
);
1829 spin_unlock_irqrestore(&vptr
->lock
, flags
);
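
	/*
	 * Note: the receive buffer size is stepped up with the new MTU
	 * (4K, 8K or 9K buffers), apparently so a full frame fits in a
	 * single receive descriptor (frames spanning multiple RDs are
	 * dropped in velocity_receive_frame()), and the rings are rebuilt
	 * with the new size while the device is quiesced.
	 */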
1836 * velocity_shutdown - shut down the chip
1837 * @vptr: velocity to deactivate
1839 * Shuts down the internal operations of the velocity and
1840 * disables interrupts, autopolling, transmit and receive
1843 static void velocity_shutdown(struct velocity_info
*vptr
)
1845 struct mac_regs __iomem
* regs
= vptr
->mac_regs
;
1846 mac_disable_int(regs
);
1847 writel(CR0_STOP
, ®s
->CR0Set
);
1848 writew(0xFFFF, ®s
->TDCSRClr
);
1849 writeb(0xFF, ®s
->RDCSRClr
);
1850 safe_disable_mii_autopoll(regs
);
1851 mac_clear_isr(regs
);
1855 * velocity_close - close adapter callback
1856 * @dev: network device
1858 * Callback from the network layer when the velocity is being
1859 * deactivated by the network layer
1862 static int velocity_close(struct net_device
*dev
)
1864 struct velocity_info
*vptr
= dev
->priv
;
1866 netif_stop_queue(dev
);
1867 velocity_shutdown(vptr
);
1869 if (vptr
->flags
& VELOCITY_FLAGS_WOL_ENABLED
)
1870 velocity_get_ip(vptr
);
1872 free_irq(dev
->irq
, dev
);
1874 /* Power down the chip */
1875 pci_set_power_state(vptr
->pdev
, PCI_D3hot
);
1877 /* Free the resources */
1878 velocity_free_td_ring(vptr
);
1879 velocity_free_rd_ring(vptr
);
1880 velocity_free_rings(vptr
);
1882 vptr
->flags
&= (~VELOCITY_FLAGS_OPENED
);
1887 * velocity_xmit - transmit packet callback
1888 * @skb: buffer to transmit
1889 * @dev: network device
 *	Called by the network layer to request that a packet be queued to
1892 * the velocity. Returns zero on success.
1895 static int velocity_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
1897 struct velocity_info
*vptr
= dev
->priv
;
1899 struct tx_desc
*td_ptr
;
1900 struct velocity_td_info
*tdinfo
;
1901 unsigned long flags
;
1904 int pktlen
= skb
->len
;
1906 #ifdef VELOCITY_ZERO_COPY_SUPPORT
1907 if (skb_shinfo(skb
)->nr_frags
> 6 && __skb_linearize(skb
)) {
1913 spin_lock_irqsave(&vptr
->lock
, flags
);
1915 index
= vptr
->td_curr
[qnum
];
1916 td_ptr
= &(vptr
->td_rings
[qnum
][index
]);
1917 tdinfo
= &(vptr
->td_infos
[qnum
][index
]);
1919 td_ptr
->tdesc1
.TCPLS
= TCPLS_NORMAL
;
1920 td_ptr
->tdesc1
.TCR
= TCR0_TIC
;
1921 td_ptr
->td_buf
[0].queue
= 0;
1926 if (pktlen
< ETH_ZLEN
) {
1927 /* Cannot occur until ZC support */
1929 memcpy(tdinfo
->buf
, skb
->data
, skb
->len
);
1930 memset(tdinfo
->buf
+ skb
->len
, 0, ETH_ZLEN
- skb
->len
);
1932 tdinfo
->skb_dma
[0] = tdinfo
->buf_dma
;
1933 td_ptr
->tdesc0
.pktsize
= pktlen
;
1934 td_ptr
->td_buf
[0].pa_low
= cpu_to_le32(tdinfo
->skb_dma
[0]);
1935 td_ptr
->td_buf
[0].pa_high
= 0;
1936 td_ptr
->td_buf
[0].bufsize
= td_ptr
->tdesc0
.pktsize
;
1937 tdinfo
->nskb_dma
= 1;
1938 td_ptr
->tdesc1
.CMDZ
= 2;
1940 #ifdef VELOCITY_ZERO_COPY_SUPPORT
1941 if (skb_shinfo(skb
)->nr_frags
> 0) {
1942 int nfrags
= skb_shinfo(skb
)->nr_frags
;
1945 memcpy(tdinfo
->buf
, skb
->data
, skb
->len
);
1946 tdinfo
->skb_dma
[0] = tdinfo
->buf_dma
;
1947 td_ptr
->tdesc0
.pktsize
=
1948 td_ptr
->td_buf
[0].pa_low
= cpu_to_le32(tdinfo
->skb_dma
[0]);
1949 td_ptr
->td_buf
[0].pa_high
= 0;
1950 td_ptr
->td_buf
[0].bufsize
= td_ptr
->tdesc0
.pktsize
;
1951 tdinfo
->nskb_dma
= 1;
1952 td_ptr
->tdesc1
.CMDZ
= 2;
1955 tdinfo
->nskb_dma
= 0;
1956 tdinfo
->skb_dma
[i
] = pci_map_single(vptr
->pdev
, skb
->data
, skb
->len
- skb
->data_len
, PCI_DMA_TODEVICE
);
1958 td_ptr
->tdesc0
.pktsize
= pktlen
;
1960 /* FIXME: support 48bit DMA later */
1961 td_ptr
->td_buf
[i
].pa_low
= cpu_to_le32(tdinfo
->skb_dma
);
1962 td_ptr
->td_buf
[i
].pa_high
= 0;
1963 td_ptr
->td_buf
[i
].bufsize
= skb
->len
->skb
->data_len
;
1965 for (i
= 0; i
< nfrags
; i
++) {
1966 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
1967 void *addr
= ((void *) page_address(frag
->page
+ frag
->page_offset
));
1969 tdinfo
->skb_dma
[i
+ 1] = pci_map_single(vptr
->pdev
, addr
, frag
->size
, PCI_DMA_TODEVICE
);
1971 td_ptr
->td_buf
[i
+ 1].pa_low
= cpu_to_le32(tdinfo
->skb_dma
[i
+ 1]);
1972 td_ptr
->td_buf
[i
+ 1].pa_high
= 0;
1973 td_ptr
->td_buf
[i
+ 1].bufsize
= frag
->size
;
1975 tdinfo
->nskb_dma
= i
- 1;
1976 td_ptr
->tdesc1
.CMDZ
= i
;
1983 * Map the linear network buffer into PCI space and
1984 * add it to the transmit ring.
1987 tdinfo
->skb_dma
[0] = pci_map_single(vptr
->pdev
, skb
->data
, pktlen
, PCI_DMA_TODEVICE
);
1988 td_ptr
->tdesc0
.pktsize
= pktlen
;
1989 td_ptr
->td_buf
[0].pa_low
= cpu_to_le32(tdinfo
->skb_dma
[0]);
1990 td_ptr
->td_buf
[0].pa_high
= 0;
1991 td_ptr
->td_buf
[0].bufsize
= td_ptr
->tdesc0
.pktsize
;
1992 tdinfo
->nskb_dma
= 1;
1993 td_ptr
->tdesc1
.CMDZ
= 2;
1996 if (vptr
->flags
& VELOCITY_FLAGS_TAGGING
) {
1997 td_ptr
->tdesc1
.pqinf
.VID
= (vptr
->options
.vid
& 0xfff);
1998 td_ptr
->tdesc1
.pqinf
.priority
= 0;
1999 td_ptr
->tdesc1
.pqinf
.CFI
= 0;
2000 td_ptr
->tdesc1
.TCR
|= TCR0_VETAG
;
2004 * Handle hardware checksum
2006 if ((vptr
->flags
& VELOCITY_FLAGS_TX_CSUM
)
2007 && (skb
->ip_summed
== CHECKSUM_HW
)) {
2008 struct iphdr
*ip
= skb
->nh
.iph
;
2009 if (ip
->protocol
== IPPROTO_TCP
)
2010 td_ptr
->tdesc1
.TCR
|= TCR0_TCPCK
;
2011 else if (ip
->protocol
== IPPROTO_UDP
)
2012 td_ptr
->tdesc1
.TCR
|= (TCR0_UDPCK
);
2013 td_ptr
->tdesc1
.TCR
|= TCR0_IPCK
;
2017 int prev
= index
- 1;
2020 prev
= vptr
->options
.numtx
- 1;
2021 td_ptr
->tdesc0
.owner
= OWNED_BY_NIC
;
2022 vptr
->td_used
[qnum
]++;
2023 vptr
->td_curr
[qnum
] = (index
+ 1) % vptr
->options
.numtx
;
2025 if (AVAIL_TD(vptr
, qnum
) < 1)
2026 netif_stop_queue(dev
);
2028 td_ptr
= &(vptr
->td_rings
[qnum
][prev
]);
2029 td_ptr
->td_buf
[0].queue
= 1;
2030 mac_tx_queue_wake(vptr
->mac_regs
, qnum
);
2032 dev
->trans_start
= jiffies
;
2033 spin_unlock_irqrestore(&vptr
->lock
, flags
);
2038 * velocity_intr - interrupt callback
2039 * @irq: interrupt number
2040 * @dev_instance: interrupting device
2041 * @pt_regs: CPU register state at interrupt
2043 * Called whenever an interrupt is generated by the velocity
2044 * adapter IRQ line. We may not be the source of the interrupt
2045 * and need to identify initially if we are, and if not exit as
2046 * efficiently as possible.
2049 static int velocity_intr(int irq
, void *dev_instance
, struct pt_regs
*regs
)
2051 struct net_device
*dev
= dev_instance
;
2052 struct velocity_info
*vptr
= dev
->priv
;
2057 spin_lock(&vptr
->lock
);
2058 isr_status
= mac_read_isr(vptr
->mac_regs
);
2061 if (isr_status
== 0) {
2062 spin_unlock(&vptr
->lock
);
2066 mac_disable_int(vptr
->mac_regs
);
2069 * Keep processing the ISR until we have completed
2070 * processing and the isr_status becomes zero
2073 while (isr_status
!= 0) {
2074 mac_write_isr(vptr
->mac_regs
, isr_status
);
2075 if (isr_status
& (~(ISR_PRXI
| ISR_PPRXI
| ISR_PTXI
| ISR_PPTXI
)))
2076 velocity_error(vptr
, isr_status
);
2077 if (isr_status
& (ISR_PRXI
| ISR_PPRXI
))
2078 max_count
+= velocity_rx_srv(vptr
, isr_status
);
2079 if (isr_status
& (ISR_PTXI
| ISR_PPTXI
))
2080 max_count
+= velocity_tx_srv(vptr
, isr_status
);
2081 isr_status
= mac_read_isr(vptr
->mac_regs
);
2082 if (max_count
> vptr
->options
.int_works
)
2084 printk(KERN_WARNING
"%s: excessive work at interrupt.\n",
2089 spin_unlock(&vptr
->lock
);
2090 mac_enable_int(vptr
->mac_regs
);
2097 * velocity_set_multi - filter list change callback
2098 * @dev: network device
2100 * Called by the network layer when the filter lists need to change
2101 * for a velocity adapter. Reload the CAMs with the new address
2105 static void velocity_set_multi(struct net_device
*dev
)
2107 struct velocity_info
*vptr
= dev
->priv
;
2108 struct mac_regs __iomem
* regs
= vptr
->mac_regs
;
2111 struct dev_mc_list
*mclist
;
2113 if (dev
->flags
& IFF_PROMISC
) { /* Set promiscuous. */
2114 /* Unconditionally log net taps. */
2115 printk(KERN_NOTICE
"%s: Promiscuous mode enabled.\n", dev
->name
);
2116 writel(0xffffffff, ®s
->MARCAM
[0]);
2117 writel(0xffffffff, ®s
->MARCAM
[4]);
2118 rx_mode
= (RCR_AM
| RCR_AB
| RCR_PROM
);
2119 } else if ((dev
->mc_count
> vptr
->multicast_limit
)
2120 || (dev
->flags
& IFF_ALLMULTI
)) {
2121 writel(0xffffffff, ®s
->MARCAM
[0]);
2122 writel(0xffffffff, ®s
->MARCAM
[4]);
2123 rx_mode
= (RCR_AM
| RCR_AB
);
2125 int offset
= MCAM_SIZE
- vptr
->multicast_limit
;
2126 mac_get_cam_mask(regs
, vptr
->mCAMmask
, VELOCITY_MULTICAST_CAM
);
2128 for (i
= 0, mclist
= dev
->mc_list
; mclist
&& i
< dev
->mc_count
; i
++, mclist
= mclist
->next
) {
2129 mac_set_cam(regs
, i
+ offset
, mclist
->dmi_addr
, VELOCITY_MULTICAST_CAM
);
2130 vptr
->mCAMmask
[(offset
+ i
) / 8] |= 1 << ((offset
+ i
) & 7);
2133 mac_set_cam_mask(regs
, vptr
->mCAMmask
, VELOCITY_MULTICAST_CAM
);
2134 rx_mode
= (RCR_AM
| RCR_AB
);
2136 if (dev
->mtu
> 1500)
2139 BYTE_REG_BITS_ON(rx_mode
, ®s
->RCR
);
 *	velocity_get_stats	-	statistics callback
2145 * @dev: network device
2147 * Callback from the network layer to allow driver statistics
2148 * to be resynchronized with hardware collected state. In the
2149 * case of the velocity we need to pull the MIB counters from
2150 * the hardware into the counters before letting the network
2151 * layer display them.
2154 static struct net_device_stats
*velocity_get_stats(struct net_device
*dev
)
2156 struct velocity_info
*vptr
= dev
->priv
;
2158 /* If the hardware is down, don't touch MII */
2159 if(!netif_running(dev
))
2160 return &vptr
->stats
;
2162 spin_lock_irq(&vptr
->lock
);
2163 velocity_update_hw_mibs(vptr
);
2164 spin_unlock_irq(&vptr
->lock
);
2166 vptr
->stats
.rx_packets
= vptr
->mib_counter
[HW_MIB_ifRxAllPkts
];
2167 vptr
->stats
.rx_errors
= vptr
->mib_counter
[HW_MIB_ifRxErrorPkts
];
2168 vptr
->stats
.rx_length_errors
= vptr
->mib_counter
[HW_MIB_ifInRangeLengthErrors
];
2170 // unsigned long rx_dropped; /* no space in linux buffers */
2171 vptr
->stats
.collisions
= vptr
->mib_counter
[HW_MIB_ifTxEtherCollisions
];
2172 /* detailed rx_errors: */
2173 // unsigned long rx_length_errors;
2174 // unsigned long rx_over_errors; /* receiver ring buff overflow */
2175 vptr
->stats
.rx_crc_errors
= vptr
->mib_counter
[HW_MIB_ifRxPktCRCE
];
2176 // unsigned long rx_frame_errors; /* recv'd frame alignment error */
2177 // unsigned long rx_fifo_errors; /* recv'r fifo overrun */
2178 // unsigned long rx_missed_errors; /* receiver missed packet */
2180 /* detailed tx_errors */
2181 // unsigned long tx_fifo_errors;
2183 return &vptr
->stats
;
/**
 * velocity_ioctl - ioctl entry point
 * @dev: network device
 * @rq: interface request ioctl
 * @cmd: command code
 *
 * Called when the user issues an ioctl request to the network
 * device in question. The velocity interface supports MII.
 */

static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct velocity_info *vptr = dev->priv;
	int ret;

	/* If we are asked for information and the device is power
	   saving then we need to bring the device back up to talk to it */

	if (!netif_running(dev))
		pci_set_power_state(vptr->pdev, PCI_D0);

	switch (cmd) {
	case SIOCGMIIPHY:	/* Get address of MII PHY in use. */
	case SIOCGMIIREG:	/* Read MII PHY register. */
	case SIOCSMIIREG:	/* Write to MII PHY register. */
		ret = velocity_mii_ioctl(dev, rq, cmd);
		break;

	default:
		ret = -EOPNOTSUPP;
	}
	if (!netif_running(dev))
		pci_set_power_state(vptr->pdev, PCI_D3hot);

	return ret;
}
/*
 * Definition for our device driver. The PCI layer interface
 * uses this to handle all our card discovery and plugging.
 */

static struct pci_driver velocity_driver = {
	.name		= VELOCITY_NAME,
	.id_table	= velocity_id_table,
	.probe		= velocity_found1,
	.remove		= __devexit_p(velocity_remove1),
#ifdef CONFIG_PM
	.suspend	= velocity_suspend,
	.resume		= velocity_resume,
#endif
};

/**
 * velocity_init_module - load time function
 *
 * Called when the velocity module is loaded. The PCI driver
 * is registered with the PCI layer, and in turn will call
 * the probe functions for each velocity adapter installed
 * in the system.
 */

static int __init velocity_init_module(void)
{
	int ret;

	velocity_register_notifier();
	ret = pci_module_init(&velocity_driver);
	if (ret < 0)
		velocity_unregister_notifier();
	return ret;
}

/**
 * velocity_cleanup - module unload
 *
 * When the velocity hardware is unloaded this function is called.
 * It will clean up the notifiers and unregister the PCI
 * driver interface for this hardware. This in turn cleans up
 * all discovered interfaces before returning from the function.
 */

static void __exit velocity_cleanup_module(void)
{
	velocity_unregister_notifier();
	pci_unregister_driver(&velocity_driver);
}

module_init(velocity_init_module);
module_exit(velocity_cleanup_module);
/*
 * MII access, media link mode setting functions
 */

/**
 * mii_init - set up MII
 * @vptr: velocity adapter
 * @mii_status: link status
 *
 * Set up the PHY for the current link state.
 */

static void mii_init(struct velocity_info *vptr, u32 mii_status)
{
	u16 BMCR;

	switch (PHYID_GET_PHY_ID(vptr->phy_id)) {
	case PHYID_CICADA_CS8201:
		/*
		 * Reset to hardware default
		 */
		MII_REG_BITS_OFF((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
		/*
		 * Turn on ECHODIS bit in NWay-forced full mode and turn it
		 * off in NWay-forced half mode for NWay-forced v.s.
		 * legacy-forced issue.
		 */
		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
			MII_REG_BITS_ON(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
		else
			MII_REG_BITS_OFF(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
		/*
		 * Turn on Link/Activity LED enable bit for CIS8201
		 */
		MII_REG_BITS_ON(PLED_LALBE, MII_REG_PLED, vptr->mac_regs);
		break;
	case PHYID_VT3216_32BIT:
	case PHYID_VT3216_64BIT:
		/*
		 * Reset to hardware default
		 */
		MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
		/*
		 * Turn on ECHODIS bit in NWay-forced full mode and turn it
		 * off in NWay-forced half mode for NWay-forced v.s.
		 * legacy-forced issue.
		 */
		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
			MII_REG_BITS_ON(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
		else
			MII_REG_BITS_OFF(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
		break;

	case PHYID_MARVELL_1000:
	case PHYID_MARVELL_1000S:
		/*
		 * Assert CRS on Transmit
		 */
		MII_REG_BITS_ON(PSCR_ACRSTX, MII_REG_PSCR, vptr->mac_regs);
		/*
		 * Reset to hardware default
		 */
		MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
		break;
	default:
		break;
	}
	velocity_mii_read(vptr->mac_regs, MII_REG_BMCR, &BMCR);
	if (BMCR & BMCR_ISO) {
		BMCR &= ~BMCR_ISO;
		velocity_mii_write(vptr->mac_regs, MII_REG_BMCR, BMCR);
	}
}
/**
 * safe_disable_mii_autopoll - autopoll off
 * @regs: velocity registers
 *
 * Turn off the autopoll and wait for it to disable on the chip.
 */

static void safe_disable_mii_autopoll(struct mac_regs __iomem *regs)
{
	u16 ww;

	/* turn off MAUTO */
	writeb(0, &regs->MIICR);
	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
		udelay(1);
		if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
			break;
	}
}
/**
 * enable_mii_autopoll - turn on autopolling
 * @regs: velocity registers
 *
 * Enable the MII link status autopoll feature on the Velocity
 * hardware. Wait for it to enable.
 */

static void enable_mii_autopoll(struct mac_regs __iomem *regs)
{
	int ii;

	writeb(0, &(regs->MIICR));
	writeb(MIIADR_SWMPL, &regs->MIIADR);

	for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
		udelay(1);
		if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
			break;
	}

	writeb(MIICR_MAUTO, &regs->MIICR);

	for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
		udelay(1);
		if (!BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
			break;
	}
}
/**
 * velocity_mii_read - read MII data
 * @regs: velocity registers
 * @index: MII register index
 * @data: buffer for received data
 *
 * Perform a single read of an MII 16bit register. Returns zero
 * on success or -ETIMEDOUT if the PHY did not respond.
 */

static int velocity_mii_read(struct mac_regs __iomem *regs, u8 index, u16 *data)
{
	u16 ww;

	/*
	 * Disable MIICR_MAUTO, so that mii addr can be set normally
	 */
	safe_disable_mii_autopoll(regs);

	writeb(index, &regs->MIIADR);

	BYTE_REG_BITS_ON(MIICR_RCMD, &regs->MIICR);

	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
		if (!(readb(&regs->MIICR) & MIICR_RCMD))
			break;
	}

	*data = readw(&regs->MIIDATA);

	enable_mii_autopoll(regs);
	if (ww == W_MAX_TIMEOUT)
		return -ETIMEDOUT;
	return 0;
}
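/*
 * Editor's note (illustrative usage, not from the original source): callers
 * read a PHY register into a local u16 and check the timeout result, e.g.
 *
 *	u16 bmsr;
 *
 *	if (velocity_mii_read(vptr->mac_regs, MII_REG_BMSR, &bmsr) < 0)
 *		the PHY did not answer within W_MAX_TIMEOUT polls
 */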
/**
 * velocity_mii_write - write MII data
 * @regs: velocity registers
 * @mii_addr: MII register index
 * @data: 16bit data for the MII register
 *
 * Perform a single write to an MII 16bit register. Returns zero
 * on success or -ETIMEDOUT if the PHY did not respond.
 */

static int velocity_mii_write(struct mac_regs __iomem *regs, u8 mii_addr, u16 data)
{
	u16 ww;

	/*
	 * Disable MIICR_MAUTO, so that mii addr can be set normally
	 */
	safe_disable_mii_autopoll(regs);

	/* MII reg offset */
	writeb(mii_addr, &regs->MIIADR);
	/* set MII data */
	writew(data, &regs->MIIDATA);

	/* turn on MIICR_WCMD */
	BYTE_REG_BITS_ON(MIICR_WCMD, &regs->MIICR);

	/* W_MAX_TIMEOUT is the timeout period */
	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
		udelay(5);
		if (!(readb(&regs->MIICR) & MIICR_WCMD))
			break;
	}
	enable_mii_autopoll(regs);

	if (ww == W_MAX_TIMEOUT)
		return -ETIMEDOUT;
	return 0;
}
/**
 * velocity_get_opt_media_mode - get media selection
 * @vptr: velocity adapter
 *
 * Get the media mode stored in EEPROM or module options and load
 * mii_status accordingly. The requested link state information
 * is also returned.
 */

static u32 velocity_get_opt_media_mode(struct velocity_info *vptr)
{
	u32 status = 0;

	switch (vptr->options.spd_dpx) {
	case SPD_DPX_AUTO:
		status = VELOCITY_AUTONEG_ENABLE;
		break;
	case SPD_DPX_100_FULL:
		status = VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL;
		break;
	case SPD_DPX_10_FULL:
		status = VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL;
		break;
	case SPD_DPX_100_HALF:
		status = VELOCITY_SPEED_100;
		break;
	case SPD_DPX_10_HALF:
		status = VELOCITY_SPEED_10;
		break;
	}
	vptr->mii_status = status;
	return status;
}
/**
 * mii_set_auto_on - autonegotiate on
 * @vptr: velocity interface
 *
 * Enable autonegotiation on this interface.
 */

static void mii_set_auto_on(struct velocity_info *vptr)
{
	if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs))
		MII_REG_BITS_ON(BMCR_REAUTO, MII_REG_BMCR, vptr->mac_regs);
	else
		MII_REG_BITS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs);
}

/*
static void mii_set_auto_off(struct velocity_info * vptr)
{
	MII_REG_BITS_OFF(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs);
}
*/
/**
 * set_mii_flow_control - flow control setup
 * @vptr: velocity interface
 *
 * Set up the flow control on this interface according to
 * the supplied user/eeprom options.
 */

static void set_mii_flow_control(struct velocity_info *vptr)
{
	/* Enable or Disable PAUSE in ANAR */
	switch (vptr->options.flow_cntl) {
	case FLOW_CNTL_TX:
		MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
		MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
		break;

	case FLOW_CNTL_RX:
		MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
		MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
		break;

	case FLOW_CNTL_TX_RX:
		MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
		MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
		break;

	case FLOW_CNTL_DISABLE:
		MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
		MII_REG_BITS_OFF(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
		break;
	default:
		break;
	}
}
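/*
 * Editor's note (summary of the switch above as reconstructed here): the PAUSE
 * and ASMDIR advertisement bits map onto the flow control option roughly as
 *
 *	FLOW_CNTL_TX		PAUSE off, ASMDIR on
 *	FLOW_CNTL_RX		PAUSE on,  ASMDIR on
 *	FLOW_CNTL_TX_RX		PAUSE on,  ASMDIR on
 *	FLOW_CNTL_DISABLE	PAUSE off, ASMDIR off
 *
 * The link partner's resolution of these bits decides which direction of
 * pause frames is actually honoured.
 */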
/**
 * velocity_set_media_mode - set media mode
 * @mii_status: old MII link state
 *
 * Check the media link state and configure the flow control
 * PHY and also velocity hardware setup accordingly. In particular
 * we need to set up CD polling and frame bursting.
 */

static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
{
	u32 curr_status;
	struct mac_regs __iomem *regs = vptr->mac_regs;

	vptr->mii_status = mii_check_media_mode(vptr->mac_regs);
	curr_status = vptr->mii_status & (~VELOCITY_LINK_FAIL);

	/* Set mii link status */
	set_mii_flow_control(vptr);

	/*
	   Check if new status is consistent with current status
	   if (((mii_status & curr_status) & VELOCITY_AUTONEG_ENABLE)
	   || (mii_status==curr_status)) {
	   vptr->mii_status=mii_check_media_mode(vptr->mac_regs);
	   vptr->mii_status=check_connection_type(vptr->mac_regs);
	   VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity link no change\n");
	   return 0;
	   }
	 */

	if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201) {
		MII_REG_BITS_ON(AUXCR_MDPPS, MII_REG_AUXCR, vptr->mac_regs);
	}

	/*
	 * If connection type is AUTO
	 */
	if (mii_status & VELOCITY_AUTONEG_ENABLE) {
		VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity is AUTO mode\n");
		/* clear force MAC mode bit */
		BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
		/* set duplex mode of MAC according to duplex mode of MII */
		MII_REG_BITS_ON(ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10, MII_REG_ANAR, vptr->mac_regs);
		MII_REG_BITS_ON(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs);
		MII_REG_BITS_ON(BMCR_SPEED1G, MII_REG_BMCR, vptr->mac_regs);

		/* enable AUTO-NEGO mode */
		mii_set_auto_on(vptr);
	} else {
		u16 ANAR;
		u8 CHIPGCR;

		/*
		 * 1. if it's 3119, disable frame bursting in halfduplex mode
		 *    and enable it in fullduplex mode
		 * 2. set correct MII/GMII and half/full duplex mode in CHIPGCR
		 * 3. only enable CD heart beat counter in 10HD mode
		 */

		/* set force MAC mode bit */
		BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);

		CHIPGCR = readb(&regs->CHIPGCR);
		CHIPGCR &= ~CHIPGCR_FCGMII;

		if (mii_status & VELOCITY_DUPLEX_FULL) {
			CHIPGCR |= CHIPGCR_FCFDX;
			writeb(CHIPGCR, &regs->CHIPGCR);
			VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced full mode\n");
			if (vptr->rev_id < REV_ID_VT3216_A0)
				BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
		} else {
			CHIPGCR &= ~CHIPGCR_FCFDX;
			VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced half mode\n");
			writeb(CHIPGCR, &regs->CHIPGCR);
			if (vptr->rev_id < REV_ID_VT3216_A0)
				BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
		}

		MII_REG_BITS_OFF(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs);

		if (!(mii_status & VELOCITY_DUPLEX_FULL) && (mii_status & VELOCITY_SPEED_10)) {
			BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
		} else {
			BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
		}
		/* MII_REG_BITS_OFF(BMCR_SPEED1G, MII_REG_BMCR, vptr->mac_regs); */
		velocity_mii_read(vptr->mac_regs, MII_REG_ANAR, &ANAR);
		ANAR &= (~(ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10));
		if (mii_status & VELOCITY_SPEED_100) {
			if (mii_status & VELOCITY_DUPLEX_FULL)
				ANAR |= ANAR_TXFD;
			else
				ANAR |= ANAR_TX;
		} else {
			if (mii_status & VELOCITY_DUPLEX_FULL)
				ANAR |= ANAR_10FD;
			else
				ANAR |= ANAR_10;
		}
		velocity_mii_write(vptr->mac_regs, MII_REG_ANAR, ANAR);
		/* enable AUTO-NEGO mode */
		mii_set_auto_on(vptr);
		/* MII_REG_BITS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs); */
	}
	/* vptr->mii_status=mii_check_media_mode(vptr->mac_regs); */
	/* vptr->mii_status=check_connection_type(vptr->mac_regs); */
	return VELOCITY_LINK_CHANGE;
}
/**
 * mii_check_media_mode - check media state
 * @regs: velocity registers
 *
 * Check the current MII status and determine the link status
 * accordingly.
 */

static u32 mii_check_media_mode(struct mac_regs __iomem *regs)
{
	u32 status = 0;
	u16 ANAR;

	if (!MII_REG_BITS_IS_ON(BMSR_LNK, MII_REG_BMSR, regs))
		status |= VELOCITY_LINK_FAIL;

	if (MII_REG_BITS_IS_ON(G1000CR_1000FD, MII_REG_G1000CR, regs))
		status |= VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
	else if (MII_REG_BITS_IS_ON(G1000CR_1000, MII_REG_G1000CR, regs))
		status |= (VELOCITY_SPEED_1000);
	else {
		velocity_mii_read(regs, MII_REG_ANAR, &ANAR);
		if (ANAR & ANAR_TXFD)
			status |= (VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL);
		else if (ANAR & ANAR_TX)
			status |= VELOCITY_SPEED_100;
		else if (ANAR & ANAR_10FD)
			status |= (VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL);
		else
			status |= (VELOCITY_SPEED_10);
	}

	if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, regs)) {
		velocity_mii_read(regs, MII_REG_ANAR, &ANAR);
		if ((ANAR & (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10))
		    == (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) {
			if (MII_REG_BITS_IS_ON(G1000CR_1000 | G1000CR_1000FD, MII_REG_G1000CR, regs))
				status |= VELOCITY_AUTONEG_ENABLE;
		}
	}

	return status;
}
static u32 check_connection_type(struct mac_regs __iomem *regs)
{
	u32 status = 0;
	u8 PHYSR0;
	u16 ANAR;
	PHYSR0 = readb(&regs->PHYSR0);

	/*
	   if (!(PHYSR0 & PHYSR0_LINKGD))
	   status|=VELOCITY_LINK_FAIL;
	 */

	if (PHYSR0 & PHYSR0_FDPX)
		status |= VELOCITY_DUPLEX_FULL;

	if (PHYSR0 & PHYSR0_SPDG)
		status |= VELOCITY_SPEED_1000;
	if (PHYSR0 & PHYSR0_SPD10)
		status |= VELOCITY_SPEED_10;
	else
		status |= VELOCITY_SPEED_100;

	if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, regs)) {
		velocity_mii_read(regs, MII_REG_ANAR, &ANAR);
		if ((ANAR & (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10))
		    == (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) {
			if (MII_REG_BITS_IS_ON(G1000CR_1000 | G1000CR_1000FD, MII_REG_G1000CR, regs))
				status |= VELOCITY_AUTONEG_ENABLE;
		}
	}

	return status;
}
/**
 * enable_flow_control_ability - flow control
 * @vptr: velocity to configure
 *
 * Set up flow control according to the flow control options
 * determined by the eeprom/configuration.
 */

static void enable_flow_control_ability(struct velocity_info *vptr)
{
	struct mac_regs __iomem *regs = vptr->mac_regs;

	switch (vptr->options.flow_cntl) {

	case FLOW_CNTL_DEFAULT:
		if (BYTE_REG_BITS_IS_ON(PHYSR0_RXFLC, &regs->PHYSR0))
			writel(CR0_FDXRFCEN, &regs->CR0Set);
		else
			writel(CR0_FDXRFCEN, &regs->CR0Clr);

		if (BYTE_REG_BITS_IS_ON(PHYSR0_TXFLC, &regs->PHYSR0))
			writel(CR0_FDXTFCEN, &regs->CR0Set);
		else
			writel(CR0_FDXTFCEN, &regs->CR0Clr);
		break;

	case FLOW_CNTL_TX:
		writel(CR0_FDXTFCEN, &regs->CR0Set);
		writel(CR0_FDXRFCEN, &regs->CR0Clr);
		break;

	case FLOW_CNTL_RX:
		writel(CR0_FDXRFCEN, &regs->CR0Set);
		writel(CR0_FDXTFCEN, &regs->CR0Clr);
		break;

	case FLOW_CNTL_TX_RX:
		writel(CR0_FDXTFCEN, &regs->CR0Set);
		writel(CR0_FDXRFCEN, &regs->CR0Set);
		break;

	case FLOW_CNTL_DISABLE:
		writel(CR0_FDXRFCEN, &regs->CR0Clr);
		writel(CR0_FDXTFCEN, &regs->CR0Clr);
		break;

	default:
		break;
	}
}
/**
 * velocity_ethtool_up - pre hook for ethtool
 * @dev: network device
 *
 * Called before an ethtool operation. We need to make sure the
 * chip is out of D3 state before we poke at it.
 */

static int velocity_ethtool_up(struct net_device *dev)
{
	struct velocity_info *vptr = dev->priv;
	if (!netif_running(dev))
		pci_set_power_state(vptr->pdev, PCI_D0);
	return 0;
}

/**
 * velocity_ethtool_down - post hook for ethtool
 * @dev: network device
 *
 * Called after an ethtool operation. Restore the chip back to D3
 * state if it isn't running.
 */

static void velocity_ethtool_down(struct net_device *dev)
{
	struct velocity_info *vptr = dev->priv;
	if (!netif_running(dev))
		pci_set_power_state(vptr->pdev, PCI_D3hot);
}
static int velocity_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct velocity_info *vptr = dev->priv;
	struct mac_regs __iomem *regs = vptr->mac_regs;
	u32 status;
	status = check_connection_type(vptr->mac_regs);

	cmd->supported = SUPPORTED_TP | SUPPORTED_Autoneg |
			 SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
			 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
			 SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full;
	if (status & VELOCITY_SPEED_100)
		cmd->speed = SPEED_100;
	else
		cmd->speed = SPEED_10;
	cmd->autoneg = (status & VELOCITY_AUTONEG_ENABLE) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
	cmd->port = PORT_TP;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = readb(&regs->MIIADR) & 0x1F;

	if (status & VELOCITY_DUPLEX_FULL)
		cmd->duplex = DUPLEX_FULL;
	else
		cmd->duplex = DUPLEX_HALF;

	return 0;
}
static int velocity_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct velocity_info *vptr = dev->priv;
	u32 curr_status;
	u32 new_status = 0;
	int ret = 0;

	curr_status = check_connection_type(vptr->mac_regs);
	curr_status &= (~VELOCITY_LINK_FAIL);

	new_status |= ((cmd->autoneg) ? VELOCITY_AUTONEG_ENABLE : 0);
	new_status |= ((cmd->speed == SPEED_100) ? VELOCITY_SPEED_100 : 0);
	new_status |= ((cmd->speed == SPEED_10) ? VELOCITY_SPEED_10 : 0);
	new_status |= ((cmd->duplex == DUPLEX_FULL) ? VELOCITY_DUPLEX_FULL : 0);

	if ((new_status & VELOCITY_AUTONEG_ENABLE) && (new_status != (curr_status | VELOCITY_AUTONEG_ENABLE)))
		ret = -EINVAL;
	else
		velocity_set_media_mode(vptr, new_status);

	return ret;
}
static u32 velocity_get_link(struct net_device *dev)
{
	struct velocity_info *vptr = dev->priv;
	struct mac_regs __iomem *regs = vptr->mac_regs;
	return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, &regs->PHYSR0) ? 0 : 1;
}
static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct velocity_info *vptr = dev->priv;
	strcpy(info->driver, VELOCITY_NAME);
	strcpy(info->version, VELOCITY_VERSION);
	strcpy(info->bus_info, pci_name(vptr->pdev));
}
static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct velocity_info *vptr = dev->priv;
	wol->supported = WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP;
	wol->wolopts |= WAKE_MAGIC;
	/*
	   if (vptr->wol_opts & VELOCITY_WOL_PHY)
	   wol.wolopts|=WAKE_PHY;
	 */
	if (vptr->wol_opts & VELOCITY_WOL_UCAST)
		wol->wolopts |= WAKE_UCAST;
	if (vptr->wol_opts & VELOCITY_WOL_ARP)
		wol->wolopts |= WAKE_ARP;
	memcpy(&wol->sopass, vptr->wol_passwd, 6);
}
static int velocity_ethtool_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct velocity_info *vptr = dev->priv;

	if (!(wol->wolopts & (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP)))
		return -EFAULT;
	vptr->wol_opts = VELOCITY_WOL_MAGIC;

	/*
	   if (wol.wolopts & WAKE_PHY) {
	   vptr->wol_opts|=VELOCITY_WOL_PHY;
	   vptr->flags |=VELOCITY_FLAGS_WOL_ENABLED;
	   }
	 */

	if (wol->wolopts & WAKE_MAGIC) {
		vptr->wol_opts |= VELOCITY_WOL_MAGIC;
		vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
	}
	if (wol->wolopts & WAKE_UCAST) {
		vptr->wol_opts |= VELOCITY_WOL_UCAST;
		vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
	}
	if (wol->wolopts & WAKE_ARP) {
		vptr->wol_opts |= VELOCITY_WOL_ARP;
		vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
	}
	memcpy(vptr->wol_passwd, wol->sopass, 6);
	return 0;
}
static u32 velocity_get_msglevel(struct net_device *dev)
{
	return msglevel;
}

static void velocity_set_msglevel(struct net_device *dev, u32 value)
{
	msglevel = value;
}
static struct ethtool_ops velocity_ethtool_ops = {
	.get_settings	= velocity_get_settings,
	.set_settings	= velocity_set_settings,
	.get_drvinfo	= velocity_get_drvinfo,
	.get_wol	= velocity_ethtool_get_wol,
	.set_wol	= velocity_ethtool_set_wol,
	.get_msglevel	= velocity_get_msglevel,
	.set_msglevel	= velocity_set_msglevel,
	.get_link	= velocity_get_link,
	.begin		= velocity_ethtool_up,
	.complete	= velocity_ethtool_down
};
/**
 * velocity_mii_ioctl - MII ioctl handler
 * @dev: network device
 * @ifr: the ifreq block for the ioctl
 * @cmd: the command
 *
 * Process MII requests made via ioctl from the network layer. These
 * are used by tools like kudzu to interrogate the link state of the
 * hardware.
 */

static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct velocity_info *vptr = dev->priv;
	struct mac_regs __iomem *regs = vptr->mac_regs;
	unsigned long flags;
	struct mii_ioctl_data *miidata = if_mii(ifr);
	int err;

	switch (cmd) {
	case SIOCGMIIPHY:
		miidata->phy_id = readb(&regs->MIIADR) & 0x1f;
		break;
	case SIOCGMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (velocity_mii_read(vptr->mac_regs, miidata->reg_num & 0x1f, &(miidata->val_out)) < 0)
			return -ETIMEDOUT;
		break;
	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		spin_lock_irqsave(&vptr->lock, flags);
		err = velocity_mii_write(vptr->mac_regs, miidata->reg_num & 0x1f, miidata->val_in);
		spin_unlock_irqrestore(&vptr->lock, flags);
		check_connection_type(vptr->mac_regs);
		if (err)
			return err;
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
/**
 * velocity_save_context - save registers
 * @vptr: velocity adapter
 * @context: buffer for stored context
 *
 * Retrieve the current configuration from the velocity hardware
 * and stash it in the context structure, for use by the context
 * restore functions. This allows us to save things we need across
 * power down states.
 */

static void velocity_save_context(struct velocity_info *vptr, struct velocity_context *context)
{
	struct mac_regs __iomem *regs = vptr->mac_regs;
	u16 i;
	u8 __iomem *ptr = (u8 __iomem *)regs;

	for (i = MAC_REG_PAR; i < MAC_REG_CR0_CLR; i += 4)
		*((u32 *) (context->mac_reg + i)) = readl(ptr + i);

	for (i = MAC_REG_MAR; i < MAC_REG_TDCSR_CLR; i += 4)
		*((u32 *) (context->mac_reg + i)) = readl(ptr + i);

	for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
		*((u32 *) (context->mac_reg + i)) = readl(ptr + i);
}
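/*
 * Editor's note: the three loops above snapshot the chip in 32bit chunks over
 * three register windows (PAR..CR0_CLR, MAR..TDCSR_CLR and
 * RDBASE_LO..FIFO_TEST0), using the register offset itself as the index into
 * context->mac_reg, so that velocity_restore_context() below can write every
 * value back to the offset it came from.
 */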
/**
 * velocity_restore_context - restore registers
 * @vptr: velocity adapter
 * @context: buffer for stored context
 *
 * Reload the register configuration from the velocity context
 * created by velocity_save_context.
 */

static void velocity_restore_context(struct velocity_info *vptr, struct velocity_context *context)
{
	struct mac_regs __iomem *regs = vptr->mac_regs;
	int i;
	u8 __iomem *ptr = (u8 __iomem *)regs;

	for (i = MAC_REG_PAR; i < MAC_REG_CR0_SET; i += 4) {
		writel(*((u32 *) (context->mac_reg + i)), ptr + i);
	}

	/* Just skip cr0 */
	for (i = MAC_REG_CR1_SET; i < MAC_REG_CR0_CLR; i++) {
		/* Clear */
		writeb(~(*((u8 *) (context->mac_reg + i))), ptr + i + 4);
		/* Set */
		writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
	}

	for (i = MAC_REG_MAR; i < MAC_REG_IMR; i += 4) {
		writel(*((u32 *) (context->mac_reg + i)), ptr + i);
	}

	for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4) {
		writel(*((u32 *) (context->mac_reg + i)), ptr + i);
	}

	for (i = MAC_REG_TDCSR_SET; i <= MAC_REG_RDCSR_SET; i++) {
		writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
	}
}
/**
 * wol_calc_crc - WOL CRC
 * @pattern: data pattern
 * @mask_pattern: mask
 *
 * Compute the wake on lan crc hashes for the packet header
 * we are interested in.
 */

static u16 wol_calc_crc(int size, u8 *pattern, u8 *mask_pattern)
{
	u16 crc = 0xFFFF;
	u8 mask;
	int i, j;

	for (i = 0; i < size; i++) {
		mask = mask_pattern[i];

		/* Skip this loop if the mask equals to zero */
		if (mask == 0x00)
			continue;

		for (j = 0; j < 8; j++) {
			if ((mask & 0x01) == 0) {
				mask >>= 1;
				continue;
			}
			mask >>= 1;
			crc = crc_ccitt(crc, &(pattern[i * 8 + j]), 1);
		}
	}
	/* Finally, invert the result once to get the correct data */
	crc = ~crc;
	return bitreverse(crc) >> 16;
}
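/*
 * Editor's note (illustrative, not from the original source): each bit of
 * mask_pattern[i] selects one byte of the 8 byte group pattern[i*8..i*8+7],
 * so a minimal sketch of the intended use is
 *
 *	u8 hdr[8];		first eight bytes of the frame of interest
 *	u8 mask = 0x3f;		CRC only hdr[0]..hdr[5]
 *	u16 crc = wol_calc_crc(1, hdr, &mask);
 */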
/**
 * velocity_set_wol - set up for wake on lan
 * @vptr: velocity to set WOL status on
 *
 * Set a card up for wake on lan either by unicast or by
 * ARP packet.
 *
 * FIXME: check static buffer is safe here
 */

static int velocity_set_wol(struct velocity_info *vptr)
{
	struct mac_regs __iomem *regs = vptr->mac_regs;
	static u8 buf[256];
	int i;

	static u32 mask_pattern[2][4] = {
		{0x00203000, 0x000003C0, 0x00000000, 0x0000000},	/* ARP */
		{0xfffff000, 0xffffffff, 0xffffffff, 0x000ffff}		/* Magic Packet */
	};

	writew(0xFFFF, &regs->WOLCRClr);
	writeb(WOLCFG_SAB | WOLCFG_SAM, &regs->WOLCFGSet);
	writew(WOLCR_MAGIC_EN, &regs->WOLCRSet);

	/*
	   if (vptr->wol_opts & VELOCITY_WOL_PHY)
	   writew((WOLCR_LINKON_EN|WOLCR_LINKOFF_EN), &regs->WOLCRSet);
	 */

	if (vptr->wol_opts & VELOCITY_WOL_UCAST) {
		writew(WOLCR_UNICAST_EN, &regs->WOLCRSet);
	}

	if (vptr->wol_opts & VELOCITY_WOL_ARP) {
		struct arp_packet *arp = (struct arp_packet *) buf;
		u16 crc;
		memset(buf, 0, sizeof(struct arp_packet) + 7);

		for (i = 0; i < 4; i++)
			writel(mask_pattern[0][i], &regs->ByteMask[0][i]);

		arp->type = htons(ETH_P_ARP);
		arp->ar_op = htons(1);

		memcpy(arp->ar_tip, vptr->ip_addr, 4);

		crc = wol_calc_crc((sizeof(struct arp_packet) + 7) / 8, buf,
				   (u8 *) &mask_pattern[0][0]);

		writew(crc, &regs->PatternCRC[0]);
		writew(WOLCR_ARP_EN, &regs->WOLCRSet);
	}

	BYTE_REG_BITS_ON(PWCFG_WOLTYPE, &regs->PWCFGSet);
	BYTE_REG_BITS_ON(PWCFG_LEGACY_WOLEN, &regs->PWCFGSet);

	writew(0x0FFF, &regs->WOLSRClr);

	if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) {
		if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
			MII_REG_BITS_ON(AUXCR_MDPPS, MII_REG_AUXCR, vptr->mac_regs);

		MII_REG_BITS_OFF(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs);
	}

	if (vptr->mii_status & VELOCITY_SPEED_1000)
		MII_REG_BITS_ON(BMCR_REAUTO, MII_REG_BMCR, vptr->mac_regs);

	BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);

	{
		u8 GCR;
		GCR = readb(&regs->CHIPGCR);
		GCR = (GCR & ~CHIPGCR_FCGMII) | CHIPGCR_FCFDX;
		writeb(GCR, &regs->CHIPGCR);
	}

	BYTE_REG_BITS_OFF(ISR_PWEI, &regs->ISR);
	/* Turn on SWPTAG just before entering power mode */
	BYTE_REG_BITS_ON(STICKHW_SWPTAG, &regs->STICKHW);
	/* Go to bed ..... */
	BYTE_REG_BITS_ON((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW);

	return 0;
}
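/*
 * Editor's note: only the ARP row of mask_pattern is actually programmed into
 * the ByteMask window above; the magic packet row is kept for reference, since
 * magic packet wake is enabled directly through WOLCR_MAGIC_EN without a
 * pattern match. The CRC written to PatternCRC[0] is what the hardware
 * compares against incoming frames that match the byte mask.
 */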
static int velocity_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct velocity_info *vptr = netdev_priv(dev);
	unsigned long flags;

	if (!netif_running(vptr->dev))
		return 0;

	netif_device_detach(vptr->dev);

	spin_lock_irqsave(&vptr->lock, flags);
	pci_save_state(pdev);
#ifdef ETHTOOL_GWOL
	if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) {
		velocity_get_ip(vptr);
		velocity_save_context(vptr, &vptr->context);
		velocity_shutdown(vptr);
		velocity_set_wol(vptr);
		pci_enable_wake(pdev, 3, 1);
		pci_set_power_state(pdev, PCI_D3hot);
	} else {
		velocity_save_context(vptr, &vptr->context);
		velocity_shutdown(vptr);
		pci_disable_device(pdev);
		pci_set_power_state(pdev, pci_choose_state(pdev, state));
	}
#else
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
#endif
	spin_unlock_irqrestore(&vptr->lock, flags);
	return 0;
}
static int velocity_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct velocity_info *vptr = netdev_priv(dev);
	unsigned long flags;
	int i;

	if (!netif_running(vptr->dev))
		return 0;

	pci_set_power_state(pdev, PCI_D0);
	pci_enable_wake(pdev, 0, 0);
	pci_restore_state(pdev);

	mac_wol_reset(vptr->mac_regs);

	spin_lock_irqsave(&vptr->lock, flags);
	velocity_restore_context(vptr, &vptr->context);
	velocity_init_registers(vptr, VELOCITY_INIT_WOL);
	mac_disable_int(vptr->mac_regs);

	velocity_tx_srv(vptr, 0);

	for (i = 0; i < vptr->num_txq; i++) {
		if (vptr->td_used[i]) {
			mac_tx_queue_wake(vptr->mac_regs, i);
		}
	}

	mac_enable_int(vptr->mac_regs);
	spin_unlock_irqrestore(&vptr->lock, flags);
	netif_device_attach(vptr->dev);

	return 0;
}
static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr)
{
	struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;

	if (ifa) {
		struct net_device *dev = ifa->ifa_dev->dev;
		struct velocity_info *vptr;
		unsigned long flags;

		spin_lock_irqsave(&velocity_dev_list_lock, flags);
		list_for_each_entry(vptr, &velocity_dev_list, list) {
			if (vptr->dev == dev) {
				velocity_get_ip(vptr);
				break;
			}
		}
		spin_unlock_irqrestore(&velocity_dev_list_lock, flags);