MOXA linux-2.6.x / linux-2.6.9-uc0 from sdlinux-moxaart.tgz
[linux-2.6.9-moxart.git] / drivers / net / amd8111e.c
blob91fa987dc8a868de846bfceb2691b4001b273d0d
2 /* Advanced Micro Devices Inc. AMD8111E Linux Network Driver
3 * Copyright (C) 2004 Advanced Micro Devices
5 *
6 * Copyright 2001,2002 Jeff Garzik <jgarzik@mandrakesoft.com> [ 8139cp.c,tg3.c ]
7 * Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com)[ tg3.c]
8 * Copyright 1996-1999 Thomas Bogendoerfer [ pcnet32.c ]
9 * Derived from the lance driver written 1993,1994,1995 by Donald Becker.
10 * Copyright 1993 United States Government as represented by the
11 * Director, National Security Agency.[ pcnet32.c ]
12 * Carsten Langgaard, carstenl@mips.com [ pcnet32.c ]
13 * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
21 * This program is distributed in the hope that it will be useful,
22 * but WITHOUT ANY WARRANTY; without even the implied warranty of
23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
24 * GNU General Public License for more details.
26 * You should have received a copy of the GNU General Public License
27 * along with this program; if not, write to the Free Software
28 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
29 * USA
31 Module Name:
33 amd8111e.c
35 Abstract:
37 AMD8111 based 10/100 Ethernet Controller Driver.
39 Environment:
41 Kernel Mode
43 Revision History:
44 3.0.0
45 Initial Revision.
46 3.0.1
47 1. Dynamic interrupt coalescing.
48 2. Removed prev_stats.
49 3. MII support.
50 4. Dynamic IPG support
51 3.0.2 05/29/2003
52 1. Bug fix: Fixed failure to send jumbo packets larger than 4k.
53 2. Bug fix: Fixed VLAN support failure.
54 3. Bug fix: Fixed receive interrupt coalescing bug.
55 4. Dynamic IPG support is disabled by default.
56 3.0.3 06/05/2003
57 1. Bug fix: Fixed failure to close the interface if SMP is enabled.
58 3.0.4 12/09/2003
59 1. Added set_mac_address routine for bonding driver support.
60 2. Tested the driver for bonding support
61 3. Bug fix: Fixed mismatch between the actual receive buffer length and the length
62 indicated to the h/w.
63 4. Modified amd8111e_rx() routine to receive all the received packets
64 in the first interrupt.
65 5. Bug fix: Corrected rx_errors reported in get_stats() function.
66 3.0.5 03/22/2004
67 1. Added NAPI support
72 #include <linux/config.h>
73 #include <linux/module.h>
74 #include <linux/kernel.h>
75 #include <linux/types.h>
76 #include <linux/compiler.h>
77 #include <linux/slab.h>
78 #include <linux/delay.h>
79 #include <linux/init.h>
80 #include <linux/ioport.h>
81 #include <linux/pci.h>
82 #include <linux/netdevice.h>
83 #include <linux/etherdevice.h>
84 #include <linux/skbuff.h>
85 #include <linux/ethtool.h>
86 #include <linux/mii.h>
87 #include <linux/if_vlan.h>
88 #include <linux/ctype.h>
89 #include <linux/crc32.h>
91 #include <asm/system.h>
92 #include <asm/io.h>
93 #include <asm/byteorder.h>
94 #include <asm/uaccess.h>
96 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
97 #define AMD8111E_VLAN_TAG_USED 1
98 #else
99 #define AMD8111E_VLAN_TAG_USED 0
100 #endif
102 #include "amd8111e.h"
103 #define MODULE_NAME "amd8111e"
104 #define MODULE_VERS "3.0.5"
105 MODULE_AUTHOR("Advanced Micro Devices, Inc.");
106 MODULE_DESCRIPTION ("AMD8111 based 10/100 Ethernet Controller. Driver Version 3.0.3");
107 MODULE_LICENSE("GPL");
108 MODULE_PARM(speed_duplex, "1-" __MODULE_STRING (MAX_UNITS) "i");
109 MODULE_PARM_DESC(speed_duplex, "Set device speed and duplex modes, 0: Auto Negotitate, 1: 10Mbps Half Duplex, 2: 10Mbps Full Duplex, 3: 100Mbps Half Duplex, 4: 100Mbps Full Duplex");
110 MODULE_PARM(coalesce, "1-" __MODULE_STRING(MAX_UNITS) "i");
111 MODULE_PARM_DESC(coalesce, "Enable or Disable interrupt coalescing, 1: Enable, 0: Disable");
112 MODULE_PARM(dynamic_ipg, "1-" __MODULE_STRING(MAX_UNITS) "i");
113 MODULE_PARM_DESC(dynamic_ipg, "Enable or Disable dynamic IPG, 1: Enable, 0: Disable");
115 static struct pci_device_id amd8111e_pci_tbl[] = {
117 { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD8111E_7462,
118 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
119 { 0, }
123 This function will read the PHY registers.
125 static int amd8111e_read_phy(struct amd8111e_priv* lp, int phy_id, int reg, u32* val)
127 void * mmio = lp->mmio;
128 unsigned int reg_val;
129 unsigned int repeat= REPEAT_CNT;
131 reg_val = readl(mmio + PHY_ACCESS);
132 while (reg_val & PHY_CMD_ACTIVE)
133 reg_val = readl( mmio + PHY_ACCESS );
135 writel( PHY_RD_CMD | ((phy_id & 0x1f) << 21) |
136 ((reg & 0x1f) << 16), mmio +PHY_ACCESS);
138 reg_val = readl(mmio + PHY_ACCESS);
139 udelay(30); /* It takes 30 us to read/write data */
140 } while (--repeat && (reg_val & PHY_CMD_ACTIVE));
141 if(reg_val & PHY_RD_ERR)
142 goto err_phy_read;
144 *val = reg_val & 0xffff;
145 return 0;
146 err_phy_read:
147 *val = 0;
148 return -EINVAL;
153 This function will write into PHY registers.
155 static int amd8111e_write_phy(struct amd8111e_priv* lp,int phy_id, int reg, u32 val)
157 unsigned int repeat = REPEAT_CNT
158 void * mmio = lp->mmio;
159 unsigned int reg_val;
161 reg_val = readl(mmio + PHY_ACCESS);
162 while (reg_val & PHY_CMD_ACTIVE)
163 reg_val = readl( mmio + PHY_ACCESS );
165 writel( PHY_WR_CMD | ((phy_id & 0x1f) << 21) |
166 ((reg & 0x1f) << 16)|val, mmio + PHY_ACCESS);
169 reg_val = readl(mmio + PHY_ACCESS);
170 udelay(30); /* It takes 30 us to read/write the data */
171 } while (--repeat && (reg_val & PHY_CMD_ACTIVE));
173 if(reg_val & PHY_RD_ERR)
174 goto err_phy_write;
176 return 0;
178 err_phy_write:
179 return -EINVAL;
/*
 * MII register read callback provided to the generic mii interface.
 * Returns the 16-bit register value; on a failed PHY read,
 * amd8111e_read_phy() stores 0, so 0 is returned.
 */
static int amd8111e_mdio_read(struct net_device *dev, int phy_id, int reg_num)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	unsigned int reg_val;

	amd8111e_read_phy(lp, phy_id, reg_num, &reg_val);

	return reg_val;
}
/* MII register write callback provided to the generic mii interface. */
static void amd8111e_mdio_write(struct net_device *dev,
				int phy_id, int reg_num, int val)
{
	struct amd8111e_priv *lp = netdev_priv(dev);

	amd8111e_write_phy(lp, phy_id, reg_num, val);
}
206 This function will set PHY speed. During initialization sets the original speed to 100 full.
208 static void amd8111e_set_ext_phy(struct net_device *dev)
210 struct amd8111e_priv *lp = netdev_priv(dev);
211 u32 bmcr,advert,tmp;
213 /* Determine mii register values to set the speed */
214 advert = amd8111e_mdio_read(dev, PHY_ID, MII_ADVERTISE);
215 tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
216 switch (lp->ext_phy_option){
218 default:
219 case SPEED_AUTONEG: /* advertise all values */
220 tmp |= ( ADVERTISE_10HALF|ADVERTISE_10FULL|
221 ADVERTISE_100HALF|ADVERTISE_100FULL) ;
222 break;
223 case SPEED10_HALF:
224 tmp |= ADVERTISE_10HALF;
225 break;
226 case SPEED10_FULL:
227 tmp |= ADVERTISE_10FULL;
228 break;
229 case SPEED100_HALF:
230 tmp |= ADVERTISE_100HALF;
231 break;
232 case SPEED100_FULL:
233 tmp |= ADVERTISE_100FULL;
234 break;
237 if(advert != tmp)
238 amd8111e_mdio_write(dev, PHY_ID, MII_ADVERTISE, tmp);
239 /* Restart auto negotiation */
240 bmcr = amd8111e_mdio_read(dev, PHY_ID, MII_BMCR);
241 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
242 amd8111e_mdio_write(dev, PHY_ID, MII_BMCR, bmcr);
247 This function will unmap skb->data space and will free
248 all transmit and receive skbuffs.
250 static int amd8111e_free_skbs(struct net_device *dev)
252 struct amd8111e_priv *lp = netdev_priv(dev);
253 struct sk_buff* rx_skbuff;
254 int i;
256 /* Freeing transmit skbs */
257 for(i = 0; i < NUM_TX_BUFFERS; i++){
258 if(lp->tx_skbuff[i]){
259 pci_unmap_single(lp->pci_dev,lp->tx_dma_addr[i], lp->tx_skbuff[i]->len,PCI_DMA_TODEVICE);
260 dev_kfree_skb (lp->tx_skbuff[i]);
261 lp->tx_skbuff[i] = NULL;
262 lp->tx_dma_addr[i] = 0;
265 /* Freeing previously allocated receive buffers */
266 for (i = 0; i < NUM_RX_BUFFERS; i++){
267 rx_skbuff = lp->rx_skbuff[i];
268 if(rx_skbuff != NULL){
269 pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[i],
270 lp->rx_buff_len - 2,PCI_DMA_FROMDEVICE);
271 dev_kfree_skb(lp->rx_skbuff[i]);
272 lp->rx_skbuff[i] = NULL;
273 lp->rx_dma_addr[i] = 0;
277 return 0;
281 This will set the receive buffer length corresponding to the mtu size of networkinterface.
283 static inline void amd8111e_set_rx_buff_len(struct net_device* dev)
285 struct amd8111e_priv* lp = netdev_priv(dev);
286 unsigned int mtu = dev->mtu;
288 if (mtu > ETH_DATA_LEN){
289 /* MTU + ethernet header + FCS
290 + optional VLAN tag + skb reserve space 2 */
292 lp->rx_buff_len = mtu + ETH_HLEN + 10;
293 lp->options |= OPTION_JUMBO_ENABLE;
294 } else{
295 lp->rx_buff_len = PKT_BUFF_SZ;
296 lp->options &= ~OPTION_JUMBO_ENABLE;
301 This function will free all the previously allocated buffers, determine new receive buffer length and will allocate new receive buffers. This function also allocates and initializes both the transmitter and receive hardware descriptors.
303 static int amd8111e_init_ring(struct net_device *dev)
305 struct amd8111e_priv *lp = netdev_priv(dev);
306 int i;
308 lp->rx_idx = lp->tx_idx = 0;
309 lp->tx_complete_idx = 0;
310 lp->tx_ring_idx = 0;
313 if(lp->opened)
314 /* Free previously allocated transmit and receive skbs */
315 amd8111e_free_skbs(dev);
317 else{
318 /* allocate the tx and rx descriptors */
319 if((lp->tx_ring = pci_alloc_consistent(lp->pci_dev,
320 sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,
321 &lp->tx_ring_dma_addr)) == NULL)
323 goto err_no_mem;
325 if((lp->rx_ring = pci_alloc_consistent(lp->pci_dev,
326 sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,
327 &lp->rx_ring_dma_addr)) == NULL)
329 goto err_free_tx_ring;
332 /* Set new receive buff size */
333 amd8111e_set_rx_buff_len(dev);
335 /* Allocating receive skbs */
336 for (i = 0; i < NUM_RX_BUFFERS; i++) {
338 if (!(lp->rx_skbuff[i] = dev_alloc_skb(lp->rx_buff_len))) {
339 /* Release previos allocated skbs */
340 for(--i; i >= 0 ;i--)
341 dev_kfree_skb(lp->rx_skbuff[i]);
342 goto err_free_rx_ring;
344 skb_reserve(lp->rx_skbuff[i],2);
346 /* Initilaizing receive descriptors */
347 for (i = 0; i < NUM_RX_BUFFERS; i++) {
348 lp->rx_dma_addr[i] = pci_map_single(lp->pci_dev,
349 lp->rx_skbuff[i]->data,lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
351 lp->rx_ring[i].buff_phy_addr = cpu_to_le32(lp->rx_dma_addr[i]);
352 lp->rx_ring[i].buff_count = cpu_to_le16(lp->rx_buff_len-2);
353 lp->rx_ring[i].rx_flags = cpu_to_le16(OWN_BIT);
356 /* Initializing transmit descriptors */
357 for (i = 0; i < NUM_TX_RING_DR; i++) {
358 lp->tx_ring[i].buff_phy_addr = 0;
359 lp->tx_ring[i].tx_flags = 0;
360 lp->tx_ring[i].buff_count = 0;
363 return 0;
365 err_free_rx_ring:
367 pci_free_consistent(lp->pci_dev,
368 sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,lp->rx_ring,
369 lp->rx_ring_dma_addr);
371 err_free_tx_ring:
373 pci_free_consistent(lp->pci_dev,
374 sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,lp->tx_ring,
375 lp->tx_ring_dma_addr);
377 err_no_mem:
378 return -ENOMEM;
380 /* This function will set the interrupt coalescing according to the input arguments */
381 static int amd8111e_set_coalesce(struct net_device * dev, enum coal_mode cmod)
383 unsigned int timeout;
384 unsigned int event_count;
386 struct amd8111e_priv *lp = netdev_priv(dev);
387 void* mmio = lp->mmio;
388 struct amd8111e_coalesce_conf * coal_conf = &lp->coal_conf;
391 switch(cmod)
393 case RX_INTR_COAL :
394 timeout = coal_conf->rx_timeout;
395 event_count = coal_conf->rx_event_count;
396 if( timeout > MAX_TIMEOUT ||
397 event_count > MAX_EVENT_COUNT )
398 return -EINVAL;
400 timeout = timeout * DELAY_TIMER_CONV;
401 writel(VAL0|STINTEN, mmio+INTEN0);
402 writel((u32)DLY_INT_A_R0|( event_count<< 16 )|timeout,
403 mmio+DLY_INT_A);
404 break;
406 case TX_INTR_COAL :
407 timeout = coal_conf->tx_timeout;
408 event_count = coal_conf->tx_event_count;
409 if( timeout > MAX_TIMEOUT ||
410 event_count > MAX_EVENT_COUNT )
411 return -EINVAL;
414 timeout = timeout * DELAY_TIMER_CONV;
415 writel(VAL0|STINTEN,mmio+INTEN0);
416 writel((u32)DLY_INT_B_T0|( event_count<< 16 )|timeout,
417 mmio+DLY_INT_B);
418 break;
420 case DISABLE_COAL:
421 writel(0,mmio+STVAL);
422 writel(STINTEN, mmio+INTEN0);
423 writel(0, mmio +DLY_INT_B);
424 writel(0, mmio+DLY_INT_A);
425 break;
426 case ENABLE_COAL:
427 /* Start the timer */
428 writel((u32)SOFT_TIMER_FREQ, mmio+STVAL); /* 0.5 sec */
429 writel(VAL0|STINTEN, mmio+INTEN0);
430 break;
431 default:
432 break;
435 return 0;
440 This function initializes the device registers and starts the device.
442 static int amd8111e_restart(struct net_device *dev)
444 struct amd8111e_priv *lp = netdev_priv(dev);
445 void * mmio = lp->mmio;
446 int i,reg_val;
448 /* stop the chip */
449 writel(RUN, mmio + CMD0);
451 if(amd8111e_init_ring(dev))
452 return -ENOMEM;
454 /* enable the port manager and set auto negotiation always */
455 writel((u32) VAL1|EN_PMGR, mmio + CMD3 );
456 writel((u32)XPHYANE|XPHYRST , mmio + CTRL2);
458 amd8111e_set_ext_phy(dev);
460 /* set control registers */
461 reg_val = readl(mmio + CTRL1);
462 reg_val &= ~XMTSP_MASK;
463 writel( reg_val| XMTSP_128 | CACHE_ALIGN, mmio + CTRL1 );
465 /* enable interrupt */
466 writel( APINT5EN | APINT4EN | APINT3EN | APINT2EN | APINT1EN |
467 APINT0EN | MIIPDTINTEN | MCCIINTEN | MCCINTEN | MREINTEN |
468 SPNDINTEN | MPINTEN | SINTEN | STINTEN, mmio + INTEN0);
470 writel(VAL3 | LCINTEN | VAL1 | TINTEN0 | VAL0 | RINTEN0, mmio + INTEN0);
472 /* initialize tx and rx ring base addresses */
473 writel((u32)lp->tx_ring_dma_addr,mmio + XMT_RING_BASE_ADDR0);
474 writel((u32)lp->rx_ring_dma_addr,mmio+ RCV_RING_BASE_ADDR0);
476 writew((u32)NUM_TX_RING_DR, mmio + XMT_RING_LEN0);
477 writew((u16)NUM_RX_RING_DR, mmio + RCV_RING_LEN0);
479 /* set default IPG to 96 */
480 writew((u32)DEFAULT_IPG,mmio+IPG);
481 writew((u32)(DEFAULT_IPG-IFS1_DELTA), mmio + IFS1);
483 if(lp->options & OPTION_JUMBO_ENABLE){
484 writel((u32)VAL2|JUMBO, mmio + CMD3);
485 /* Reset REX_UFLO */
486 writel( REX_UFLO, mmio + CMD2);
487 /* Should not set REX_UFLO for jumbo frames */
488 writel( VAL0 | APAD_XMT|REX_RTRY , mmio + CMD2);
489 }else{
490 writel( VAL0 | APAD_XMT | REX_RTRY|REX_UFLO, mmio + CMD2);
491 writel((u32)JUMBO, mmio + CMD3);
494 #if AMD8111E_VLAN_TAG_USED
495 writel((u32) VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3);
496 #endif
497 writel( VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2 );
499 /* Setting the MAC address to the device */
500 for(i = 0; i < ETH_ADDR_LEN; i++)
501 writeb( dev->dev_addr[i], mmio + PADR + i );
503 /* Enable interrupt coalesce */
504 if(lp->options & OPTION_INTR_COAL_ENABLE){
505 printk(KERN_INFO "%s: Interrupt Coalescing Enabled.\n",
506 dev->name);
507 amd8111e_set_coalesce(dev,ENABLE_COAL);
510 /* set RUN bit to start the chip */
511 writel(VAL2 | RDMD0, mmio + CMD0);
512 writel(VAL0 | INTREN | RUN, mmio + CMD0);
514 /* To avoid PCI posting bug */
515 readl(mmio+CMD0);
516 return 0;
519 This function clears necessary the device registers.
521 static void amd8111e_init_hw_default( struct amd8111e_priv* lp)
523 unsigned int reg_val;
524 unsigned int logic_filter[2] ={0,};
525 void * mmio = lp->mmio;
528 /* stop the chip */
529 writel(RUN, mmio + CMD0);
531 /* AUTOPOLL0 Register *//*TBD default value is 8100 in FPS */
532 writew( 0x8101, mmio + AUTOPOLL0);
534 /* Clear RCV_RING_BASE_ADDR */
535 writel(0, mmio + RCV_RING_BASE_ADDR0);
537 /* Clear XMT_RING_BASE_ADDR */
538 writel(0, mmio + XMT_RING_BASE_ADDR0);
539 writel(0, mmio + XMT_RING_BASE_ADDR1);
540 writel(0, mmio + XMT_RING_BASE_ADDR2);
541 writel(0, mmio + XMT_RING_BASE_ADDR3);
543 /* Clear CMD0 */
544 writel(CMD0_CLEAR,mmio + CMD0);
546 /* Clear CMD2 */
547 writel(CMD2_CLEAR, mmio +CMD2);
549 /* Clear CMD7 */
550 writel(CMD7_CLEAR , mmio + CMD7);
552 /* Clear DLY_INT_A and DLY_INT_B */
553 writel(0x0, mmio + DLY_INT_A);
554 writel(0x0, mmio + DLY_INT_B);
556 /* Clear FLOW_CONTROL */
557 writel(0x0, mmio + FLOW_CONTROL);
559 /* Clear INT0 write 1 to clear register */
560 reg_val = readl(mmio + INT0);
561 writel(reg_val, mmio + INT0);
563 /* Clear STVAL */
564 writel(0x0, mmio + STVAL);
566 /* Clear INTEN0 */
567 writel( INTEN0_CLEAR, mmio + INTEN0);
569 /* Clear LADRF */
570 writel(0x0 , mmio + LADRF);
572 /* Set SRAM_SIZE & SRAM_BOUNDARY registers */
573 writel( 0x80010,mmio + SRAM_SIZE);
575 /* Clear RCV_RING0_LEN */
576 writel(0x0, mmio + RCV_RING_LEN0);
578 /* Clear XMT_RING0/1/2/3_LEN */
579 writel(0x0, mmio + XMT_RING_LEN0);
580 writel(0x0, mmio + XMT_RING_LEN1);
581 writel(0x0, mmio + XMT_RING_LEN2);
582 writel(0x0, mmio + XMT_RING_LEN3);
584 /* Clear XMT_RING_LIMIT */
585 writel(0x0, mmio + XMT_RING_LIMIT);
587 /* Clear MIB */
588 writew(MIB_CLEAR, mmio + MIB_ADDR);
590 /* Clear LARF */
591 amd8111e_writeq(*(u64*)logic_filter,mmio+LADRF);
593 /* SRAM_SIZE register */
594 reg_val = readl(mmio + SRAM_SIZE);
596 if(lp->options & OPTION_JUMBO_ENABLE)
597 writel( VAL2|JUMBO, mmio + CMD3);
598 #if AMD8111E_VLAN_TAG_USED
599 writel(VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3 );
600 #endif
601 /* Set default value to CTRL1 Register */
602 writel(CTRL1_DEFAULT, mmio + CTRL1);
604 /* To avoid PCI posting bug */
605 readl(mmio + CMD2);
610 This function disables the interrupt and clears all the pending
611 interrupts in INT0
613 static void amd8111e_disable_interrupt(struct amd8111e_priv* lp)
615 u32 intr0;
617 /* Disable interrupt */
618 writel(INTREN, lp->mmio + CMD0);
620 /* Clear INT0 */
621 intr0 = readl(lp->mmio + INT0);
622 writel(intr0, lp->mmio + INT0);
624 /* To avoid PCI posting bug */
625 readl(lp->mmio + INT0);
630 This function stops the chip.
632 static void amd8111e_stop_chip(struct amd8111e_priv* lp)
634 writel(RUN, lp->mmio + CMD0);
636 /* To avoid PCI posting bug */
637 readl(lp->mmio + CMD0);
641 This function frees the transmiter and receiver descriptor rings.
643 static void amd8111e_free_ring(struct amd8111e_priv* lp)
646 /* Free transmit and receive skbs */
647 amd8111e_free_skbs(lp->amd8111e_net_dev);
649 /* Free transmit and receive descriptor rings */
650 if(lp->rx_ring){
651 pci_free_consistent(lp->pci_dev,
652 sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,
653 lp->rx_ring, lp->rx_ring_dma_addr);
654 lp->rx_ring = NULL;
657 if(lp->tx_ring){
658 pci_free_consistent(lp->pci_dev,
659 sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,
660 lp->tx_ring, lp->tx_ring_dma_addr);
662 lp->tx_ring = NULL;
#if AMD8111E_VLAN_TAG_USED
/*
 * Receive-indication helper for VLAN-tagged frames: hands the skb to
 * the VLAN layer via the NAPI or non-NAPI acceleration path.
 */
static int amd8111e_vlan_rx(struct amd8111e_priv *lp,
			    struct sk_buff *skb, u16 vlan_tag)
{
#ifdef CONFIG_AMD8111E_NAPI
	return vlan_hwaccel_receive_skb(skb, lp->vlgrp, vlan_tag);
#else
	return vlan_hwaccel_rx(skb, lp->vlgrp, vlan_tag);
#endif /* CONFIG_AMD8111E_NAPI */
}
#endif
681 This function will free all the transmit skbs that are actually transmitted by the device. It will check the ownership of the skb before freeing the skb.
683 static int amd8111e_tx(struct net_device *dev)
685 struct amd8111e_priv* lp = netdev_priv(dev);
686 int tx_index = lp->tx_complete_idx & TX_RING_DR_MOD_MASK;
687 int status;
688 /* Complete all the transmit packet */
689 while (lp->tx_complete_idx != lp->tx_idx){
690 tx_index = lp->tx_complete_idx & TX_RING_DR_MOD_MASK;
691 status = le16_to_cpu(lp->tx_ring[tx_index].tx_flags);
693 if(status & OWN_BIT)
694 break; /* It still hasn't been Txed */
696 lp->tx_ring[tx_index].buff_phy_addr = 0;
698 /* We must free the original skb */
699 if (lp->tx_skbuff[tx_index]) {
700 pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[tx_index],
701 lp->tx_skbuff[tx_index]->len,
702 PCI_DMA_TODEVICE);
703 dev_kfree_skb_irq (lp->tx_skbuff[tx_index]);
704 lp->tx_skbuff[tx_index] = NULL;
705 lp->tx_dma_addr[tx_index] = 0;
707 lp->tx_complete_idx++;
708 /*COAL update tx coalescing parameters */
709 lp->coal_conf.tx_packets++;
710 lp->coal_conf.tx_bytes += lp->tx_ring[tx_index].buff_count;
712 if (netif_queue_stopped(dev) &&
713 lp->tx_complete_idx > lp->tx_idx - NUM_TX_BUFFERS +2){
714 /* The ring is no longer full, clear tbusy. */
715 /* lp->tx_full = 0; */
716 netif_wake_queue (dev);
719 return 0;
722 #ifdef CONFIG_AMD8111E_NAPI
723 /* This function handles the driver receive operation in polling mode */
724 static int amd8111e_rx_poll(struct net_device *dev, int * budget)
726 struct amd8111e_priv *lp = dev->priv;
727 int rx_index = lp->rx_idx & RX_RING_DR_MOD_MASK;
728 void * mmio = lp->mmio;
729 struct sk_buff *skb,*new_skb;
730 int min_pkt_len, status;
731 unsigned int intr0;
732 int num_rx_pkt = 0;
733 /*int max_rx_pkt = NUM_RX_BUFFERS;*/
734 short pkt_len;
735 #if AMD8111E_VLAN_TAG_USED
736 short vtag;
737 #endif
738 int rx_pkt_limit = dev->quota;
740 do{
741 /* process receive packets until we use the quota*/
742 /* If we own the next entry, it's a new packet. Send it up. */
743 while(!(lp->rx_ring[rx_index].rx_flags & OWN_BIT)){
745 /* check if err summary bit is set */
746 if(le16_to_cpu(lp->rx_ring[rx_index].rx_flags)
747 & ERR_BIT){
749 * There is a tricky error noted by John Murphy,
750 * <murf@perftech.com> to Russ Nelson: Even with
751 * full-sized * buffers it's possible for a
752 * jabber packet to use two buffers, with only
753 * the last correctly noting the error.
756 /* reseting flags */
757 lp->rx_ring[rx_index].rx_flags &=RESET_RX_FLAGS;
758 goto err_next_pkt;
761 /* check for STP and ENP */
762 status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags);
763 if(!((status & STP_BIT) && (status & ENP_BIT))){
764 /* reseting flags */
765 lp->rx_ring[rx_index].rx_flags &=RESET_RX_FLAGS;
766 goto err_next_pkt;
768 pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4;
770 #if AMD8111E_VLAN_TAG_USED
771 vtag = le16_to_cpu(lp->rx_ring[rx_index].rx_flags) & TT_MASK;
772 /*MAC will strip vlan tag*/
773 if(lp->vlgrp != NULL && vtag !=0)
774 min_pkt_len =MIN_PKT_LEN - 4;
775 else
776 #endif
777 min_pkt_len =MIN_PKT_LEN;
779 if (pkt_len < min_pkt_len) {
780 lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
781 lp->drv_rx_errors++;
782 goto err_next_pkt;
784 if(--rx_pkt_limit < 0)
785 goto rx_not_empty;
786 if(!(new_skb = dev_alloc_skb(lp->rx_buff_len))){
787 /* if allocation fail,
788 ignore that pkt and go to next one */
789 lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
790 lp->drv_rx_errors++;
791 goto err_next_pkt;
794 skb_reserve(new_skb, 2);
795 skb = lp->rx_skbuff[rx_index];
796 pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index],
797 lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
798 skb_put(skb, pkt_len);
799 skb->dev = dev;
800 lp->rx_skbuff[rx_index] = new_skb;
801 new_skb->dev = dev;
802 lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev,
803 new_skb->data, lp->rx_buff_len-2,PCI_DMA_FROMDEVICE);
805 skb->protocol = eth_type_trans(skb, dev);
807 #if AMD8111E_VLAN_TAG_USED
809 vtag = lp->rx_ring[rx_index].rx_flags & TT_MASK;
810 if(lp->vlgrp != NULL && (vtag == TT_VLAN_TAGGED)){
811 amd8111e_vlan_rx(lp, skb,
812 lp->rx_ring[rx_index].tag_ctrl_info);
813 } else
814 #endif
816 netif_receive_skb(skb);
817 /*COAL update rx coalescing parameters*/
818 lp->coal_conf.rx_packets++;
819 lp->coal_conf.rx_bytes += pkt_len;
820 num_rx_pkt++;
821 dev->last_rx = jiffies;
823 err_next_pkt:
824 lp->rx_ring[rx_index].buff_phy_addr
825 = cpu_to_le32(lp->rx_dma_addr[rx_index]);
826 lp->rx_ring[rx_index].buff_count =
827 cpu_to_le16(lp->rx_buff_len-2);
828 lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT);
829 rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK;
831 /* Check the interrupt status register for more packets in the
832 mean time. Process them since we have not used up our quota.*/
834 intr0 = readl(mmio + INT0);
835 /*Ack receive packets */
836 writel(intr0 & RINT0,mmio + INT0);
838 }while(intr0 & RINT0);
840 /* Receive descriptor is empty now */
841 dev->quota -= num_rx_pkt;
842 *budget -= num_rx_pkt;
843 netif_rx_complete(dev);
844 /* enable receive interrupt */
845 writel(VAL0|RINTEN0, mmio + INTEN0);
846 writel(VAL2 | RDMD0, mmio + CMD0);
847 return 0;
848 rx_not_empty:
849 /* Do not call a netif_rx_complete */
850 dev->quota -= num_rx_pkt;
851 *budget -= num_rx_pkt;
852 return 1;
857 #else
859 This function will check the ownership of receive buffers and descriptors. It will indicate to kernel up to half the number of maximum receive buffers in the descriptor ring, in a single receive interrupt. It will also replenish the descriptors with new skbs.
861 static int amd8111e_rx(struct net_device *dev)
863 struct amd8111e_priv *lp = netdev_priv(dev);
864 struct sk_buff *skb,*new_skb;
865 int rx_index = lp->rx_idx & RX_RING_DR_MOD_MASK;
866 int min_pkt_len, status;
867 int num_rx_pkt = 0;
868 int max_rx_pkt = NUM_RX_BUFFERS;
869 short pkt_len;
870 #if AMD8111E_VLAN_TAG_USED
871 short vtag;
872 #endif
874 /* If we own the next entry, it's a new packet. Send it up. */
875 while(++num_rx_pkt <= max_rx_pkt){
876 if(lp->rx_ring[rx_index].rx_flags & OWN_BIT)
877 return 0;
879 /* check if err summary bit is set */
880 if(le16_to_cpu(lp->rx_ring[rx_index].rx_flags) & ERR_BIT){
882 * There is a tricky error noted by John Murphy,
883 * <murf@perftech.com> to Russ Nelson: Even with full-sized
884 * buffers it's possible for a jabber packet to use two
885 * buffers, with only the last correctly noting the error. */
886 /* reseting flags */
887 lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
888 goto err_next_pkt;
890 /* check for STP and ENP */
891 status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags);
892 if(!((status & STP_BIT) && (status & ENP_BIT))){
893 /* reseting flags */
894 lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
895 goto err_next_pkt;
897 pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4;
899 #if AMD8111E_VLAN_TAG_USED
900 vtag = le16_to_cpu(lp->rx_ring[rx_index].rx_flags) & TT_MASK;
901 /*MAC will strip vlan tag*/
902 if(lp->vlgrp != NULL && vtag !=0)
903 min_pkt_len =MIN_PKT_LEN - 4;
904 else
905 #endif
906 min_pkt_len =MIN_PKT_LEN;
908 if (pkt_len < min_pkt_len) {
909 lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
910 lp->drv_rx_errors++;
911 goto err_next_pkt;
913 if(!(new_skb = dev_alloc_skb(lp->rx_buff_len))){
914 /* if allocation fail,
915 ignore that pkt and go to next one */
916 lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
917 lp->drv_rx_errors++;
918 goto err_next_pkt;
921 skb_reserve(new_skb, 2);
922 skb = lp->rx_skbuff[rx_index];
923 pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index],
924 lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
925 skb_put(skb, pkt_len);
926 skb->dev = dev;
927 lp->rx_skbuff[rx_index] = new_skb;
928 new_skb->dev = dev;
929 lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev,
930 new_skb->data, lp->rx_buff_len-2,PCI_DMA_FROMDEVICE);
932 skb->protocol = eth_type_trans(skb, dev);
934 #if AMD8111E_VLAN_TAG_USED
936 vtag = lp->rx_ring[rx_index].rx_flags & TT_MASK;
937 if(lp->vlgrp != NULL && (vtag == TT_VLAN_TAGGED)){
938 amd8111e_vlan_rx(lp, skb,
939 lp->rx_ring[rx_index].tag_ctrl_info);
940 } else
941 #endif
943 netif_rx (skb);
944 /*COAL update rx coalescing parameters*/
945 lp->coal_conf.rx_packets++;
946 lp->coal_conf.rx_bytes += pkt_len;
948 dev->last_rx = jiffies;
950 err_next_pkt:
951 lp->rx_ring[rx_index].buff_phy_addr
952 = cpu_to_le32(lp->rx_dma_addr[rx_index]);
953 lp->rx_ring[rx_index].buff_count =
954 cpu_to_le16(lp->rx_buff_len-2);
955 lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT);
956 rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK;
959 return 0;
961 #endif /* CONFIG_AMD8111E_NAPI */
963 This function will indicate the link status to the kernel.
965 static int amd8111e_link_change(struct net_device* dev)
967 struct amd8111e_priv *lp = netdev_priv(dev);
968 int status0,speed;
970 /* read the link change */
971 status0 = readl(lp->mmio + STAT0);
973 if(status0 & LINK_STATS){
974 if(status0 & AUTONEG_COMPLETE)
975 lp->link_config.autoneg = AUTONEG_ENABLE;
976 else
977 lp->link_config.autoneg = AUTONEG_DISABLE;
979 if(status0 & FULL_DPLX)
980 lp->link_config.duplex = DUPLEX_FULL;
981 else
982 lp->link_config.duplex = DUPLEX_HALF;
983 speed = (status0 & SPEED_MASK) >> 7;
984 if(speed == PHY_SPEED_10)
985 lp->link_config.speed = SPEED_10;
986 else if(speed == PHY_SPEED_100)
987 lp->link_config.speed = SPEED_100;
989 printk(KERN_INFO "%s: Link is Up. Speed is %s Mbps %s Duplex\n", dev->name,
990 (lp->link_config.speed == SPEED_100) ? "100": "10",
991 (lp->link_config.duplex == DUPLEX_FULL)? "Full": "Half");
992 netif_carrier_on(dev);
994 else{
995 lp->link_config.speed = SPEED_INVALID;
996 lp->link_config.duplex = DUPLEX_INVALID;
997 lp->link_config.autoneg = AUTONEG_INVALID;
998 printk(KERN_INFO "%s: Link is Down.\n",dev->name);
999 netif_carrier_off(dev);
1002 return 0;
1005 This function reads the mib counters.
1007 static int amd8111e_read_mib(void* mmio, u8 MIB_COUNTER)
1009 unsigned int status;
1010 unsigned int data;
1011 unsigned int repeat = REPEAT_CNT;
1013 writew( MIB_RD_CMD | MIB_COUNTER, mmio + MIB_ADDR);
1014 do {
1015 status = readw(mmio + MIB_ADDR);
1016 udelay(2); /* controller takes MAX 2 us to get mib data */
1018 while (--repeat && (status & MIB_CMD_ACTIVE));
1020 data = readl(mmio + MIB_DATA);
1021 return data;
1025 This function reads the mib registers and returns the hardware statistics. It updates previous internal driver statistics with new values.
1027 static struct net_device_stats *amd8111e_get_stats(struct net_device * dev)
1029 struct amd8111e_priv *lp = netdev_priv(dev);
1030 void * mmio = lp->mmio;
1031 unsigned long flags;
1032 /* struct net_device_stats *prev_stats = &lp->prev_stats; */
1033 struct net_device_stats* new_stats = &lp->stats;
1035 if(!lp->opened)
1036 return &lp->stats;
1037 spin_lock_irqsave (&lp->lock, flags);
1039 /* stats.rx_packets */
1040 new_stats->rx_packets = amd8111e_read_mib(mmio, rcv_broadcast_pkts)+
1041 amd8111e_read_mib(mmio, rcv_multicast_pkts)+
1042 amd8111e_read_mib(mmio, rcv_unicast_pkts);
1044 /* stats.tx_packets */
1045 new_stats->tx_packets = amd8111e_read_mib(mmio, xmt_packets);
1047 /*stats.rx_bytes */
1048 new_stats->rx_bytes = amd8111e_read_mib(mmio, rcv_octets);
1050 /* stats.tx_bytes */
1051 new_stats->tx_bytes = amd8111e_read_mib(mmio, xmt_octets);
1053 /* stats.rx_errors */
1054 /* hw errors + errors driver reported */
1055 new_stats->rx_errors = amd8111e_read_mib(mmio, rcv_undersize_pkts)+
1056 amd8111e_read_mib(mmio, rcv_fragments)+
1057 amd8111e_read_mib(mmio, rcv_jabbers)+
1058 amd8111e_read_mib(mmio, rcv_alignment_errors)+
1059 amd8111e_read_mib(mmio, rcv_fcs_errors)+
1060 amd8111e_read_mib(mmio, rcv_miss_pkts)+
1061 lp->drv_rx_errors;
1063 /* stats.tx_errors */
1064 new_stats->tx_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts);
1066 /* stats.rx_dropped*/
1067 new_stats->rx_dropped = amd8111e_read_mib(mmio, rcv_miss_pkts);
1069 /* stats.tx_dropped*/
1070 new_stats->tx_dropped = amd8111e_read_mib(mmio, xmt_underrun_pkts);
1072 /* stats.multicast*/
1073 new_stats->multicast = amd8111e_read_mib(mmio, rcv_multicast_pkts);
1075 /* stats.collisions*/
1076 new_stats->collisions = amd8111e_read_mib(mmio, xmt_collisions);
1078 /* stats.rx_length_errors*/
1079 new_stats->rx_length_errors =
1080 amd8111e_read_mib(mmio, rcv_undersize_pkts)+
1081 amd8111e_read_mib(mmio, rcv_oversize_pkts);
1083 /* stats.rx_over_errors*/
1084 new_stats->rx_over_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);
1086 /* stats.rx_crc_errors*/
1087 new_stats->rx_crc_errors = amd8111e_read_mib(mmio, rcv_fcs_errors);
1089 /* stats.rx_frame_errors*/
1090 new_stats->rx_frame_errors =
1091 amd8111e_read_mib(mmio, rcv_alignment_errors);
1093 /* stats.rx_fifo_errors */
1094 new_stats->rx_fifo_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);
1096 /* stats.rx_missed_errors */
1097 new_stats->rx_missed_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);
1099 /* stats.tx_aborted_errors*/
1100 new_stats->tx_aborted_errors =
1101 amd8111e_read_mib(mmio, xmt_excessive_collision);
1103 /* stats.tx_carrier_errors*/
1104 new_stats->tx_carrier_errors =
1105 amd8111e_read_mib(mmio, xmt_loss_carrier);
1107 /* stats.tx_fifo_errors*/
1108 new_stats->tx_fifo_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts);
1110 /* stats.tx_window_errors*/
1111 new_stats->tx_window_errors =
1112 amd8111e_read_mib(mmio, xmt_late_collision);
1114 /* Reset the mibs for collecting new statistics */
1115 /* writew(MIB_CLEAR, mmio + MIB_ADDR);*/
1117 spin_unlock_irqrestore (&lp->lock, flags);
1119 return new_stats;
1121 /* This function recalculate the interupt coalescing mode on every interrupt
1122 according to the datarate and the packet rate.
1124 static int amd8111e_calc_coalesce(struct net_device *dev)
1126 struct amd8111e_priv *lp = netdev_priv(dev);
1127 struct amd8111e_coalesce_conf * coal_conf = &lp->coal_conf;
1128 int tx_pkt_rate;
1129 int rx_pkt_rate;
1130 int tx_data_rate;
1131 int rx_data_rate;
1132 int rx_pkt_size;
1133 int tx_pkt_size;
1135 tx_pkt_rate = coal_conf->tx_packets - coal_conf->tx_prev_packets;
1136 coal_conf->tx_prev_packets = coal_conf->tx_packets;
1138 tx_data_rate = coal_conf->tx_bytes - coal_conf->tx_prev_bytes;
1139 coal_conf->tx_prev_bytes = coal_conf->tx_bytes;
1141 rx_pkt_rate = coal_conf->rx_packets - coal_conf->rx_prev_packets;
1142 coal_conf->rx_prev_packets = coal_conf->rx_packets;
1144 rx_data_rate = coal_conf->rx_bytes - coal_conf->rx_prev_bytes;
1145 coal_conf->rx_prev_bytes = coal_conf->rx_bytes;
1147 if(rx_pkt_rate < 800){
1148 if(coal_conf->rx_coal_type != NO_COALESCE){
1150 coal_conf->rx_timeout = 0x0;
1151 coal_conf->rx_event_count = 0;
1152 amd8111e_set_coalesce(dev,RX_INTR_COAL);
1153 coal_conf->rx_coal_type = NO_COALESCE;
1156 else{
1158 rx_pkt_size = rx_data_rate/rx_pkt_rate;
1159 if (rx_pkt_size < 128){
1160 if(coal_conf->rx_coal_type != NO_COALESCE){
1162 coal_conf->rx_timeout = 0;
1163 coal_conf->rx_event_count = 0;
1164 amd8111e_set_coalesce(dev,RX_INTR_COAL);
1165 coal_conf->rx_coal_type = NO_COALESCE;
1169 else if ( (rx_pkt_size >= 128) && (rx_pkt_size < 512) ){
1171 if(coal_conf->rx_coal_type != LOW_COALESCE){
1172 coal_conf->rx_timeout = 1;
1173 coal_conf->rx_event_count = 4;
1174 amd8111e_set_coalesce(dev,RX_INTR_COAL);
1175 coal_conf->rx_coal_type = LOW_COALESCE;
1178 else if ((rx_pkt_size >= 512) && (rx_pkt_size < 1024)){
1180 if(coal_conf->rx_coal_type != MEDIUM_COALESCE){
1181 coal_conf->rx_timeout = 1;
1182 coal_conf->rx_event_count = 4;
1183 amd8111e_set_coalesce(dev,RX_INTR_COAL);
1184 coal_conf->rx_coal_type = MEDIUM_COALESCE;
1188 else if(rx_pkt_size >= 1024){
1189 if(coal_conf->rx_coal_type != HIGH_COALESCE){
1190 coal_conf->rx_timeout = 2;
1191 coal_conf->rx_event_count = 3;
1192 amd8111e_set_coalesce(dev,RX_INTR_COAL);
1193 coal_conf->rx_coal_type = HIGH_COALESCE;
1197 /* NOW FOR TX INTR COALESC */
1198 if(tx_pkt_rate < 800){
1199 if(coal_conf->tx_coal_type != NO_COALESCE){
1201 coal_conf->tx_timeout = 0x0;
1202 coal_conf->tx_event_count = 0;
1203 amd8111e_set_coalesce(dev,TX_INTR_COAL);
1204 coal_conf->tx_coal_type = NO_COALESCE;
1207 else{
1209 tx_pkt_size = tx_data_rate/tx_pkt_rate;
1210 if (tx_pkt_size < 128){
1212 if(coal_conf->tx_coal_type != NO_COALESCE){
1214 coal_conf->tx_timeout = 0;
1215 coal_conf->tx_event_count = 0;
1216 amd8111e_set_coalesce(dev,TX_INTR_COAL);
1217 coal_conf->tx_coal_type = NO_COALESCE;
1221 else if ( (tx_pkt_size >= 128) && (tx_pkt_size < 512) ){
1223 if(coal_conf->tx_coal_type != LOW_COALESCE){
1224 coal_conf->tx_timeout = 1;
1225 coal_conf->tx_event_count = 2;
1226 amd8111e_set_coalesce(dev,TX_INTR_COAL);
1227 coal_conf->tx_coal_type = LOW_COALESCE;
1231 else if ((tx_pkt_size >= 512) && (tx_pkt_size < 1024)){
1233 if(coal_conf->tx_coal_type != MEDIUM_COALESCE){
1234 coal_conf->tx_timeout = 2;
1235 coal_conf->tx_event_count = 5;
1236 amd8111e_set_coalesce(dev,TX_INTR_COAL);
1237 coal_conf->tx_coal_type = MEDIUM_COALESCE;
1241 else if(tx_pkt_size >= 1024){
1242 if (tx_pkt_size >= 1024){
1243 if(coal_conf->tx_coal_type != HIGH_COALESCE){
1244 coal_conf->tx_timeout = 4;
1245 coal_conf->tx_event_count = 8;
1246 amd8111e_set_coalesce(dev,TX_INTR_COAL);
1247 coal_conf->tx_coal_type = HIGH_COALESCE;
1252 return 0;
1256 This is device interrupt function. It handles transmit, receive,link change and hardware timer interrupts.
1258 static irqreturn_t amd8111e_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1261 struct net_device * dev = (struct net_device *) dev_id;
1262 struct amd8111e_priv *lp = netdev_priv(dev);
1263 void * mmio = lp->mmio;
1264 unsigned int intr0;
1265 unsigned int handled = 1;
1267 if(dev == NULL)
1268 return IRQ_NONE;
1270 if (regs) spin_lock (&lp->lock);
1271 /* disabling interrupt */
1272 writel(INTREN, mmio + CMD0);
1274 /* Read interrupt status */
1275 intr0 = readl(mmio + INT0);
1277 /* Process all the INT event until INTR bit is clear. */
1279 if (!(intr0 & INTR)){
1280 handled = 0;
1281 goto err_no_interrupt;
1284 /* Current driver processes 4 interrupts : RINT,TINT,LCINT,STINT */
1285 writel(intr0, mmio + INT0);
1287 /* Check if Receive Interrupt has occurred. */
1288 #if CONFIG_AMD8111E_NAPI
1289 if(intr0 & RINT0){
1290 if(netif_rx_schedule_prep(dev)){
1291 /* Disable receive interupts */
1292 writel(RINTEN0, mmio + INTEN0);
1293 /* Schedule a polling routine */
1294 __netif_rx_schedule(dev);
1296 else {
1297 printk("************Driver bug! \
1298 interrupt while in poll\n");
1299 /* Fix by disabling interrupts */
1300 writel(RINT0, mmio + INT0);
1303 #else
1304 if(intr0 & RINT0){
1305 amd8111e_rx(dev);
1306 writel(VAL2 | RDMD0, mmio + CMD0);
1308 #endif /* CONFIG_AMD8111E_NAPI */
1309 /* Check if Transmit Interrupt has occurred. */
1310 if(intr0 & TINT0)
1311 amd8111e_tx(dev);
1313 /* Check if Link Change Interrupt has occurred. */
1314 if (intr0 & LCINT)
1315 amd8111e_link_change(dev);
1317 /* Check if Hardware Timer Interrupt has occurred. */
1318 if (intr0 & STINT)
1319 amd8111e_calc_coalesce(dev);
1321 err_no_interrupt:
1322 writel( VAL0 | INTREN,mmio + CMD0);
1324 if (regs) spin_unlock(&lp->lock);
1326 return IRQ_RETVAL(handled);
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: run the interrupt handler with local interrupts masked
 * (irq 0 and a NULL regs pointer signal the polled invocation).
 */
static void amd8111e_poll(struct net_device *dev)
{
	unsigned long flags;

	local_irq_save(flags);
	amd8111e_interrupt(0, dev, NULL);
	local_irq_restore(flags);
}
#endif
1342 This function closes the network interface and updates the statistics so that most recent statistics will be available after the interface is down.
1344 static int amd8111e_close(struct net_device * dev)
1346 struct amd8111e_priv *lp = netdev_priv(dev);
1347 netif_stop_queue(dev);
1349 spin_lock_irq(&lp->lock);
1351 amd8111e_disable_interrupt(lp);
1352 amd8111e_stop_chip(lp);
1353 amd8111e_free_ring(lp);
1355 netif_carrier_off(lp->amd8111e_net_dev);
1357 /* Delete ipg timer */
1358 if(lp->options & OPTION_DYN_IPG_ENABLE)
1359 del_timer_sync(&lp->ipg_data.ipg_timer);
1361 spin_unlock_irq(&lp->lock);
1362 free_irq(dev->irq, dev);
1364 /* Update the statistics before closing */
1365 amd8111e_get_stats(dev);
1366 lp->opened = 0;
1367 return 0;
1369 /* This function opens new interface.It requests irq for the device, initializes the device,buffers and descriptors, and starts the device.
1371 static int amd8111e_open(struct net_device * dev )
1373 struct amd8111e_priv *lp = netdev_priv(dev);
1375 if(dev->irq ==0 || request_irq(dev->irq, amd8111e_interrupt, SA_SHIRQ,
1376 dev->name, dev))
1377 return -EAGAIN;
1379 spin_lock_irq(&lp->lock);
1381 amd8111e_init_hw_default(lp);
1383 if(amd8111e_restart(dev)){
1384 spin_unlock_irq(&lp->lock);
1385 return -ENOMEM;
1387 /* Start ipg timer */
1388 if(lp->options & OPTION_DYN_IPG_ENABLE){
1389 add_timer(&lp->ipg_data.ipg_timer);
1390 printk(KERN_INFO "%s: Dynamic IPG Enabled.\n",dev->name);
1393 lp->opened = 1;
1395 spin_unlock_irq(&lp->lock);
1397 netif_start_queue(dev);
1399 return 0;
1402 This function checks if there is any transmit descriptors available to queue more packet.
1404 static int amd8111e_tx_queue_avail(struct amd8111e_priv* lp )
1406 int tx_index = lp->tx_idx & TX_BUFF_MOD_MASK;
1407 if(lp->tx_skbuff[tx_index] != 0)
1408 return -1;
1409 else
1410 return 0;
/* This function queues a transmit packet onto the descriptor ring and
 * triggers the send operation. It fills the descriptor with the buffer's
 * DMA address, byte count and VLAN tag (when used), then hands ownership
 * to the hardware by setting OWN_BIT last, so the NIC never sees a
 * half-initialized descriptor.
 *
 * Always returns 0; when the ring becomes full the queue is stopped so
 * the stack will not submit further packets until amd8111e_tx() frees slots.
 */
static int amd8111e_start_xmit(struct sk_buff *skb, struct net_device * dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	int tx_index;
	unsigned long flags;

	spin_lock_irqsave(&lp->lock, flags);

	tx_index = lp->tx_idx & TX_RING_DR_MOD_MASK;

	lp->tx_ring[tx_index].buff_count = cpu_to_le16(skb->len);

	lp->tx_skbuff[tx_index] = skb;
	lp->tx_ring[tx_index].tx_flags = 0;

#if AMD8111E_VLAN_TAG_USED
	/* Ask the hardware to insert the VLAN tag carried by the skb. */
	if((lp->vlgrp != NULL) && vlan_tx_tag_present(skb)){
		lp->tx_ring[tx_index].tag_ctrl_cmd |=
				cpu_to_le32(TCC_VLAN_INSERT);
		lp->tx_ring[tx_index].tag_ctrl_info =
				cpu_to_le16(vlan_tx_tag_get(skb));
	}
#endif
	/* Map the packet for DMA and publish its bus address. */
	lp->tx_dma_addr[tx_index] =
	    pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
	lp->tx_ring[tx_index].buff_phy_addr =
	    (u32) cpu_to_le32(lp->tx_dma_addr[tx_index]);

	/* Set FCS and LTINT bits; OWN_BIT transfers the descriptor to hw. */
	lp->tx_ring[tx_index].tx_flags |=
	    cpu_to_le16(OWN_BIT | STP_BIT | ENP_BIT|ADD_FCS_BIT|LTINT_BIT);

	lp->tx_idx++;

	/* Trigger an immediate send poll. */
	writel( VAL1 | TDMD0, lp->mmio + CMD0);
	writel( VAL2 | RDMD0,lp->mmio + CMD0);

	dev->trans_start = jiffies;

	/* Ring full: stop the queue until tx completion frees a slot. */
	if(amd8111e_tx_queue_avail(lp) < 0){
		netif_stop_queue(dev);
	}
	spin_unlock_irqrestore(&lp->lock, flags);
	return 0;
}
1465 This function returns all the memory mapped registers of the device.
1467 static char* amd8111e_read_regs(struct amd8111e_priv* lp)
1469 void * mmio = lp->mmio;
1470 u32 * reg_buff;
1472 reg_buff = kmalloc( AMD8111E_REG_DUMP_LEN,GFP_KERNEL);
1473 if(NULL == reg_buff)
1474 return NULL;
1476 /* Read only necessary registers */
1477 reg_buff[0] = readl(mmio + XMT_RING_BASE_ADDR0);
1478 reg_buff[1] = readl(mmio + XMT_RING_LEN0);
1479 reg_buff[2] = readl(mmio + RCV_RING_BASE_ADDR0);
1480 reg_buff[3] = readl(mmio + RCV_RING_LEN0);
1481 reg_buff[4] = readl(mmio + CMD0);
1482 reg_buff[5] = readl(mmio + CMD2);
1483 reg_buff[6] = readl(mmio + CMD3);
1484 reg_buff[7] = readl(mmio + CMD7);
1485 reg_buff[8] = readl(mmio + INT0);
1486 reg_buff[9] = readl(mmio + INTEN0);
1487 reg_buff[10] = readl(mmio + LADRF);
1488 reg_buff[11] = readl(mmio + LADRF+4);
1489 reg_buff[12] = readl(mmio + STAT0);
1491 return (char *)reg_buff;
1494 amd8111e crc generator implementation is different from the kernel
1495 ether_crc() function.
1497 int amd8111e_ether_crc(int len, char* mac_addr)
1499 int i,byte;
1500 unsigned char octet;
1501 u32 crc= INITCRC;
1503 for(byte=0; byte < len; byte++){
1504 octet = mac_addr[byte];
1505 for( i=0;i < 8; i++){
1506 /*If the next bit form the input stream is 1,subtract the divisor (CRC32) from the dividend(crc).*/
1507 if( (octet & 0x1) ^ (crc & 0x1) ){
1508 crc >>= 1;
1509 crc ^= CRC32;
1511 else
1512 crc >>= 1;
1514 octet >>= 1;
1517 return crc;
/* This function sets promiscuous mode, all-multi mode or the multicast
 * address list on the device, depending on dev->flags and the length of
 * the multicast list. The 64-bit logical address filter (LADRF) is built
 * by hashing each address with amd8111e_ether_crc().
 */
static void amd8111e_set_multicast_list(struct net_device *dev)
{
	struct dev_mc_list* mc_ptr;
	struct amd8111e_priv *lp = netdev_priv(dev);
	u32 mc_filter[2] ;
	int i,bit_num;
	if(dev->flags & IFF_PROMISC){
		printk(KERN_INFO "%s: Setting promiscuous mode.\n",dev->name);
		/* VAL2 selects the PROM bit for setting. */
		writel( VAL2 | PROM, lp->mmio + CMD2);
		return;
	}
	else
		/* Written without a VAL bit — presumably this clears PROM;
		 * NOTE(review): confirm against the CMD2 register spec. */
		writel( PROM, lp->mmio + CMD2);
	if(dev->flags & IFF_ALLMULTI || dev->mc_count > MAX_FILTER_SIZE){
		/* get all multicast packet: open the filter completely */
		mc_filter[1] = mc_filter[0] = 0xffffffff;
		lp->mc_list = dev->mc_list;
		lp->options |= OPTION_MULTICAST_ENABLE;
		amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF);
		return;
	}
	if( dev->mc_count == 0 ){
		/* get only own packets: clear the filter entirely */
		mc_filter[1] = mc_filter[0] = 0;
		lp->mc_list = NULL;
		lp->options &= ~OPTION_MULTICAST_ENABLE;
		amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF);
		/* disable promiscous mode */
		writel(PROM, lp->mmio + CMD2);
		return;
	}
	/* load all the multicast addresses in the logic filter: each address
	 * hashes to one of 64 bits (top 6 bits of the CRC). */
	lp->options |= OPTION_MULTICAST_ENABLE;
	lp->mc_list = dev->mc_list;
	mc_filter[1] = mc_filter[0] = 0;
	for (i = 0, mc_ptr = dev->mc_list; mc_ptr && i < dev->mc_count;
		     i++, mc_ptr = mc_ptr->next) {
		bit_num = ( amd8111e_ether_crc(ETH_ALEN,mc_ptr->dmi_addr) >> 26 ) & 0x3f;
		mc_filter[bit_num >> 5] |= 1 << (bit_num & 31);
	}
	amd8111e_writeq(*(u64*)mc_filter,lp->mmio+ LADRF);

	/* To eliminate PCI posting bug */
	readl(lp->mmio + CMD2);
}
/* This function handles all the ethtool ioctls. It reports driver info,
 * gets/sets link settings via the generic MII helpers, dumps the memory
 * mapped registers, restarts autonegotiation, reports link status and
 * gets/sets Wake-on-LAN options.
 *
 * Returns 0 on success, a negative errno on failure, -EOPNOTSUPP for
 * unrecognized sub-commands.
 */
static int amd8111e_ethtool_ioctl(struct net_device* dev, void __user *useraddr)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	struct pci_dev *pci_dev = lp->pci_dev;
	u32 ethcmd;

	if( useraddr == NULL)
		return -EINVAL;
	/* First word of the user buffer selects the sub-command. */
	if(copy_from_user (&ethcmd, useraddr, sizeof (ethcmd)))
		return -EFAULT;

	switch(ethcmd){
	/* driver identification */
	case ETHTOOL_GDRVINFO:{
		struct ethtool_drvinfo info = { ETHTOOL_GDRVINFO };
		strcpy (info.driver, MODULE_NAME);
		strcpy (info.version, MODULE_VERS);
		memset(&info.fw_version, 0, sizeof(info.fw_version));
		sprintf(info.fw_version,"%u",chip_version);
		strcpy (info.bus_info, pci_name(pci_dev));
		info.eedump_len = 0;
		info.regdump_len = AMD8111E_REG_DUMP_LEN;
		if (copy_to_user (useraddr, &info, sizeof(info)))
			return -EFAULT;
		return 0;
	}
	/* get settings */
	case ETHTOOL_GSET: {
		struct ethtool_cmd ecmd = { ETHTOOL_GSET };
		spin_lock_irq(&lp->lock);
		mii_ethtool_gset(&lp->mii_if, &ecmd);
		spin_unlock_irq(&lp->lock);
		if (copy_to_user(useraddr, &ecmd, sizeof(ecmd)))
			return -EFAULT;
		return 0;
	}
	/* set settings */
	case ETHTOOL_SSET: {
		int r;
		struct ethtool_cmd ecmd;
		if (copy_from_user(&ecmd, useraddr, sizeof(ecmd)))
			return -EFAULT;

		spin_lock_irq(&lp->lock);
		r = mii_ethtool_sset(&lp->mii_if, &ecmd);
		spin_unlock_irq(&lp->lock);
		return r;
	}
	/* dump the memory mapped registers */
	case ETHTOOL_GREGS: {
		struct ethtool_regs regs;
		u8 *regbuf;
		int ret;

		if (copy_from_user(&regs, useraddr, sizeof(regs)))
			return -EFAULT;
		/* Clamp the requested length to what we actually dump. */
		if (regs.len > AMD8111E_REG_DUMP_LEN)
			regs.len = AMD8111E_REG_DUMP_LEN;
		regs.version = 0;
		if (copy_to_user(useraddr, &regs, sizeof(regs)))
			return -EFAULT;

		regbuf = amd8111e_read_regs(lp);
		if (!regbuf)
			return -ENOMEM;

		/* Register data follows the header in the user buffer. */
		useraddr += offsetof(struct ethtool_regs, data);
		ret = 0;
		if (copy_to_user(useraddr, regbuf, regs.len))
			ret = -EFAULT;
		kfree(regbuf);
		return ret;
	}
	/* restart autonegotiation */
	case ETHTOOL_NWAY_RST: {
		return mii_nway_restart(&lp->mii_if);
	}
	/* get link status */
	case ETHTOOL_GLINK: {
		struct ethtool_value val = {ETHTOOL_GLINK};
		val.data = mii_link_ok(&lp->mii_if);
		if (copy_to_user(useraddr, &val, sizeof(val)))
			return -EFAULT;
		return 0;
	}
	/* get Wake-on-LAN settings */
	case ETHTOOL_GWOL: {
		struct ethtool_wolinfo wol_info = { ETHTOOL_GWOL };

		wol_info.supported = WAKE_MAGIC|WAKE_PHY;
		wol_info.wolopts = 0;
		if (lp->options & OPTION_WOL_ENABLE)
			wol_info.wolopts = WAKE_MAGIC;
		memset(&wol_info.sopass, 0, sizeof(wol_info.sopass));
		if (copy_to_user(useraddr, &wol_info, sizeof(wol_info)))
			return -EFAULT;
		return 0;
	}
	/* set Wake-on-LAN settings */
	case ETHTOOL_SWOL: {
		struct ethtool_wolinfo wol_info;

		if (copy_from_user(&wol_info, useraddr, sizeof(wol_info)))
			return -EFAULT;
		/* Only magic packet and phy wake-up are supported. */
		if (wol_info.wolopts & ~(WAKE_MAGIC |WAKE_PHY))
			return -EINVAL;
		spin_lock_irq(&lp->lock);
		if(wol_info.wolopts & WAKE_MAGIC)
			lp->options |=
				(OPTION_WOL_ENABLE | OPTION_WAKE_MAGIC_ENABLE);
		else if(wol_info.wolopts & WAKE_PHY)
			lp->options |=
				(OPTION_WOL_ENABLE | OPTION_WAKE_PHY_ENABLE);
		else
			lp->options &= ~OPTION_WOL_ENABLE;
		spin_unlock_irq(&lp->lock);
		return 0;
	}
	default:
		break;
	}
	return -EOPNOTSUPP;
}
1695 static int amd8111e_ioctl(struct net_device * dev , struct ifreq *ifr, int cmd)
1697 struct mii_ioctl_data *data = if_mii(ifr);
1698 struct amd8111e_priv *lp = netdev_priv(dev);
1699 int err;
1700 u32 mii_regval;
1702 if (!capable(CAP_NET_ADMIN))
1703 return -EPERM;
1705 switch(cmd) {
1706 case SIOCETHTOOL:
1707 return amd8111e_ethtool_ioctl(dev, ifr->ifr_data);
1708 case SIOCGMIIPHY:
1709 data->phy_id = PHY_ID;
1711 /* fallthru */
1712 case SIOCGMIIREG:
1714 spin_lock_irq(&lp->lock);
1715 err = amd8111e_read_phy(lp, data->phy_id,
1716 data->reg_num & PHY_REG_ADDR_MASK, &mii_regval);
1717 spin_unlock_irq(&lp->lock);
1719 data->val_out = mii_regval;
1720 return err;
1722 case SIOCSMIIREG:
1724 spin_lock_irq(&lp->lock);
1725 err = amd8111e_write_phy(lp, data->phy_id,
1726 data->reg_num & PHY_REG_ADDR_MASK, data->val_in);
1727 spin_unlock_irq(&lp->lock);
1729 return err;
1731 default:
1732 /* do nothing */
1733 break;
1735 return -EOPNOTSUPP;
1737 static int amd8111e_set_mac_address(struct net_device *dev, void *p)
1739 struct amd8111e_priv *lp = dev->priv;
1740 int i;
1741 struct sockaddr *addr = p;
1743 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1744 spin_lock_irq(&lp->lock);
1745 /* Setting the MAC address to the device */
1746 for(i = 0; i < ETH_ADDR_LEN; i++)
1747 writeb( dev->dev_addr[i], lp->mmio + PADR + i );
1749 spin_unlock_irq(&lp->lock);
1751 return 0;
1755 This function changes the mtu of the device. It restarts the device to initialize the descriptor with new receive buffers.
1757 int amd8111e_change_mtu(struct net_device *dev, int new_mtu)
1759 struct amd8111e_priv *lp = netdev_priv(dev);
1760 int err;
1762 if ((new_mtu < AMD8111E_MIN_MTU) || (new_mtu > AMD8111E_MAX_MTU))
1763 return -EINVAL;
1765 if (!netif_running(dev)) {
1766 /* new_mtu will be used
1767 when device starts netxt time */
1768 dev->mtu = new_mtu;
1769 return 0;
1772 spin_lock_irq(&lp->lock);
1774 /* stop the chip */
1775 writel(RUN, lp->mmio + CMD0);
1777 dev->mtu = new_mtu;
1779 err = amd8111e_restart(dev);
1780 spin_unlock_irq(&lp->lock);
1781 if(!err)
1782 netif_start_queue(dev);
1783 return err;
1786 #if AMD8111E_VLAN_TAG_USED
1787 static void amd8111e_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
1789 struct amd8111e_priv *lp = netdev_priv(dev);
1790 spin_lock_irq(&lp->lock);
1791 lp->vlgrp = grp;
1792 spin_unlock_irq(&lp->lock);
1795 static void amd8111e_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
1797 struct amd8111e_priv *lp = netdev_priv(dev);
1798 spin_lock_irq(&lp->lock);
1799 if (lp->vlgrp)
1800 lp->vlgrp->vlan_devices[vid] = NULL;
1801 spin_unlock_irq(&lp->lock);
1803 #endif
1804 static int amd8111e_enable_magicpkt(struct amd8111e_priv* lp)
1806 writel( VAL1|MPPLBA, lp->mmio + CMD3);
1807 writel( VAL0|MPEN_SW, lp->mmio + CMD7);
1809 /* To eliminate PCI posting bug */
1810 readl(lp->mmio + CMD7);
1811 return 0;
1814 static int amd8111e_enable_link_change(struct amd8111e_priv* lp)
1817 /* Adapter is already stoped/suspended/interrupt-disabled */
1818 writel(VAL0|LCMODE_SW,lp->mmio + CMD7);
1820 /* To eliminate PCI posting bug */
1821 readl(lp->mmio + CMD7);
1822 return 0;
1824 /* This function is called when a packet transmission fails to complete within a resonable period, on the assumption that an interrupts have been failed or the interface is locked up. This function will reinitialize the hardware */
1826 static void amd8111e_tx_timeout(struct net_device *dev)
1828 struct amd8111e_priv* lp = netdev_priv(dev);
1829 int err;
1831 printk(KERN_ERR "%s: transmit timed out, resetting\n",
1832 dev->name);
1833 spin_lock_irq(&lp->lock);
1834 err = amd8111e_restart(dev);
1835 spin_unlock_irq(&lp->lock);
1836 if(!err)
1837 netif_wake_queue(dev);
/* PCI suspend hook. The ordering is deliberate: mask interrupts, detach
 * the device from the stack, stop the chip, then program the requested
 * Wake-on-LAN sources before saving config space and entering D3.
 */
static int amd8111e_suspend(struct pci_dev *pci_dev, u32 state)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	struct amd8111e_priv *lp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	/* disable the interrupt */
	spin_lock_irq(&lp->lock);
	amd8111e_disable_interrupt(lp);
	spin_unlock_irq(&lp->lock);

	netif_device_detach(dev);

	/* stop chip */
	spin_lock_irq(&lp->lock);
	/* NOTE(review): del_timer_sync() with a spinlock held and irqs off
	 * can spin waiting for the handler on another CPU — looks safe here
	 * since amd8111e_config_ipg() does not take lp->lock; verify. */
	if(lp->options & OPTION_DYN_IPG_ENABLE)
		del_timer_sync(&lp->ipg_data.ipg_timer);
	amd8111e_stop_chip(lp);
	spin_unlock_irq(&lp->lock);

	if(lp->options & OPTION_WOL_ENABLE){
		/* enable wol */
		if(lp->options & OPTION_WAKE_MAGIC_ENABLE)
			amd8111e_enable_magicpkt(lp);
		if(lp->options & OPTION_WAKE_PHY_ENABLE)
			amd8111e_enable_link_change(lp);

		pci_enable_wake(pci_dev, 3, 1);
		pci_enable_wake(pci_dev, 4, 1); /* D3 cold */

	}
	else{
		pci_enable_wake(pci_dev, 3, 0);
		pci_enable_wake(pci_dev, 4, 0); /* 4 == D3 cold */
	}

	pci_save_state(pci_dev, lp->pm_state);
	pci_set_power_state(pci_dev, 3);

	return 0;
}
/* PCI resume hook: mirror of amd8111e_suspend(). Restores power state and
 * config space, clears the wake enables, re-attaches the device and
 * restarts the chip (and the dynamic-IPG timer when enabled).
 */
static int amd8111e_resume(struct pci_dev *pci_dev)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	struct amd8111e_priv *lp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	pci_set_power_state(pci_dev, 0);
	pci_restore_state(pci_dev, lp->pm_state);

	pci_enable_wake(pci_dev, 3, 0);
	pci_enable_wake(pci_dev, 4, 0); /* D3 cold */

	netif_device_attach(dev);

	spin_lock_irq(&lp->lock);
	amd8111e_restart(dev);
	/* Restart ipg timer */
	if(lp->options & OPTION_DYN_IPG_ENABLE)
		mod_timer(&lp->ipg_data.ipg_timer,
				jiffies + IPG_CONVERGE_JIFFIES);
	spin_unlock_irq(&lp->lock);

	return 0;
}
1910 static void __devexit amd8111e_remove_one(struct pci_dev *pdev)
1912 struct net_device *dev = pci_get_drvdata(pdev);
1913 if (dev) {
1914 unregister_netdev(dev);
1915 iounmap((void *) ((struct amd8111e_priv *)(dev->priv))->mmio);
1916 free_netdev(dev);
1917 pci_release_regions(pdev);
1918 pci_disable_device(pdev);
1919 pci_set_drvdata(pdev, NULL);
/* Dynamic IPG (inter-packet gap) tuning, run from lp->ipg_data.ipg_timer.
 * Two-state search: in CSTATE the IPG is stepped upward while the best
 * (lowest collision delta) value seen is remembered; when MAX_IPG is
 * exceeded the best value is locked in and the state goes to SSTATE,
 * where it rests for IPG_STABLE_TIME ticks before probing again.
 * Full duplex has no collisions, so the default IPG is used unchanged.
 */
static void amd8111e_config_ipg(struct net_device* dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	struct ipg_info* ipg_data = &lp->ipg_data;
	void * mmio = lp->mmio;
	unsigned int prev_col_cnt = ipg_data->col_cnt;
	unsigned int total_col_cnt;
	unsigned int tmp_ipg;

	if(lp->link_config.duplex == DUPLEX_FULL){
		ipg_data->ipg = DEFAULT_IPG;
		return;
	}

	if(ipg_data->ipg_state == SSTATE){
		/* Rest period over: restart the search from MIN_IPG. */
		if(ipg_data->timer_tick == IPG_STABLE_TIME){
			ipg_data->timer_tick = 0;
			ipg_data->ipg = MIN_IPG - IPG_STEP;
			ipg_data->current_ipg = MIN_IPG;
			ipg_data->diff_col_cnt = 0xFFFFFFFF;
			ipg_data->ipg_state = CSTATE;
		}
		else
			ipg_data->timer_tick++;
	}

	if(ipg_data->ipg_state == CSTATE){

		/* Get the current collision count */
		total_col_cnt = ipg_data->col_cnt =
				amd8111e_read_mib(mmio, xmt_collisions);

		/* Fewer collisions than the best so far: remember this IPG. */
		if ((total_col_cnt - prev_col_cnt) <
				(ipg_data->diff_col_cnt)){

			ipg_data->diff_col_cnt =
				total_col_cnt - prev_col_cnt ;

			ipg_data->ipg = ipg_data->current_ipg;
		}

		ipg_data->current_ipg += IPG_STEP;

		if (ipg_data->current_ipg <= MAX_IPG)
			tmp_ipg = ipg_data->current_ipg;
		else{
			/* Search exhausted: apply the best IPG and rest. */
			tmp_ipg = ipg_data->ipg;
			ipg_data->ipg_state = SSTATE;
		}
		writew((u32)tmp_ipg, mmio + IPG);
		writew((u32)(tmp_ipg - IFS1_DELTA), mmio + IFS1);
	}
	/* Re-arm for the next convergence tick. */
	mod_timer(&lp->ipg_data.ipg_timer, jiffies + IPG_CONVERGE_JIFFIES);
	return;
}
1982 static int __devinit amd8111e_probe_one(struct pci_dev *pdev,
1983 const struct pci_device_id *ent)
1985 int err,i,pm_cap;
1986 unsigned long reg_addr,reg_len;
1987 struct amd8111e_priv* lp;
1988 struct net_device* dev;
1990 err = pci_enable_device(pdev);
1991 if(err){
1992 printk(KERN_ERR "amd8111e: Cannot enable new PCI device,"
1993 "exiting.\n");
1994 return err;
1997 if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)){
1998 printk(KERN_ERR "amd8111e: Cannot find PCI base address"
1999 "exiting.\n");
2000 err = -ENODEV;
2001 goto err_disable_pdev;
2004 err = pci_request_regions(pdev, MODULE_NAME);
2005 if(err){
2006 printk(KERN_ERR "amd8111e: Cannot obtain PCI resources, "
2007 "exiting.\n");
2008 goto err_disable_pdev;
2011 pci_set_master(pdev);
2013 /* Find power-management capability. */
2014 if((pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM))==0){
2015 printk(KERN_ERR "amd8111e: No Power Management capability, "
2016 "exiting.\n");
2017 goto err_free_reg;
2020 /* Initialize DMA */
2021 if(!pci_dma_supported(pdev, 0xffffffff)){
2022 printk(KERN_ERR "amd8111e: DMA not supported,"
2023 "exiting.\n");
2024 goto err_free_reg;
2025 } else
2026 pdev->dma_mask = 0xffffffff;
2028 reg_addr = pci_resource_start(pdev, 0);
2029 reg_len = pci_resource_len(pdev, 0);
2031 dev = alloc_etherdev(sizeof(struct amd8111e_priv));
2032 if (!dev) {
2033 printk(KERN_ERR "amd8111e: Etherdev alloc failed, exiting.\n");
2034 err = -ENOMEM;
2035 goto err_free_reg;
2038 SET_MODULE_OWNER(dev);
2039 SET_NETDEV_DEV(dev, &pdev->dev);
2041 #if AMD8111E_VLAN_TAG_USED
2042 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX ;
2043 dev->vlan_rx_register =amd8111e_vlan_rx_register;
2044 dev->vlan_rx_kill_vid = amd8111e_vlan_rx_kill_vid;
2045 #endif
2047 lp = netdev_priv(dev);
2048 lp->pci_dev = pdev;
2049 lp->amd8111e_net_dev = dev;
2050 lp->pm_cap = pm_cap;
2052 /* setting mii default values */
2053 lp->mii_if.dev = dev;
2054 lp->mii_if.mdio_read = amd8111e_mdio_read;
2055 lp->mii_if.mdio_write = amd8111e_mdio_write;
2056 lp->mii_if.phy_id = PHY_ID;
2058 spin_lock_init(&lp->lock);
2060 lp->mmio = ioremap(reg_addr, reg_len);
2061 if (lp->mmio == 0) {
2062 printk(KERN_ERR "amd8111e: Cannot map device registers, "
2063 "exiting\n");
2064 err = -ENOMEM;
2065 goto err_free_dev;
2068 /* Initializing MAC address */
2069 for(i = 0; i < ETH_ADDR_LEN; i++)
2070 dev->dev_addr[i] =readb(lp->mmio + PADR + i);
2072 /* Setting user defined parametrs */
2073 lp->ext_phy_option = speed_duplex[card_idx];
2074 if(coalesce[card_idx])
2075 lp->options |= OPTION_INTR_COAL_ENABLE;
2076 if(dynamic_ipg[card_idx++])
2077 lp->options |= OPTION_DYN_IPG_ENABLE;
2079 /* Initialize driver entry points */
2080 dev->open = amd8111e_open;
2081 dev->hard_start_xmit = amd8111e_start_xmit;
2082 dev->stop = amd8111e_close;
2083 dev->get_stats = amd8111e_get_stats;
2084 dev->set_multicast_list = amd8111e_set_multicast_list;
2085 dev->set_mac_address = amd8111e_set_mac_address;
2086 dev->do_ioctl = amd8111e_ioctl;
2087 dev->change_mtu = amd8111e_change_mtu;
2088 dev->irq =pdev->irq;
2089 dev->tx_timeout = amd8111e_tx_timeout;
2090 dev->watchdog_timeo = AMD8111E_TX_TIMEOUT;
2091 #ifdef CONFIG_AMD8111E_NAPI
2092 dev->poll = amd8111e_rx_poll;
2093 dev->weight = 32;
2094 #endif
2095 #ifdef CONFIG_NET_POLL_CONTROLLER
2096 dev->poll_controller = amd8111e_poll;
2097 #endif
2099 #if AMD8111E_VLAN_TAG_USED
2100 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2101 dev->vlan_rx_register =amd8111e_vlan_rx_register;
2102 dev->vlan_rx_kill_vid = amd8111e_vlan_rx_kill_vid;
2103 #endif
2105 /* Set receive buffer length and set jumbo option*/
2106 amd8111e_set_rx_buff_len(dev);
2109 err = register_netdev(dev);
2110 if (err) {
2111 printk(KERN_ERR "amd8111e: Cannot register net device, "
2112 "exiting.\n");
2113 goto err_iounmap;
2116 pci_set_drvdata(pdev, dev);
2118 /* Initialize software ipg timer */
2119 if(lp->options & OPTION_DYN_IPG_ENABLE){
2120 init_timer(&lp->ipg_data.ipg_timer);
2121 lp->ipg_data.ipg_timer.data = (unsigned long) dev;
2122 lp->ipg_data.ipg_timer.function = (void *)&amd8111e_config_ipg;
2123 lp->ipg_data.ipg_timer.expires = jiffies +
2124 IPG_CONVERGE_JIFFIES;
2125 lp->ipg_data.ipg = DEFAULT_IPG;
2126 lp->ipg_data.ipg_state = CSTATE;
2129 /* display driver and device information */
2131 chip_version = (readl(lp->mmio + CHIPID) & 0xf0000000)>>28;
2132 printk(KERN_INFO "%s: AMD-8111e Driver Version: %s\n", dev->name,MODULE_VERS);
2133 printk(KERN_INFO "%s: [ Rev %x ] PCI 10/100BaseT Ethernet ", dev->name, chip_version);
2134 for (i = 0; i < 6; i++)
2135 printk("%2.2x%c",dev->dev_addr[i],i == 5 ? ' ' : ':');
2136 printk( "\n");
2137 return 0;
2138 err_iounmap:
2139 iounmap((void *) lp->mmio);
2141 err_free_dev:
2142 free_netdev(dev);
2144 err_free_reg:
2145 pci_release_regions(pdev);
2147 err_disable_pdev:
2148 pci_disable_device(pdev);
2149 pci_set_drvdata(pdev, NULL);
2150 return err;
/* PCI driver glue: binds the amd8111e entry points to matching devices
 * listed in amd8111e_pci_tbl. */
static struct pci_driver amd8111e_driver = {
	.name		= MODULE_NAME,
	.id_table	= amd8111e_pci_tbl,
	.probe		= amd8111e_probe_one,
	.remove		= __devexit_p(amd8111e_remove_one),
	.suspend	= amd8111e_suspend,
	.resume		= amd8111e_resume
};
2163 static int __init amd8111e_init(void)
2165 return pci_module_init(&amd8111e_driver);
2168 static void __exit amd8111e_cleanup(void)
2170 pci_unregister_driver(&amd8111e_driver);
/* Hook the init/exit routines into the module load/unload sequence. */
module_init(amd8111e_init);
module_exit(amd8111e_cleanup);