2 /* Advanced Micro Devices Inc. AMD8111E Linux Network Driver
3 * Copyright (C) 2003 Advanced Micro Devices
5 *
6 * Copyright 2001,2002 Jeff Garzik <jgarzik@mandrakesoft.com> [ 8139cp.c,tg3.c ]
7 * Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com)[ tg3.c]
8 * Copyright 1996-1999 Thomas Bogendoerfer [ pcnet32.c ]
9 * Derived from the lance driver written 1993,1994,1995 by Donald Becker.
10 * Copyright 1993 United States Government as represented by the
11 * Director, National Security Agency.[ pcnet32.c ]
12 * Carsten Langgaard, carstenl@mips.com [ pcnet32.c ]
13 * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
21 * This program is distributed in the hope that it will be useful,
22 * but WITHOUT ANY WARRANTY; without even the implied warranty of
23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
24 * GNU General Public License for more details.
26 * You should have received a copy of the GNU General Public License
27 * along with this program; if not, write to the Free Software
28 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
29 * USA
31 Module Name:
33 amd8111e.c
35 Abstract:
37 AMD8111 based 10/100 Ethernet Controller Driver.
39 Environment:
41 Kernel Mode
43 Revision History:
44 3.0.0
45 Initial Revision.
46 3.0.1
47 1. Dynamic interrupt coalescing.
48 2. Removed prev_stats.
49 3. MII support.
50 4. Dynamic IPG support
51 3.0.2 05/29/2003
52 1. Bug fix: Fixed failure to send jumbo packets larger than 4k.
53 2. Bug fix: Fixed VLAN support failure.
54 3. Bug fix: Fixed receive interrupt coalescing bug.
55 4. Dynamic IPG support is disabled by default.
56 3.0.3 06/05/2003
57 1. Bug fix: Fixed failure to close the interface if SMP is enabled.
62 #include <linux/config.h>
63 #include <linux/module.h>
64 #include <linux/kernel.h>
65 #include <linux/types.h>
66 #include <linux/compiler.h>
67 #include <linux/slab.h>
68 #include <linux/delay.h>
69 #include <linux/init.h>
70 #include <linux/ioport.h>
71 #include <linux/pci.h>
72 #include <linux/netdevice.h>
73 #include <linux/etherdevice.h>
74 #include <linux/skbuff.h>
75 #include <linux/ethtool.h>
76 #include <linux/mii.h>
77 #include <linux/if_vlan.h>
78 #include <linux/ctype.h>
79 #include <linux/crc32.h>
81 #include <asm/system.h>
82 #include <asm/io.h>
83 #include <asm/byteorder.h>
84 #include <asm/uaccess.h>
86 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
87 #define AMD8111E_VLAN_TAG_USED 1
88 #else
89 #define AMD8111E_VLAN_TAG_USED 0
90 #endif
92 #include "amd8111e.h"
93 #define MODULE_NAME "amd8111e"
94 #define MODULE_VERSION "3.0.3"
95 MODULE_AUTHOR("Advanced Micro Devices, Inc.");
96 MODULE_DESCRIPTION ("AMD8111 based 10/100 Ethernet Controller. Driver Version 3.0.3");
97 MODULE_LICENSE("GPL");
98 MODULE_PARM(speed_duplex, "1-" __MODULE_STRING (MAX_UNITS) "i");
99 MODULE_PARM_DESC(speed_duplex, "Set device speed and duplex modes, 0: Auto Negotiate, 1: 10Mbps Half Duplex, 2: 10Mbps Full Duplex, 3: 100Mbps Half Duplex, 4: 100Mbps Full Duplex");
100 MODULE_PARM(coalesce, "1-" __MODULE_STRING(MAX_UNITS) "i");
101 MODULE_PARM_DESC(coalesce, "Enable or Disable interrupt coalescing, 1: Enable, 0: Disable");
102 MODULE_PARM(dynamic_ipg, "1-" __MODULE_STRING(MAX_UNITS) "i");
103 MODULE_PARM_DESC(dynamic_ipg, "Enable or Disable dynamic IPG, 1: Enable, 0: Disable");
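/* Illustrative usage (an assumption for documentation, not from the original
 * source): with two adapters present, the parameters declared above could be
 * supplied at load time, e.g.
 *
 *	modprobe amd8111e speed_duplex=0,4 coalesce=1,1 dynamic_ipg=0,0
 *
 * which requests auto-negotiation on the first card, forced 100Mbps Full
 * Duplex on the second, interrupt coalescing enabled and dynamic IPG disabled
 * on both. Only the parameter names and value meanings come from the
 * MODULE_PARM_DESC strings above.
 */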
105 static struct pci_device_id amd8111e_pci_tbl[] __devinitdata = {
107 { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD8111E_7462,
108 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
109 { 0, }
113 This function will read the PHY registers.
115 static int amd8111e_read_phy(struct amd8111e_priv* lp, int phy_id, int reg, u32* val)
117 void * mmio = lp->mmio;
118 unsigned int reg_val;
119 unsigned int repeat= REPEAT_CNT;
121 reg_val = readl(mmio + PHY_ACCESS);
122 while (reg_val & PHY_CMD_ACTIVE)
123 reg_val = readl( mmio + PHY_ACCESS );
125 writel( PHY_RD_CMD | ((phy_id & 0x1f) << 21) |
126 ((reg & 0x1f) << 16), mmio +PHY_ACCESS);
128 reg_val = readl(mmio + PHY_ACCESS);
129 udelay(30); /* It takes 30 us to read/write data */
130 } while (--repeat && (reg_val & PHY_CMD_ACTIVE));
131 if(reg_val & PHY_RD_ERR)
132 goto err_phy_read;
134 *val = reg_val & 0xffff;
135 return 0;
136 err_phy_read:
137 *val = 0;
138 return -EINVAL;
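/* Minimal usage sketch of the helper above (illustration only; "link_up" is a
 * hypothetical local variable, everything else comes from this file or
 * <linux/mii.h>):
 *
 *	u32 bmsr;
 *	if (!amd8111e_read_phy(lp, PHY_ID, MII_BMSR, &bmsr))
 *		link_up = bmsr & BMSR_LSTATUS;
 *
 * On success the low 16 bits of the PHY register are returned through the
 * pointer argument; on a read error the helper returns -EINVAL and zeroes the
 * output value.
 */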
143 This function will write into PHY registers.
145 static int amd8111e_write_phy(struct amd8111e_priv* lp,int phy_id, int reg, u32 val)
147 unsigned int repeat = REPEAT_CNT;
148 void * mmio = lp->mmio;
149 unsigned int reg_val;
151 reg_val = readl(mmio + PHY_ACCESS);
152 while (reg_val & PHY_CMD_ACTIVE)
153 reg_val = readl( mmio + PHY_ACCESS );
155 writel( PHY_WR_CMD | ((phy_id & 0x1f) << 21) |
156 ((reg & 0x1f) << 16)|val, mmio + PHY_ACCESS);
159 reg_val = readl(mmio + PHY_ACCESS);
160 udelay(30); /* It takes 30 us to read/write the data */
161 } while (--repeat && (reg_val & PHY_CMD_ACTIVE));
163 if(reg_val & PHY_RD_ERR)
164 goto err_phy_write;
166 return 0;
168 err_phy_write:
169 return -EINVAL;
173 This is the mii register read function provided to the mii interface.
175 static int amd8111e_mdio_read(struct net_device * dev, int phy_id, int reg_num)
177 struct amd8111e_priv* lp = dev->priv;
178 unsigned int reg_val;
180 amd8111e_read_phy(lp,phy_id,reg_num,&reg_val);
181 return reg_val;
186 This is the mii register write function provided to the mii interface.
188 static void amd8111e_mdio_write(struct net_device * dev, int phy_id, int reg_num, int val)
190 struct amd8111e_priv* lp = dev->priv;
192 amd8111e_write_phy(lp, phy_id, reg_num, val);
196 This function will set the PHY speed. During initialization, it sets the original speed to 100 Mbps full duplex.
198 static void amd8111e_set_ext_phy(struct net_device *dev)
200 struct amd8111e_priv *lp = (struct amd8111e_priv *)dev->priv;
201 u32 bmcr,advert,tmp;
203 /* Determine mii register values to set the speed */
204 advert = amd8111e_mdio_read(dev, PHY_ID, MII_ADVERTISE);
205 tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
206 switch (lp->ext_phy_option){
208 default:
209 case SPEED_AUTONEG: /* advertise all values */
210 tmp |= ( ADVERTISE_10HALF|ADVERTISE_10FULL|
211 ADVERTISE_100HALF|ADVERTISE_100FULL) ;
212 break;
213 case SPEED10_HALF:
214 tmp |= ADVERTISE_10HALF;
215 break;
216 case SPEED10_FULL:
217 tmp |= ADVERTISE_10FULL;
218 break;
219 case SPEED100_HALF:
220 tmp |= ADVERTISE_100HALF;
221 break;
222 case SPEED100_FULL:
223 tmp |= ADVERTISE_100FULL;
224 break;
227 if(advert != tmp)
228 amd8111e_mdio_write(dev, PHY_ID, MII_ADVERTISE, tmp);
229 /* Restart auto negotiation */
230 bmcr = amd8111e_mdio_read(dev, PHY_ID, MII_BMCR);
231 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
232 amd8111e_mdio_write(dev, PHY_ID, MII_BMCR, bmcr);
237 This function will unmap skb->data space and will free
238 all transmit and receive skbuffs.
240 static int amd8111e_free_skbs(struct net_device *dev)
242 struct amd8111e_priv *lp = (struct amd8111e_priv *)dev->priv;
243 struct sk_buff* rx_skbuff;
244 int i;
246 /* Freeing transmit skbs */
247 for(i = 0; i < NUM_TX_BUFFERS; i++){
248 if(lp->tx_skbuff[i]){
249 pci_unmap_single(lp->pci_dev,lp->tx_dma_addr[i], lp->tx_skbuff[i]->len,PCI_DMA_TODEVICE);
250 dev_kfree_skb (lp->tx_skbuff[i]);
251 lp->tx_skbuff[i] = NULL;
252 lp->tx_dma_addr[i] = 0;
255 /* Freeing previously allocated receive buffers */
256 for (i = 0; i < NUM_RX_BUFFERS; i++){
257 rx_skbuff = lp->rx_skbuff[i];
258 if(rx_skbuff != NULL){
259 pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[i],
260 lp->rx_buff_len - 2,PCI_DMA_FROMDEVICE);
261 dev_kfree_skb(lp->rx_skbuff[i]);
262 lp->rx_skbuff[i] = NULL;
263 lp->rx_dma_addr[i] = 0;
267 return 0;
271 This will set the receive buffer length corresponding to the MTU size of the network interface.
273 static inline void amd8111e_set_rx_buff_len(struct net_device* dev)
275 struct amd8111e_priv* lp = dev->priv;
276 unsigned int mtu = dev->mtu;
278 if (mtu > ETH_DATA_LEN){
279 /* MTU + ethernet header + FCS + optional VLAN tag */
280 lp->rx_buff_len = mtu + ETH_HLEN + 8;
281 lp->options |= OPTION_JUMBO_ENABLE;
282 } else{
283 lp->rx_buff_len = PKT_BUFF_SZ;
284 lp->options &= ~OPTION_JUMBO_ENABLE;
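/* Worked example (illustration): for a jumbo MTU of 9000 the buffer length
 * becomes 9000 + ETH_HLEN (14) + 8 = 9022 bytes, covering the MTU, Ethernet
 * header, FCS and an optional VLAN tag, and OPTION_JUMBO_ENABLE is set; for
 * the default MTU of 1500 the fixed PKT_BUFF_SZ allocation is used instead.
 */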
289 This function will free all the previously allocated buffers, determine the new receive buffer length and allocate new receive buffers. It also allocates and initializes both the transmit and receive hardware descriptors.
291 static int amd8111e_init_ring(struct net_device *dev)
293 struct amd8111e_priv *lp = (struct amd8111e_priv *)dev->priv;
294 int i;
296 lp->rx_idx = lp->tx_idx = 0;
297 lp->tx_complete_idx = 0;
298 lp->tx_ring_idx = 0;
301 if(lp->opened)
302 /* Free previously allocated transmit and receive skbs */
303 amd8111e_free_skbs(dev);
305 else{
306 /* allocate the tx and rx descriptors */
307 if((lp->tx_ring = pci_alloc_consistent(lp->pci_dev,
308 sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,
309 &lp->tx_ring_dma_addr)) == NULL)
311 goto err_no_mem;
313 if((lp->rx_ring = pci_alloc_consistent(lp->pci_dev,
314 sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,
315 &lp->rx_ring_dma_addr)) == NULL)
317 goto err_free_tx_ring;
320 /* Set new receive buff size */
321 amd8111e_set_rx_buff_len(dev);
323 /* Allocating receive skbs */
324 for (i = 0; i < NUM_RX_BUFFERS; i++) {
326 if (!(lp->rx_skbuff[i] = dev_alloc_skb(lp->rx_buff_len))) {
327 /* Release previously allocated skbs */
328 for(--i; i >= 0 ;i--)
329 dev_kfree_skb(lp->rx_skbuff[i]);
330 goto err_free_rx_ring;
332 skb_reserve(lp->rx_skbuff[i],2);
334 /* Initializing receive descriptors */
335 for (i = 0; i < NUM_RX_BUFFERS; i++) {
336 lp->rx_dma_addr[i] = pci_map_single(lp->pci_dev,
337 lp->rx_skbuff[i]->data,lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
339 lp->rx_ring[i].buff_phy_addr = cpu_to_le32(lp->rx_dma_addr[i]);
340 lp->rx_ring[i].buff_count = cpu_to_le16(lp->rx_buff_len);
341 lp->rx_ring[i].rx_flags = cpu_to_le16(OWN_BIT);
344 /* Initializing transmit descriptors */
345 for (i = 0; i < NUM_TX_RING_DR; i++) {
346 lp->tx_ring[i].buff_phy_addr = 0;
347 lp->tx_ring[i].tx_flags = 0;
348 lp->tx_ring[i].buff_count = 0;
351 return 0;
353 err_free_rx_ring:
355 pci_free_consistent(lp->pci_dev,
356 sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,lp->rx_ring,
357 lp->rx_ring_dma_addr);
359 err_free_tx_ring:
361 pci_free_consistent(lp->pci_dev,
362 sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,lp->tx_ring,
363 lp->tx_ring_dma_addr);
365 err_no_mem:
366 return -ENOMEM;
368 /* This function will set the interrupt coalescing according to the input arguments */
369 static int amd8111e_set_coalesce(struct net_device * dev, enum coal_mode cmod)
371 unsigned int timeout;
372 unsigned int event_count;
374 struct amd8111e_priv *lp = dev->priv;
375 void* mmio = lp->mmio;
376 struct amd8111e_coalesce_conf * coal_conf = &lp->coal_conf;
379 switch(cmod)
381 case RX_INTR_COAL :
382 timeout = coal_conf->rx_timeout;
383 event_count = coal_conf->rx_event_count;
384 if( timeout > MAX_TIMEOUT ||
385 event_count > MAX_EVENT_COUNT )
386 return -EINVAL;
388 timeout = timeout * DELAY_TIMER_CONV;
389 writel(VAL0|STINTEN, mmio+INTEN0);
390 writel((u32)DLY_INT_A_R0|( event_count<< 16 )|timeout,
391 mmio+DLY_INT_A);
392 break;
394 case TX_INTR_COAL :
395 timeout = coal_conf->tx_timeout;
396 event_count = coal_conf->tx_event_count;
397 if( timeout > MAX_TIMEOUT ||
398 event_count > MAX_EVENT_COUNT )
399 return -EINVAL;
402 timeout = timeout * DELAY_TIMER_CONV;
403 writel(VAL0|STINTEN,mmio+INTEN0);
404 writel((u32)DLY_INT_B_T0|( event_count<< 16 )|timeout,
405 mmio+DLY_INT_B);
406 break;
408 case DISABLE_COAL:
409 writel(0,mmio+STVAL);
410 writel(STINTEN, mmio+INTEN0);
411 writel(0, mmio +DLY_INT_B);
412 writel(0, mmio+DLY_INT_A);
413 break;
414 case ENABLE_COAL:
415 /* Start the timer */
416 writel((u32)SOFT_TIMER_FREQ, mmio+STVAL); /* 0.5 sec */
417 writel(VAL0|STINTEN, mmio+INTEN0);
418 break;
419 default:
420 break;
423 return 0;
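/* Example of the register encoding used above (illustration, values chosen
 * arbitrarily): with coal_conf->rx_timeout = 1 and rx_event_count = 4, the
 * RX_INTR_COAL case writes
 *
 *	DLY_INT_A_R0 | (4 << 16) | (1 * DELAY_TIMER_CONV)
 *
 * to DLY_INT_A, i.e. the event count is shifted into the upper half-word and
 * the converted timeout occupies the low bits of the delayed-interrupt
 * register.
 */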
428 This function initializes the device registers and starts the device.
430 static int amd8111e_restart(struct net_device *dev)
432 struct amd8111e_priv *lp = (struct amd8111e_priv* )dev->priv;
433 void * mmio = lp->mmio;
434 int i,reg_val;
436 /* stop the chip */
437 writel(RUN, mmio + CMD0);
439 if(amd8111e_init_ring(dev))
440 return -ENOMEM;
442 /* enable the port manager and set auto negotiation always */
443 writel((u32) VAL1|EN_PMGR, mmio + CMD3 );
444 writel((u32)XPHYANE|XPHYRST , mmio + CTRL2);
446 amd8111e_set_ext_phy(dev);
448 /* set control registers */
449 reg_val = readl(mmio + CTRL1);
450 reg_val &= ~XMTSP_MASK;
451 writel( reg_val| XMTSP_128 | CACHE_ALIGN, mmio + CTRL1 );
453 /* enable interrupt */
454 writel( APINT5EN | APINT4EN | APINT3EN | APINT2EN | APINT1EN |
455 APINT0EN | MIIPDTINTEN | MCCIINTEN | MCCINTEN | MREINTEN |
456 SPNDINTEN | MPINTEN | SINTEN | STINTEN, mmio + INTEN0);
458 writel(VAL3 | LCINTEN | VAL1 | TINTEN0 | VAL0 | RINTEN0, mmio + INTEN0);
460 /* initialize tx and rx ring base addresses */
461 writel((u32)lp->tx_ring_dma_addr,mmio + XMT_RING_BASE_ADDR0);
462 writel((u32)lp->rx_ring_dma_addr,mmio+ RCV_RING_BASE_ADDR0);
464 writew((u32)NUM_TX_RING_DR, mmio + XMT_RING_LEN0);
465 writew((u16)NUM_RX_RING_DR, mmio + RCV_RING_LEN0);
467 /* set default IPG to 96 */
468 writew((u32)DEFAULT_IPG,mmio+IPG);
469 writew((u32)(DEFAULT_IPG-IFS1_DELTA), mmio + IFS1);
471 if(lp->options & OPTION_JUMBO_ENABLE){
472 writel((u32)VAL2|JUMBO, mmio + CMD3);
473 /* Reset REX_UFLO */
474 writel( REX_UFLO, mmio + CMD2);
475 /* Should not set REX_UFLO for jumbo frames */
476 writel( VAL0 | APAD_XMT|REX_RTRY , mmio + CMD2);
477 }else{
478 writel( VAL0 | APAD_XMT | REX_RTRY|REX_UFLO, mmio + CMD2);
479 writel((u32)JUMBO, mmio + CMD3);
482 #if AMD8111E_VLAN_TAG_USED
483 writel((u32) VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3);
484 #endif
485 writel( VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2 );
487 /* Setting the MAC address to the device */
488 for(i = 0; i < ETH_ADDR_LEN; i++)
489 writeb( dev->dev_addr[i], mmio + PADR + i );
491 /* Enable interrupt coalesce */
492 if(lp->options & OPTION_INTR_COAL_ENABLE){
493 printk(KERN_INFO "%s: Interrupt Coalescing Enabled.\n",
494 dev->name);
495 amd8111e_set_coalesce(dev,ENABLE_COAL);
498 /* set RUN bit to start the chip */
499 writel(VAL2 | RDMD0, mmio + CMD0);
500 writel(VAL0 | INTREN | RUN, mmio + CMD0);
502 /* To avoid PCI posting bug */
503 readl(mmio+CMD0);
504 return 0;
507 This function clears the necessary device registers.
509 static void amd8111e_init_hw_default( struct amd8111e_priv* lp)
511 unsigned int reg_val;
512 unsigned int logic_filter[2] ={0,};
513 void * mmio = lp->mmio;
516 /* AUTOPOLL0 Register *//*TBD default value is 8100 in FPS */
517 writew( 0x8101, mmio + AUTOPOLL0);
519 /* Clear RCV_RING_BASE_ADDR */
520 writel(0, mmio + RCV_RING_BASE_ADDR0);
522 /* Clear XMT_RING_BASE_ADDR */
523 writel(0, mmio + XMT_RING_BASE_ADDR0);
524 writel(0, mmio + XMT_RING_BASE_ADDR1);
525 writel(0, mmio + XMT_RING_BASE_ADDR2);
526 writel(0, mmio + XMT_RING_BASE_ADDR3);
528 /* Clear CMD0 */
529 writel(CMD0_CLEAR,mmio + CMD0);
531 /* Clear CMD2 */
532 writel(CMD2_CLEAR, mmio +CMD2);
534 /* Clear CMD7 */
535 writel(CMD7_CLEAR , mmio + CMD7);
537 /* Clear DLY_INT_A and DLY_INT_B */
538 writel(0x0, mmio + DLY_INT_A);
539 writel(0x0, mmio + DLY_INT_B);
541 /* Clear FLOW_CONTROL */
542 writel(0x0, mmio + FLOW_CONTROL);
544 /* Clear INT0 write 1 to clear register */
545 reg_val = readl(mmio + INT0);
546 writel(reg_val, mmio + INT0);
548 /* Clear STVAL */
549 writel(0x0, mmio + STVAL);
551 /* Clear INTEN0 */
552 writel( INTEN0_CLEAR, mmio + INTEN0);
554 /* Clear LADRF */
555 writel(0x0 , mmio + LADRF);
557 /* Set SRAM_SIZE & SRAM_BOUNDARY registers */
558 writel( 0x80010,mmio + SRAM_SIZE);
560 /* Clear RCV_RING0_LEN */
561 writel(0x0, mmio + RCV_RING_LEN0);
563 /* Clear XMT_RING0/1/2/3_LEN */
564 writel(0x0, mmio + XMT_RING_LEN0);
565 writel(0x0, mmio + XMT_RING_LEN1);
566 writel(0x0, mmio + XMT_RING_LEN2);
567 writel(0x0, mmio + XMT_RING_LEN3);
569 /* Clear XMT_RING_LIMIT */
570 writel(0x0, mmio + XMT_RING_LIMIT);
572 /* Clear MIB */
573 writew(MIB_CLEAR, mmio + MIB_ADDR);
575 /* Clear LADRF */
576 amd8111e_writeq(*(u64*)logic_filter,mmio+LADRF);
578 /* SRAM_SIZE register */
579 reg_val = readl(mmio + SRAM_SIZE);
581 if(lp->options & OPTION_JUMBO_ENABLE)
582 writel( VAL2|JUMBO, mmio + CMD3);
583 #if AMD8111E_VLAN_TAG_USED
584 writel(VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3 );
585 #endif
586 /* Set default value to CTRL1 Register */
587 writel(CTRL1_DEFAULT, mmio + CTRL1);
589 /* To avoid PCI posting bug */
590 readl(mmio + CMD2);
595 This function disables interrupts and clears all the pending
596 interrupts in INT0
598 static void amd8111e_disable_interrupt(struct amd8111e_priv* lp)
600 u32 intr0;
602 /* Disable interrupt */
603 writel(INTREN, lp->mmio + CMD0);
605 /* Clear INT0 */
606 intr0 = readl(lp->mmio + INT0);
607 writel(intr0, lp->mmio + INT0);
609 /* To avoid PCI posting bug */
610 readl(lp->mmio + INT0);
615 This function stops the chip.
617 static void amd8111e_stop_chip(struct amd8111e_priv* lp)
619 writel(RUN, lp->mmio + CMD0);
621 /* To avoid PCI posting bug */
622 readl(lp->mmio + CMD0);
626 This function frees the transmit and receive descriptor rings.
628 static void amd8111e_free_ring(struct amd8111e_priv* lp)
631 /* Free transmit and receive skbs */
632 amd8111e_free_skbs(lp->amd8111e_net_dev);
634 /* Free transmit and receive descriptor rings */
635 if(lp->rx_ring){
636 pci_free_consistent(lp->pci_dev,
637 sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,
638 lp->rx_ring, lp->rx_ring_dma_addr);
639 lp->rx_ring = NULL;
642 if(lp->tx_ring){
643 pci_free_consistent(lp->pci_dev,
644 sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,
645 lp->tx_ring, lp->tx_ring_dma_addr);
647 lp->tx_ring = NULL;
651 #if AMD8111E_VLAN_TAG_USED
653 This is the receive indication function for packets with a VLAN tag.
655 static int amd8111e_vlan_rx(struct amd8111e_priv *lp, struct sk_buff *skb, u16 vlan_tag)
657 return vlan_hwaccel_rx(skb, lp->vlgrp, vlan_tag);
659 #endif
662 This function will free all the transmit skbs that have actually been transmitted by the device. It checks the ownership of each descriptor before freeing the skb.
664 static int amd8111e_tx(struct net_device *dev)
666 struct amd8111e_priv* lp = dev->priv;
667 int tx_index = lp->tx_complete_idx & TX_RING_DR_MOD_MASK;
668 int status;
669 /* Complete all the transmitted packets */
670 while (lp->tx_complete_idx != lp->tx_idx){
671 tx_index = lp->tx_complete_idx & TX_RING_DR_MOD_MASK;
672 status = le16_to_cpu(lp->tx_ring[tx_index].tx_flags);
674 if(status & OWN_BIT)
675 break; /* It still hasn't been Txed */
677 lp->tx_ring[tx_index].buff_phy_addr = 0;
679 /* We must free the original skb */
680 if (lp->tx_skbuff[tx_index]) {
681 pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[tx_index],
682 lp->tx_skbuff[tx_index]->len,
683 PCI_DMA_TODEVICE);
684 dev_kfree_skb_irq (lp->tx_skbuff[tx_index]);
685 lp->tx_skbuff[tx_index] = 0;
686 lp->tx_dma_addr[tx_index] = 0;
688 lp->tx_complete_idx++;
689 /*COAL update tx coalescing parameters */
690 lp->coal_conf.tx_packets++;
691 lp->coal_conf.tx_bytes += lp->tx_ring[tx_index].buff_count;
693 if (netif_queue_stopped(dev) &&
694 lp->tx_complete_idx > lp->tx_idx - NUM_TX_BUFFERS +2){
695 /* The ring is no longer full, clear tbusy. */
696 /* lp->tx_full = 0; */
697 netif_wake_queue (dev);
700 return 0;
704 This function will check the ownership of the receive buffers and descriptors. In a single receive interrupt it will indicate to the kernel up to half of the maximum number of receive buffers in the descriptor ring. It will also replenish the descriptors with new skbs.
706 static int amd8111e_rx(struct net_device *dev)
708 struct amd8111e_priv *lp = dev->priv;
709 struct sk_buff *skb,*new_skb;
710 int rx_index = lp->rx_idx & RX_RING_DR_MOD_MASK;
711 int min_pkt_len, status;
712 int num_rx_pkt = 0;
713 int max_rx_pkt = NUM_RX_BUFFERS/2;
714 short pkt_len;
715 #if AMD8111E_VLAN_TAG_USED
716 short vtag;
717 #endif
719 /* If we own the next entry, it's a new packet. Send it up. */
720 while(++num_rx_pkt <= max_rx_pkt){
721 if(lp->rx_ring[rx_index].rx_flags & OWN_BIT)
722 return 0;
724 /* check if err summary bit is set */
725 if(le16_to_cpu(lp->rx_ring[rx_index].rx_flags) & ERR_BIT){
727 * There is a tricky error noted by John Murphy,
728 * <murf@perftech.com> to Russ Nelson: Even with full-sized
729 * buffers it's possible for a jabber packet to use two
730 * buffers, with only the last correctly noting the error. */
731 /* resetting flags */
732 lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
733 goto err_next_pkt;
735 /* check for STP and ENP */
736 status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags);
737 if(!((status & STP_BIT) && (status & ENP_BIT))){
738 /* resetting flags */
739 lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
740 goto err_next_pkt;
742 pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4;
744 #if AMD8111E_VLAN_TAG_USED
745 vtag = le16_to_cpu(lp->rx_ring[rx_index].rx_flags) & TT_MASK;
746 /*MAC will strip vlan tag*/
747 if(lp->vlgrp != NULL && vtag !=0)
748 min_pkt_len =MIN_PKT_LEN - 4;
749 else
750 #endif
751 min_pkt_len =MIN_PKT_LEN;
753 if (pkt_len < min_pkt_len) {
754 lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
755 lp->stats.rx_errors++;
756 goto err_next_pkt;
758 if(!(new_skb = dev_alloc_skb(lp->rx_buff_len))){
759 /* if allocation fails,
760 ignore that packet and go to the next one */
761 lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
762 lp->stats.rx_errors++;
763 goto err_next_pkt;
766 skb_reserve(new_skb, 2);
767 skb = lp->rx_skbuff[rx_index];
768 pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index],
769 lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
770 skb_put(skb, pkt_len);
771 skb->dev = dev;
772 lp->rx_skbuff[rx_index] = new_skb;
773 new_skb->dev = dev;
774 lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev,
775 new_skb->data, lp->rx_buff_len-2,PCI_DMA_FROMDEVICE);
777 skb->protocol = eth_type_trans(skb, dev);
779 #if AMD8111E_VLAN_TAG_USED
781 vtag = lp->rx_ring[rx_index].rx_flags & TT_MASK;
782 if(lp->vlgrp != NULL && (vtag == TT_VLAN_TAGGED)){
783 amd8111e_vlan_rx(lp, skb,
784 lp->rx_ring[rx_index].tag_ctrl_info);
785 } else
786 #endif
788 netif_rx (skb);
789 /*COAL update rx coalescing parameters*/
790 lp->coal_conf.rx_packets++;
791 lp->coal_conf.rx_bytes += pkt_len;
793 dev->last_rx = jiffies;
795 err_next_pkt:
796 lp->rx_ring[rx_index].buff_phy_addr
797 = cpu_to_le32(lp->rx_dma_addr[rx_index]);
798 lp->rx_ring[rx_index].buff_count =
799 cpu_to_le16(lp->rx_buff_len-2);
800 lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT);
801 rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK;
804 return 0;
808 This function will indicate the link status to the kernel.
810 static int amd8111e_link_change(struct net_device* dev)
812 struct amd8111e_priv *lp = dev->priv;
813 int status0,speed;
815 /* read the link change */
816 status0 = readl(lp->mmio + STAT0);
818 if(status0 & LINK_STATS){
819 if(status0 & AUTONEG_COMPLETE)
820 lp->link_config.autoneg = AUTONEG_ENABLE;
821 else
822 lp->link_config.autoneg = AUTONEG_DISABLE;
824 if(status0 & FULL_DPLX)
825 lp->link_config.duplex = DUPLEX_FULL;
826 else
827 lp->link_config.duplex = DUPLEX_HALF;
828 speed = (status0 & SPEED_MASK) >> 7;
829 if(speed == PHY_SPEED_10)
830 lp->link_config.speed = SPEED_10;
831 else if(speed == PHY_SPEED_100)
832 lp->link_config.speed = SPEED_100;
834 printk(KERN_INFO "%s: Link is Up. Speed is %s Mbps %s Duplex\n", dev->name,
835 (lp->link_config.speed == SPEED_100) ? "100": "10",
836 (lp->link_config.duplex == DUPLEX_FULL)? "Full": "Half");
837 netif_carrier_on(dev);
839 else{
840 lp->link_config.speed = SPEED_INVALID;
841 lp->link_config.duplex = DUPLEX_INVALID;
842 lp->link_config.autoneg = AUTONEG_INVALID;
843 printk(KERN_INFO "%s: Link is Down.\n",dev->name);
844 netif_carrier_off(dev);
847 return 0;
850 This function reads the mib counters.
852 static int amd8111e_read_mib(void* mmio, u8 MIB_COUNTER)
854 unsigned int status;
855 unsigned int data;
856 unsigned int repeat = REPEAT_CNT;
858 writew( MIB_RD_CMD | MIB_COUNTER, mmio + MIB_ADDR);
859 do {
860 status = readw(mmio + MIB_ADDR);
861 udelay(2); /* controller takes MAX 2 us to get mib data */
863 while (--repeat && (status & MIB_CMD_ACTIVE));
865 data = readl(mmio + MIB_DATA);
866 return data;
870 This function reads the mib registers and returns the hardware statistics. It updates previous internal driver statistics with new values.
872 static struct net_device_stats *amd8111e_get_stats(struct net_device * dev)
874 struct amd8111e_priv *lp = dev->priv;
875 void * mmio = lp->mmio;
876 unsigned long flags;
877 /* struct net_device_stats *prev_stats = &lp->prev_stats; */
878 struct net_device_stats* new_stats = &lp->stats;
880 if(!lp->opened)
881 return &lp->stats;
882 spin_lock_irqsave (&lp->lock, flags);
884 /* stats.rx_packets */
885 new_stats->rx_packets = amd8111e_read_mib(mmio, rcv_broadcast_pkts)+
886 amd8111e_read_mib(mmio, rcv_multicast_pkts)+
887 amd8111e_read_mib(mmio, rcv_unicast_pkts);
889 /* stats.tx_packets */
890 new_stats->tx_packets = amd8111e_read_mib(mmio, xmt_packets);
892 /*stats.rx_bytes */
893 new_stats->rx_bytes = amd8111e_read_mib(mmio, rcv_octets);
895 /* stats.tx_bytes */
896 new_stats->tx_bytes = amd8111e_read_mib(mmio, xmt_octets);
898 /* stats.rx_errors */
899 new_stats->rx_errors = amd8111e_read_mib(mmio, rcv_undersize_pkts)+
900 amd8111e_read_mib(mmio, rcv_fragments)+
901 amd8111e_read_mib(mmio, rcv_jabbers)+
902 amd8111e_read_mib(mmio, rcv_alignment_errors)+
903 amd8111e_read_mib(mmio, rcv_fcs_errors)+
904 amd8111e_read_mib(mmio, rcv_miss_pkts);
906 /* stats.tx_errors */
907 new_stats->tx_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts);
909 /* stats.rx_dropped*/
910 new_stats->rx_dropped = amd8111e_read_mib(mmio, rcv_miss_pkts);
912 /* stats.tx_dropped*/
913 new_stats->tx_dropped = amd8111e_read_mib(mmio, xmt_underrun_pkts);
915 /* stats.multicast*/
916 new_stats->multicast = amd8111e_read_mib(mmio, rcv_multicast_pkts);
918 /* stats.collisions*/
919 new_stats->collisions = amd8111e_read_mib(mmio, xmt_collisions);
921 /* stats.rx_length_errors*/
922 new_stats->rx_length_errors =
923 amd8111e_read_mib(mmio, rcv_undersize_pkts)+
924 amd8111e_read_mib(mmio, rcv_oversize_pkts);
926 /* stats.rx_over_errors*/
927 new_stats->rx_over_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);
929 /* stats.rx_crc_errors*/
930 new_stats->rx_crc_errors = amd8111e_read_mib(mmio, rcv_fcs_errors);
932 /* stats.rx_frame_errors*/
933 new_stats->rx_frame_errors =
934 amd8111e_read_mib(mmio, rcv_alignment_errors);
936 /* stats.rx_fifo_errors */
937 new_stats->rx_fifo_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);
939 /* stats.rx_missed_errors */
940 new_stats->rx_missed_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);
942 /* stats.tx_aborted_errors*/
943 new_stats->tx_aborted_errors =
944 amd8111e_read_mib(mmio, xmt_excessive_collision);
946 /* stats.tx_carrier_errors*/
947 new_stats->tx_carrier_errors =
948 amd8111e_read_mib(mmio, xmt_loss_carrier);
950 /* stats.tx_fifo_errors*/
951 new_stats->tx_fifo_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts);
953 /* stats.tx_window_errors*/
954 new_stats->tx_window_errors =
955 amd8111e_read_mib(mmio, xmt_late_collision);
957 /* Reset the mibs for collecting new statistics */
958 /* writew(MIB_CLEAR, mmio + MIB_ADDR);*/
960 spin_unlock_irqrestore (&lp->lock, flags);
962 return new_stats;
964 /* This function recalculates the interrupt coalescing mode on every interrupt
965 according to the data rate and the packet rate.
967 static int amd8111e_calc_coalesce(struct net_device *dev)
969 struct amd8111e_priv *lp = dev->priv;
970 struct amd8111e_coalesce_conf * coal_conf = &lp->coal_conf;
971 int tx_pkt_rate;
972 int rx_pkt_rate;
973 int tx_data_rate;
974 int rx_data_rate;
975 int rx_pkt_size;
976 int tx_pkt_size;
978 tx_pkt_rate = coal_conf->tx_packets - coal_conf->tx_prev_packets;
979 coal_conf->tx_prev_packets = coal_conf->tx_packets;
981 tx_data_rate = coal_conf->tx_bytes - coal_conf->tx_prev_bytes;
982 coal_conf->tx_prev_bytes = coal_conf->tx_bytes;
984 rx_pkt_rate = coal_conf->rx_packets - coal_conf->rx_prev_packets;
985 coal_conf->rx_prev_packets = coal_conf->rx_packets;
987 rx_data_rate = coal_conf->rx_bytes - coal_conf->rx_prev_bytes;
988 coal_conf->rx_prev_bytes = coal_conf->rx_bytes;
990 if(rx_pkt_rate < 800){
991 if(coal_conf->rx_coal_type != NO_COALESCE){
993 coal_conf->rx_timeout = 0x0;
994 coal_conf->rx_event_count = 0;
995 amd8111e_set_coalesce(dev,RX_INTR_COAL);
996 coal_conf->rx_coal_type = NO_COALESCE;
999 else{
1001 rx_pkt_size = rx_data_rate/rx_pkt_rate;
1002 if (rx_pkt_size < 128){
1003 if(coal_conf->rx_coal_type != NO_COALESCE){
1005 coal_conf->rx_timeout = 0;
1006 coal_conf->rx_event_count = 0;
1007 amd8111e_set_coalesce(dev,RX_INTR_COAL);
1008 coal_conf->rx_coal_type = NO_COALESCE;
1012 else if ( (rx_pkt_size >= 128) && (rx_pkt_size < 512) ){
1014 if(coal_conf->rx_coal_type != LOW_COALESCE){
1015 coal_conf->rx_timeout = 1;
1016 coal_conf->rx_event_count = 4;
1017 amd8111e_set_coalesce(dev,RX_INTR_COAL);
1018 coal_conf->rx_coal_type = LOW_COALESCE;
1021 else if ((rx_pkt_size >= 512) && (rx_pkt_size < 1024)){
1023 if(coal_conf->rx_coal_type != MEDIUM_COALESCE){
1024 coal_conf->rx_timeout = 1;
1025 coal_conf->rx_event_count = 4;
1026 amd8111e_set_coalesce(dev,RX_INTR_COAL);
1027 coal_conf->rx_coal_type = MEDIUM_COALESCE;
1031 else if(rx_pkt_size >= 1024){
1032 if(coal_conf->rx_coal_type != HIGH_COALESCE){
1033 coal_conf->rx_timeout = 2;
1034 coal_conf->rx_event_count = 3;
1035 amd8111e_set_coalesce(dev,RX_INTR_COAL);
1036 coal_conf->rx_coal_type = HIGH_COALESCE;
1040 /* Now for TX interrupt coalescing */
1041 if(tx_pkt_rate < 800){
1042 if(coal_conf->tx_coal_type != NO_COALESCE){
1044 coal_conf->tx_timeout = 0x0;
1045 coal_conf->tx_event_count = 0;
1046 amd8111e_set_coalesce(dev,TX_INTR_COAL);
1047 coal_conf->tx_coal_type = NO_COALESCE;
1050 else{
1052 tx_pkt_size = tx_data_rate/tx_pkt_rate;
1053 if (tx_pkt_size < 128){
1055 if(coal_conf->tx_coal_type != NO_COALESCE){
1057 coal_conf->tx_timeout = 0;
1058 coal_conf->tx_event_count = 0;
1059 amd8111e_set_coalesce(dev,TX_INTR_COAL);
1060 coal_conf->tx_coal_type = NO_COALESCE;
1064 else if ( (tx_pkt_size >= 128) && (tx_pkt_size < 512) ){
1066 if(coal_conf->tx_coal_type != LOW_COALESCE){
1067 coal_conf->tx_timeout = 1;
1068 coal_conf->tx_event_count = 2;
1069 amd8111e_set_coalesce(dev,TX_INTR_COAL);
1070 coal_conf->tx_coal_type = LOW_COALESCE;
1074 else if ((tx_pkt_size >= 512) && (tx_pkt_size < 1024)){
1076 if(coal_conf->tx_coal_type != MEDIUM_COALESCE){
1077 coal_conf->tx_timeout = 2;
1078 coal_conf->tx_event_count = 5;
1079 amd8111e_set_coalesce(dev,TX_INTR_COAL);
1080 coal_conf->tx_coal_type = MEDIUM_COALESCE;
1084 else if(tx_pkt_size >= 1024){
1085 if (tx_pkt_size >= 1024){
1086 if(coal_conf->tx_coal_type != HIGH_COALESCE){
1087 coal_conf->tx_timeout = 4;
1088 coal_conf->tx_event_count = 8;
1089 amd8111e_set_coalesce(dev,TX_INTR_COAL);
1090 coal_conf->tx_coal_type = HIGH_COALESCE;
1095 return 0;
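/* Example of the adaptation above (thresholds taken from the code): if at
 * least 800 receive packets arrive per timer interval and the average packet
 * size is 1024 bytes or more, the receive side is programmed to HIGH_COALESCE
 * (timeout 2, event count 3); if the receive packet rate drops below 800 per
 * interval, coalescing is switched back to NO_COALESCE.
 */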
1099 This is the device interrupt handler. It handles transmit, receive, link change and hardware timer interrupts.
1101 static irqreturn_t amd8111e_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1104 struct net_device * dev = (struct net_device *) dev_id;
1105 struct amd8111e_priv *lp = dev->priv;
1106 void * mmio = lp->mmio;
1107 unsigned int intr0;
1108 unsigned int handled = 1;
1110 if(dev == NULL)
1111 return IRQ_NONE;
1113 if (regs) spin_lock (&lp->lock);
1114 /* disabling interrupt */
1115 writel(INTREN, mmio + CMD0);
1117 /* Read interrupt status */
1118 intr0 = readl(mmio + INT0);
1120 /* Process all the INT event until INTR bit is clear. */
1122 if (!(intr0 & INTR)) {
1123 handled = 0;
1124 goto err_no_interrupt;
1127 /* Current driver processes 3 interrupts : RINT,TINT,LCINT */
1128 writel(intr0, mmio + INT0);
1130 /* Check if Receive Interrupt has occurred. */
1131 if(intr0 & RINT0){
1132 amd8111e_rx(dev);
1133 writel(VAL2 | RDMD0, mmio + CMD0);
1136 /* Check if Transmit Interrupt has occurred. */
1137 if(intr0 & TINT0)
1138 amd8111e_tx(dev);
1140 /* Check if Link Change Interrupt has occurred. */
1141 if (intr0 & LCINT)
1142 amd8111e_link_change(dev);
1144 /* Check if Hardware Timer Interrupt has occurred. */
1145 if (intr0 & STINT)
1146 amd8111e_calc_coalesce(dev);
1148 err_no_interrupt:
1149 writel( VAL0 | INTREN,mmio + CMD0);
1151 if (regs) spin_unlock(&lp->lock);
1153 return IRQ_RETVAL(handled);
1157 This function closes the network interface and updates the statistics so that the most recent statistics are available after the interface is down.
1159 static int amd8111e_close(struct net_device * dev)
1161 struct amd8111e_priv *lp = dev->priv;
1162 netif_stop_queue(dev);
1164 spin_lock_irq(&lp->lock);
1166 amd8111e_disable_interrupt(lp);
1167 amd8111e_stop_chip(lp);
1168 amd8111e_free_ring(lp);
1170 netif_carrier_off(lp->amd8111e_net_dev);
1172 /* Delete ipg timer */
1173 if(lp->options & OPTION_DYN_IPG_ENABLE)
1174 del_timer_sync(&lp->ipg_data.ipg_timer);
1176 spin_unlock_irq(&lp->lock);
1177 free_irq(dev->irq, dev);
1179 /* Update the statistics before closing */
1180 amd8111e_get_stats(dev);
1181 lp->opened = 0;
1182 return 0;
1184 /* This function opens a new interface. It requests an IRQ for the device, initializes the device, buffers and descriptors, and starts the device.
1186 static int amd8111e_open(struct net_device * dev )
1188 struct amd8111e_priv *lp = (struct amd8111e_priv *)dev->priv;
1190 if(dev->irq ==0 || request_irq(dev->irq, amd8111e_interrupt, SA_SHIRQ,
1191 dev->name, dev))
1192 return -EAGAIN;
1194 spin_lock_irq(&lp->lock);
1196 amd8111e_init_hw_default(lp);
1198 if(amd8111e_restart(dev)){
1199 spin_unlock_irq(&lp->lock);
1200 return -ENOMEM;
1202 /* Start ipg timer */
1203 if(lp->options & OPTION_DYN_IPG_ENABLE){
1204 add_timer(&lp->ipg_data.ipg_timer);
1205 printk(KERN_INFO "%s: Dynamic IPG Enabled.\n",dev->name);
1208 lp->opened = 1;
1210 spin_unlock_irq(&lp->lock);
1212 netif_start_queue(dev);
1214 return 0;
1217 This function checks whether any transmit descriptors are available to queue more packets.
1219 static int amd8111e_tx_queue_avail(struct amd8111e_priv* lp )
1221 int tx_index = lp->tx_idx & TX_BUFF_MOD_MASK;
1222 if(lp->tx_skbuff[tx_index] != 0)
1223 return -1;
1224 else
1225 return 0;
1229 This function will queue the transmit packets to the descriptors and trigger the send operation. It also initializes the transmit descriptors with the buffer physical address, byte count, ownership to the hardware, etc.
1232 static int amd8111e_start_xmit(struct sk_buff *skb, struct net_device * dev)
1234 struct amd8111e_priv *lp = dev->priv;
1235 int tx_index;
1236 unsigned long flags;
1238 spin_lock_irqsave(&lp->lock, flags);
1240 tx_index = lp->tx_idx & TX_RING_DR_MOD_MASK;
1242 lp->tx_ring[tx_index].buff_count = cpu_to_le16(skb->len);
1244 lp->tx_skbuff[tx_index] = skb;
1245 lp->tx_ring[tx_index].tx_flags = 0;
1247 #if AMD8111E_VLAN_TAG_USED
1248 if((lp->vlgrp != NULL) && vlan_tx_tag_present(skb)){
1249 lp->tx_ring[tx_index].tag_ctrl_cmd |=
1250 cpu_to_le32(TCC_VLAN_INSERT);
1251 lp->tx_ring[tx_index].tag_ctrl_info =
1252 cpu_to_le16(vlan_tx_tag_get(skb));
1255 #endif
1256 lp->tx_dma_addr[tx_index] =
1257 pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
1258 lp->tx_ring[tx_index].buff_phy_addr =
1259 (u32) cpu_to_le32(lp->tx_dma_addr[tx_index]);
1261 /* Set FCS and LTINT bits */
1262 lp->tx_ring[tx_index].tx_flags |=
1263 cpu_to_le16(OWN_BIT | STP_BIT | ENP_BIT|ADD_FCS_BIT|LTINT_BIT);
1265 lp->tx_idx++;
1267 /* Trigger an immediate send poll. */
1268 writel( VAL1 | TDMD0, lp->mmio + CMD0);
1269 writel( VAL2 | RDMD0,lp->mmio + CMD0);
1271 dev->trans_start = jiffies;
1273 if(amd8111e_tx_queue_avail(lp) < 0){
1274 netif_stop_queue(dev);
1276 spin_unlock_irqrestore(&lp->lock, flags);
1277 return 0;
1280 This function returns all the memory mapped registers of the device.
1282 static char* amd8111e_read_regs(struct amd8111e_priv* lp)
1284 void * mmio = lp->mmio;
1285 u32 * reg_buff;
1287 reg_buff = kmalloc( AMD8111E_REG_DUMP_LEN,GFP_KERNEL);
1288 if(NULL == reg_buff)
1289 return NULL;
1291 /* Read only necessary registers */
1292 reg_buff[0] = readl(mmio + XMT_RING_BASE_ADDR0);
1293 reg_buff[1] = readl(mmio + XMT_RING_LEN0);
1294 reg_buff[2] = readl(mmio + RCV_RING_BASE_ADDR0);
1295 reg_buff[3] = readl(mmio + RCV_RING_LEN0);
1296 reg_buff[4] = readl(mmio + CMD0);
1297 reg_buff[5] = readl(mmio + CMD2);
1298 reg_buff[6] = readl(mmio + CMD3);
1299 reg_buff[7] = readl(mmio + CMD7);
1300 reg_buff[8] = readl(mmio + INT0);
1301 reg_buff[9] = readl(mmio + INTEN0);
1302 reg_buff[10] = readl(mmio + LADRF);
1303 reg_buff[11] = readl(mmio + LADRF+4);
1304 reg_buff[12] = readl(mmio + STAT0);
1306 return (char *)reg_buff;
1309 The amd8111e CRC generator implementation is different from the kernel's
1310 ether_crc() function.
1312 int amd8111e_ether_crc(int len, char* mac_addr)
1314 int i,byte;
1315 unsigned char octet;
1316 u32 crc= INITCRC;
1318 for(byte=0; byte < len; byte++){
1319 octet = mac_addr[byte];
1320 for( i=0;i < 8; i++){
1321 /* If the next bit from the input stream is 1, subtract the divisor (CRC32) from the dividend (crc). */
1322 if( (octet & 0x1) ^ (crc & 0x1) ){
1323 crc >>= 1;
1324 crc ^= CRC32;
1326 else
1327 crc >>= 1;
1329 octet >>= 1;
1332 return crc;
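/* Usage sketch (mirrors amd8111e_set_multicast_list() below): the top six
 * bits of the returned CRC select one of the 64 logical address filter bits,
 * e.g.
 *
 *	bit_num = (amd8111e_ether_crc(ETH_ALEN, mc_ptr->dmi_addr) >> 26) & 0x3f;
 *	mc_filter[bit_num >> 5] |= 1 << (bit_num & 31);
 */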
1335 This function sets promiscuous mode, all-multi mode or the multicast
1336 address list on the device.
1338 static void amd8111e_set_multicast_list(struct net_device *dev)
1340 struct dev_mc_list* mc_ptr;
1341 struct amd8111e_priv *lp = dev->priv;
1342 u32 mc_filter[2] ;
1343 int i,bit_num;
1344 if(dev->flags & IFF_PROMISC){
1345 printk(KERN_INFO "%s: Setting promiscuous mode.\n",dev->name);
1346 writel( VAL2 | PROM, lp->mmio + CMD2);
1347 return;
1349 else
1350 writel( PROM, lp->mmio + CMD2);
1351 if(dev->flags & IFF_ALLMULTI || dev->mc_count > MAX_FILTER_SIZE){
1352 /* get all multicast packet */
1353 mc_filter[1] = mc_filter[0] = 0xffffffff;
1354 lp->mc_list = dev->mc_list;
1355 lp->options |= OPTION_MULTICAST_ENABLE;
1356 amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF);
1357 return;
1359 if( dev->mc_count == 0 ){
1360 /* get only own packets */
1361 mc_filter[1] = mc_filter[0] = 0;
1362 lp->mc_list = 0;
1363 lp->options &= ~OPTION_MULTICAST_ENABLE;
1364 amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF);
1365 /* disable promiscuous mode */
1366 writel(PROM, lp->mmio + CMD2);
1367 return;
1369 /* load all the multicast addresses in the logic filter */
1370 lp->options |= OPTION_MULTICAST_ENABLE;
1371 lp->mc_list = dev->mc_list;
1372 mc_filter[1] = mc_filter[0] = 0;
1373 for (i = 0, mc_ptr = dev->mc_list; mc_ptr && i < dev->mc_count;
1374 i++, mc_ptr = mc_ptr->next) {
1375 bit_num = ( amd8111e_ether_crc(ETH_ALEN,mc_ptr->dmi_addr) >> 26 ) & 0x3f;
1376 mc_filter[bit_num >> 5] |= 1 << (bit_num & 31);
1378 amd8111e_writeq(*(u64*)mc_filter,lp->mmio+ LADRF);
1380 /* To eliminate PCI posting bug */
1381 readl(lp->mmio + CMD2);
1386 This function handles all the ethtool ioctls. It reports driver information, gets/sets link speed and duplex, dumps the memory-mapped register values, restarts auto-negotiation, and gets/sets the WOL options for the ethtool application.
1389 static int amd8111e_ethtool_ioctl(struct net_device* dev, void* useraddr)
1391 struct amd8111e_priv *lp = dev->priv;
1392 struct pci_dev *pci_dev = lp->pci_dev;
1393 u32 ethcmd;
1395 if( useraddr == NULL)
1396 return -EINVAL;
1397 if(copy_from_user (&ethcmd, useraddr, sizeof (ethcmd)))
1398 return -EFAULT;
1400 switch(ethcmd){
1402 case ETHTOOL_GDRVINFO:{
1403 struct ethtool_drvinfo info = { ETHTOOL_GDRVINFO };
1404 strcpy (info.driver, MODULE_NAME);
1405 strcpy (info.version, MODULE_VERSION);
1406 memset(&info.fw_version, 0, sizeof(info.fw_version));
1407 sprintf(info.fw_version,"%u",chip_version);
1408 strcpy (info.bus_info, pci_dev->slot_name);
1409 info.eedump_len = 0;
1410 info.regdump_len = AMD8111E_REG_DUMP_LEN;
1411 if (copy_to_user (useraddr, &info, sizeof(info)))
1412 return -EFAULT;
1413 return 0;
1415 /* get settings */
1416 case ETHTOOL_GSET: {
1417 struct ethtool_cmd ecmd = { ETHTOOL_GSET };
1418 spin_lock_irq(&lp->lock);
1419 mii_ethtool_gset(&lp->mii_if, &ecmd);
1420 spin_unlock_irq(&lp->lock);
1421 if (copy_to_user(useraddr, &ecmd, sizeof(ecmd)))
1422 return -EFAULT;
1423 return 0;
1425 /* set settings */
1426 case ETHTOOL_SSET: {
1427 int r;
1428 struct ethtool_cmd ecmd;
1429 if (copy_from_user(&ecmd, useraddr, sizeof(ecmd)))
1430 return -EFAULT;
1432 spin_lock_irq(&lp->lock);
1433 r = mii_ethtool_sset(&lp->mii_if, &ecmd);
1434 spin_unlock_irq(&lp->lock);
1435 return r;
1437 case ETHTOOL_GREGS: {
1438 struct ethtool_regs regs;
1439 u8 *regbuf;
1440 int ret;
1442 if (copy_from_user(&regs, useraddr, sizeof(regs)))
1443 return -EFAULT;
1444 if (regs.len > AMD8111E_REG_DUMP_LEN)
1445 regs.len = AMD8111E_REG_DUMP_LEN;
1446 regs.version = 0;
1447 if (copy_to_user(useraddr, &regs, sizeof(regs)))
1448 return -EFAULT;
1450 regbuf = amd8111e_read_regs(lp);
1451 if (!regbuf)
1452 return -ENOMEM;
1454 useraddr += offsetof(struct ethtool_regs, data);
1455 ret = 0;
1456 if (copy_to_user(useraddr, regbuf, regs.len))
1457 ret = -EFAULT;
1458 kfree(regbuf);
1459 return ret;
1461 /* restart autonegotiation */
1462 case ETHTOOL_NWAY_RST: {
1463 return mii_nway_restart(&lp->mii_if);
1465 /* get link status */
1466 case ETHTOOL_GLINK: {
1467 struct ethtool_value val = {ETHTOOL_GLINK};
1468 val.data = mii_link_ok(&lp->mii_if);
1469 if (copy_to_user(useraddr, &val, sizeof(val)))
1470 return -EFAULT;
1471 return 0;
1473 case ETHTOOL_GWOL: {
1474 struct ethtool_wolinfo wol_info = { ETHTOOL_GWOL };
1476 wol_info.supported = WAKE_MAGIC|WAKE_PHY;
1477 wol_info.wolopts = 0;
1478 if (lp->options & OPTION_WOL_ENABLE)
1479 wol_info.wolopts = WAKE_MAGIC;
1480 memset(&wol_info.sopass, 0, sizeof(wol_info.sopass));
1481 if (copy_to_user(useraddr, &wol_info, sizeof(wol_info)))
1482 return -EFAULT;
1483 return 0;
1485 case ETHTOOL_SWOL: {
1486 struct ethtool_wolinfo wol_info;
1488 if (copy_from_user(&wol_info, useraddr, sizeof(wol_info)))
1489 return -EFAULT;
1490 if (wol_info.wolopts & ~(WAKE_MAGIC |WAKE_PHY))
1491 return -EINVAL;
1492 spin_lock_irq(&lp->lock);
1493 if(wol_info.wolopts & WAKE_MAGIC)
1494 lp->options |=
1495 (OPTION_WOL_ENABLE | OPTION_WAKE_MAGIC_ENABLE);
1496 else if(wol_info.wolopts & WAKE_PHY)
1497 lp->options |=
1498 (OPTION_WOL_ENABLE | OPTION_WAKE_PHY_ENABLE);
1499 else
1500 lp->options &= ~OPTION_WOL_ENABLE;
1501 spin_unlock_irq(&lp->lock);
1502 return 0;
1505 default:
1506 break;
1508 return -EOPNOTSUPP;
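/* For illustration (assumed userspace invocations, not part of this file):
 * "ethtool eth0" exercises ETHTOOL_GSET, "ethtool -r eth0" maps to
 * ETHTOOL_NWAY_RST, "ethtool -d eth0" to ETHTOOL_GREGS, and
 * "ethtool -s eth0 wol g" to ETHTOOL_SWOL with WAKE_MAGIC.
 */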
1510 static int amd8111e_ioctl(struct net_device * dev , struct ifreq *ifr, int cmd)
1512 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&ifr->ifr_data;
1513 struct amd8111e_priv *lp = dev->priv;
1514 int err;
1515 u32 mii_regval;
1517 if (!capable(CAP_NET_ADMIN))
1518 return -EPERM;
1520 switch(cmd) {
1521 case SIOCETHTOOL:
1522 return amd8111e_ethtool_ioctl(dev, (void *) ifr->ifr_data);
1523 case SIOCGMIIPHY:
1524 data->phy_id = PHY_ID;
1526 /* fallthru */
1527 case SIOCGMIIREG:
1529 spin_lock_irq(&lp->lock);
1530 err = amd8111e_read_phy(lp, data->phy_id,
1531 data->reg_num & PHY_REG_ADDR_MASK, &mii_regval);
1532 spin_unlock_irq(&lp->lock);
1534 data->val_out = mii_regval;
1535 return err;
1537 case SIOCSMIIREG:
1539 spin_lock_irq(&lp->lock);
1540 err = amd8111e_write_phy(lp, data->phy_id,
1541 data->reg_num & PHY_REG_ADDR_MASK, data->val_in);
1542 spin_unlock_irq(&lp->lock);
1544 return err;
1546 default:
1547 /* do nothing */
1548 break;
1550 return -EOPNOTSUPP;
1553 This function changes the MTU of the device. It restarts the device to reinitialize the descriptors with new receive buffers.
1555 int amd8111e_change_mtu(struct net_device *dev, int new_mtu)
1557 struct amd8111e_priv *lp = dev->priv;
1558 int err;
1560 if ((new_mtu < AMD8111E_MIN_MTU) || (new_mtu > AMD8111E_MAX_MTU))
1561 return -EINVAL;
1563 if (!netif_running(dev)) {
1564 /* new_mtu will be used
1565 when the device starts next time */
1566 dev->mtu = new_mtu;
1567 return 0;
1570 spin_lock_irq(&lp->lock);
1572 /* stop the chip */
1573 writel(RUN, lp->mmio + CMD0);
1575 dev->mtu = new_mtu;
1577 err = amd8111e_restart(dev);
1578 spin_unlock_irq(&lp->lock);
1579 if(!err)
1580 netif_start_queue(dev);
1581 return err;
1584 #if AMD8111E_VLAN_TAG_USED
1585 static void amd8111e_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
1587 struct amd8111e_priv *lp = dev->priv;
1588 spin_lock_irq(&lp->lock);
1589 lp->vlgrp = grp;
1590 spin_unlock_irq(&lp->lock);
1593 static void amd8111e_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
1595 struct amd8111e_priv *lp = dev->priv;
1596 spin_lock_irq(&lp->lock);
1597 if (lp->vlgrp)
1598 lp->vlgrp->vlan_devices[vid] = NULL;
1599 spin_unlock_irq(&lp->lock);
1601 #endif
1602 static int amd8111e_enable_magicpkt(struct amd8111e_priv* lp)
1604 writel( VAL1|MPPLBA, lp->mmio + CMD3);
1605 writel( VAL0|MPEN_SW, lp->mmio + CMD7);
1607 /* To eliminate PCI posting bug */
1608 readl(lp->mmio + CMD7);
1609 return 0;
1612 static int amd8111e_enable_link_change(struct amd8111e_priv* lp)
1615 /* Adapter is already stopped/suspended/interrupt-disabled */
1616 writel(VAL0|LCMODE_SW,lp->mmio + CMD7);
1618 /* To eliminate PCI posting bug */
1619 readl(lp->mmio + CMD7);
1620 return 0;
1622 /* This function is called when a packet transmission fails to complete within a reasonable period, on the assumption that an interrupt has been lost or the interface is locked up. This function will reinitialize the hardware. */
1624 static void amd8111e_tx_timeout(struct net_device *dev)
1626 struct amd8111e_priv* lp = dev->priv;
1627 int err;
1629 printk(KERN_ERR "%s: transmit timed out, resetting\n",
1630 dev->name);
1631 spin_lock_irq(&lp->lock);
1632 err = amd8111e_restart(dev);
1633 spin_unlock_irq(&lp->lock);
1634 if(!err)
1635 netif_wake_queue(dev);
1637 static int amd8111e_suspend(struct pci_dev *pci_dev, u32 state)
1639 struct net_device *dev = pci_get_drvdata(pci_dev);
1640 struct amd8111e_priv *lp = dev->priv;
1642 if (!netif_running(dev))
1643 return 0;
1645 /* disable the interrupt */
1646 spin_lock_irq(&lp->lock);
1647 amd8111e_disable_interrupt(lp);
1648 spin_unlock_irq(&lp->lock);
1650 netif_device_detach(dev);
1652 /* stop chip */
1653 spin_lock_irq(&lp->lock);
1654 if(lp->options & OPTION_DYN_IPG_ENABLE)
1655 del_timer_sync(&lp->ipg_data.ipg_timer);
1656 amd8111e_stop_chip(lp);
1657 spin_unlock_irq(&lp->lock);
1659 if(lp->options & OPTION_WOL_ENABLE){
1660 /* enable wol */
1661 if(lp->options & OPTION_WAKE_MAGIC_ENABLE)
1662 amd8111e_enable_magicpkt(lp);
1663 if(lp->options & OPTION_WAKE_PHY_ENABLE)
1664 amd8111e_enable_link_change(lp);
1666 pci_enable_wake(pci_dev, 3, 1);
1667 pci_enable_wake(pci_dev, 4, 1); /* D3 cold */
1670 else{
1671 pci_enable_wake(pci_dev, 3, 0);
1672 pci_enable_wake(pci_dev, 4, 0); /* 4 == D3 cold */
1675 pci_save_state(pci_dev, lp->pm_state);
1676 pci_set_power_state(pci_dev, 3);
1678 return 0;
1680 static int amd8111e_resume(struct pci_dev *pci_dev)
1682 struct net_device *dev = pci_get_drvdata(pci_dev);
1683 struct amd8111e_priv *lp = dev->priv;
1685 if (!netif_running(dev))
1686 return 0;
1688 pci_set_power_state(pci_dev, 0);
1689 pci_restore_state(pci_dev, lp->pm_state);
1691 pci_enable_wake(pci_dev, 3, 0);
1692 pci_enable_wake(pci_dev, 4, 0); /* D3 cold */
1694 netif_device_attach(dev);
1696 spin_lock_irq(&lp->lock);
1697 amd8111e_restart(dev);
1698 /* Restart ipg timer */
1699 if(lp->options & OPTION_DYN_IPG_ENABLE)
1700 mod_timer(&lp->ipg_data.ipg_timer,
1701 jiffies + (IPG_CONVERGE_TIME * HZ));
1702 spin_unlock_irq(&lp->lock);
1704 return 0;
1708 static void __devexit amd8111e_remove_one(struct pci_dev *pdev)
1710 struct net_device *dev = pci_get_drvdata(pdev);
1711 if (dev) {
1712 unregister_netdev(dev);
1713 iounmap((void *) ((struct amd8111e_priv *)(dev->priv))->mmio);
1714 kfree(dev);
1715 pci_release_regions(pdev);
1716 pci_disable_device(pdev);
1717 pci_set_drvdata(pdev, NULL);
1720 static void amd8111e_config_ipg(struct net_device* dev)
1722 struct amd8111e_priv *lp = dev->priv;
1723 struct ipg_info* ipg_data = &lp->ipg_data;
1724 void * mmio = lp->mmio;
1725 unsigned int prev_col_cnt = ipg_data->col_cnt;
1726 unsigned int total_col_cnt;
1727 unsigned int tmp_ipg;
1729 if(lp->link_config.duplex == DUPLEX_FULL){
1730 ipg_data->ipg = DEFAULT_IPG;
1731 return;
1734 if(ipg_data->ipg_state == SSTATE){
1736 if(ipg_data->timer_tick == IPG_STABLE_TIME){
1738 ipg_data->timer_tick = 0;
1739 ipg_data->ipg = MIN_IPG - IPG_STEP;
1740 ipg_data->current_ipg = MIN_IPG;
1741 ipg_data->diff_col_cnt = 0xFFFFFFFF;
1742 ipg_data->ipg_state = CSTATE;
1744 else
1745 ipg_data->timer_tick++;
1748 if(ipg_data->ipg_state == CSTATE){
1750 /* Get the current collision count */
1752 total_col_cnt = ipg_data->col_cnt =
1753 amd8111e_read_mib(mmio, xmt_collisions);
1755 if ((total_col_cnt - prev_col_cnt) <
1756 (ipg_data->diff_col_cnt)){
1758 ipg_data->diff_col_cnt =
1759 total_col_cnt - prev_col_cnt ;
1761 ipg_data->ipg = ipg_data->current_ipg;
1764 ipg_data->current_ipg += IPG_STEP;
1766 if (ipg_data->current_ipg <= MAX_IPG)
1767 tmp_ipg = ipg_data->current_ipg;
1768 else{
1769 tmp_ipg = ipg_data->ipg;
1770 ipg_data->ipg_state = SSTATE;
1772 writew((u32)tmp_ipg, mmio + IPG);
1773 writew((u32)(tmp_ipg - IFS1_DELTA), mmio + IFS1);
1775 mod_timer(&lp->ipg_data.ipg_timer, jiffies + (IPG_CONVERGE_TIME * HZ));
1776 return;
1780 static int __devinit amd8111e_probe_one(struct pci_dev *pdev,
1781 const struct pci_device_id *ent)
1783 int err,i,pm_cap;
1784 unsigned long reg_addr,reg_len;
1785 struct amd8111e_priv* lp;
1786 struct net_device* dev;
1788 err = pci_enable_device(pdev);
1789 if(err){
1790 printk(KERN_ERR "amd8111e: Cannot enable new PCI device, "
1791 "exiting.\n");
1792 return err;
1795 if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)){
1796 printk(KERN_ERR "amd8111e: Cannot find PCI base address, "
1797 "exiting.\n");
1798 err = -ENODEV;
1799 goto err_disable_pdev;
1802 err = pci_request_regions(pdev, MODULE_NAME);
1803 if(err){
1804 printk(KERN_ERR "amd8111e: Cannot obtain PCI resources, "
1805 "exiting.\n");
1806 goto err_disable_pdev;
1809 pci_set_master(pdev);
1811 /* Find power-management capability. */
1812 if((pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM))==0){
1813 printk(KERN_ERR "amd8111e: No Power Management capability, "
1814 "exiting.\n");
1815 goto err_free_reg;
1818 /* Initialize DMA */
1819 if(!pci_dma_supported(pdev, 0xffffffff)){
1820 printk(KERN_ERR "amd8111e: DMA not supported, "
1821 "exiting.\n");
1822 goto err_free_reg;
1823 } else
1824 pdev->dma_mask = 0xffffffff;
1826 reg_addr = pci_resource_start(pdev, 0);
1827 reg_len = pci_resource_len(pdev, 0);
1829 dev = alloc_etherdev(sizeof(struct amd8111e_priv));
1830 if (!dev) {
1831 printk(KERN_ERR "amd8111e: Etherdev alloc failed, exiting.\n");
1832 err = -ENOMEM;
1833 goto err_free_reg;
1836 SET_MODULE_OWNER(dev);
1838 #if AMD8111E_VLAN_TAG_USED
1839 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX ;
1840 dev->vlan_rx_register =amd8111e_vlan_rx_register;
1841 dev->vlan_rx_kill_vid = amd8111e_vlan_rx_kill_vid;
1842 #endif
1844 lp = dev->priv;
1845 lp->pci_dev = pdev;
1846 lp->amd8111e_net_dev = dev;
1847 lp->pm_cap = pm_cap;
1849 /* setting mii default values */
1850 lp->mii_if.dev = dev;
1851 lp->mii_if.mdio_read = amd8111e_mdio_read;
1852 lp->mii_if.mdio_write = amd8111e_mdio_write;
1853 lp->mii_if.phy_id = PHY_ID;
1855 spin_lock_init(&lp->lock);
1857 lp->mmio = ioremap(reg_addr, reg_len);
1858 if (lp->mmio == 0) {
1859 printk(KERN_ERR "amd8111e: Cannot map device registers, "
1860 "exiting\n");
1861 err = -ENOMEM;
1862 goto err_free_dev;
1865 /* Initializing MAC address */
1866 for(i = 0; i < ETH_ADDR_LEN; i++)
1867 dev->dev_addr[i] =readb(lp->mmio + PADR + i);
1869 /* Setting user defined parameters */
1870 lp->ext_phy_option = speed_duplex[card_idx];
1871 if(coalesce[card_idx])
1872 lp->options |= OPTION_INTR_COAL_ENABLE;
1873 if(dynamic_ipg[card_idx++])
1874 lp->options |= OPTION_DYN_IPG_ENABLE;
1876 /* Initialize driver entry points */
1877 dev->open = amd8111e_open;
1878 dev->hard_start_xmit = amd8111e_start_xmit;
1879 dev->stop = amd8111e_close;
1880 dev->get_stats = amd8111e_get_stats;
1881 dev->set_multicast_list = amd8111e_set_multicast_list;
1882 dev->do_ioctl = amd8111e_ioctl;
1883 dev->change_mtu = amd8111e_change_mtu;
1884 dev->irq =pdev->irq;
1885 dev->tx_timeout = amd8111e_tx_timeout;
1886 dev->watchdog_timeo = AMD8111E_TX_TIMEOUT;
1888 #if AMD8111E_VLAN_TAG_USED
1889 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1890 dev->vlan_rx_register =amd8111e_vlan_rx_register;
1891 dev->vlan_rx_kill_vid = amd8111e_vlan_rx_kill_vid;
1892 #endif
1894 /* Set receive buffer length and set jumbo option*/
1895 amd8111e_set_rx_buff_len(dev);
1897 err = register_netdev(dev);
1898 if (err) {
1899 printk(KERN_ERR "amd8111e: Cannot register net device, "
1900 "exiting.\n");
1901 goto err_iounmap;
1904 pci_set_drvdata(pdev, dev);
1906 /* Initialize software ipg timer */
1907 if(lp->options & OPTION_DYN_IPG_ENABLE){
1908 init_timer(&lp->ipg_data.ipg_timer);
1909 lp->ipg_data.ipg_timer.data = (unsigned long) dev;
1910 lp->ipg_data.ipg_timer.function = (void *)&amd8111e_config_ipg;
1911 lp->ipg_data.ipg_timer.expires = jiffies +
1912 IPG_CONVERGE_TIME * HZ;
1913 lp->ipg_data.ipg = DEFAULT_IPG;
1914 lp->ipg_data.ipg_state = CSTATE;
1917 /* display driver and device information */
1919 chip_version = (readl(lp->mmio + CHIPID) & 0xf0000000)>>28;
1920 printk(KERN_INFO "%s: AMD-8111e Driver Version: %s\n", dev->name,MODULE_VERSION);
1921 printk(KERN_INFO "%s: [ Rev %x ] PCI 10/100BaseT Ethernet ", dev->name, chip_version);
1922 for (i = 0; i < 6; i++)
1923 printk("%2.2x%c",dev->dev_addr[i],i == 5 ? ' ' : ':');
1924 printk( "\n");
1925 return 0;
1926 err_iounmap:
1927 iounmap((void *) lp->mmio);
1929 err_free_dev:
1930 kfree(dev);
1932 err_free_reg:
1933 pci_release_regions(pdev);
1935 err_disable_pdev:
1936 pci_disable_device(pdev);
1937 pci_set_drvdata(pdev, NULL);
1938 return err;
1942 static struct pci_driver amd8111e_driver = {
1943 name: MODULE_NAME,
1944 id_table: amd8111e_pci_tbl,
1945 probe: amd8111e_probe_one,
1946 remove: __devexit_p(amd8111e_remove_one),
1947 suspend: amd8111e_suspend,
1948 resume: amd8111e_resume
1951 static int __init amd8111e_init(void)
1953 return pci_module_init(&amd8111e_driver);
1956 static void __exit amd8111e_cleanup(void)
1958 pci_unregister_driver(&amd8111e_driver);
1961 module_init(amd8111e_init);
1962 module_exit(amd8111e_cleanup);