    Written 1998-2000 by Donald Becker.

    This software may be used and distributed according to the terms of
    the GNU General Public License (GPL), incorporated herein by reference.
    Drivers based on or derived from this code fall under the GPL and must
    retain the authorship, copyright and license notice.  This file is not
    a complete program and may only be used when the entire operating
    system is licensed under the GPL.

    The author may be reached as becker@scyld.com, or C/O
    Scyld Computing Corporation
    410 Severn Ave., Suite 210

    Support information and updates available at
    http://www.scyld.com/network/pci-skeleton.html
    Version 2.51, Nov 17, 2001 (jgarzik):
    - Replace some MII-related magic numbers with constants
#define DRV_NAME        "fealnx"
#define DRV_VERSION     "2.52"
#define DRV_RELDATE     "Sep-11-2006"
static int debug;                       /* 1-> print debug message */
static int max_interrupt_work = 20;

/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). */
static int multicast_filter_limit = 32;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme. */
/* Setting to > 1518 effectively disables this feature. */
static int rx_copybreak;
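/* Illustrative note: rx_copybreak is applied in netdev_rx() below -- frames
 * shorter than this threshold are copied into a freshly allocated skb so the
 * original full-size receive buffer can stay mapped for the NIC, while longer
 * frames are handed up in place.  For example, "rx_copybreak=200" would copy
 * only sub-200-byte frames (value shown purely as an illustration). */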
/* Used to pass the media type, etc. */
/* Both 'options[]' and 'full_duplex[]' should exist for driver */
/* interoperability. */
/* The media type is usually passed in 'options[]'. */
#define MAX_UNITS 8             /* More are supported, limit only on options */
static int options[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
static int full_duplex[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
/* Operational parameters that are set at compile time. */
/* Keep the ring sizes a power of two for compile efficiency. */
/* The compiler will convert <unsigned>'%'<2^N> into a bit mask. */
/* Making the Tx ring too large decreases the effectiveness of channel */
/* bonding and packet priority. */
/* There are no ill effects from too-large receive rings. */
// #define TX_RING_SIZE 16
// #define RX_RING_SIZE 32
#define TX_RING_SIZE 6
#define RX_RING_SIZE 12
#define TX_TOTAL_SIZE   TX_RING_SIZE*sizeof(struct fealnx_desc)
#define RX_TOTAL_SIZE   RX_RING_SIZE*sizeof(struct fealnx_desc)
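/* Worked example of the power-of-two remark above: for an unsigned index,
 * "idx % 16" compiles down to "idx & 15", so wrapping a ring of 2^N entries
 * costs a single AND.  (The sizes 6 and 12 used here do not get that
 * optimisation; this driver instead walks its rings through the
 * next_desc_logical pointers set up in init_ring().) */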
/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT      (2*HZ)

#define PKT_BUF_SZ      1536    /* Size of each temporary Rx buffer. */
/* Include files, designed to support most kernel versions 2.0.0 and later. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/bitops.h>

#include <asm/processor.h>      /* Processor type for cache alignment. */
#include <asm/uaccess.h>
#include <asm/byteorder.h>
/* These identify the driver base version and may not be removed. */
static const char version[] __devinitconst =
    KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE "\n";
/* This driver was written to use PCI memory space, however some x86 systems
   work only with I/O space accesses. */

/* Kernel compatibility defines, some common to David Hinds' PCMCIA package. */
/* This is only in the support-all-kernels source code. */

#define RUN_AT(x) (jiffies + (x))
MODULE_AUTHOR("Myson or whoever");
MODULE_DESCRIPTION("Myson MTD-8xx 100/10M Ethernet PCI Adapter Driver");
MODULE_LICENSE("GPL");
module_param(max_interrupt_work, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(multicast_filter_limit, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
MODULE_PARM_DESC(max_interrupt_work, "fealnx maximum events handled per interrupt");
MODULE_PARM_DESC(debug, "fealnx enable debugging (0-1)");
MODULE_PARM_DESC(rx_copybreak, "fealnx copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(multicast_filter_limit, "fealnx maximum number of filtered multicast addresses");
MODULE_PARM_DESC(options, "fealnx: Bits 0-3: media type, bit 17: full duplex");
MODULE_PARM_DESC(full_duplex, "fealnx full duplex setting(s) (1)");
    MIN_REGION_SIZE = 136,

/* A chip capabilities table, matching the entries in pci_tbl[] above. */
enum chip_capability_flags {

/* for different PHY */
enum phy_type_flags {

static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
    { "100/10M Ethernet PCI Adapter",       HAS_MII_XCVR },
    { "100/10M Ethernet PCI Adapter",       HAS_CHIP_XCVR },
    { "1000/100/10M Ethernet PCI Adapter",  HAS_MII_XCVR },
/* Offsets to the Command and Status Registers. */
enum fealnx_offsets {
    PAR0 = 0x0,             /* physical address 0-3 */
    PAR1 = 0x04,            /* physical address 4-5 */
    MAR0 = 0x08,            /* multicast address 0-3 */
    MAR1 = 0x0C,            /* multicast address 4-7 */
    FAR0 = 0x10,            /* flow-control address 0-3 */
    FAR1 = 0x14,            /* flow-control address 4-5 */
    TCRRCR = 0x18,          /* receive & transmit configuration */
    BCR = 0x1C,             /* bus command */
    TXPDR = 0x20,           /* transmit polling demand */
    RXPDR = 0x24,           /* receive polling demand */
    RXCWP = 0x28,           /* receive current word pointer */
    TXLBA = 0x2C,           /* transmit list base address */
    RXLBA = 0x30,           /* receive list base address */
    ISR = 0x34,             /* interrupt status */
    IMR = 0x38,             /* interrupt mask */
    FTH = 0x3C,             /* flow control high/low threshold */
    MANAGEMENT = 0x40,      /* bootrom/eeprom and mii management */
    TALLY = 0x44,           /* tally counters for crc and mpa */
    TSR = 0x48,             /* tally counter for transmit status */
    BMCRSR = 0x4c,          /* basic mode control and status */
    PHYIDENTIFIER = 0x50,   /* phy identifier */
    ANARANLPAR = 0x54,      /* auto-negotiation advertisement and link
                               partner ability */
    ANEROCR = 0x58,         /* auto-negotiation expansion and pci conf. */
    BPREMRPSR = 0x5c,       /* bypass & receive error mask and phy status */
/* Bits in the interrupt status/enable registers. */
/* The bits in the Intr Status/Enable registers, mostly interrupt sources. */
enum intr_status_bits {
    RFCON = 0x00020000,         /* receive flow control xon packet */
    RFCOFF = 0x00010000,        /* receive flow control xoff packet */
    LSCStatus = 0x00008000,     /* link status change */
    ANCStatus = 0x00004000,     /* autonegotiation completed */
    FBE = 0x00002000,           /* fatal bus error */
    FBEMask = 0x00001800,       /* mask bit12-11 */
    ParityErr = 0x00000000,     /* parity error */
    TargetErr = 0x00001000,     /* target abort */
    MasterErr = 0x00000800,     /* master error */
    TUNF = 0x00000400,          /* transmit underflow */
    ROVF = 0x00000200,          /* receive overflow */
    ETI = 0x00000100,           /* transmit early int */
    ERI = 0x00000080,           /* receive early int */
    CNTOVF = 0x00000040,        /* counter overflow */
    RBU = 0x00000020,           /* receive buffer unavailable */
    TBU = 0x00000010,           /* transmit buffer unavailable */
    TI = 0x00000008,            /* transmit interrupt */
    RI = 0x00000004,            /* receive interrupt */
    RxErr = 0x00000002,         /* receive error */
/* Bits in the NetworkConfig register, W for writing, R for reading */
/* If you have docs and know bit names, please fix 'em */
    CR_W_ENH = 0x02000000,      /* enhanced mode (name?) */
    CR_W_FD = 0x00100000,       /* full duplex */
    CR_W_PS10 = 0x00080000,     /* 10 mbit */
    CR_W_TXEN = 0x00040000,     /* tx enable (name?) */
    CR_W_PS1000 = 0x00010000,   /* 1000 mbit */
 /* CR_W_RXBURSTMASK = 0x00000e00,  I'm unsure about this */
    CR_W_RXMODEMASK = 0x000000e0,
    CR_W_PROM = 0x00000080,     /* promiscuous mode */
    CR_W_AB = 0x00000040,       /* accept broadcast */
    CR_W_AM = 0x00000020,       /* accept multicast */
    CR_W_ARP = 0x00000008,      /* receive runt pkt */
    CR_W_ALP = 0x00000004,      /* receive long pkt */
    CR_W_SEP = 0x00000002,      /* receive error pkt */
    CR_W_RXEN = 0x00000001,     /* rx enable (unicast?) (name?) */

    CR_R_TXSTOP = 0x04000000,   /* tx stopped (name?) */
    CR_R_FD = 0x00100000,       /* full duplex detected */
    CR_R_PS10 = 0x00080000,     /* 10 mbit detected */
    CR_R_RXSTOP = 0x00008000,   /* rx stopped (name?) */
/* The Tulip Rx and Tx buffer descriptors. */
    struct fealnx_desc *next_desc_logical;
    struct sk_buff *skbuff;
/* Bits in network_desc.status */
enum rx_desc_status_bits {
    RXOWN = 0x80000000,         /* own bit */
    FLNGMASK = 0x0fff0000,      /* frame length */
    MARSTATUS = 0x00004000,     /* multicast address received */
    BARSTATUS = 0x00002000,     /* broadcast address received */
    PHYSTATUS = 0x00001000,     /* physical address received */
    RXFSD = 0x00000800,         /* first descriptor */
    RXLSD = 0x00000400,         /* last descriptor */
    ErrorSummary = 0x80,        /* error summary */
    RUNT = 0x40,                /* runt packet received */
    LONG = 0x20,                /* long packet received */
    FAE = 0x10,                 /* frame align error */
    CRC = 0x08,                 /* crc error */
    RXER = 0x04,                /* receive error */

enum rx_desc_control_bits {
    RXIC = 0x00800000,          /* interrupt control */
enum tx_desc_status_bits {
    TXOWN = 0x80000000,         /* own bit */
    JABTO = 0x00004000,         /* jabber timeout */
    CSL = 0x00002000,           /* carrier sense lost */
    LC = 0x00001000,            /* late collision */
    EC = 0x00000800,            /* excessive collision */
    UDF = 0x00000400,           /* fifo underflow */
    DFR = 0x00000200,           /* deferred */
    HF = 0x00000100,            /* heartbeat fail */
    NCRMask = 0x000000ff,       /* collision retry count */
enum tx_desc_control_bits {
    TXIC = 0x80000000,          /* interrupt control */
    ETIControl = 0x40000000,    /* early transmit interrupt */
    TXLD = 0x20000000,          /* last descriptor */
    TXFD = 0x10000000,          /* first descriptor */
    CRCEnable = 0x08000000,     /* crc control */
    PADEnable = 0x04000000,     /* padding control */
    RetryTxLC = 0x02000000,     /* retry late collision */
    PKTSMask = 0x3ff800,        /* packet size bit21-11 */
    TBSMask = 0x000007ff,       /* transmit buffer bit 10-0 */
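/* Illustrative assembly of a Tx control word from the bits above, mirroring
 * what start_tx() does in its default single-buffer path for a frame of
 * length len:
 *     control  = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
 *     control |= (len << PKTSShift);   -- packet size, bits 21-11
 *     control |= (len << TBSShift);    -- buffer size, bits 10-0
 * (PKTSShift/TBSShift are the shift counts paired with PKTSMask/TBSMask.) */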
/* BootROM/EEPROM/MII Management Register */
#define MASK_MIIR_MII_READ      0x00000000
#define MASK_MIIR_MII_WRITE     0x00000008
#define MASK_MIIR_MII_MDO       0x00000004
#define MASK_MIIR_MII_MDI       0x00000002
#define MASK_MIIR_MII_MDC       0x00000001

/* ST+OP+PHYAD+REGAD+TA */
#define OP_READ         0x6000  /* ST:01+OP:10+PHYAD+REGAD+TA:Z0 */
#define OP_WRITE        0x5002  /* ST:01+OP:01+PHYAD+REGAD+TA:10 */
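/* Worked decomposition of the command words above (the management frame is
 * ST + OP + PHYAD + REGAD + TA, clocked out high bit first):
 *     OP_READ  = 0x6000 = 01 10 00000 00000 00  -> ST=01, OP=10 (read)
 *     OP_WRITE = 0x5002 = 01 01 00000 00000 10  -> ST=01, OP=01 (write), TA=10
 * m80x_send_cmd_to_phy() ORs in (phyad << 7) and (regad << 2) to fill the
 * PHYAD and REGAD fields before bit-banging the frame out through MDO/MDC. */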
/* ------------------------------------------------------------------------- */
/*      Constants for Myson PHY                                               */
/* ------------------------------------------------------------------------- */
#define MysonPHYID      0xd0000302
/* 89-7-27 add, (begin) */
#define MysonPHYID0     0x0302
#define StatusRegister  18
#define SPEED100        0x0400  // bit10
#define FULLMODE        0x0800  // bit11
/* 89-7-27 add, (end) */

/* ------------------------------------------------------------------------- */
/*      Constants for Seeq 80225 PHY                                          */
/* ------------------------------------------------------------------------- */
#define SeeqPHYID0      0x0016

#define MIIRegister18   18
#define SPD_DET_100     0x80
#define DPLX_DET_FULL   0x40

/* ------------------------------------------------------------------------- */
/*      Constants for Ahdoc 101 PHY                                           */
/* ------------------------------------------------------------------------- */
#define AhdocPHYID0     0x0022

#define DiagnosticReg   18
#define DPLX_FULL       0x0800
#define Speed_100       0x0400

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
#define MarvellPHYID0           0x0141
#define LevelOnePHYID0          0x0013

#define MII1000BaseTControlReg  9
#define MII1000BaseTStatusReg   10
#define SpecificReg             17

/* for 1000BaseT Control Register */
#define PHYAbletoPerform1000FullDuplex  0x0200
#define PHYAbletoPerform1000HalfDuplex  0x0100
#define PHY1000AbilityMask              0x300

// for phy specific status register, marvell phy.
#define SpeedMask       0x0c000
#define Speed_1000M     0x08000
#define Speed_100M      0x4000

#define Full_Duplex     0x2000

// 89/12/29 add, for phy specific status register, levelone phy, (begin)
#define LXT1000_100M    0x08000
#define LXT1000_1000M   0x0c000
#define LXT1000_Full    0x200
// 89/12/29 add, for phy specific status register, levelone phy, (end)

/* for 3-in-1 case, BMCRSR register */
#define LinkIsUp2       0x00040000

#define LinkIsUp        0x0004
struct netdev_private {
    /* Descriptor rings first for alignment. */
    struct fealnx_desc *rx_ring;
    struct fealnx_desc *tx_ring;

    dma_addr_t rx_ring_dma;
    dma_addr_t tx_ring_dma;

    /* Media monitoring timer. */
    struct timer_list timer;

    struct timer_list reset_timer;
    int reset_timer_armed;
    unsigned long crvalue_sv;
    unsigned long imrvalue_sv;

    /* Frequently used values: keep some adjacent for cache effect. */
    struct pci_dev *pci_dev;
    unsigned long crvalue;
    unsigned long bcrvalue;
    unsigned long imrvalue;
    struct fealnx_desc *cur_rx;
    struct fealnx_desc *lack_rxbuf;
    struct fealnx_desc *cur_tx;
    struct fealnx_desc *cur_tx_copy;
    unsigned int rx_buf_sz;             /* Based on MTU+slack. */

    /* These values keep track of the transceiver/media in use. */
    unsigned int line_speed;
    unsigned int duplexmode;
    unsigned int default_port:4;        /* Last dev->if_port value. */
    unsigned int PHYType;

    /* MII transceiver section. */
    int mii_cnt;                /* MII device addresses. */
    unsigned char phys[2];      /* MII device addresses. */
    struct mii_if_info mii;
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int netdev_open(struct net_device *dev);
static void getlinktype(struct net_device *dev);
static void getlinkstatus(struct net_device *dev);
static void netdev_timer(unsigned long data);
static void reset_timer(unsigned long data);
static void fealnx_tx_timeout(struct net_device *dev);
static void init_ring(struct net_device *dev);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
static int netdev_rx(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static void __set_rx_mode(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int netdev_close(struct net_device *dev);
static void reset_rx_descriptors(struct net_device *dev);
static void reset_tx_descriptors(struct net_device *dev);
static void stop_nic_rx(void __iomem *ioaddr, long crvalue)
    iowrite32(crvalue & ~(CR_W_RXEN), ioaddr + TCRRCR);
        if ((ioread32(ioaddr + TCRRCR) & CR_R_RXSTOP) == CR_R_RXSTOP)
static void stop_nic_rxtx(void __iomem *ioaddr, long crvalue)
    iowrite32(crvalue & ~(CR_W_RXEN + CR_W_TXEN), ioaddr + TCRRCR);
        if ((ioread32(ioaddr + TCRRCR) & (CR_R_RXSTOP + CR_R_TXSTOP))
            == (CR_R_RXSTOP + CR_R_TXSTOP))
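/* Both stop helpers above follow the same pattern: write TCRRCR with the
 * relevant enable bit(s) cleared, then re-read TCRRCR until the matching
 * CR_R_*STOP status bit(s) read back set, i.e. until the chip confirms the
 * receive/transmit engines have actually halted. */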
static const struct net_device_ops netdev_ops = {
    .ndo_open               = netdev_open,
    .ndo_stop               = netdev_close,
    .ndo_start_xmit         = start_tx,
    .ndo_get_stats          = get_stats,
    .ndo_set_multicast_list = set_rx_mode,
    .ndo_do_ioctl           = mii_ioctl,
    .ndo_tx_timeout         = fealnx_tx_timeout,
    .ndo_change_mtu         = eth_change_mtu,
    .ndo_set_mac_address    = eth_mac_addr,
    .ndo_validate_addr      = eth_validate_addr,
static int __devinit fealnx_init_one(struct pci_dev *pdev,
                                     const struct pci_device_id *ent)
    struct netdev_private *np;
    int i, option, err, irq;
    static int card_idx = -1;
    void __iomem *ioaddr;
    unsigned int chip_id = ent->driver_data;
    struct net_device *dev;
    /* when built into the kernel, we only print version if device is found */
    static int printed_version;
    if (!printed_version++)

    sprintf(boardname, "fealnx%d", card_idx);

    option = card_idx < MAX_UNITS ? options[card_idx] : 0;

    i = pci_enable_device(pdev);
    pci_set_master(pdev);

    len = pci_resource_len(pdev, bar);
    if (len < MIN_REGION_SIZE) {
           "region size %ld too small, aborting\n", len);

    i = pci_request_regions(pdev, boardname);

    ioaddr = pci_iomap(pdev, bar, len);

    dev = alloc_etherdev(sizeof(struct netdev_private));
    SET_NETDEV_DEV(dev, &pdev->dev);

    /* read ethernet id */
    for (i = 0; i < 6; ++i)
        dev->dev_addr[i] = ioread8(ioaddr + PAR0 + i);

    /* Reset the chip to erase previous misconfiguration. */
    iowrite32(0x00000001, ioaddr + BCR);

    dev->base_addr = (unsigned long)ioaddr;

    /* Make certain the descriptor lists are aligned. */
    np = netdev_priv(dev);
    spin_lock_init(&np->lock);
    np->flags = skel_netdrv_tbl[chip_id].flags;
    pci_set_drvdata(pdev, dev);
    np->mii.mdio_read = mdio_read;
    np->mii.mdio_write = mdio_write;
    np->mii.phy_id_mask = 0x1f;
    np->mii.reg_num_mask = 0x1f;

    ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
        goto err_out_free_dev;
    np->rx_ring = (struct fealnx_desc *)ring_space;
    np->rx_ring_dma = ring_dma;

    ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
        goto err_out_free_rx;
    np->tx_ring = (struct fealnx_desc *)ring_space;
    np->tx_ring_dma = ring_dma;
    /* find the connected MII xcvrs */
    if (np->flags == HAS_MII_XCVR) {
        int phy, phy_idx = 0;

        for (phy = 1; phy < 32 && phy_idx < ARRAY_SIZE(np->phys);
            int mii_status = mdio_read(dev, phy, 1);

            if (mii_status != 0xffff && mii_status != 0x0000) {
                np->phys[phy_idx++] = phy;
                       "MII PHY found at address %d, status "
                       "0x%4.4x.\n", phy, mii_status);

        data = mdio_read(dev, np->phys[0], 2);
        if (data == SeeqPHYID0)
            np->PHYType = SeeqPHY;
        else if (data == AhdocPHYID0)
            np->PHYType = AhdocPHY;
        else if (data == MarvellPHYID0)
            np->PHYType = MarvellPHY;
        else if (data == MysonPHYID0)
            np->PHYType = Myson981;
        else if (data == LevelOnePHYID0)
            np->PHYType = LevelOnePHY;
        else
            np->PHYType = OtherPHY;

        np->mii_cnt = phy_idx;
               "MII PHY not found -- this device may "
               "not operate correctly.\n");
    /* 89/6/23 add, (begin) */
        if (ioread32(ioaddr + PHYIDENTIFIER) == MysonPHYID)
            np->PHYType = MysonPHY;
        else
            np->PHYType = OtherPHY;

    np->mii.phy_id = np->phys[0];

        option = dev->mem_start;
        /* The lower four bits are the media type. */
            np->mii.full_duplex = 1;
            np->default_port = option & 15;

    if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
        np->mii.full_duplex = full_duplex[card_idx];

    if (np->mii.full_duplex) {
        dev_info(&pdev->dev, "Media type forced to Full Duplex.\n");
        /* 89/6/13 add, (begin) */
        // if (np->PHYType==MarvellPHY)
        if ((np->PHYType == MarvellPHY) || (np->PHYType == LevelOnePHY)) {
            data = mdio_read(dev, np->phys[0], 9);
            data = (data & 0xfcff) | 0x0200;
            mdio_write(dev, np->phys[0], 9, data);
        /* 89/6/13 add, (end) */
        if (np->flags == HAS_MII_XCVR)
            mdio_write(dev, np->phys[0], MII_ADVERTISE, ADVERTISE_FULL);
        else
            iowrite32(ADVERTISE_FULL, ioaddr + ANARANLPAR);
        np->mii.force_media = 1;
    dev->netdev_ops = &netdev_ops;
    dev->ethtool_ops = &netdev_ethtool_ops;
    dev->watchdog_timeo = TX_TIMEOUT;

    err = register_netdev(dev);
        goto err_out_free_tx;

    printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
           dev->name, skel_netdrv_tbl[chip_id].chip_name, ioaddr,

    pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
    pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
    pci_iounmap(pdev, ioaddr);
    pci_release_regions(pdev);
static void __devexit fealnx_remove_one(struct pci_dev *pdev)
    struct net_device *dev = pci_get_drvdata(pdev);

        struct netdev_private *np = netdev_priv(dev);

        pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring,
        pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring,
        unregister_netdev(dev);
        pci_iounmap(pdev, np->mem);
        pci_release_regions(pdev);
        pci_set_drvdata(pdev, NULL);
        printk(KERN_ERR "fealnx: remove for unknown device\n");
static ulong m80x_send_cmd_to_phy(void __iomem *miiport, int opcode, int phyad, int regad)
    unsigned int mask, data;

    /* enable MII output */
    miir = (ulong) ioread32(miiport);
    miir |= MASK_MIIR_MII_WRITE + MASK_MIIR_MII_MDO;

    /* send 32 1's preamble */
    for (i = 0; i < 32; i++) {
        /* low MDC; MDO is already high (miir) */
        miir &= ~MASK_MIIR_MII_MDC;
        iowrite32(miir, miiport);

        miir |= MASK_MIIR_MII_MDC;
        iowrite32(miir, miiport);

    /* calculate ST+OP+PHYAD+REGAD+TA */
    data = opcode | (phyad << 7) | (regad << 2);

        /* low MDC, prepare MDO */
        miir &= ~(MASK_MIIR_MII_MDC + MASK_MIIR_MII_MDO);
            miir |= MASK_MIIR_MII_MDO;
        iowrite32(miir, miiport);

        miir |= MASK_MIIR_MII_MDC;
        iowrite32(miir, miiport);

        if (mask == 0x2 && opcode == OP_READ)
            miir &= ~MASK_MIIR_MII_WRITE;
static int mdio_read(struct net_device *dev, int phyad, int regad)
    struct netdev_private *np = netdev_priv(dev);
    void __iomem *miiport = np->mem + MANAGEMENT;
    unsigned int mask, data;

    miir = m80x_send_cmd_to_phy(miiport, OP_READ, phyad, regad);

        miir &= ~MASK_MIIR_MII_MDC;
        iowrite32(miir, miiport);

        miir = ioread32(miiport);
        if (miir & MASK_MIIR_MII_MDI)

        /* high MDC, and wait */
        miir |= MASK_MIIR_MII_MDC;
        iowrite32(miir, miiport);

    miir &= ~MASK_MIIR_MII_MDC;
    iowrite32(miir, miiport);

    return data & 0xffff;
static void mdio_write(struct net_device *dev, int phyad, int regad, int data)
    struct netdev_private *np = netdev_priv(dev);
    void __iomem *miiport = np->mem + MANAGEMENT;

    miir = m80x_send_cmd_to_phy(miiport, OP_WRITE, phyad, regad);

        /* low MDC, prepare MDO */
        miir &= ~(MASK_MIIR_MII_MDC + MASK_MIIR_MII_MDO);
            miir |= MASK_MIIR_MII_MDO;
        iowrite32(miir, miiport);

        miir |= MASK_MIIR_MII_MDC;
        iowrite32(miir, miiport);

    miir &= ~MASK_MIIR_MII_MDC;
    iowrite32(miir, miiport);
static int netdev_open(struct net_device *dev)
    struct netdev_private *np = netdev_priv(dev);
    void __iomem *ioaddr = np->mem;

    iowrite32(0x00000001, ioaddr + BCR);        /* Reset */

    if (request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev))

    for (i = 0; i < 3; i++)
        iowrite16(((unsigned short*)dev->dev_addr)[i],
                  ioaddr + PAR0 + i*2);

    iowrite32(np->rx_ring_dma, ioaddr + RXLBA);
    iowrite32(np->tx_ring_dma, ioaddr + TXLBA);

    /* Initialize other registers. */
    np->bcrvalue = 0x10;                /* little-endian, 8 burst length */
    np->bcrvalue |= 0x04;               /* big-endian */

#if defined(__i386__) && !defined(MODULE)
    if (boot_cpu_data.x86 <= 4)
        np->crvalue = 0xe00;            /* rx 128 burst length */

    // np->imrvalue=FBE|TUNF|CNTOVF|RBU|TI|RI;
    np->imrvalue = TUNF | CNTOVF | RBU | TI | RI;
    if (np->pci_dev->device == 0x891) {
        np->bcrvalue |= 0x200;          /* set PROG bit */
        np->crvalue |= CR_W_ENH;        /* set enhanced bit */

    iowrite32(np->bcrvalue, ioaddr + BCR);

    if (dev->if_port == 0)
        dev->if_port = np->default_port;

    iowrite32(0, ioaddr + RXPDR);

    // np->crvalue = 0x00e40001; /* tx store and forward, tx/rx enable */
    np->crvalue |= 0x00e40001;          /* tx store and forward, tx/rx enable */
    np->mii.full_duplex = np->mii.force_media;

    netif_start_queue(dev);

    /* Clear and Enable interrupts by setting the interrupt mask. */
    iowrite32(FBE | TUNF | CNTOVF | RBU | TI | RI, ioaddr + ISR);
    iowrite32(np->imrvalue, ioaddr + IMR);

        printk(KERN_DEBUG "%s: Done netdev_open().\n", dev->name);

    /* Set the timer to check for link beat. */
    init_timer(&np->timer);
    np->timer.expires = RUN_AT(3 * HZ);
    np->timer.data = (unsigned long) dev;
    np->timer.function = &netdev_timer;

    add_timer(&np->timer);

    init_timer(&np->reset_timer);
    np->reset_timer.data = (unsigned long) dev;
    np->reset_timer.function = &reset_timer;
    np->reset_timer_armed = 0;
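/* Note on the two timers initialised above: np->timer runs netdev_timer()
 * periodically to re-check the media, while np->reset_timer is left idle
 * here (reset_timer_armed = 0) and is only armed from intr_handler() when it
 * gives up after max_interrupt_work events; reset_timer() then restores the
 * saved crvalue/imrvalue and restarts the transmit queue. */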
static void getlinkstatus(struct net_device *dev)
/* function: Routine will read MII Status Register to get link status.       */
/* input   : dev... pointer to the adapter block.                            */
    struct netdev_private *np = netdev_priv(dev);
    unsigned int i, DelayTime = 0x1000;

    if (np->PHYType == MysonPHY) {
        for (i = 0; i < DelayTime; ++i) {
            if (ioread32(np->mem + BMCRSR) & LinkIsUp2) {

        for (i = 0; i < DelayTime; ++i) {
            if (mdio_read(dev, np->phys[0], MII_BMSR) & BMSR_LSTATUS) {
static void getlinktype(struct net_device *dev)
    struct netdev_private *np = netdev_priv(dev);

    if (np->PHYType == MysonPHY) {      /* 3-in-1 case */
        if (ioread32(np->mem + TCRRCR) & CR_R_FD)
            np->duplexmode = 2;         /* full duplex */
        else
            np->duplexmode = 1;         /* half duplex */
        if (ioread32(np->mem + TCRRCR) & CR_R_PS10)
            np->line_speed = 1;         /* 10M */
        else
            np->line_speed = 2;         /* 100M */

        if (np->PHYType == SeeqPHY) {   /* this PHY is SEEQ 80225 */
            data = mdio_read(dev, np->phys[0], MIIRegister18);
            if (data & SPD_DET_100)
                np->line_speed = 2;     /* 100M */
            else
                np->line_speed = 1;     /* 10M */
            if (data & DPLX_DET_FULL)
                np->duplexmode = 2;     /* full duplex mode */
            else
                np->duplexmode = 1;     /* half duplex mode */
        } else if (np->PHYType == AhdocPHY) {
            data = mdio_read(dev, np->phys[0], DiagnosticReg);
            if (data & Speed_100)
                np->line_speed = 2;     /* 100M */
            else
                np->line_speed = 1;     /* 10M */
            if (data & DPLX_FULL)
                np->duplexmode = 2;     /* full duplex mode */
            else
                np->duplexmode = 1;     /* half duplex mode */
        }
        /* 89/6/13 add, (begin) */
        else if (np->PHYType == MarvellPHY) {
            data = mdio_read(dev, np->phys[0], SpecificReg);
            if (data & Full_Duplex)
                np->duplexmode = 2;     /* full duplex mode */
            else
                np->duplexmode = 1;     /* half duplex mode */

            if (data == Speed_1000M)
                np->line_speed = 3;     /* 1000M */
            else if (data == Speed_100M)
                np->line_speed = 2;     /* 100M */
            else
                np->line_speed = 1;     /* 10M */
        }
        /* 89/6/13 add, (end) */
        /* 89/7/27 add, (begin) */
        else if (np->PHYType == Myson981) {
            data = mdio_read(dev, np->phys[0], StatusRegister);

            if (data & SPEED100)
            if (data & FULLMODE)
        }
        /* 89/7/27 add, (end) */
        else if (np->PHYType == LevelOnePHY) {
            data = mdio_read(dev, np->phys[0], SpecificReg);
            if (data & LXT1000_Full)
                np->duplexmode = 2;     /* full duplex mode */
            else
                np->duplexmode = 1;     /* half duplex mode */
            if (data == LXT1000_1000M)
                np->line_speed = 3;     /* 1000M */
            else if (data == LXT1000_100M)
                np->line_speed = 2;     /* 100M */
            else
                np->line_speed = 1;     /* 10M */

    np->crvalue &= (~CR_W_PS10) & (~CR_W_FD) & (~CR_W_PS1000);
    if (np->line_speed == 1)
        np->crvalue |= CR_W_PS10;
    else if (np->line_speed == 3)
        np->crvalue |= CR_W_PS1000;
    if (np->duplexmode == 2)
        np->crvalue |= CR_W_FD;
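/* Summary of the encoding used just above: line_speed 1/2/3 stands for
 * 10/100/1000 Mbit and duplexmode 1/2 for half/full duplex; getlinktype()
 * translates whichever PHY-specific status it read into these values and
 * then folds them into crvalue via CR_W_PS10, CR_W_PS1000 and CR_W_FD. */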
/* Take lock before calling this */
static void allocate_rx_buffers(struct net_device *dev)
    struct netdev_private *np = netdev_priv(dev);

    /* allocate skb for rx buffers */
    while (np->really_rx_count != RX_RING_SIZE) {
        struct sk_buff *skb;

        skb = dev_alloc_skb(np->rx_buf_sz);
            break;              /* Better luck next round. */

        while (np->lack_rxbuf->skbuff)
            np->lack_rxbuf = np->lack_rxbuf->next_desc_logical;

        skb->dev = dev;         /* Mark as being used by this device. */
        np->lack_rxbuf->skbuff = skb;
        np->lack_rxbuf->buffer = pci_map_single(np->pci_dev, skb->data,
                                                np->rx_buf_sz, PCI_DMA_FROMDEVICE);
        np->lack_rxbuf->status = RXOWN;
        ++np->really_rx_count;
static void netdev_timer(unsigned long data)
    struct net_device *dev = (struct net_device *) data;
    struct netdev_private *np = netdev_priv(dev);
    void __iomem *ioaddr = np->mem;
    int old_crvalue = np->crvalue;
    unsigned int old_linkok = np->linkok;
    unsigned long flags;

        printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x "
               "config %8.8x.\n", dev->name, ioread32(ioaddr + ISR),
               ioread32(ioaddr + TCRRCR));

    spin_lock_irqsave(&np->lock, flags);

    if (np->flags == HAS_MII_XCVR) {
        if ((old_linkok == 0) && (np->linkok == 1)) {   /* we need to detect the media type again */
            if (np->crvalue != old_crvalue) {
                stop_nic_rxtx(ioaddr, np->crvalue);
                iowrite32(np->crvalue, ioaddr + TCRRCR);

    allocate_rx_buffers(dev);

    spin_unlock_irqrestore(&np->lock, flags);

    np->timer.expires = RUN_AT(10 * HZ);
    add_timer(&np->timer);
/* Take lock before calling */
/* Reset chip and disable rx, tx and interrupts */
static void reset_and_disable_rxtx(struct net_device *dev)
    struct netdev_private *np = netdev_priv(dev);
    void __iomem *ioaddr = np->mem;

    /* Reset the chip's Tx and Rx processes. */
    stop_nic_rxtx(ioaddr, 0);

    /* Disable interrupts by clearing the interrupt mask. */
    iowrite32(0, ioaddr + IMR);

    /* Reset the chip to erase previous misconfiguration. */
    iowrite32(0x00000001, ioaddr + BCR);

    /* Ueimor: wait for 50 PCI cycles (and flush posted writes btw).
       We surely wait too long (address+data phase). Who cares? */
        ioread32(ioaddr + BCR);
/* Take lock before calling */
/* Restore chip after reset */
static void enable_rxtx(struct net_device *dev)
    struct netdev_private *np = netdev_priv(dev);
    void __iomem *ioaddr = np->mem;

    reset_rx_descriptors(dev);

    iowrite32(np->tx_ring_dma + ((char*)np->cur_tx - (char*)np->tx_ring),
    iowrite32(np->rx_ring_dma + ((char*)np->cur_rx - (char*)np->rx_ring),

    iowrite32(np->bcrvalue, ioaddr + BCR);

    iowrite32(0, ioaddr + RXPDR);
    __set_rx_mode(dev);         /* changes np->crvalue, writes it into TCRRCR */

    /* Clear and Enable interrupts by setting the interrupt mask. */
    iowrite32(FBE | TUNF | CNTOVF | RBU | TI | RI, ioaddr + ISR);
    iowrite32(np->imrvalue, ioaddr + IMR);

    iowrite32(0, ioaddr + TXPDR);
static void reset_timer(unsigned long data)
    struct net_device *dev = (struct net_device *) data;
    struct netdev_private *np = netdev_priv(dev);
    unsigned long flags;

    printk(KERN_WARNING "%s: resetting tx and rx machinery\n", dev->name);

    spin_lock_irqsave(&np->lock, flags);
    np->crvalue = np->crvalue_sv;
    np->imrvalue = np->imrvalue_sv;

    reset_and_disable_rxtx(dev);
    /* works for me without this:
    reset_tx_descriptors(dev); */

    netif_start_queue(dev);

    np->reset_timer_armed = 0;

    spin_unlock_irqrestore(&np->lock, flags);
static void fealnx_tx_timeout(struct net_device *dev)
    struct netdev_private *np = netdev_priv(dev);
    void __iomem *ioaddr = np->mem;
    unsigned long flags;

           "%s: Transmit timed out, status %8.8x, resetting...\n",
           dev->name, ioread32(ioaddr + ISR));

        printk(KERN_DEBUG "  Rx ring %p: ", np->rx_ring);
        for (i = 0; i < RX_RING_SIZE; i++)
            printk(KERN_CONT " %8.8x",
                   (unsigned int) np->rx_ring[i].status);
        printk(KERN_CONT "\n");
        printk(KERN_DEBUG "  Tx ring %p: ", np->tx_ring);
        for (i = 0; i < TX_RING_SIZE; i++)
            printk(KERN_CONT " %4.4x", np->tx_ring[i].status);
        printk(KERN_CONT "\n");

    spin_lock_irqsave(&np->lock, flags);

    reset_and_disable_rxtx(dev);
    reset_tx_descriptors(dev);

    spin_unlock_irqrestore(&np->lock, flags);

    dev->trans_start = jiffies;         /* prevent tx timeout */
    dev->stats.tx_errors++;
    netif_wake_queue(dev);              /* or .._start_.. ?? */
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
    struct netdev_private *np = netdev_priv(dev);

    /* initialize rx variables */
    np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
    np->cur_rx = &np->rx_ring[0];
    np->lack_rxbuf = np->rx_ring;
    np->really_rx_count = 0;

    /* initial rx descriptors. */
    for (i = 0; i < RX_RING_SIZE; i++) {
        np->rx_ring[i].status = 0;
        np->rx_ring[i].control = np->rx_buf_sz << RBSShift;
        np->rx_ring[i].next_desc = np->rx_ring_dma +
            (i + 1)*sizeof(struct fealnx_desc);
        np->rx_ring[i].next_desc_logical = &np->rx_ring[i + 1];
        np->rx_ring[i].skbuff = NULL;

    /* for the last rx descriptor */
    np->rx_ring[i - 1].next_desc = np->rx_ring_dma;
    np->rx_ring[i - 1].next_desc_logical = np->rx_ring;

    /* allocate skb for rx buffers */
    for (i = 0; i < RX_RING_SIZE; i++) {
        struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);

            np->lack_rxbuf = &np->rx_ring[i];

        ++np->really_rx_count;
        np->rx_ring[i].skbuff = skb;
        skb->dev = dev;         /* Mark as being used by this device. */
        np->rx_ring[i].buffer = pci_map_single(np->pci_dev, skb->data,
                                               np->rx_buf_sz, PCI_DMA_FROMDEVICE);
        np->rx_ring[i].status = RXOWN;
        np->rx_ring[i].control |= RXIC;

    /* initialize tx variables */
    np->cur_tx = &np->tx_ring[0];
    np->cur_tx_copy = &np->tx_ring[0];
    np->really_tx_count = 0;
    np->free_tx_count = TX_RING_SIZE;

    for (i = 0; i < TX_RING_SIZE; i++) {
        np->tx_ring[i].status = 0;
        np->tx_ring[i].next_desc = np->tx_ring_dma +
            (i + 1)*sizeof(struct fealnx_desc);
        np->tx_ring[i].next_desc_logical = &np->tx_ring[i + 1];
        np->tx_ring[i].skbuff = NULL;

    /* for the last tx descriptor */
    np->tx_ring[i - 1].next_desc = np->tx_ring_dma;
    np->tx_ring[i - 1].next_desc_logical = &np->tx_ring[0];
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
    struct netdev_private *np = netdev_priv(dev);
    unsigned long flags;

    spin_lock_irqsave(&np->lock, flags);

    np->cur_tx_copy->skbuff = skb;

#if defined(one_buffer)
    np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data,
                                             skb->len, PCI_DMA_TODEVICE);
    np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
    np->cur_tx_copy->control |= (skb->len << PKTSShift);        /* pkt size */
    np->cur_tx_copy->control |= (skb->len << TBSShift);         /* buffer size */

    if (np->pci_dev->device == 0x891)
        np->cur_tx_copy->control |= ETIControl | RetryTxLC;
    np->cur_tx_copy->status = TXOWN;
    np->cur_tx_copy = np->cur_tx_copy->next_desc_logical;
    --np->free_tx_count;
#elif defined(two_buffer)
    if (skb->len > BPT) {
        struct fealnx_desc *next;

        /* for the first descriptor */
        np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data,
                                                 BPT, PCI_DMA_TODEVICE);
        np->cur_tx_copy->control = TXIC | TXFD | CRCEnable | PADEnable;
        np->cur_tx_copy->control |= (skb->len << PKTSShift);    /* pkt size */
        np->cur_tx_copy->control |= (BPT << TBSShift);          /* buffer size */

        /* for the last descriptor */
        next = np->cur_tx_copy->next_desc_logical;
        next->control = TXIC | TXLD | CRCEnable | PADEnable;
        next->control |= (skb->len << PKTSShift);               /* pkt size */
        next->control |= ((skb->len - BPT) << TBSShift);        /* buf size */

        if (np->pci_dev->device == 0x891)
            np->cur_tx_copy->control |= ETIControl | RetryTxLC;
        next->buffer = pci_map_single(np->pci_dev, skb->data + BPT,
                                      skb->len - BPT, PCI_DMA_TODEVICE);

        next->status = TXOWN;
        np->cur_tx_copy->status = TXOWN;

        np->cur_tx_copy = next->next_desc_logical;
        np->free_tx_count -= 2;
    } else {
        np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data,
                                                 skb->len, PCI_DMA_TODEVICE);
        np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
        np->cur_tx_copy->control |= (skb->len << PKTSShift);    /* pkt size */
        np->cur_tx_copy->control |= (skb->len << TBSShift);     /* buffer size */

        if (np->pci_dev->device == 0x891)
            np->cur_tx_copy->control |= ETIControl | RetryTxLC;
        np->cur_tx_copy->status = TXOWN;
        np->cur_tx_copy = np->cur_tx_copy->next_desc_logical;
        --np->free_tx_count;

    if (np->free_tx_count < 2)
        netif_stop_queue(dev);
    ++np->really_tx_count;
    iowrite32(0, np->mem + TXPDR);

    spin_unlock_irqrestore(&np->lock, flags);
    return NETDEV_TX_OK;
/* Take lock before calling */
/* Chip probably hosed tx ring. Clean up. */
static void reset_tx_descriptors(struct net_device *dev)
    struct netdev_private *np = netdev_priv(dev);
    struct fealnx_desc *cur;

    /* initialize tx variables */
    np->cur_tx = &np->tx_ring[0];
    np->cur_tx_copy = &np->tx_ring[0];
    np->really_tx_count = 0;
    np->free_tx_count = TX_RING_SIZE;

    for (i = 0; i < TX_RING_SIZE; i++) {
        cur = &np->tx_ring[i];
            pci_unmap_single(np->pci_dev, cur->buffer,
                             cur->skbuff->len, PCI_DMA_TODEVICE);
            dev_kfree_skb_any(cur->skbuff);
        cur->control = 0;       /* needed? */
        /* probably not needed. We do it for purely paranoid reasons */
        cur->next_desc = np->tx_ring_dma +
            (i + 1)*sizeof(struct fealnx_desc);
        cur->next_desc_logical = &np->tx_ring[i + 1];

    /* for the last tx descriptor */
    np->tx_ring[TX_RING_SIZE - 1].next_desc = np->tx_ring_dma;
    np->tx_ring[TX_RING_SIZE - 1].next_desc_logical = &np->tx_ring[0];
/* Take lock and stop rx before calling this */
static void reset_rx_descriptors(struct net_device *dev)
    struct netdev_private *np = netdev_priv(dev);
    struct fealnx_desc *cur = np->cur_rx;

    allocate_rx_buffers(dev);

    for (i = 0; i < RX_RING_SIZE; i++) {
            cur->status = RXOWN;
        cur = cur->next_desc_logical;

    iowrite32(np->rx_ring_dma + ((char*)np->cur_rx - (char*)np->rx_ring),
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t intr_handler(int irq, void *dev_instance)
    struct net_device *dev = (struct net_device *) dev_instance;
    struct netdev_private *np = netdev_priv(dev);
    void __iomem *ioaddr = np->mem;
    long boguscnt = max_interrupt_work;
    unsigned int num_tx = 0;

    spin_lock(&np->lock);

    iowrite32(0, ioaddr + IMR);

        u32 intr_status = ioread32(ioaddr + ISR);

        /* Acknowledge all of the current interrupt sources ASAP. */
        iowrite32(intr_status, ioaddr + ISR);

            printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n", dev->name,

        if (!(intr_status & np->imrvalue))

//      if (intr_status & FBE)
//      {       /* fatal error */
//              stop_nic_tx(ioaddr, 0);
//              stop_nic_rx(ioaddr, 0);

        if (intr_status & TUNF)
            iowrite32(0, ioaddr + TXPDR);

        if (intr_status & CNTOVF) {
            dev->stats.rx_missed_errors +=
                ioread32(ioaddr + TALLY) & 0x7fff;

            dev->stats.rx_crc_errors +=
                (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;

        if (intr_status & (RI | RBU)) {
            if (intr_status & RI)
                stop_nic_rx(ioaddr, np->crvalue);
                reset_rx_descriptors(dev);
                iowrite32(np->crvalue, ioaddr + TCRRCR);
        while (np->really_tx_count) {
            long tx_status = np->cur_tx->status;
            long tx_control = np->cur_tx->control;

            if (!(tx_control & TXLD)) {  /* this pkt is combined by two tx descriptors */
                struct fealnx_desc *next;

                next = np->cur_tx->next_desc_logical;
                tx_status = next->status;
                tx_control = next->control;

            if (tx_status & TXOWN)

            if (!(np->crvalue & CR_W_ENH)) {
                if (tx_status & (CSL | LC | EC | UDF | HF)) {
                    dev->stats.tx_errors++;
                        dev->stats.tx_aborted_errors++;
                    if (tx_status & CSL)
                        dev->stats.tx_carrier_errors++;
                        dev->stats.tx_window_errors++;
                    if (tx_status & UDF)
                        dev->stats.tx_fifo_errors++;
                    if ((tx_status & HF) && np->mii.full_duplex == 0)
                        dev->stats.tx_heartbeat_errors++;

                    dev->stats.tx_bytes +=
                        ((tx_control & PKTSMask) >> PKTSShift);

                    dev->stats.collisions +=
                        ((tx_status & NCRMask) >> NCRShift);
                    dev->stats.tx_packets++;

                dev->stats.tx_bytes +=
                    ((tx_control & PKTSMask) >> PKTSShift);
                dev->stats.tx_packets++;

            /* Free the original skb. */
            pci_unmap_single(np->pci_dev, np->cur_tx->buffer,
                             np->cur_tx->skbuff->len, PCI_DMA_TODEVICE);
            dev_kfree_skb_irq(np->cur_tx->skbuff);
            np->cur_tx->skbuff = NULL;
            --np->really_tx_count;
            if (np->cur_tx->control & TXLD) {
                np->cur_tx = np->cur_tx->next_desc_logical;
                ++np->free_tx_count;
                np->cur_tx = np->cur_tx->next_desc_logical;
                np->cur_tx = np->cur_tx->next_desc_logical;
                np->free_tx_count += 2;
        }               /* end of while loop */

        if (num_tx && np->free_tx_count >= 2)
            netif_wake_queue(dev);
        /* read transmit status for enhanced mode only */
        if (np->crvalue & CR_W_ENH) {
            data = ioread32(ioaddr + TSR);
            dev->stats.tx_errors += (data & 0xff000000) >> 24;
            dev->stats.tx_aborted_errors +=
                (data & 0xff000000) >> 24;
            dev->stats.tx_window_errors +=
                (data & 0x00ff0000) >> 16;
            dev->stats.collisions += (data & 0x0000ffff);
        if (--boguscnt < 0) {
            printk(KERN_WARNING "%s: Too much work at interrupt, "
                   "status=0x%4.4x.\n", dev->name, intr_status);
            if (!np->reset_timer_armed) {
                np->reset_timer_armed = 1;
                np->reset_timer.expires = RUN_AT(HZ/2);
                add_timer(&np->reset_timer);
                stop_nic_rxtx(ioaddr, 0);
                netif_stop_queue(dev);
                /* or netif_tx_disable(dev); ?? */
                /* Prevent other paths from enabling tx,rx,intrs */
                np->crvalue_sv = np->crvalue;
                np->imrvalue_sv = np->imrvalue;
                np->crvalue &= ~(CR_W_TXEN | CR_W_RXEN);        /* or simply = 0? */

    /* read the tally counters */
    dev->stats.rx_missed_errors += ioread32(ioaddr + TALLY) & 0x7fff;
    dev->stats.rx_crc_errors +=
        (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;

        printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
               dev->name, ioread32(ioaddr + ISR));

    iowrite32(np->imrvalue, ioaddr + IMR);

    spin_unlock(&np->lock);

    return IRQ_RETVAL(handled);
/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation. */
static int netdev_rx(struct net_device *dev)
    struct netdev_private *np = netdev_priv(dev);
    void __iomem *ioaddr = np->mem;

    /* If EOP is set on the next entry, it's a new packet. Send it up. */
    while (!(np->cur_rx->status & RXOWN) && np->cur_rx->skbuff) {
        s32 rx_status = np->cur_rx->status;

        if (np->really_rx_count == 0)

            printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n", rx_status);

        if ((!((rx_status & RXFSD) && (rx_status & RXLSD))) ||
            (rx_status & ErrorSummary)) {
            if (rx_status & ErrorSummary) {     /* there was a fatal error */
                       "%s: Receive error, Rx status %8.8x.\n",
                       dev->name, rx_status);

                dev->stats.rx_errors++;         /* end of a packet. */
                if (rx_status & (LONG | RUNT))
                    dev->stats.rx_length_errors++;
                if (rx_status & RXER)
                    dev->stats.rx_frame_errors++;
                if (rx_status & CRC)
                    dev->stats.rx_crc_errors++;

                int need_to_reset = 0;

                if (rx_status & RXFSD) {        /* this pkt is too long, over one rx buffer */
                    struct fealnx_desc *cur;

                    /* check whether this packet was received completely */
                    while (desno <= np->really_rx_count) {
                        if ((!(cur->status & RXOWN)) &&
                            (cur->status & RXLSD))
                        /* goto next rx descriptor */
                        cur = cur->next_desc_logical;

                    if (desno > np->really_rx_count)
                } else          /* RXLSD not found, something is wrong */

                if (need_to_reset == 0) {
                        dev->stats.rx_length_errors++;

                    /* free all rx descriptors related to this long pkt */
                    for (i = 0; i < desno; ++i) {
                        if (!np->cur_rx->skbuff) {
                                   "%s: I'm scared\n", dev->name);
                        np->cur_rx->status = RXOWN;
                        np->cur_rx = np->cur_rx->next_desc_logical;
                } else {        /* rx error, need to reset this chip */
                    stop_nic_rx(ioaddr, np->crvalue);
                    reset_rx_descriptors(dev);
                    iowrite32(np->crvalue, ioaddr + TCRRCR);
                break;          /* exit the while loop */
        } else {                /* this received pkt is ok */
            struct sk_buff *skb;
            /* Omit the four octet CRC from the length. */
            short pkt_len = ((rx_status & FLNGMASK) >> FLNGShift) - 4;

#ifndef final_version
                printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
                       " status %x.\n", pkt_len, rx_status);

            /* Check if the packet is long enough to accept without copying
               to a minimally-sized skbuff. */
            if (pkt_len < rx_copybreak &&
                (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
                skb_reserve(skb, 2);    /* 16 byte align the IP header */
                pci_dma_sync_single_for_cpu(np->pci_dev,
                                            PCI_DMA_FROMDEVICE);
                /* Call copy + cksum if available. */
#if !defined(__alpha__)
                skb_copy_to_linear_data(skb,
                                        np->cur_rx->skbuff->data, pkt_len);
                skb_put(skb, pkt_len);
                memcpy(skb_put(skb, pkt_len),
                       np->cur_rx->skbuff->data, pkt_len);
                pci_dma_sync_single_for_device(np->pci_dev,
                                               PCI_DMA_FROMDEVICE);
                pci_unmap_single(np->pci_dev,
                                 PCI_DMA_FROMDEVICE);
                skb_put(skb = np->cur_rx->skbuff, pkt_len);
                np->cur_rx->skbuff = NULL;
                --np->really_rx_count;

            skb->protocol = eth_type_trans(skb, dev);

            dev->stats.rx_packets++;
            dev->stats.rx_bytes += pkt_len;

        np->cur_rx = np->cur_rx->next_desc_logical;
    }                           /* end of while loop */

    /* allocate skb for rx buffers */
    allocate_rx_buffers(dev);
static struct net_device_stats *get_stats(struct net_device *dev)
    struct netdev_private *np = netdev_priv(dev);
    void __iomem *ioaddr = np->mem;

    /* The chip only needs to report frames it silently dropped. */
    if (netif_running(dev)) {
        dev->stats.rx_missed_errors +=
            ioread32(ioaddr + TALLY) & 0x7fff;
        dev->stats.rx_crc_errors +=
            (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
/* for dev->set_multicast_list */
static void set_rx_mode(struct net_device *dev)
    spinlock_t *lp = &((struct netdev_private *)netdev_priv(dev))->lock;
    unsigned long flags;
    spin_lock_irqsave(lp, flags);
    spin_unlock_irqrestore(lp, flags);
/* Take lock before calling */
static void __set_rx_mode(struct net_device *dev)
    struct netdev_private *np = netdev_priv(dev);
    void __iomem *ioaddr = np->mem;
    u32 mc_filter[2];           /* Multicast hash filter */

    if (dev->flags & IFF_PROMISC) {     /* Set promiscuous. */
        memset(mc_filter, 0xff, sizeof(mc_filter));
        rx_mode = CR_W_PROM | CR_W_AB | CR_W_AM;
    } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
               (dev->flags & IFF_ALLMULTI)) {
        /* Too many to match, or accept all multicasts. */
        memset(mc_filter, 0xff, sizeof(mc_filter));
        rx_mode = CR_W_AB | CR_W_AM;
    } else {
        struct netdev_hw_addr *ha;

        memset(mc_filter, 0, sizeof(mc_filter));
        netdev_for_each_mc_addr(ha, dev) {
            bit = (ether_crc(ETH_ALEN, ha->addr) >> 26) ^ 0x3F;
            mc_filter[bit >> 5] |= (1 << bit);
        }
        rx_mode = CR_W_AB | CR_W_AM;
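    /* Hash scheme used above, for reference: ether_crc() yields the 32-bit
     * Ethernet CRC of the MAC address; its top 6 bits (>> 26, then inverted
     * with ^ 0x3F) select one of 64 filter bits, and bit >> 5 picks which of
     * the two 32-bit filter words (written to MAR0/MAR1 below) holds it. */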
    stop_nic_rxtx(ioaddr, np->crvalue);

    iowrite32(mc_filter[0], ioaddr + MAR0);
    iowrite32(mc_filter[1], ioaddr + MAR1);
    np->crvalue &= ~CR_W_RXMODEMASK;
    np->crvalue |= rx_mode;
    iowrite32(np->crvalue, ioaddr + TCRRCR);
static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
    struct netdev_private *np = netdev_priv(dev);

    strcpy(info->driver, DRV_NAME);
    strcpy(info->version, DRV_VERSION);
    strcpy(info->bus_info, pci_name(np->pci_dev));
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
    struct netdev_private *np = netdev_priv(dev);

    spin_lock_irq(&np->lock);
    rc = mii_ethtool_gset(&np->mii, cmd);
    spin_unlock_irq(&np->lock);

static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
    struct netdev_private *np = netdev_priv(dev);

    spin_lock_irq(&np->lock);
    rc = mii_ethtool_sset(&np->mii, cmd);
    spin_unlock_irq(&np->lock);
static int netdev_nway_reset(struct net_device *dev)
    struct netdev_private *np = netdev_priv(dev);
    return mii_nway_restart(&np->mii);

static u32 netdev_get_link(struct net_device *dev)
    struct netdev_private *np = netdev_priv(dev);
    return mii_link_ok(&np->mii);
static u32 netdev_get_msglevel(struct net_device *dev)

static void netdev_set_msglevel(struct net_device *dev, u32 value)
static const struct ethtool_ops netdev_ethtool_ops = {
    .get_drvinfo    = netdev_get_drvinfo,
    .get_settings   = netdev_get_settings,
    .set_settings   = netdev_set_settings,
    .nway_reset     = netdev_nway_reset,
    .get_link       = netdev_get_link,
    .get_msglevel   = netdev_get_msglevel,
    .set_msglevel   = netdev_set_msglevel,
static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
    struct netdev_private *np = netdev_priv(dev);

    if (!netif_running(dev))

    spin_lock_irq(&np->lock);
    rc = generic_mii_ioctl(&np->mii, if_mii(rq), cmd, NULL);
    spin_unlock_irq(&np->lock);
static int netdev_close(struct net_device *dev)
    struct netdev_private *np = netdev_priv(dev);
    void __iomem *ioaddr = np->mem;

    netif_stop_queue(dev);

    /* Disable interrupts by clearing the interrupt mask. */
    iowrite32(0x0000, ioaddr + IMR);

    /* Stop the chip's Tx and Rx processes. */
    stop_nic_rxtx(ioaddr, 0);

    del_timer_sync(&np->timer);
    del_timer_sync(&np->reset_timer);

    free_irq(dev->irq, dev);

    /* Free all the skbuffs in the Rx queue. */
    for (i = 0; i < RX_RING_SIZE; i++) {
        struct sk_buff *skb = np->rx_ring[i].skbuff;

        np->rx_ring[i].status = 0;
            pci_unmap_single(np->pci_dev, np->rx_ring[i].buffer,
                             np->rx_buf_sz, PCI_DMA_FROMDEVICE);
            np->rx_ring[i].skbuff = NULL;

    for (i = 0; i < TX_RING_SIZE; i++) {
        struct sk_buff *skb = np->tx_ring[i].skbuff;

            pci_unmap_single(np->pci_dev, np->tx_ring[i].buffer,
                             skb->len, PCI_DMA_TODEVICE);
            np->tx_ring[i].skbuff = NULL;
static DEFINE_PCI_DEVICE_TABLE(fealnx_pci_tbl) = {
    {0x1516, 0x0800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
    {0x1516, 0x0803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
    {0x1516, 0x0891, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
    {}                          /* terminate list */

MODULE_DEVICE_TABLE(pci, fealnx_pci_tbl);
static struct pci_driver fealnx_driver = {
    .id_table       = fealnx_pci_tbl,
    .probe          = fealnx_init_one,
    .remove         = __devexit_p(fealnx_remove_one),
static int __init fealnx_init(void)
    /* when a module, this is printed whether or not devices are found in probe */
    return pci_register_driver(&fealnx_driver);

static void __exit fealnx_exit(void)
    pci_unregister_driver(&fealnx_driver);

module_init(fealnx_init);
module_exit(fealnx_exit);