/* yellowfin.c: A Packet Engines G-NIC ethernet driver for linux. */
/*
	Written 1997-2001 by Donald Becker.

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	This driver is for the Packet Engines G-NIC PCI Gigabit Ethernet adapter.
	It also supports the Symbios Logic version of the same chip core.

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403

	Support and updates available at
	http://www.scyld.com/network/yellowfin.html

	Linux kernel changelog:
	-----------------------

	LK1.1.1 (jgarzik): Port to 2.4 kernel

	LK1.1.2 (jgarzik):
	* Merge in becker version 1.05

	LK1.1.3 (jgarzik):
	* Various cleanups
	* Update yellowfin_timer to correctly calculate duplex.
	  (suggested by Manfred Spraul)

	LK1.1.4 (val@nmt.edu):
	* Fix three endian-ness bugs
	* Support dual function SYM53C885E ethernet chip

	LK1.1.5 (val@nmt.edu):
	* Fix forced full-duplex bug I introduced

	LK1.1.6 (val@nmt.edu):
	* Only print warning on truly "oversized" packets
	* Fix theoretical bug on gigabit cards - return to 1.1.3 behavior

*/

#define DRV_NAME	"yellowfin"
#define DRV_VERSION	"1.05+LK1.1.6"
#define DRV_RELDATE	"Feb 11, 2002"

#define PFX DRV_NAME ": "

/* The user-configurable values.
   These may be modified when a driver module is loaded.*/

static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static int max_interrupt_work = 20;
static int mtu;
#ifdef YF_PROTOTYPE			/* Support for prototype hardware errata. */
/* System-wide count of bogus-rx frames. */
static int bogus_rx;
static int dma_ctrl = 0x004A0263;	/* Constrained by errata */
static int fifo_cfg = 0x0020;		/* Bypass external Tx FIFO. */
#elif YF_NEW				/* A future perfect board :->. */
static int dma_ctrl = 0x00CAC277;	/* Override when loading module! */
static int fifo_cfg = 0x0028;
#else
static int dma_ctrl = 0x004A0263;	/* Constrained by errata */
static int fifo_cfg = 0x0020;		/* Bypass external Tx FIFO. */
#endif

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1514 effectively disables this feature. */
static int rx_copybreak;

/* Used to pass the media type, etc.
   No media types are currently defined.  These exist for driver
   interoperability.
*/
#define MAX_UNITS 8				/* More are supported, limit only on options */
static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};

/* Do ugly workaround for GX server chipset errata. */
static int gx_fix;

/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for efficiency.
   Making the Tx ring too long decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE	16
#define TX_QUEUE_SIZE	12		/* Must be > 4 && <= TX_RING_SIZE */
#define RX_RING_SIZE	64
#define STATUS_TOTAL_SIZE	TX_RING_SIZE*sizeof(struct tx_status_words)
#define TX_TOTAL_SIZE		2*TX_RING_SIZE*sizeof(struct yellowfin_desc)
#define RX_TOTAL_SIZE		RX_RING_SIZE*sizeof(struct yellowfin_desc)

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (2*HZ)
#define PKT_BUF_SZ		1536		/* Size of each temporary Rx buffer.*/

#define yellowfin_debug debug

#if !defined(__OPTIMIZE__)
#warning  You must compile this file with the correct options!
#warning  See the last lines of the source file.
#error You must compile this driver with "-O".
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/mii.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <asm/uaccess.h>
#include <asm/processor.h>		/* Processor type for cache alignment. */
#include <asm/unaligned.h>
#include <asm/bitops.h>
#include <asm/io.h>

/* These identify the driver base version and may not be removed. */
static char version[] __devinitdata =
KERN_INFO DRV_NAME ".c:v1.05  1/09/2001  Written by Donald Becker <becker@scyld.com>\n"
KERN_INFO "  http://www.scyld.com/network/yellowfin.html\n"
KERN_INFO "  (unofficial 2.4.x port, " DRV_VERSION ", " DRV_RELDATE ")\n";

#ifndef USE_IO_OPS
#undef inb
#undef inw
#undef inl
#undef outb
#undef outw
#undef outl
#define inb readb
#define inw readw
#define inl readl
#define outb writeb
#define outw writew
#define outl writel
#endif /* !USE_IO_OPS */

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Packet Engines Yellowfin G-NIC Gigabit Ethernet driver");
MODULE_LICENSE("GPL");

MODULE_PARM(max_interrupt_work, "i");
MODULE_PARM(mtu, "i");
MODULE_PARM(debug, "i");
MODULE_PARM(rx_copybreak, "i");
MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(gx_fix, "i");
MODULE_PARM_DESC(max_interrupt_work, "G-NIC maximum events handled per interrupt");
MODULE_PARM_DESC(mtu, "G-NIC MTU (all boards)");
MODULE_PARM_DESC(debug, "G-NIC debug level (0-7)");
MODULE_PARM_DESC(rx_copybreak, "G-NIC copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(options, "G-NIC: Bits 0-3: media type, bit 17: full duplex");
MODULE_PARM_DESC(full_duplex, "G-NIC full duplex setting(s) (1)");
MODULE_PARM_DESC(gx_fix, "G-NIC: enable GX server chipset bug workaround (0-1)");
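
/* Example module load (illustrative values only; options[] and full_duplex[]
   take one comma-separated entry per board):

       modprobe yellowfin debug=2 rx_copybreak=200 options=0x10,0x210 full_duplex=1

   In yellowfin_init_one() below, bits 0-3 of an options[] entry select the
   media type and bit 0x200 forces full duplex. */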

/*
				Theory of Operation

I. Board Compatibility

This device driver is designed for the Packet Engines "Yellowfin" Gigabit
Ethernet adapter.  The G-NIC 64-bit PCI card is supported, as well as the
Symbios 53C885E dual function chip.

II. Board-specific settings

PCI bus devices are configured by the system at boot time, so no jumpers
need to be set on the board.  The system BIOS preferably should assign the
PCI INTA signal to an otherwise unused system IRQ line.
Note: Kernel versions earlier than 1.3.73 do not support shared PCI
interrupt lines.

III. Driver operation

IIIa. Ring buffers

The Yellowfin uses the Descriptor Based DMA Architecture specified by Apple.
This is a descriptor list scheme similar to that used by the EEPro100 and
Tulip.  This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.

The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the Yellowfin as receive data
buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack and replaced by a newly allocated skbuff.

The RX_COPYBREAK value is chosen to trade off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames.  For small frames the copying cost is negligible (esp. considering
that we are pre-loading the cache with immediately useful header
information).  For large frames the copying cost is non-trivial, and the
larger copy might flush the cache of useful data.

IIIC. Synchronization

The driver runs as two independent, single-threaded flows of control.  One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag.  The other thread is the interrupt handler, which is single
threaded by the hardware and other software.

The send packet thread has partial control over the Tx ring and 'dev->tbusy'
flag.  It sets the tbusy flag whenever it's queuing a Tx packet.  If the next
queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
the 'yp->tx_full' flag.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark.  Iff the 'yp->tx_full' flag is set, it
clears both the tx_full and tbusy flags.

IV. Notes

Thanks to Kim Stearns of Packet Engines for providing a pair of G-NIC boards.
Thanks to Bruce Faust of Digitalscape for providing both their SYM53C885 board
and an AlphaStation to verify the Alpha port!

IVb. References

Yellowfin Engineering Design Specification, 4/23/97 Preliminary/Confidential
Symbios SYM53C885 PCI-SCSI/Fast Ethernet Multifunction Controller Preliminary
   Data Manual v3.0
http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
http://cesdis.gsfc.nasa.gov/linux/misc/100mbps.html

IVc. Errata

See Packet Engines confidential appendix (prototype chips only).
*/
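
/* Copybreak sketch, mirroring the receive path in yellowfin_rx() below.
   For a received frame of pkt_len bytes:
       if (pkt_len > rx_copybreak)   pass the ring skbuff itself upstream
                                     (zero copy) and refill the slot later;
       else                          allocate a small skbuff of pkt_len + 2
                                     bytes and copy the frame into it.
   With the default rx_copybreak of 0, every frame takes the first path. */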

enum pci_id_flags_bits {
	/* Set PCI command register bits before calling probe1(). */
	PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
	/* Read and map the single following PCI BAR. */
	PCI_ADDR0=0<<4, PCI_ADDR1=1<<4, PCI_ADDR2=2<<4, PCI_ADDR3=3<<4,
	PCI_ADDR_64BITS=0x100, PCI_NO_ACPI_WAKE=0x200, PCI_NO_MIN_LATENCY=0x400,
	PCI_UNUSED_IRQ=0x800,
};
enum capability_flags {
	HasMII=1, FullTxStatus=2, IsGigabit=4, HasMulticastBug=8, FullRxStatus=16,
	HasMACAddrBug=32, /* Only on early revs. */
	DontUseEeprom=64, /* Don't read the MAC from the EEPROM. */
};

/* The PCI I/O space extent. */
#define YELLOWFIN_SIZE 0x100
#ifdef USE_IO_OPS
#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_IO  | PCI_ADDR0)
#else
#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR1)
#endif

struct pci_id_info {
	const char *name;
	struct match_info {
		int pci, pci_mask, subsystem, subsystem_mask;
		int revision, revision_mask;		/* Only 8 bits. */
	} id;
	enum pci_id_flags_bits pci_flags;
	int io_size;				/* Needed for I/O region check or ioremap(). */
	int drv_flags;				/* Driver use, intended as capability flags. */
};

static struct pci_id_info pci_id_tbl[] = {
	{"Yellowfin G-NIC Gigabit Ethernet", { 0x07021000, 0xffffffff},
	 PCI_IOTYPE, YELLOWFIN_SIZE,
	 FullTxStatus | IsGigabit | HasMulticastBug | HasMACAddrBug | DontUseEeprom},
	{"Symbios SYM83C885", { 0x07011000, 0xffffffff},
	 PCI_IOTYPE, YELLOWFIN_SIZE, HasMII | DontUseEeprom },
	{0,},
};

static struct pci_device_id yellowfin_pci_tbl[] __devinitdata = {
	{ 0x1000, 0x0702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ 0x1000, 0x0701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
	{ 0, }
};
MODULE_DEVICE_TABLE (pci, yellowfin_pci_tbl);

/* Offsets to the Yellowfin registers.  Various sizes and alignments. */
enum yellowfin_offsets {
	TxCtrl=0x00, TxStatus=0x04, TxPtr=0x0C,
	TxIntrSel=0x10, TxBranchSel=0x14, TxWaitSel=0x18,
	RxCtrl=0x40, RxStatus=0x44, RxPtr=0x4C,
	RxIntrSel=0x50, RxBranchSel=0x54, RxWaitSel=0x58,
	EventStatus=0x80, IntrEnb=0x82, IntrClear=0x84, IntrStatus=0x86,
	ChipRev=0x8C, DMACtrl=0x90, TxThreshold=0x94,
	Cnfg=0xA0, FrameGap0=0xA2, FrameGap1=0xA4,
	MII_Cmd=0xA6, MII_Addr=0xA8, MII_Wr_Data=0xAA, MII_Rd_Data=0xAC,
	MII_Status=0xAE,
	RxDepth=0xB8, FlowCtrl=0xBC,
	AddrMode=0xD0, StnAddr=0xD2, HashTbl=0xD8, FIFOcfg=0xF8,
	EEStatus=0xF0, EECtrl=0xF1, EEAddr=0xF2, EERead=0xF3, EEWrite=0xF4,
	EEFeature=0xF5,
};

/* The Yellowfin Rx and Tx buffer descriptors.
   Elements are written as 32 bit for endian portability. */
struct yellowfin_desc {
	u32 dbdma_cmd;
	u32 addr;
	u32 branch_addr;
	u32 result_status;
};

struct tx_status_words {
#ifdef __BIG_ENDIAN
	u16 tx_errs;
	u16 tx_cnt;
	u16 paused;
	u16 total_tx_cnt;
#else  /* Little endian chips. */
	u16 tx_cnt;
	u16 tx_errs;
	u16 total_tx_cnt;
	u16 paused;
#endif /* __BIG_ENDIAN */
};

/* Bits in yellowfin_desc.cmd */
enum desc_cmd_bits {
	CMD_TX_PKT=0x10000000, CMD_RX_BUF=0x20000000, CMD_TXSTATUS=0x30000000,
	CMD_NOP=0x60000000, CMD_STOP=0x70000000,
	BRANCH_ALWAYS=0x0C0000, INTR_ALWAYS=0x300000, WAIT_ALWAYS=0x030000,
	BRANCH_IFTRUE=0x040000,
};

/* Bits in yellowfin_desc.status */
enum desc_status_bits { RX_EOP=0x0040, };
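
/* As used throughout this file (inferred from usage here, not from the chip
   documentation): a dbdma_cmd word packs the CMD_* code in the top nibble,
   the branch/interrupt/wait condition selectors in bits 16-21, and the
   transfer length in the low 16 bits, e.g.
       cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
*/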

/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
	IntrRxDone=0x01, IntrRxInvalid=0x02, IntrRxPCIFault=0x04,IntrRxPCIErr=0x08,
	IntrTxDone=0x10, IntrTxInvalid=0x20, IntrTxPCIFault=0x40,IntrTxPCIErr=0x80,
	IntrEarlyRx=0x100, IntrWakeup=0x200, };

#define PRIV_ALIGN	31	/* Required alignment mask */
#define MII_CNT		4
struct yellowfin_private {
	/* Descriptor rings first for alignment.
	   Tx requires a second descriptor for status. */
	struct yellowfin_desc *rx_ring;
	struct yellowfin_desc *tx_ring;
	struct sk_buff* rx_skbuff[RX_RING_SIZE];
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;

	struct tx_status_words *tx_status;
	dma_addr_t tx_status_dma;

	struct timer_list timer;	/* Media selection timer. */
	struct net_device_stats stats;
	/* Frequently used and paired value: keep adjacent for cache effect. */
	int chip_id, drv_flags;
	struct pci_dev *pci_dev;
	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
	struct tx_status_words *tx_tail_desc;
	unsigned int cur_tx, dirty_tx;
	int tx_threshold;
	unsigned int tx_full:1;		/* The Tx queue is full. */
	unsigned int full_duplex:1;	/* Full-duplex operation requested. */
	unsigned int duplex_lock:1;
	unsigned int medialock:1;	/* Do not sense media. */
	unsigned int default_port:4;	/* Last dev->if_port value. */
	/* MII transceiver section. */
	int mii_cnt;			/* MII device addresses. */
	u16 advertising;		/* NWay media advertisement */
	unsigned char phys[MII_CNT];	/* MII device addresses, only first one used */
	spinlock_t lock;
};

static int read_eeprom(long ioaddr, int location);
static int mdio_read(long ioaddr, int phy_id, int location);
static void mdio_write(long ioaddr, int phy_id, int location, int value);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int yellowfin_open(struct net_device *dev);
static void yellowfin_timer(unsigned long data);
static void yellowfin_tx_timeout(struct net_device *dev);
static void yellowfin_init_ring(struct net_device *dev);
static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
static int yellowfin_rx(struct net_device *dev);
static void yellowfin_error(struct net_device *dev, int intr_status);
static int yellowfin_close(struct net_device *dev);
static struct net_device_stats *yellowfin_get_stats(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);

static int __devinit yellowfin_init_one(struct pci_dev *pdev,
					const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct yellowfin_private *np;
	int irq;
	int chip_idx = ent->driver_data;
	static int find_cnt;
	long ioaddr, real_ioaddr;
	int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
	int drv_flags = pci_id_tbl[chip_idx].drv_flags;
	void *ring_space;
	dma_addr_t ring_dma;

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	i = pci_enable_device(pdev);
	if (i) return i;

	dev = alloc_etherdev(sizeof(*np));
	if (!dev) {
		printk (KERN_ERR PFX "cannot allocate ethernet device\n");
		return -ENOMEM;
	}
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	np = dev->priv;

	if (pci_request_regions(pdev, dev->name))
		goto err_out_free_netdev;

	pci_set_master (pdev);

#ifdef USE_IO_OPS
	real_ioaddr = ioaddr = pci_resource_start (pdev, 0);
#else
	real_ioaddr = ioaddr = pci_resource_start (pdev, 1);
	ioaddr = (long) ioremap(ioaddr, YELLOWFIN_SIZE);
	if (!ioaddr)
		goto err_out_free_res;
#endif
	irq = pdev->irq;

	if (drv_flags & DontUseEeprom)
		for (i = 0; i < 6; i++)
			dev->dev_addr[i] = inb(ioaddr + StnAddr + i);
	else {
		int ee_offset = (read_eeprom(ioaddr, 6) == 0xff ? 0x100 : 0);
		for (i = 0; i < 6; i++)
			dev->dev_addr[i] = read_eeprom(ioaddr, ee_offset + i);
	}

	/* Reset the chip. */
	outl(0x80000000, ioaddr + DMACtrl);

	dev->base_addr = ioaddr;
	dev->irq = irq;

	pci_set_drvdata(pdev, dev);
	spin_lock_init(&np->lock);

	np->pci_dev = pdev;
	np->chip_id = chip_idx;
	np->drv_flags = drv_flags;

	ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_cleardev;
	np->tx_ring = (struct yellowfin_desc *)ring_space;
	np->tx_ring_dma = ring_dma;

	ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_unmap_tx;
	np->rx_ring = (struct yellowfin_desc *)ring_space;
	np->rx_ring_dma = ring_dma;

	ring_space = pci_alloc_consistent(pdev, STATUS_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_unmap_rx;
	np->tx_status = (struct tx_status_words *)ring_space;
	np->tx_status_dma = ring_dma;

	if (dev->mem_start)
		option = dev->mem_start;

	/* The lower four bits are the media type. */
	if (option > 0) {
		if (option & 0x200)
			np->full_duplex = 1;
		np->default_port = option & 15;
		if (np->default_port)
			np->medialock = 1;
	}
	if (find_cnt < MAX_UNITS && full_duplex[find_cnt] > 0)
		np->full_duplex = 1;

	if (np->full_duplex)
		np->duplex_lock = 1;

	/* The Yellowfin-specific entries in the device structure. */
	dev->open = &yellowfin_open;
	dev->hard_start_xmit = &yellowfin_start_xmit;
	dev->stop = &yellowfin_close;
	dev->get_stats = &yellowfin_get_stats;
	dev->set_multicast_list = &set_rx_mode;
	dev->do_ioctl = &netdev_ioctl;
	dev->tx_timeout = yellowfin_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;

	if (mtu)
		dev->mtu = mtu;

	i = register_netdev(dev);
	if (i)
		goto err_out_unmap_status;

	printk(KERN_INFO "%s: %s type %8x at 0x%lx, ",
		   dev->name, pci_id_tbl[chip_idx].name, inl(ioaddr + ChipRev), ioaddr);
	for (i = 0; i < 5; i++)
		printk("%2.2x:", dev->dev_addr[i]);
	printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);

	if (np->drv_flags & HasMII) {
		int phy, phy_idx = 0;
		for (phy = 0; phy < 32 && phy_idx < MII_CNT; phy++) {
			int mii_status = mdio_read(ioaddr, phy, 1);
			if (mii_status != 0xffff && mii_status != 0x0000) {
				np->phys[phy_idx++] = phy;
				np->advertising = mdio_read(ioaddr, phy, 4);
				printk(KERN_INFO "%s: MII PHY found at address %d, status "
					   "0x%4.4x advertising %4.4x.\n",
					   dev->name, phy, mii_status, np->advertising);
			}
		}
		np->mii_cnt = phy_idx;
	}

	find_cnt++;

	return 0;

err_out_unmap_status:
	pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status,
		np->tx_status_dma);
err_out_unmap_rx:
	pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
err_out_unmap_tx:
	pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
err_out_cleardev:
	pci_set_drvdata(pdev, NULL);
#ifndef USE_IO_OPS
	iounmap((void *)ioaddr);
err_out_free_res:
#endif
	pci_release_regions(pdev);
err_out_free_netdev:
	kfree (dev);
	return -ENODEV;
}
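
/* read_eeprom() below kicks off a read through EECtrl, then polls the
   EEStatus busy bit (0x80) with a bounded spin; if the 10000-iteration
   budget runs out it simply returns whatever is latched in EERead. */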

static int __devinit read_eeprom(long ioaddr, int location)
{
	int bogus_cnt = 10000;		/* Typical 33MHz: 1050 ticks */

	outb(location, ioaddr + EEAddr);
	outb(0x30 | ((location >> 8) & 7), ioaddr + EECtrl);
	while ((inb(ioaddr + EEStatus) & 0x80) && --bogus_cnt > 0)
		;
	return inb(ioaddr + EERead);
}

/* MII Management Data I/O accesses.
   These routines assume the MDIO controller is idle, and do not exit until
   the command is finished. */

static int mdio_read(long ioaddr, int phy_id, int location)
{
	int i;

	outw((phy_id<<8) + location, ioaddr + MII_Addr);
	outw(1, ioaddr + MII_Cmd);
	for (i = 10000; i >= 0; i--)
		if ((inw(ioaddr + MII_Status) & 1) == 0)
			break;
	return inw(ioaddr + MII_Rd_Data);
}

static void mdio_write(long ioaddr, int phy_id, int location, int value)
{
	int i;

	outw((phy_id<<8) + location, ioaddr + MII_Addr);
	outw(value, ioaddr + MII_Wr_Data);

	/* Wait for the command to finish. */
	for (i = 10000; i >= 0; i--)
		if ((inw(ioaddr + MII_Status) & 1) == 0)
			break;
	return;
}

static int yellowfin_open(struct net_device *dev)
{
	struct yellowfin_private *yp = dev->priv;
	long ioaddr = dev->base_addr;
	int i;

	/* Reset the chip. */
	outl(0x80000000, ioaddr + DMACtrl);

	i = request_irq(dev->irq, &yellowfin_interrupt, SA_SHIRQ, dev->name, dev);
	if (i) return i;

	if (yellowfin_debug > 1)
		printk(KERN_DEBUG "%s: yellowfin_open() irq %d.\n",
			   dev->name, dev->irq);

	yellowfin_init_ring(dev);

	outl(yp->rx_ring_dma, ioaddr + RxPtr);
	outl(yp->tx_ring_dma, ioaddr + TxPtr);

	for (i = 0; i < 6; i++)
		outb(dev->dev_addr[i], ioaddr + StnAddr + i);

	/* Set up various condition 'select' registers.
	   There are no options here. */
	outl(0x00800080, ioaddr + TxIntrSel);	/* Interrupt on Tx abort */
	outl(0x00800080, ioaddr + TxBranchSel);	/* Branch on Tx abort */
	outl(0x00400040, ioaddr + TxWaitSel);	/* Wait on Tx status */
	outl(0x00400040, ioaddr + RxIntrSel);	/* Interrupt on Rx done */
	outl(0x00400040, ioaddr + RxBranchSel);	/* Branch on Rx error */
	outl(0x00400040, ioaddr + RxWaitSel);	/* Wait on Rx done */

	/* Initialize other registers: with so many, this will eventually be
	   converted to an offset/value list. */
	outl(dma_ctrl, ioaddr + DMACtrl);
	outw(fifo_cfg, ioaddr + FIFOcfg);
	/* Enable automatic generation of flow control frames, period 0xffff. */
	outl(0x0030FFFF, ioaddr + FlowCtrl);

	yp->tx_threshold = 32;
	outl(yp->tx_threshold, ioaddr + TxThreshold);

	if (dev->if_port == 0)
		dev->if_port = yp->default_port;

	netif_start_queue(dev);

	/* Setting the Rx mode will start the Rx process. */
	if (yp->drv_flags & IsGigabit) {
		/* We are always in full-duplex mode with gigabit! */
		yp->full_duplex = 1;
		outw(0x01CF, ioaddr + Cnfg);
	} else {
		outw(0x0018, ioaddr + FrameGap0); /* 0060/4060 for non-MII 10baseT */
		outw(0x1018, ioaddr + FrameGap1);
		outw(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
	}
	set_rx_mode(dev);

	/* Enable interrupts by setting the interrupt mask. */
	outw(0x81ff, ioaddr + IntrEnb);		/* See enum intr_status_bits */
	outw(0x0000, ioaddr + EventStatus);	/* Clear non-interrupting events */
	outl(0x80008000, ioaddr + RxCtrl);	/* Start Rx and Tx channels. */
	outl(0x80008000, ioaddr + TxCtrl);

	if (yellowfin_debug > 2) {
		printk(KERN_DEBUG "%s: Done yellowfin_open().\n",
			   dev->name);
	}

	/* Set the timer to check for link beat. */
	init_timer(&yp->timer);
	yp->timer.expires = jiffies + 3*HZ;
	yp->timer.data = (unsigned long)dev;
	yp->timer.function = &yellowfin_timer;	/* timer handler */
	add_timer(&yp->timer);

	return 0;
}

static void yellowfin_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct yellowfin_private *yp = dev->priv;
	long ioaddr = dev->base_addr;
	int next_tick = 60*HZ;

	if (yellowfin_debug > 3) {
		printk(KERN_DEBUG "%s: Yellowfin timer tick, status %8.8x.\n",
			   dev->name, inw(ioaddr + IntrStatus));
	}

	if (yp->mii_cnt) {
		int bmsr = mdio_read(ioaddr, yp->phys[0], MII_BMSR);
		int lpa = mdio_read(ioaddr, yp->phys[0], MII_LPA);
		int negotiated = lpa & yp->advertising;
		if (yellowfin_debug > 1)
			printk(KERN_DEBUG "%s: MII #%d status register is %4.4x, "
				   "link partner capability %4.4x.\n",
				   dev->name, yp->phys[0], bmsr, lpa);

		yp->full_duplex = mii_duplex(yp->duplex_lock, negotiated);

		outw(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);

		if (bmsr & BMSR_LSTATUS)
			next_tick = 60*HZ;
		else
			next_tick = 3*HZ;
	}

	yp->timer.expires = jiffies + next_tick;
	add_timer(&yp->timer);
}

static void yellowfin_tx_timeout(struct net_device *dev)
{
	struct yellowfin_private *yp = dev->priv;
	long ioaddr = dev->base_addr;

	printk(KERN_WARNING "%s: Yellowfin transmit timed out at %d/%d Tx "
		   "status %4.4x, Rx status %4.4x, resetting...\n",
		   dev->name, yp->cur_tx, yp->dirty_tx,
		   inl(ioaddr + TxStatus), inl(ioaddr + RxStatus));

	/* Note: these should be KERN_DEBUG. */
	if (yellowfin_debug) {
		int i;
		printk(KERN_WARNING "  Rx ring %p: ", yp->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++)
			printk(" %8.8x", yp->rx_ring[i].result_status);
		printk("\n"KERN_WARNING"  Tx ring %p: ", yp->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(" %4.4x /%8.8x", yp->tx_status[i].tx_errs,
				   yp->tx_ring[i].result_status);
		printk("\n");
	}

	/* If the hardware is found to hang regularly, we will update the code
	   to reinitialize the chip here. */
	dev->if_port = 0;

	/* Wake the potentially-idle transmit channel. */
	outl(0x10001000, dev->base_addr + TxCtrl);
	if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
		netif_wake_queue (dev);		/* Typical path */

	dev->trans_start = jiffies;
	yp->stats.tx_errors++;
}

/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void yellowfin_init_ring(struct net_device *dev)
{
	struct yellowfin_private *yp = dev->priv;
	int i;

	yp->tx_full = 0;
	yp->cur_rx = yp->cur_tx = 0;
	yp->dirty_tx = 0;

	yp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

	for (i = 0; i < RX_RING_SIZE; i++) {
		yp->rx_ring[i].dbdma_cmd =
			cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
		yp->rx_ring[i].branch_addr = cpu_to_le32(yp->rx_ring_dma +
			((i+1)%RX_RING_SIZE)*sizeof(struct yellowfin_desc));
	}

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz);
		yp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb->dev = dev;		/* Mark as being used by this device. */
		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
		yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
			skb->tail, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
	}
	yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP);
	yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

#define NO_TXSTATS
#ifdef NO_TXSTATS
	/* In this mode the Tx ring needs only a single descriptor. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		yp->tx_skbuff[i] = 0;
		yp->tx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
		yp->tx_ring[i].branch_addr = cpu_to_le32(yp->tx_ring_dma +
			((i+1)%TX_RING_SIZE)*sizeof(struct yellowfin_desc));
	}
	/* Wrap ring */
	yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS);
#else
{
	int j;

	/* Tx ring needs a pair of descriptors, the second for the status. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		j = 2*i;
		yp->tx_skbuff[i] = 0;
		/* Branch on Tx error. */
		yp->tx_ring[j].dbdma_cmd = cpu_to_le32(CMD_STOP);
		yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
			(j+1)*sizeof(struct yellowfin_desc));
		j++;
		if (yp->drv_flags & FullTxStatus) {
			yp->tx_ring[j].dbdma_cmd =
				cpu_to_le32(CMD_TXSTATUS | sizeof(*yp->tx_status));
			yp->tx_ring[j].request_cnt = sizeof(*yp->tx_status);
			yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
				i*sizeof(struct tx_status_words));
		} else {
			/* Symbios chips write only tx_errs word. */
			yp->tx_ring[j].dbdma_cmd =
				cpu_to_le32(CMD_TXSTATUS | INTR_ALWAYS | 2);
			yp->tx_ring[j].request_cnt = 2;
			/* Om pade ummmmm... */
			yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
				i*sizeof(struct tx_status_words) +
				&(yp->tx_status[0].tx_errs) -
				&(yp->tx_status[0]));
		}
		yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
			((j+1)%(2*TX_RING_SIZE))*sizeof(struct yellowfin_desc));
	}
	/* Wrap ring */
	yp->tx_ring[++j].dbdma_cmd |= cpu_to_le32(BRANCH_ALWAYS | INTR_ALWAYS);
}
#endif
	yp->tx_tail_desc = &yp->tx_status[0];
	return;
}
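
/* With NO_TXSTATS (the compiled-in default above) each Tx slot is a single
   descriptor.  The disabled #else branch instead pairs every packet
   descriptor with a status-write descriptor, which is why the ring is
   allocated as 2*TX_RING_SIZE entries (see TX_TOTAL_SIZE). */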

static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct yellowfin_private *yp = dev->priv;
	unsigned entry;
	int len = skb->len;

	netif_stop_queue (dev);

	/* Note: Ordering is important here, set the field with the
	   "ownership" bit last, and only then increment cur_tx. */

	/* Calculate the next Tx descriptor entry. */
	entry = yp->cur_tx % TX_RING_SIZE;

	yp->tx_skbuff[entry] = skb;

	if (gx_fix) {	/* Note: only works for paddable protocols e.g. IP. */
		int cacheline_end = ((unsigned long)skb->data + skb->len) % 32;
		/* Fix GX chipset errata. */
		if (cacheline_end > 24  || cacheline_end == 0) {
			len = skb->len + 32 - cacheline_end + 1;
			if (len != skb->len)
				skb = skb_padto(skb, len);
		}
		if (skb == NULL) {
			yp->tx_skbuff[entry] = NULL;
			netif_wake_queue(dev);
			return 0;
		}
	}
#ifdef NO_TXSTATS
	yp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
		skb->data, len, PCI_DMA_TODEVICE));
	yp->tx_ring[entry].result_status = 0;
	if (entry >= TX_RING_SIZE-1) {
		/* New stop command. */
		yp->tx_ring[0].dbdma_cmd = cpu_to_le32(CMD_STOP);
		yp->tx_ring[TX_RING_SIZE-1].dbdma_cmd =
			cpu_to_le32(CMD_TX_PKT|BRANCH_ALWAYS | len);
	} else {
		yp->tx_ring[entry+1].dbdma_cmd = cpu_to_le32(CMD_STOP);
		yp->tx_ring[entry].dbdma_cmd =
			cpu_to_le32(CMD_TX_PKT | BRANCH_IFTRUE | len);
	}
	yp->cur_tx++;
#else
	yp->tx_ring[entry<<1].request_cnt = len;
	yp->tx_ring[entry<<1].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
		skb->data, len, PCI_DMA_TODEVICE));
	/* The input_last (status-write) command is constant, but we must
	   rewrite the subsequent 'stop' command. */

	yp->cur_tx++;
	{
		unsigned next_entry = yp->cur_tx % TX_RING_SIZE;
		yp->tx_ring[next_entry<<1].dbdma_cmd = cpu_to_le32(CMD_STOP);
	}
	/* Final step -- overwrite the old 'stop' command. */

	yp->tx_ring[entry<<1].dbdma_cmd =
		cpu_to_le32( ((entry % 6) == 0 ? CMD_TX_PKT|INTR_ALWAYS|BRANCH_IFTRUE :
					  CMD_TX_PKT | BRANCH_IFTRUE) | len);
#endif

	/* Non-x86 Todo: explicitly flush cache lines here. */

	/* Wake the potentially-idle transmit channel. */
	outl(0x10001000, dev->base_addr + TxCtrl);

	if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
		netif_start_queue (dev);		/* Typical path */
	else
		yp->tx_full = 1;
	dev->trans_start = jiffies;

	if (yellowfin_debug > 4) {
		printk(KERN_DEBUG "%s: Yellowfin transmit frame #%d queued in slot %d.\n",
			   dev->name, yp->cur_tx, entry);
	}
	return 0;
}
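
/* Queue accounting: yp->cur_tx - yp->dirty_tx is the number of in-flight
   packets.  yellowfin_start_xmit() keeps the queue awake only while this
   stays below TX_QUEUE_SIZE; once marked full, yellowfin_interrupt() wakes
   the queue again after the count drops below TX_QUEUE_SIZE - 4. */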

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = dev_instance;
	struct yellowfin_private *yp;
	long ioaddr;
	int boguscnt = max_interrupt_work;
	unsigned int handled = 0;

#ifndef final_version			/* Can never occur. */
	if (dev == NULL) {
		printk (KERN_ERR "yellowfin_interrupt(): irq %d for unknown device.\n", irq);
		return IRQ_NONE;
	}
#endif

	ioaddr = dev->base_addr;
	yp = dev->priv;

	spin_lock (&yp->lock);

	do {
		u16 intr_status = inw(ioaddr + IntrClear);

		if (yellowfin_debug > 4)
			printk(KERN_DEBUG "%s: Yellowfin interrupt, status %4.4x.\n",
				   dev->name, intr_status);

		if (intr_status == 0)
			break;
		handled = 1;

		if (intr_status & (IntrRxDone | IntrEarlyRx)) {
			yellowfin_rx(dev);
			outl(0x10001000, ioaddr + RxCtrl);	/* Wake Rx engine. */
		}

#ifdef NO_TXSTATS
		for (; yp->cur_tx - yp->dirty_tx > 0; yp->dirty_tx++) {
			int entry = yp->dirty_tx % TX_RING_SIZE;
			struct sk_buff *skb;

			if (yp->tx_ring[entry].result_status == 0)
				break;
			skb = yp->tx_skbuff[entry];
			yp->stats.tx_packets++;
			yp->stats.tx_bytes += skb->len;
			/* Free the original skb. */
			pci_unmap_single(yp->pci_dev, yp->tx_ring[entry].addr,
				skb->len, PCI_DMA_TODEVICE);
			dev_kfree_skb_irq(skb);
			yp->tx_skbuff[entry] = 0;
		}
		if (yp->tx_full
			&& yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4) {
			/* The ring is no longer full, clear tbusy. */
			yp->tx_full = 0;
			netif_wake_queue(dev);
		}
#else
		if ((intr_status & IntrTxDone) || (yp->tx_tail_desc->tx_errs)) {
			unsigned dirty_tx = yp->dirty_tx;

			for (dirty_tx = yp->dirty_tx; yp->cur_tx - dirty_tx > 0;
				 dirty_tx++) {
				/* Todo: optimize this. */
				int entry = dirty_tx % TX_RING_SIZE;
				u16 tx_errs = yp->tx_status[entry].tx_errs;
				struct sk_buff *skb;

#ifndef final_version
				if (yellowfin_debug > 5)
					printk(KERN_DEBUG "%s: Tx queue %d check, Tx status "
						   "%4.4x %4.4x %4.4x %4.4x.\n",
						   dev->name, entry,
						   yp->tx_status[entry].tx_cnt,
						   yp->tx_status[entry].tx_errs,
						   yp->tx_status[entry].total_tx_cnt,
						   yp->tx_status[entry].paused);
#endif
				if (tx_errs == 0)
					break;			/* It still hasn't been Txed */
				skb = yp->tx_skbuff[entry];
				if (tx_errs & 0xF810) {
					/* There was a major error, log it. */
#ifndef final_version
					if (yellowfin_debug > 1)
						printk(KERN_DEBUG "%s: Transmit error, Tx status %4.4x.\n",
							   dev->name, tx_errs);
#endif
					yp->stats.tx_errors++;
					if (tx_errs & 0xF800) yp->stats.tx_aborted_errors++;
					if (tx_errs & 0x0800) yp->stats.tx_carrier_errors++;
					if (tx_errs & 0x2000) yp->stats.tx_window_errors++;
					if (tx_errs & 0x8000) yp->stats.tx_fifo_errors++;
				} else {
#ifndef final_version
					if (yellowfin_debug > 4)
						printk(KERN_DEBUG "%s: Normal transmit, Tx status %4.4x.\n",
							   dev->name, tx_errs);
#endif
					yp->stats.tx_bytes += skb->len;
					yp->stats.collisions += tx_errs & 15;
					yp->stats.tx_packets++;
				}
				/* Free the original skb. */
				pci_unmap_single(yp->pci_dev,
					yp->tx_ring[entry<<1].addr, skb->len,
					PCI_DMA_TODEVICE);
				dev_kfree_skb_irq(skb);
				yp->tx_skbuff[entry] = 0;
				/* Mark status as empty. */
				yp->tx_status[entry].tx_errs = 0;
			}

#ifndef final_version
			if (yp->cur_tx - dirty_tx > TX_RING_SIZE) {
				printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
					   dev->name, dirty_tx, yp->cur_tx, yp->tx_full);
				dirty_tx += TX_RING_SIZE;
			}
#endif

			if (yp->tx_full
				&& yp->cur_tx - dirty_tx < TX_QUEUE_SIZE - 2) {
				/* The ring is no longer full, clear tbusy. */
				yp->tx_full = 0;
				netif_wake_queue(dev);
			}

			yp->dirty_tx = dirty_tx;
			yp->tx_tail_desc = &yp->tx_status[dirty_tx % TX_RING_SIZE];
		}
#endif

		/* Log errors and other uncommon events. */
		if (intr_status & 0x2ee)	/* Abnormal error summary. */
			yellowfin_error(dev, intr_status);

		if (--boguscnt < 0) {
			printk(KERN_WARNING "%s: Too much work at interrupt, "
				   "status=0x%4.4x.\n",
				   dev->name, intr_status);
			break;
		}
	} while (1);

	if (yellowfin_debug > 3)
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
			   dev->name, inw(ioaddr + IntrStatus));

	spin_unlock (&yp->lock);
	return IRQ_RETVAL(handled);
}

/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation. */
static int yellowfin_rx(struct net_device *dev)
{
	struct yellowfin_private *yp = dev->priv;
	int entry = yp->cur_rx % RX_RING_SIZE;
	int boguscnt = yp->dirty_rx + RX_RING_SIZE - yp->cur_rx;

	if (yellowfin_debug > 4) {
		printk(KERN_DEBUG " In yellowfin_rx(), entry %d status %8.8x.\n",
			   entry, yp->rx_ring[entry].result_status);
		printk(KERN_DEBUG "   #%d desc. %8.8x %8.8x %8.8x.\n",
			   entry, yp->rx_ring[entry].dbdma_cmd, yp->rx_ring[entry].addr,
			   yp->rx_ring[entry].result_status);
	}

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (1) {
		struct yellowfin_desc *desc = &yp->rx_ring[entry];
		struct sk_buff *rx_skb = yp->rx_skbuff[entry];
		s16 frame_status;
		u16 desc_status;
		int data_size;
		u8 *buf_addr;

		if(!desc->result_status)
			break;
		pci_dma_sync_single(yp->pci_dev, desc->addr,
			yp->rx_buf_sz, PCI_DMA_FROMDEVICE);
		desc_status = le32_to_cpu(desc->result_status) >> 16;
		buf_addr = rx_skb->tail;
		data_size = (le32_to_cpu(desc->dbdma_cmd) -
			le32_to_cpu(desc->result_status)) & 0xffff;
		frame_status = le16_to_cpu(get_unaligned((s16*)&(buf_addr[data_size - 2])));
		if (yellowfin_debug > 4)
			printk(KERN_DEBUG "  yellowfin_rx() status was %4.4x.\n",
				   frame_status);
		if (--boguscnt < 0)
			break;
		if ( ! (desc_status & RX_EOP)) {
			if (data_size != 0)
				printk(KERN_WARNING "%s: Oversized Ethernet frame spanned multiple buffers,"
					   " status %4.4x, data_size %d!\n", dev->name, desc_status, data_size);
			yp->stats.rx_length_errors++;
		} else if ((yp->drv_flags & IsGigabit) && (frame_status & 0x0038)) {
			/* There was an error. */
			if (yellowfin_debug > 3)
				printk(KERN_DEBUG "  yellowfin_rx() Rx error was %4.4x.\n",
					   frame_status);
			yp->stats.rx_errors++;
			if (frame_status & 0x0060) yp->stats.rx_length_errors++;
			if (frame_status & 0x0008) yp->stats.rx_frame_errors++;
			if (frame_status & 0x0010) yp->stats.rx_crc_errors++;
			if (frame_status < 0) yp->stats.rx_dropped++;
		} else if ( !(yp->drv_flags & IsGigabit)  &&
				   ((buf_addr[data_size-1] & 0x85) || buf_addr[data_size-2] & 0xC0)) {
			u8 status1 = buf_addr[data_size-2];
			u8 status2 = buf_addr[data_size-1];
			yp->stats.rx_errors++;
			if (status1 & 0xC0) yp->stats.rx_length_errors++;
			if (status2 & 0x03) yp->stats.rx_frame_errors++;
			if (status2 & 0x04) yp->stats.rx_crc_errors++;
			if (status2 & 0x80) yp->stats.rx_dropped++;
#ifdef YF_PROTOTYPE		/* Support for prototype hardware errata. */
		} else if ((yp->drv_flags & HasMACAddrBug)  &&
			memcmp(le32_to_cpu(yp->rx_ring_dma +
				entry*sizeof(struct yellowfin_desc)),
				dev->dev_addr, 6) != 0 &&
			memcmp(le32_to_cpu(yp->rx_ring_dma +
				entry*sizeof(struct yellowfin_desc)),
				"\377\377\377\377\377\377", 6) != 0) {
			if (bogus_rx++ == 0)
				printk(KERN_WARNING "%s: Bad frame to %2.2x:%2.2x:%2.2x:%2.2x:"
					   "%2.2x:%2.2x.\n",
					   dev->name, buf_addr[0], buf_addr[1], buf_addr[2],
					   buf_addr[3], buf_addr[4], buf_addr[5]);
#endif
		} else {
			struct sk_buff *skb;
			int pkt_len = data_size -
				(yp->chip_id ? 7 : 8 + buf_addr[data_size - 8]);
			/* To verify: Yellowfin Length should omit the CRC! */

#ifndef final_version
			if (yellowfin_debug > 4)
				printk(KERN_DEBUG "  yellowfin_rx() normal Rx pkt length %d"
					   " of %d, bogus_cnt %d.\n",
					   pkt_len, data_size, boguscnt);
#endif
			/* Check if the packet is long enough to just pass up the skbuff
			   without copying to a properly sized skbuff. */
			if (pkt_len > rx_copybreak) {
				skb_put(skb = rx_skb, pkt_len);
				pci_unmap_single(yp->pci_dev,
					yp->rx_ring[entry].addr,
					yp->rx_buf_sz,
					PCI_DMA_FROMDEVICE);
				yp->rx_skbuff[entry] = NULL;
			} else {
				skb = dev_alloc_skb(pkt_len + 2);
				if (skb == NULL)
					break;
				skb->dev = dev;
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
#if HAS_IP_COPYSUM
				eth_copy_and_sum(skb, rx_skb->tail, pkt_len, 0);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb, pkt_len),
					rx_skb->tail, pkt_len);
#endif
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			dev->last_rx = jiffies;
			yp->stats.rx_packets++;
			yp->stats.rx_bytes += pkt_len;
		}
		entry = (++yp->cur_rx) % RX_RING_SIZE;
	}

	/* Refill the Rx ring buffers. */
	for (; yp->cur_rx - yp->dirty_rx > 0; yp->dirty_rx++) {
		entry = yp->dirty_rx % RX_RING_SIZE;
		if (yp->rx_skbuff[entry] == NULL) {
			struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz);
			if (skb == NULL)
				break;			/* Better luck next round. */
			yp->rx_skbuff[entry] = skb;
			skb->dev = dev;		/* Mark as being used by this device. */
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
			yp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
				skb->tail, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
		}
		yp->rx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_STOP);
		yp->rx_ring[entry].result_status = 0;	/* Clear complete bit. */
		if (entry != 0)
			yp->rx_ring[entry - 1].dbdma_cmd =
				cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
		else
			yp->rx_ring[RX_RING_SIZE - 1].dbdma_cmd =
				cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | BRANCH_ALWAYS
							| yp->rx_buf_sz);
	}

	return 0;
}

static void yellowfin_error(struct net_device *dev, int intr_status)
{
	struct yellowfin_private *yp = dev->priv;

	printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
		   dev->name, intr_status);
	/* Hmmmmm, it's not clear what to do here. */
	if (intr_status & (IntrTxPCIErr | IntrTxPCIFault))
		yp->stats.tx_errors++;
	if (intr_status & (IntrRxPCIErr | IntrRxPCIFault))
		yp->stats.rx_errors++;
}

static int yellowfin_close(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct yellowfin_private *yp = dev->priv;
	int i;

	netif_stop_queue (dev);

	if (yellowfin_debug > 1) {
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %4.4x "
			   "Rx %4.4x Int %2.2x.\n",
			   dev->name, inw(ioaddr + TxStatus),
			   inw(ioaddr + RxStatus), inw(ioaddr + IntrStatus));
		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
			   dev->name, yp->cur_tx, yp->dirty_tx, yp->cur_rx, yp->dirty_rx);
	}

	/* Disable interrupts by clearing the interrupt mask. */
	outw(0x0000, ioaddr + IntrEnb);

	/* Stop the chip's Tx and Rx processes. */
	outl(0x80000000, ioaddr + RxCtrl);
	outl(0x80000000, ioaddr + TxCtrl);

	del_timer(&yp->timer);

#if defined(__i386__)
	if (yellowfin_debug > 2) {
		printk("\n"KERN_DEBUG"  Tx ring at %8.8x:\n", yp->tx_ring_dma);
		for (i = 0; i < TX_RING_SIZE*2; i++)
			printk(" %c #%d desc. %8.8x %8.8x %8.8x %8.8x.\n",
				   inl(ioaddr + TxPtr) == (long)&yp->tx_ring[i] ? '>' : ' ',
				   i, yp->tx_ring[i].dbdma_cmd, yp->tx_ring[i].addr,
				   yp->tx_ring[i].branch_addr, yp->tx_ring[i].result_status);
		printk(KERN_DEBUG "  Tx status %p:\n", yp->tx_status);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk("   #%d status %4.4x %4.4x %4.4x %4.4x.\n",
				   i, yp->tx_status[i].tx_cnt, yp->tx_status[i].tx_errs,
				   yp->tx_status[i].total_tx_cnt, yp->tx_status[i].paused);

		printk("\n"KERN_DEBUG "  Rx ring %8.8x:\n", yp->rx_ring_dma);
		for (i = 0; i < RX_RING_SIZE; i++) {
			printk(KERN_DEBUG " %c #%d desc. %8.8x %8.8x %8.8x\n",
				   inl(ioaddr + RxPtr) == (long)&yp->rx_ring[i] ? '>' : ' ',
				   i, yp->rx_ring[i].dbdma_cmd, yp->rx_ring[i].addr,
				   yp->rx_ring[i].result_status);
			if (yellowfin_debug > 6) {
				if (get_unaligned((u8*)yp->rx_ring[i].addr) != 0x69) {
					int j;
					for (j = 0; j < 0x50; j++)
						printk(" %4.4x",
							   get_unaligned(((u16*)yp->rx_ring[i].addr) + j));
					printk("\n");
				}
			}
		}
	}
#endif /* __i386__ debugging only */

	free_irq(dev->irq, dev);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		yp->rx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
		yp->rx_ring[i].addr = 0xBADF00D0; /* An invalid address. */
		if (yp->rx_skbuff[i]) {
			dev_kfree_skb(yp->rx_skbuff[i]);
		}
		yp->rx_skbuff[i] = 0;
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		if (yp->tx_skbuff[i])
			dev_kfree_skb(yp->tx_skbuff[i]);
		yp->tx_skbuff[i] = 0;
	}

#ifdef YF_PROTOTYPE			/* Support for prototype hardware errata. */
	if (yellowfin_debug > 0) {
		printk(KERN_DEBUG "%s: Received %d frames that we should not have.\n",
			   dev->name, bogus_rx);
	}
#endif

	return 0;
}

static struct net_device_stats *yellowfin_get_stats(struct net_device *dev)
{
	struct yellowfin_private *yp = dev->priv;
	return &yp->stats;
}

/* Set or clear the multicast filter for this adaptor. */

static void set_rx_mode(struct net_device *dev)
{
	struct yellowfin_private *yp = dev->priv;
	long ioaddr = dev->base_addr;
	u16 cfg_value = inw(ioaddr + Cnfg);

	/* Stop the Rx process to change any value. */
	outw(cfg_value & ~0x1000, ioaddr + Cnfg);
	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
		/* Unconditionally log net taps. */
		printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
		outw(0x000F, ioaddr + AddrMode);
	} else if ((dev->mc_count > 64) || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter well, or accept all multicasts. */
		outw(0x000B, ioaddr + AddrMode);
	} else if (dev->mc_count > 0) { /* Must use the multicast hash table. */
		struct dev_mc_list *mclist;
		u16 hash_table[4];
		int i;
		memset(hash_table, 0, sizeof(hash_table));
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
			 i++, mclist = mclist->next) {
			unsigned int bit;

			/* Due to a bug in the early chip versions, multiple filter
			   slots must be set for each address. */
			if (yp->drv_flags & HasMulticastBug) {
				bit = (ether_crc_le(3, mclist->dmi_addr) >> 3) & 0x3f;
				hash_table[bit >> 4] |= (1 << bit);
				bit = (ether_crc_le(4, mclist->dmi_addr) >> 3) & 0x3f;
				hash_table[bit >> 4] |= (1 << bit);
				bit = (ether_crc_le(5, mclist->dmi_addr) >> 3) & 0x3f;
				hash_table[bit >> 4] |= (1 << bit);
			}
			bit = (ether_crc_le(6, mclist->dmi_addr) >> 3) & 0x3f;
			hash_table[bit >> 4] |= (1 << bit);
		}
		/* Copy the hash table to the chip. */
		for (i = 0; i < 4; i++)
			outw(hash_table[i], ioaddr + HashTbl + i*2);
		outw(0x0003, ioaddr + AddrMode);
	} else {				/* Normal, unicast/broadcast-only mode. */
		outw(0x0001, ioaddr + AddrMode);
	}
	/* Restart the Rx process. */
	outw(cfg_value | 0x1000, ioaddr + Cnfg);
}

static int netdev_ethtool_ioctl(struct net_device *dev, void *useraddr)
{
	struct yellowfin_private *np = dev->priv;
	u32 ethcmd;

	if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
		return -EFAULT;

	switch (ethcmd) {
	case ETHTOOL_GDRVINFO: {
		struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO};
		strcpy(info.driver, DRV_NAME);
		strcpy(info.version, DRV_VERSION);
		strcpy(info.bus_info, np->pci_dev->slot_name);
		if (copy_to_user(useraddr, &info, sizeof(info)))
			return -EFAULT;
		return 0;
	}
	}

	return -EOPNOTSUPP;
}

static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct yellowfin_private *np = dev->priv;
	long ioaddr = dev->base_addr;
	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&rq->ifr_data;

	switch(cmd) {
	case SIOCETHTOOL:
		return netdev_ethtool_ioctl(dev, (void *) rq->ifr_data);
	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
		data->phy_id = np->phys[0] & 0x1f;
		/* Fall Through */

	case SIOCGMIIREG:		/* Read MII PHY register. */
		data->val_out = mdio_read(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f);
		return 0;

	case SIOCSMIIREG:		/* Write MII PHY register. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (data->phy_id == np->phys[0]) {
			u16 value = data->val_in;
			switch (data->reg_num) {
			case 0:
				/* Check for autonegotiation on or reset. */
				np->medialock = (value & 0x9000) ? 0 : 1;
				if (np->medialock)
					np->full_duplex = (value & 0x0100) ? 1 : 0;
				break;
			case 4: np->advertising = value; break;
			}
			/* Perhaps check_duplex(dev), depending on chip semantics. */
		}
		mdio_write(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static void __devexit yellowfin_remove_one (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct yellowfin_private *np;

	if (!dev)
		BUG();
	np = dev->priv;

	pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status,
		np->tx_status_dma);
	pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
	pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
	unregister_netdev (dev);

	pci_release_regions (pdev);

#ifndef USE_IO_OPS
	iounmap ((void *) dev->base_addr);
#endif

	kfree (dev);
	pci_set_drvdata(pdev, NULL);
}

static struct pci_driver yellowfin_driver = {
	.name		= DRV_NAME,
	.id_table	= yellowfin_pci_tbl,
	.probe		= yellowfin_init_one,
	.remove		= __devexit_p(yellowfin_remove_one),
};


static int __init yellowfin_init (void)
{
	/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk(version);
#endif
	return pci_module_init (&yellowfin_driver);
}


static void __exit yellowfin_cleanup (void)
{
	pci_unregister_driver (&yellowfin_driver);
}


module_init(yellowfin_init);
module_exit(yellowfin_cleanup);

/*
 * Local variables:
 *  compile-command: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c yellowfin.c"
 *  compile-command-alphaLX: "gcc -DMODULE -Wall -Wstrict-prototypes -O2 -c yellowfin.c -fomit-frame-pointer -fno-strength-reduce -mno-fp-regs -Wa,-m21164a -DBWX_USABLE -DBWIO_ENABLED"
 *  simple-compile-command: "gcc -DMODULE -O6 -c yellowfin.c"
 *  c-indent-level: 4
 *  c-basic-offset: 4
 *  tab-width: 4
 * End:
 */