Import 2.3.99pre4-2
[davej-history.git] / drivers / net / yellowfin.c
bloba86c83b7d8d5fd1cb1ccfda604ef2b78d06ddab0
1 /* yellowfin.c: A Packet Engines G-NIC ethernet driver for linux. */
2 /*
3 Written 1997-1999 by Donald Becker.
5 This software may be used and distributed according to the terms
6 of the GNU Public License, incorporated herein by reference.
8 This driver is for the Packet Engines G-NIC PCI Gigabit Ethernet adapter.
9 It also supports the Symbios Logic version of the same chip core.
11 The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
12 Center of Excellence in Space Data and Information Sciences
13 Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
15 Support and updates available at
16 http://cesdis.gsfc.nasa.gov/linux/drivers/yellowfin.html
/* Version banner; printed once at probe time as a debugging aid. */
static const char *version =
"yellowfin.c:v1.03a 7/30/99  Written by Donald Becker, becker@cesdis.edu\n"
" http://cesdis.gsfc.nasa.gov/linux/drivers/yellowfin.html\n";
/* A few user-configurable values.  All are plain ints so they can also be
   overridden as module parameters (see the MODULE_PARM lines below). */

static int debug = 1;					/* Message verbosity; gates yellowfin_debug. */
static int max_interrupt_work = 20;		/* Event budget per interrupt invocation. */
static int mtu = 0;						/* Override the default MTU when non-zero. */
#ifdef YF_PROTOTYPE			/* Support for prototype hardware errata. */
/* System-wide count of bogus-rx frames. */
static int bogus_rx = 0;
static int dma_ctrl = 0x004A0263;			/* Constrained by errata */
static int fifo_cfg = 0x0020;				/* Bypass external Tx FIFO. */
#elif defined(YF_NEW)		/* A future perfect board :->.
							   Was "#elif YF_NEW": that form breaks if YF_NEW
							   is defined empty, and defined() matches the
							   #ifdef style used above. */
static int dma_ctrl = 0x00CAC277;			/* Override when loading module! */
static int fifo_cfg = 0x0028;
#else
static int dma_ctrl = 0x004A0263;			/* Constrained by errata */
static int fifo_cfg = 0x0020;				/* Bypass external Tx FIFO. */
#endif

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1514 effectively disables this feature. */
static int rx_copybreak = 0;

/* Used to pass the media type, etc.
   No media types are currently defined.  These exist for driver
   interoperability. */
#define MAX_UNITS 8				/* More are supported, limit only on options */
static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};

/* Do ugly workaround for GX server chipset errata. */
static int gx_fix = 0;
/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for efficiency.
   Making the Tx queue too long decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE	16
#define TX_QUEUE_SIZE	12		/* Must be > 4 && <= TX_RING_SIZE */
#define RX_RING_SIZE	64

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (2*HZ)

/* Route all debug-level checks through the module parameter 'debug'. */
#define yellowfin_debug debug
/* Refuse to build without optimization (or outside the kernel): the driver
   depends on the inline I/O helpers being compiled with -O. */
#if !defined(__OPTIMIZE__)  ||  !defined(__KERNEL__)
#warning  You must compile this file with the correct options!
#warning  See the last lines of the source file.
#error You must compile this driver with "-O".
#endif
78 #include <linux/version.h>
79 #include <linux/module.h>
80 #include <linux/kernel.h>
81 #include <linux/string.h>
82 #include <linux/timer.h>
83 #include <linux/errno.h>
84 #include <linux/ioport.h>
85 #include <linux/malloc.h>
86 #include <linux/interrupt.h>
87 #include <linux/pci.h>
88 #include <linux/init.h>
89 #include <asm/processor.h> /* Processor type for cache alignment. */
90 #include <asm/unaligned.h>
91 #include <asm/bitops.h>
92 #include <asm/io.h>
94 #include <linux/netdevice.h>
95 #include <linux/etherdevice.h>
96 #include <linux/skbuff.h>
/* Condensed operations for readability.
   Compatibility defines are now in drv_compat.h */

/* Convert a kernel virtual address to/from the little-endian bus address
   format the chip's DMA descriptors use. */
#define virt_to_le32desc(addr)  cpu_to_le32(virt_to_bus(addr))
#define le32desc_to_virt(addr)  bus_to_virt(le32_to_cpu(addr))

/* Register accessors: port I/O when USE_IO_OPS is defined, MMIO otherwise. */
#ifdef USE_IO_OPS
#define YF_INB	inb
#define YF_INW	inw
#define YF_INL	inl
#define YF_OUTB	outb
#define YF_OUTW	outw
#define YF_OUTL	outl
#else
#define YF_INB	readb
#define YF_INW	readw
#define YF_INL	readl
#define YF_OUTB	writeb
#define YF_OUTW	writew
#define YF_OUTL	writel
#endif
/*
				Theory of Operation

I. Board Compatibility

This device driver is designed for the Packet Engines "Yellowfin" Gigabit
Ethernet adapter.  The only PCA currently supported is the G-NIC 64-bit
PCI card.

II. Board-specific settings

PCI bus devices are configured by the system at boot time, so no jumpers
need to be set on the board.  The system BIOS preferably should assign the
PCI INTA signal to an otherwise unused system IRQ line.
Note: Kernel versions earlier than 1.3.73 do not support shared PCI
interrupt lines.

III. Driver operation

IIIa. Ring buffers

The Yellowfin uses the Descriptor Based DMA Architecture specified by Apple.
This is a descriptor list scheme similar to that used by the EEPro100 and
Tulip.  This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.

The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the Yellowfin as receive data
buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack and replaced by a newly allocated skbuff.

The RX_COPYBREAK value is chosen to trade-off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames.  For small frames the copying cost is negligible (esp. considering
that we are pre-loading the cache with immediately useful header
information).  For large frames the copying cost is non-trivial, and the
larger copy might flush the cache of useful data.

IIIC. Synchronization

The driver runs as two independent, single-threaded flows of control.  One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag.  The other thread is the interrupt handler, which is single
threaded by the hardware and other software.

The send packet thread has partial control over the Tx ring and 'dev->tbusy'
flag.  It sets the tbusy flag whenever it's queuing a Tx packet.  If the next
queue slot is empty, it clears the tbusy flag when finished otherwise it sets
the 'yp->tx_full' flag.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark.  Iff the 'yp->tx_full' flag is set, it
clears both the tx_full and tbusy flags.

IV. Notes

Thanks to Kim Stearns of Packet Engines for providing a pair of G-NIC boards.
Thanks to Bruce Faust of Digitalscape for providing both their SYM53C885 board
and an AlphaStation to verify the Alpha port!

IVb. References

Yellowfin Engineering Design Specification, 4/23/97 Preliminary/Confidential
Symbios SYM53C885 PCI-SCSI/Fast Ethernet Multifunction Controller Preliminary
Data Manual v3.0
http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
http://cesdis.gsfc.nasa.gov/linux/misc/100mbps.html

IVc. Errata

See Packet Engines confidential appendix (prototype chips only).
*/
/* A few values that may be tweaked. */
#define PKT_BUF_SZ		1536			/* Size of each temporary Rx buffer.*/

/* The rest of these values should never change. */

/* Per-board capability bits, combined in chip_info.flags below. */
enum capability_flags {
	HasMII=1, FullTxStatus=2, IsGigabit=4, HasMulticastBug=8, FullRxStatus=16,
	HasMACAddrBug=32, /* Really only on early revs.  */
};

/* The PCI I/O space extent. */
#define YELLOWFIN_SIZE	0x100

#define YELLOWFIN_MODULE_NAME	"yellowfin"
#define PFX			YELLOWFIN_MODULE_NAME ": "
/* Board types: indexes chip_info[] and is carried as driver_data in the
   PCI ID table below. */
typedef enum {
	YELLOWFIN_GNIC,
	SYM83C885,
} chip_t;

/* Static per-board-type data: printable name plus capability_flags bits. */
struct chip_info {
	const char *name;
	int flags;
};

/* index by chip_t */
static struct chip_info chip_info[] = {
	{"Yellowfin G-NIC Gigabit Ethernet",
	 FullTxStatus | IsGigabit | HasMulticastBug | HasMACAddrBug},
	{"Symbios SYM83C885", HasMII },
};

/* PCI IDs this driver claims (vendor 0x1000); last field selects chip_t. */
static struct pci_device_id yellowfin_pci_tbl[] __devinitdata = {
	{ 0x1000, 0x0702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, YELLOWFIN_GNIC },
	{ 0x1000, 0x0701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SYM83C885 },
	{ 0, },
};
MODULE_DEVICE_TABLE (pci, yellowfin_pci_tbl);
/* Offsets to the Yellowfin registers.  Various sizes and alignments. */
enum yellowfin_offsets {
	TxCtrl=0x00, TxStatus=0x04, TxPtr=0x0C,
	TxIntrSel=0x10, TxBranchSel=0x14, TxWaitSel=0x18,
	RxCtrl=0x40, RxStatus=0x44, RxPtr=0x4C,
	RxIntrSel=0x50, RxBranchSel=0x54, RxWaitSel=0x58,
	EventStatus=0x80, IntrEnb=0x82, IntrClear=0x84, IntrStatus=0x86,
	ChipRev=0x8C, DMACtrl=0x90, TxThreshold=0x94,
	Cnfg=0xA0, FrameGap0=0xA2, FrameGap1=0xA4,
	MII_Cmd=0xA6, MII_Addr=0xA8, MII_Wr_Data=0xAA, MII_Rd_Data=0xAC,
	MII_Status=0xAE,
	RxDepth=0xB8, FlowCtrl=0xBC,
	AddrMode=0xD0, StnAddr=0xD2, HashTbl=0xD8, FIFOcfg=0xF8,
	EEStatus=0xF0, EECtrl=0xF1, EEAddr=0xF2, EERead=0xF3, EEWrite=0xF4,
	EEFeature=0xF5,
};
/* The Yellowfin Rx and Tx buffer descriptors.
   Elements are written as 32 bit for endian portability. */
struct yellowfin_desc {
	u32 dbdma_cmd;		/* Command type, flag bits, and byte count. */
	u32 addr;		/* Little-endian bus address of the data buffer. */
	u32 branch_addr;	/* Little-endian bus address of the next descriptor. */
	u32 result_status;	/* Written back by the chip when the op completes. */
};

/* Tx status block the chip writes back.  Field order is swapped per 16-bit
   pair on PowerPC so each u16 lands in the right slot on big-endian. */
struct tx_status_words {
#if defined(__powerpc__)
	u16 tx_errs;
	u16 tx_cnt;
	u16 paused;
	u16 total_tx_cnt;
#else  /* Little endian chips. */
	u16 tx_cnt;
	u16 tx_errs;
	u16 total_tx_cnt;
	u16 paused;
#endif
};

/* Bits in yellowfin_desc.cmd */
enum desc_cmd_bits {
	CMD_TX_PKT=0x10000000, CMD_RX_BUF=0x20000000, CMD_TXSTATUS=0x30000000,
	CMD_NOP=0x60000000, CMD_STOP=0x70000000,
	BRANCH_ALWAYS=0x0C0000, INTR_ALWAYS=0x300000, WAIT_ALWAYS=0x030000,
	BRANCH_IFTRUE=0x040000,
};

/* Bits in yellowfin_desc.status */
enum desc_status_bits { RX_EOP=0x0040, };

/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
	IntrRxDone=0x01, IntrRxInvalid=0x02, IntrRxPCIFault=0x04,IntrRxPCIErr=0x08,
	IntrTxDone=0x10, IntrTxInvalid=0x20, IntrTxPCIFault=0x40,IntrTxPCIErr=0x80,
	IntrEarlyRx=0x100, IntrWakeup=0x200, };
#define PRIV_ALIGN	31 	/* Required alignment mask */
/* Per-device driver state.  The descriptor rings come first so that the
   structure's (presumably PRIV_ALIGN-enforced by the allocator, not shown
   here) alignment applies to them; priv_addr keeps the unaligned pointer
   for kfree(). */
struct yellowfin_private {
	/* Descriptor rings first for alignment.  Tx requires a second descriptor
	   for status. */
	struct yellowfin_desc rx_ring[RX_RING_SIZE];
	struct yellowfin_desc tx_ring[TX_RING_SIZE*2];
	const char *product_name;
	struct net_device *next_module;
	void *priv_addr;					/* Unaligned address for kfree */
	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff* rx_skbuff[RX_RING_SIZE];
	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	struct tx_status_words tx_status[TX_RING_SIZE];
	struct timer_list timer;	/* Media selection timer. */
	struct net_device_stats stats;
	/* Frequently used and paired value: keep adjacent for cache effect. */
	struct pci_dev *pci_dev;
	int chip_id, flags;
	struct yellowfin_desc *rx_head_desc;
	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
	unsigned int rx_buf_sz;				/* Based on MTU+slack. */
	struct tx_status_words *tx_tail_desc;
	unsigned int cur_tx, dirty_tx;		/* Tx producer/consumer indices. */
	int tx_threshold;
	unsigned int tx_full:1;				/* The Tx queue is full. */
	unsigned int full_duplex:1;			/* Full-duplex operation requested. */
	unsigned int duplex_lock:1;			/* Duplex fixed; skip autosense. */
	unsigned int medialock:1;			/* Do not sense media. */
	unsigned int default_port:4;		/* Last dev->if_port value. */
	/* MII transceiver section. */
	int mii_cnt;						/* Number of MII PHYs found. */
	u16 advertising;					/* NWay media advertisement */
	unsigned char phys[2];				/* MII device addresses. */
	u32 pad[4];							/* Used for 32-byte alignment */
	spinlock_t lock;					/* Guards the interrupt handler. */
};
/* Module metadata and load-time parameters (variables defined above). */
MODULE_AUTHOR("Donald Becker <becker@cesdis.gsfc.nasa.gov>");
MODULE_DESCRIPTION("Packet Engines Yellowfin G-NIC Gigabit Ethernet driver");
MODULE_PARM(max_interrupt_work, "i");
MODULE_PARM(mtu, "i");
MODULE_PARM(debug, "i");
MODULE_PARM(rx_copybreak, "i");
MODULE_PARM(gx_fix, "i");
MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
/* Forward declarations: EEPROM/MII helpers and the net_device methods. */
static int read_eeprom(long ioaddr, int location);
static int mdio_read(long ioaddr, int phy_id, int location);
static void mdio_write(long ioaddr, int phy_id, int location, int value);
#ifdef HAVE_PRIVATE_IOCTL
static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
#endif
static int yellowfin_open(struct net_device *dev);
static void yellowfin_timer(unsigned long data);
static void yellowfin_tx_timeout(struct net_device *dev);
static void yellowfin_init_ring(struct net_device *dev);
static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void yellowfin_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
static int yellowfin_rx(struct net_device *dev);
static void yellowfin_error(struct net_device *dev, int intr_status);
static int yellowfin_close(struct net_device *dev);
static struct net_device_stats *yellowfin_get_stats(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
371 static int __devinit read_eeprom(long ioaddr, int location)
373 int bogus_cnt = 10000; /* Typical 33Mhz: 1050 ticks */
375 YF_OUTB(location, ioaddr + EEAddr);
376 YF_OUTB(0x30 | ((location >> 8) & 7), ioaddr + EECtrl);
377 while ((YF_INB(ioaddr + EEStatus) & 0x80) && --bogus_cnt > 0)
379 return YF_INB(ioaddr + EERead);
/* MII Management Data I/O accesses.
   These routines assume the MDIO controller is idle, and do not exit until
   the command is finished. */
386 static int mdio_read(long ioaddr, int phy_id, int location)
388 int i;
390 YF_OUTW((phy_id<<8) + location, ioaddr + MII_Addr);
391 YF_OUTW(1, ioaddr + MII_Cmd);
392 for (i = 10000; i >= 0; i--)
393 if ((YF_INW(ioaddr + MII_Status) & 1) == 0)
394 break;
395 return YF_INW(ioaddr + MII_Rd_Data);
398 static void mdio_write(long ioaddr, int phy_id, int location, int value)
400 int i;
402 YF_OUTW((phy_id<<8) + location, ioaddr + MII_Addr);
403 YF_OUTW(value, ioaddr + MII_Wr_Data);
405 /* Wait for the command to finish. */
406 for (i = 10000; i >= 0; i--)
407 if ((YF_INW(ioaddr + MII_Status) & 1) == 0)
408 break;
409 return;
/* Open the interface: reset the chip, claim the IRQ, build the descriptor
   rings, program the MAC address and DMA/FIFO/flow-control registers,
   enable interrupts, start the Rx/Tx channels, and arm the link-beat timer.
   Returns 0 on success or -EAGAIN if the IRQ cannot be acquired. */
static int yellowfin_open(struct net_device *dev)
{
	struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int i;

	/* Reset the chip. */
	YF_OUTL(0x80000000, ioaddr + DMACtrl);

	if (request_irq(dev->irq, &yellowfin_interrupt, SA_SHIRQ, dev->name, dev))
		return -EAGAIN;

	if (yellowfin_debug > 1)
		printk(KERN_DEBUG "%s: yellowfin_open() irq %d.\n",
			   dev->name, dev->irq);

	MOD_INC_USE_COUNT;

	yellowfin_init_ring(dev);

	/* Point the chip at the two descriptor rings. */
	YF_OUTL(virt_to_bus(yp->rx_ring), ioaddr + RxPtr);
	YF_OUTL(virt_to_bus(yp->tx_ring), ioaddr + TxPtr);

	/* Program the station (MAC) address one byte at a time. */
	for (i = 0; i < 6; i++)
		YF_OUTB(dev->dev_addr[i], ioaddr + StnAddr + i);

	/* Set up various condition 'select' registers.
	   There are no options here. */
	YF_OUTL(0x00800080, ioaddr + TxIntrSel); 	/* Interrupt on Tx abort */
	YF_OUTL(0x00800080, ioaddr + TxBranchSel);	/* Branch on Tx abort */
	YF_OUTL(0x00400040, ioaddr + TxWaitSel); 	/* Wait on Tx status */
	YF_OUTL(0x00400040, ioaddr + RxIntrSel);	/* Interrupt on Rx done */
	YF_OUTL(0x00400040, ioaddr + RxBranchSel);	/* Branch on Rx error */
	YF_OUTL(0x00400040, ioaddr + RxWaitSel);	/* Wait on Rx done */

	/* Initialize other registers: with so many this eventually this will
	   converted to an offset/value list. */
	YF_OUTL(dma_ctrl, ioaddr + DMACtrl);
	YF_OUTW(fifo_cfg, ioaddr + FIFOcfg);
	/* Enable automatic generation of flow control frames, period 0xffff. */
	YF_OUTL(0x0030FFFF, ioaddr + FlowCtrl);

	yp->tx_threshold = 32;
	YF_OUTL(yp->tx_threshold, ioaddr + TxThreshold);

	if (dev->if_port == 0)
		dev->if_port = yp->default_port;

	netif_start_queue (dev);

	/* Setting the Rx mode will start the Rx process. */
	if (yp->flags & IsGigabit) {
		/* We are always in full-duplex mode with gigabit! */
		yp->full_duplex = 1;
		YF_OUTW(0x01CF, ioaddr + Cnfg);
	} else {
		YF_OUTW(0x0018, ioaddr + FrameGap0); /* 0060/4060 for non-MII 10baseT */
		YF_OUTW(0x1018, ioaddr + FrameGap1);
		YF_OUTW(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
	}
	set_rx_mode(dev);

	/* Enable interrupts by setting the interrupt mask. */
	YF_OUTW(0x81ff, ioaddr + IntrEnb);			/* See enum intr_status_bits */
	YF_OUTW(0x0000, ioaddr + EventStatus);		/* Clear non-interrupting events */
	YF_OUTL(0x80008000, ioaddr + RxCtrl);		/* Start Rx and Tx channels. */
	YF_OUTL(0x80008000, ioaddr + TxCtrl);

	if (yellowfin_debug > 2) {
		printk(KERN_DEBUG "%s: Done yellowfin_open().\n",
			   dev->name);
	}
	/* Set the timer to check for link beat. */
	init_timer(&yp->timer);
	yp->timer.expires = jiffies + 3*HZ;
	yp->timer.data = (unsigned long)dev;
	yp->timer.function = &yellowfin_timer;				/* timer handler */
	add_timer(&yp->timer);

	return 0;
}
/* Periodic timer: polls the MII PHY for link/duplex changes, updates the
   Cnfg register's duplex bit, and re-arms itself (3s while link is down,
   60s once MII reports link up). */
static void yellowfin_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int next_tick = 60*HZ;

	if (yellowfin_debug > 3) {
		printk(KERN_DEBUG "%s: Yellowfin timer tick, status %8.8x.\n",
			   dev->name, YF_INW(ioaddr + IntrStatus));
	}
	if (yp->mii_cnt) {
		int mii_reg1 = mdio_read(ioaddr, yp->phys[0], 1);	/* BMSR */
		int mii_reg5 = mdio_read(ioaddr, yp->phys[0], 5);	/* LPA */
		int negotiated = mii_reg5 & yp->advertising;
		if (yellowfin_debug > 1)
			printk(KERN_DEBUG "%s: MII #%d status register is %4.4x, "
				   "link partner capability %4.4x.\n",
				   dev->name, yp->phys[0], mii_reg1, mii_reg5);

		/* Adopt full duplex when both ends negotiated 100baseTx-FD or
		   10baseT-FD, unless the duplex setting is locked. */
		if ( ! yp->duplex_lock &&
			 ((negotiated & 0x0300) == 0x0100
			  || (negotiated & 0x00C0) == 0x0040)) {
			yp->full_duplex = 1;
		}
		YF_OUTW(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);

		if (mii_reg1 & 0x0004)		/* Link established: poll slowly. */
			next_tick = 60*HZ;
		else
			next_tick = 3*HZ;
	}

	yp->timer.expires = jiffies + next_tick;
	add_timer(&yp->timer);
}
/* Called by the network layer when a transmit has not completed within
   TX_TIMEOUT jiffies: log the chip and ring state, kick the Tx channel,
   and wake the queue if there is room. */
static void yellowfin_tx_timeout(struct net_device *dev)
{
	struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
	long ioaddr = dev->base_addr;

	printk(KERN_WARNING "%s: Yellowfin transmit timed out at %d/%d Tx "
		   "status %4.4x, Rx status %4.4x, resetting...\n",
		   dev->name, yp->cur_tx, yp->dirty_tx,
		   YF_INL(ioaddr + TxStatus), YF_INL(ioaddr + RxStatus));

	/* Note: these should be KERN_DEBUG. */
	if (yellowfin_debug) {
		int i;
		printk(KERN_WARNING "  Rx ring %p: ", yp->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++)
			printk(" %8.8x", yp->rx_ring[i].result_status);
		printk("\n"KERN_WARNING"  Tx ring %p: ", yp->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(" %4.4x /%8.8x", yp->tx_status[i].tx_errs,
				   yp->tx_ring[i].result_status);
		printk("\n");
	}

	/* If the hardware is found to hang regularly, we will update the code
	   to reinitialize the chip here. */
	dev->if_port = 0;

	/* Wake the potentially-idle transmit channel. */
	YF_OUTL(0x10001000, dev->base_addr + TxCtrl);
	if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
		netif_wake_queue (dev);		/* Typical path */

	dev->trans_start = jiffies;
	yp->stats.tx_errors++;
	return;
}
570 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
571 static void yellowfin_init_ring(struct net_device *dev)
573 struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
574 int i;
576 yp->tx_full = 0;
577 yp->cur_rx = yp->cur_tx = 0;
578 yp->dirty_tx = 0;
580 yp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
581 yp->rx_head_desc = &yp->rx_ring[0];
583 for (i = 0; i < RX_RING_SIZE; i++) {
584 yp->rx_ring[i].dbdma_cmd =
585 cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
586 yp->rx_ring[i].branch_addr = virt_to_le32desc(&yp->rx_ring[i+1]);
588 /* Mark the last entry as wrapping the ring. */
589 yp->rx_ring[i-1].branch_addr = virt_to_le32desc(&yp->rx_ring[0]);
591 for (i = 0; i < RX_RING_SIZE; i++) {
592 struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz);
593 yp->rx_skbuff[i] = skb;
594 if (skb == NULL)
595 break;
596 skb->dev = dev; /* Mark as being used by this device. */
597 skb_reserve(skb, 2); /* 16 byte align the IP header. */
598 yp->rx_ring[i].addr = virt_to_le32desc(skb->tail);
600 yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP);
601 yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
603 #define NO_TXSTATS
604 #ifdef NO_TXSTATS
605 /* In this mode the Tx ring needs only a single descriptor. */
606 for (i = 0; i < TX_RING_SIZE; i++) {
607 yp->tx_skbuff[i] = 0;
608 yp->tx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
609 yp->tx_ring[i].branch_addr = virt_to_le32desc(&yp->tx_ring[i+1]);
611 /* Wrap ring */
612 yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS);
613 yp->tx_ring[i].branch_addr = virt_to_le32desc(&yp->tx_ring[0]);
614 #else
615 /* Tx ring needs a pair of descriptors, the second for the status. */
616 for (i = 0; i < TX_RING_SIZE*2; i++) {
617 yp->tx_skbuff[i/2] = 0;
618 /* Branch on Tx error. */
619 yp->tx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
620 yp->tx_ring[i].branch_addr = virt_to_le32desc(&yp->tx_ring[i+1]);
621 i++;
622 if (yp->flags & FullTxStatus) {
623 yp->tx_ring[i].dbdma_cmd =
624 cpu_to_le32(CMD_TXSTATUS | sizeof(yp->tx_status[i]));
625 yp->tx_ring[i].request_cnt = sizeof(yp->tx_status[i]);
626 yp->tx_ring[i].addr = virt_to_le32desc(&yp->tx_status[i/2]);
627 } else { /* Symbios chips write only tx_errs word. */
628 yp->tx_ring[i].dbdma_cmd =
629 cpu_to_le32(CMD_TXSTATUS | INTR_ALWAYS | 2);
630 yp->tx_ring[i].request_cnt = 2;
631 yp->tx_ring[i].addr = virt_to_le32desc(&yp->tx_status[i/2].tx_errs);
633 yp->tx_ring[i].branch_addr = virt_to_le32desc(&yp->tx_ring[i+1]);
635 /* Wrap ring */
636 yp->tx_ring[--i].dbdma_cmd |= cpu_to_le32(BRANCH_ALWAYS | INTR_ALWAYS);
637 yp->tx_ring[i].branch_addr = virt_to_le32desc(&yp->tx_ring[0]);
638 #endif
639 yp->tx_tail_desc = &yp->tx_status[0];
640 return;
/* Queue one packet for transmission.  Fills the next Tx descriptor and
   converts the previously-terminating 'stop' descriptor into a transmit
   command (ownership handoff is via the dbdma_cmd write order), then kicks
   the Tx channel.  Always returns 0; the queue is re-enabled unless the
   ring is full. */
static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
	unsigned entry;

	netif_stop_queue (dev);

	/* Caution: the write order is important here, set the base address
	   with the "ownership" bits last. */

	/* Calculate the next Tx descriptor entry. */
	entry = yp->cur_tx % TX_RING_SIZE;

	yp->tx_skbuff[entry] = skb;

	if (gx_fix) {	/* Note: only works for paddable protocols e.g. IP. */
		int cacheline_end = (virt_to_bus(skb->data) + skb->len) % 32;
		/* Fix GX chipset errata. */
		if (cacheline_end > 24  || cacheline_end == 0)
			skb->len += 32 - cacheline_end + 1;
	}
#ifdef NO_TXSTATS
	yp->tx_ring[entry].addr = virt_to_le32desc(skb->data);
	yp->tx_ring[entry].result_status = 0;
	if (entry >= TX_RING_SIZE-1) {
		/* New stop command. */
		yp->tx_ring[0].dbdma_cmd = cpu_to_le32(CMD_STOP);
		yp->tx_ring[TX_RING_SIZE-1].dbdma_cmd =
			cpu_to_le32(CMD_TX_PKT|BRANCH_ALWAYS | skb->len);
	} else {
		yp->tx_ring[entry+1].dbdma_cmd = cpu_to_le32(CMD_STOP);
		yp->tx_ring[entry].dbdma_cmd =
			cpu_to_le32(CMD_TX_PKT | BRANCH_IFTRUE | skb->len);
	}
	yp->cur_tx++;
#else
	yp->tx_ring[entry<<1].request_cnt = skb->len;
	yp->tx_ring[entry<<1].addr = virt_to_le32desc(skb->data);
	/* The input_last (status-write) command is constant, but we must rewrite
	   the subsequent 'stop' command. */

	yp->cur_tx++;
	{
		unsigned next_entry = yp->cur_tx % TX_RING_SIZE;
		yp->tx_ring[next_entry<<1].dbdma_cmd = cpu_to_le32(CMD_STOP);
	}
	/* Final step -- overwrite the old 'stop' command. */
	/* Interrupt on every sixth packet to bound reclaim latency. */
	yp->tx_ring[entry<<1].dbdma_cmd =
		cpu_to_le32( ((entry % 6) == 0 ? CMD_TX_PKT|INTR_ALWAYS|BRANCH_IFTRUE :
					  CMD_TX_PKT | BRANCH_IFTRUE) | skb->len);
#endif

	/* Non-x86 Todo: explicitly flush cache lines here. */

	/* Wake the potentially-idle transmit channel. */
	YF_OUTL(0x10001000, dev->base_addr + TxCtrl);

	if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
		netif_start_queue (dev);		/* Typical path */
	else
		yp->tx_full = 1;
	dev->trans_start = jiffies;

	if (yellowfin_debug > 4) {
		printk(KERN_DEBUG "%s: Yellowfin transmit frame #%d queued in slot %d.\n",
			   dev->name, yp->cur_tx, entry);
	}
	return 0;
}
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread.  Loops until the (acknowledge-on-read) IntrClear
   register reports no pending sources or the max_interrupt_work budget is
   exhausted; serialized against itself/close via yp->lock. */
static void yellowfin_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct yellowfin_private *yp;
	long ioaddr, boguscnt = max_interrupt_work;

#ifndef final_version			/* Can never occur. */
	if (dev == NULL) {
		printk (KERN_ERR "yellowfin_interrupt(): irq %d for unknown device.\n", irq);
		return;
	}
#endif

	ioaddr = dev->base_addr;
	yp = (struct yellowfin_private *)dev->priv;

	spin_lock (&yp->lock);

	do {
		/* Reading IntrClear returns and acknowledges the pending sources. */
		u16 intr_status = YF_INW(ioaddr + IntrClear);

		if (yellowfin_debug > 4)
			printk(KERN_DEBUG "%s: Yellowfin interrupt, status %4.4x.\n",
				   dev->name, intr_status);

		if (intr_status == 0)
			break;

		if (intr_status & (IntrRxDone | IntrEarlyRx)) {
			yellowfin_rx(dev);
			YF_OUTL(0x10001000, ioaddr + RxCtrl);		/* Wake Rx engine. */
		}

#ifdef NO_TXSTATS
		/* Reclaim completed Tx slots: the chip writes a non-zero
		   result_status when a transmit finishes. */
		for (; yp->cur_tx - yp->dirty_tx > 0; yp->dirty_tx++) {
			int entry = yp->dirty_tx % TX_RING_SIZE;
			if (yp->tx_ring[entry].result_status == 0)
				break;
			yp->stats.tx_bytes += yp->tx_skbuff[entry]->len;
			yp->stats.tx_packets++;
			/* Free the original skb. */
			dev_kfree_skb_irq(yp->tx_skbuff[entry]);
			yp->tx_skbuff[entry] = 0;
		}
		if (yp->tx_full
			&& yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4) {
			/* The ring is no longer full, clear tbusy. */
			yp->tx_full = 0;
		}
		if (yp->tx_full)
			netif_stop_queue(dev);
		else
			netif_wake_queue(dev);
#else
		if (intr_status & IntrTxDone
			|| yp->tx_tail_desc->tx_errs) {
			unsigned dirty_tx = yp->dirty_tx;

			for (dirty_tx = yp->dirty_tx; yp->cur_tx - dirty_tx > 0;
				 dirty_tx++) {
				/* Todo: optimize this. */
				int entry = dirty_tx % TX_RING_SIZE;
				u16 tx_errs = yp->tx_status[entry].tx_errs;

#ifndef final_version
				if (yellowfin_debug > 5)
					printk(KERN_DEBUG "%s: Tx queue %d check, Tx status "
						   "%4.4x %4.4x %4.4x %4.4x.\n",
						   dev->name, entry,
						   yp->tx_status[entry].tx_cnt,
						   yp->tx_status[entry].tx_errs,
						   yp->tx_status[entry].total_tx_cnt,
						   yp->tx_status[entry].paused);
#endif
				if (tx_errs == 0)
					break;			/* It still hasn't been Txed */
				if (tx_errs & 0xF810) {
					/* There was an major error, log it. */
#ifndef final_version
					if (yellowfin_debug > 1)
						printk(KERN_DEBUG "%s: Transmit error, Tx status %4.4x.\n",
							   dev->name, tx_errs);
#endif
					yp->stats.tx_errors++;
					if (tx_errs & 0xF800) yp->stats.tx_aborted_errors++;
					if (tx_errs & 0x0800) yp->stats.tx_carrier_errors++;
					if (tx_errs & 0x2000) yp->stats.tx_window_errors++;
					if (tx_errs & 0x8000) yp->stats.tx_fifo_errors++;
#ifdef ETHER_STATS
					if (tx_errs & 0x1000) yp->stats.collisions16++;
#endif
				} else {
#ifndef final_version
					if (yellowfin_debug > 4)
						printk(KERN_DEBUG "%s: Normal transmit, Tx status %4.4x.\n",
							   dev->name, tx_errs);
#endif
#ifdef ETHER_STATS
					if (tx_errs & 0x0400) yp->stats.tx_deferred++;
#endif
					yp->stats.tx_bytes += yp->tx_skbuff[entry]->len;
					yp->stats.collisions += tx_errs & 15;
					yp->stats.tx_packets++;
				}
				/* Free the original skb. */
				dev_kfree_skb_irq(yp->tx_skbuff[entry]);
				yp->tx_skbuff[entry] = 0;
				/* Mark status as empty. */
				yp->tx_status[entry].tx_errs = 0;
			}

#ifndef final_version
			if (yp->cur_tx - dirty_tx > TX_RING_SIZE) {
				printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
					   dev->name, dirty_tx, yp->cur_tx, yp->tx_full);
				dirty_tx += TX_RING_SIZE;
			}
#endif

			if (yp->tx_full
				&& yp->cur_tx - dirty_tx < TX_QUEUE_SIZE - 2) {
				/* The ring is no longer full, clear tbusy. */
				yp->tx_full = 0;
			}
			if (yp->tx_full)
				netif_stop_queue(dev);
			else
				netif_wake_queue(dev);

			yp->dirty_tx = dirty_tx;
			yp->tx_tail_desc = &yp->tx_status[dirty_tx % TX_RING_SIZE];
		}
#endif

		/* Log errors and other uncommon events. */
		if (intr_status & 0x2ee)	/* Abnormal error summary. */
			yellowfin_error(dev, intr_status);

		if (--boguscnt < 0) {
			printk(KERN_WARNING "%s: Too much work at interrupt, status=0x%4.4x.\n",
				   dev->name, intr_status);
			break;
		}
	} while (1);

	if (yellowfin_debug > 3)
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
			   dev->name, YF_INW(ioaddr + IntrStatus));

	/* Code that should never be run!  Perhaps remove after testing.. */
	{
		static int stopit = 10;
		if ((!(netif_running(dev)))  &&  --stopit < 0) {
			printk(KERN_ERR "%s: Emergency stop, looping startup interrupt.\n",
				   dev->name);
			free_irq(irq, dev);
		}
	}

	spin_unlock (&yp->lock);
}
878 /* This routine is logically part of the interrupt handler, but separated
879 for clarity and better register allocation. */
880 static int yellowfin_rx(struct net_device *dev)
882 struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
883 int entry = yp->cur_rx % RX_RING_SIZE;
884 int boguscnt = 20;
886 if (yellowfin_debug > 4) {
887 printk(KERN_DEBUG " In yellowfin_rx(), entry %d status %8.8x.\n",
888 entry, yp->rx_ring[entry].result_status);
889 printk(KERN_DEBUG " #%d desc. %8.8x %8.8x %8.8x.\n",
890 entry, yp->rx_ring[entry].dbdma_cmd, yp->rx_ring[entry].addr,
891 yp->rx_ring[entry].result_status);
894 /* If EOP is set on the next entry, it's a new packet. Send it up. */
895 while (yp->rx_head_desc->result_status) {
896 struct yellowfin_desc *desc = yp->rx_head_desc;
897 u16 desc_status = le32_to_cpu(desc->result_status) >> 16;
898 int data_size =
899 (le32_to_cpu(desc->dbdma_cmd) - le32_to_cpu(desc->result_status))
900 & 0xffff;
901 u8 *buf_addr = le32desc_to_virt(desc->addr);
902 s16 frame_status = get_unaligned((s16*)&(buf_addr[data_size - 2]));
904 if (yellowfin_debug > 4)
905 printk(KERN_DEBUG " yellowfin_rx() status was %4.4x.\n",
906 frame_status);
907 if (--boguscnt < 0)
908 break;
909 if ( ! (desc_status & RX_EOP)) {
910 printk(KERN_WARNING "%s: Oversized Ethernet frame spanned multiple buffers,"
911 " status %4.4x!\n", dev->name, desc_status);
912 yp->stats.rx_length_errors++;
913 } else if ((yp->flags & IsGigabit) && (frame_status & 0x0038)) {
914 /* There was a error. */
915 if (yellowfin_debug > 3)
916 printk(KERN_DEBUG " yellowfin_rx() Rx error was %4.4x.\n",
917 frame_status);
918 yp->stats.rx_errors++;
919 if (frame_status & 0x0060) yp->stats.rx_length_errors++;
920 if (frame_status & 0x0008) yp->stats.rx_frame_errors++;
921 if (frame_status & 0x0010) yp->stats.rx_crc_errors++;
922 if (frame_status < 0) yp->stats.rx_dropped++;
923 } else if ( !(yp->flags & IsGigabit) &&
924 ((buf_addr[data_size-1] & 0x85) || buf_addr[data_size-2] & 0xC0)) {
925 u8 status1 = buf_addr[data_size-2];
926 u8 status2 = buf_addr[data_size-1];
927 yp->stats.rx_errors++;
928 if (status1 & 0xC0) yp->stats.rx_length_errors++;
929 if (status2 & 0x03) yp->stats.rx_frame_errors++;
930 if (status2 & 0x04) yp->stats.rx_crc_errors++;
931 if (status2 & 0x80) yp->stats.rx_dropped++;
932 #ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */
933 } else if ((yp->flags & HasMACAddrBug) &&
934 memcmp(le32desc_to_virt(yp->rx_ring[entry].addr),
935 dev->dev_addr, 6) != 0
936 && memcmp(le32desc_to_virt(yp->rx_ring[entry].addr),
937 "\377\377\377\377\377\377", 6) != 0) {
938 if (bogus_rx++ == 0)
939 printk(KERN_WARNING "%s: Bad frame to %2.2x:%2.2x:%2.2x:%2.2x:"
940 "%2.2x:%2.2x.\n",
941 dev->name, buf_addr[0], buf_addr[1], buf_addr[2],
942 buf_addr[3], buf_addr[4], buf_addr[5]);
943 #endif
944 } else {
945 struct sk_buff *skb;
946 int pkt_len = data_size -
947 (yp->chip_id ? 7 : 8 + buf_addr[data_size - 8]);
948 /* To verify: Yellowfin Length should omit the CRC! */
950 #ifndef final_version
951 if (yellowfin_debug > 4)
952 printk(KERN_DEBUG " yellowfin_rx() normal Rx pkt length %d"
953 " of %d, bogus_cnt %d.\n",
954 pkt_len, data_size, boguscnt);
955 #endif
956 /* Check if the packet is long enough to just pass up the skbuff
957 without copying to a properly sized skbuff. */
958 if (pkt_len > rx_copybreak) {
959 char *temp = skb_put(skb = yp->rx_skbuff[entry], pkt_len);
960 #ifndef final_verison /* Remove after testing. */
961 if (le32desc_to_virt(yp->rx_ring[entry].addr) != temp)
962 printk(KERN_WARNING "%s: Warning -- the skbuff addresses "
963 "do not match in yellowfin_rx: %p vs. %p / %p.\n",
964 dev->name, le32desc_to_virt(yp->rx_ring[entry].addr),
965 skb->head, temp);
966 #endif
967 yp->rx_skbuff[entry] = NULL;
968 } else {
969 skb = dev_alloc_skb(pkt_len + 2);
970 if (skb == NULL)
971 break;
972 skb->dev = dev;
973 skb_reserve(skb, 2); /* 16 byte align the data fields */
974 #if 1 || USE_IP_CSUM
975 eth_copy_and_sum(skb, yp->rx_skbuff[entry]->tail, pkt_len, 0);
976 skb_put(skb, pkt_len);
977 #else
978 memcpy(skb_put(skb, pkt_len), yp->rx_skbuff[entry]->tail,
979 pkt_len);
980 #endif
982 skb->protocol = eth_type_trans(skb, dev);
983 netif_rx(skb);
984 dev->last_rx = jiffies;
985 yp->stats.rx_packets++;
986 yp->stats.rx_bytes += pkt_len;
988 entry = (++yp->cur_rx) % RX_RING_SIZE;
989 yp->rx_head_desc = &yp->rx_ring[entry];
992 /* Refill the Rx ring buffers. */
993 for (; yp->cur_rx - yp->dirty_rx > 0; yp->dirty_rx++) {
994 entry = yp->dirty_rx % RX_RING_SIZE;
995 if (yp->rx_skbuff[entry] == NULL) {
996 struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz);
997 if (skb == NULL)
998 break; /* Better luck next round. */
999 yp->rx_skbuff[entry] = skb;
1000 skb->dev = dev; /* Mark as being used by this device. */
1001 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1002 yp->rx_ring[entry].addr = virt_to_le32desc(skb->tail);
1004 yp->rx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_STOP);
1005 yp->rx_ring[entry].result_status = 0; /* Clear complete bit. */
1006 if (entry != 0)
1007 yp->rx_ring[entry - 1].dbdma_cmd =
1008 cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
1009 else
1010 yp->rx_ring[RX_RING_SIZE - 1].dbdma_cmd =
1011 cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | BRANCH_ALWAYS
1012 | yp->rx_buf_sz);
1015 return 0;
1018 static void yellowfin_error(struct net_device *dev, int intr_status)
1020 struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
1022 printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
1023 dev->name, intr_status);
1024 /* Hmmmmm, it's not clear what to do here. */
1025 if (intr_status & (IntrTxPCIErr | IntrTxPCIFault))
1026 yp->stats.tx_errors++;
1027 if (intr_status & (IntrRxPCIErr | IntrRxPCIFault))
1028 yp->stats.rx_errors++;
/*
 * The inverse of yellowfin_open(): stop the interface.
 * Halts the transmit queue, masks interrupts, stops both DMA engines,
 * kills the media timer, optionally dumps the descriptor rings for
 * debugging, releases the IRQ, and frees every skbuff still held in
 * the Rx and Tx rings.  Always returns 0.
 */
static int yellowfin_close(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
	int i;

	netif_stop_queue (dev);

	if (yellowfin_debug > 1) {
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %4.4x Rx %4.4x Int %2.2x.\n",
			   dev->name, YF_INW(ioaddr + TxStatus),
			   YF_INW(ioaddr + RxStatus),
			   YF_INW(ioaddr + IntrStatus));
		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
			   dev->name, yp->cur_tx, yp->dirty_tx, yp->cur_rx, yp->dirty_rx);
	}

	/* Disable interrupts by clearing the interrupt mask. */
	YF_OUTW(0x0000, ioaddr + IntrEnb);

	/* Stop the chip's Tx and Rx processes.  Writing bit 31 to the
	   control registers halts the respective DMA engine. */
	YF_OUTL(0x80000000, ioaddr + RxCtrl);
	YF_OUTL(0x80000000, ioaddr + TxCtrl);

	del_timer(&yp->timer);

#if !defined(final_version) && defined(__i386__)
	/* Development aid: dump both descriptor rings on shutdown. */
	if (yellowfin_debug > 2) {
		printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n", (int)virt_to_bus(yp->tx_ring));
		for (i = 0; i < TX_RING_SIZE*2; i++)
			printk(" %c #%d desc. %8.8x %8.8x %8.8x %8.8x.\n",
				   YF_INL(ioaddr + TxPtr) == (long)&yp->tx_ring[i] ? '>' : ' ',
				   i, yp->tx_ring[i].dbdma_cmd, yp->tx_ring[i].addr,
				   yp->tx_ring[i].branch_addr, yp->tx_ring[i].result_status);
		printk(KERN_DEBUG " Tx status %p:\n", yp->tx_status);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(" #%d status %4.4x %4.4x %4.4x %4.4x.\n",
				   i, yp->tx_status[i].tx_cnt, yp->tx_status[i].tx_errs,
				   yp->tx_status[i].total_tx_cnt, yp->tx_status[i].paused);

		printk("\n"KERN_DEBUG " Rx ring %8.8x:\n", (int)virt_to_bus(yp->rx_ring));
		for (i = 0; i < RX_RING_SIZE; i++) {
			printk(KERN_DEBUG " %c #%d desc. %8.8x %8.8x %8.8x\n",
				   YF_INL(ioaddr + RxPtr) == (long)&yp->rx_ring[i] ? '>' : ' ',
				   i, yp->rx_ring[i].dbdma_cmd, yp->rx_ring[i].addr,
				   yp->rx_ring[i].result_status);
			if (yellowfin_debug > 6) {
				/* NOTE(review): 0x69 appears to be a fill byte used
				   to mark untouched buffers -- confirm against the
				   ring-init code outside this chunk. */
				if (get_unaligned((u8*)yp->rx_ring[i].addr) != 0x69) {
					int j;
					for (j = 0; j < 0x50; j++)
						printk(" %4.4x",
							   get_unaligned(((u16*)yp->rx_ring[i].addr) + j));
					printk("\n");
				}
			}
		}
	}
#endif /* __i386__ debugging only */

	free_irq(dev->irq, dev);

	/* Free all the skbuffs in the Rx queue, poisoning the descriptor
	   addresses so any late DMA access is conspicuous. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		yp->rx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
		yp->rx_ring[i].addr = 0xBADF00D0; /* An invalid address. */
		if (yp->rx_skbuff[i]) {
			dev_kfree_skb(yp->rx_skbuff[i]);
		}
		yp->rx_skbuff[i] = 0;
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		if (yp->tx_skbuff[i])
			dev_kfree_skb(yp->tx_skbuff[i]);
		yp->tx_skbuff[i] = 0;
	}

#ifdef YF_PROTOTYPE			/* Support for prototype hardware errata. */
	if (yellowfin_debug > 0) {
		printk(KERN_DEBUG "%s: Received %d frames that we should not have.\n",
			   dev->name, bogus_rx);
	}
#endif
	MOD_DEC_USE_COUNT;

	return 0;
}
1118 static struct net_device_stats *yellowfin_get_stats(struct net_device *dev)
1120 struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
1121 return &yp->stats;
1124 /* Set or clear the multicast filter for this adaptor. */
/* The little-endian AUTODIN32 ethernet CRC calculation.
   N.B. Do not use for bulk data, use a table-based routine instead.
   This is common code and should be moved to net/core/crc.c */
static unsigned const ethernet_polynomial_le = 0xedb88320U;

/* Compute the (non-inverted) little-endian CRC-32 of LENGTH bytes at
   DATA, one bit at a time.  Starts from the standard all-ones preset;
   the caller extracts the filter-hash bits from the result. */
static inline unsigned ether_crc_le(int length, unsigned char *data)
{
	unsigned int crc = 0xffffffff;	/* Initial value. */
	int i;

	for (i = 0; i < length; i++) {
		unsigned char octet = data[i];
		int bit;

		for (bit = 0; bit < 8; bit++) {
			unsigned int mix = (crc ^ octet) & 1;

			crc >>= 1;
			if (mix)
				crc ^= ethernet_polynomial_le;
			octet >>= 1;
		}
	}
	return crc;
}
/* Set or clear the multicast filter for this adaptor.
 * Programs the chip's AddrMode register for promiscuous / all-multicast /
 * hash-filtered / unicast-only reception, pausing the Rx engine while
 * the configuration changes.
 */
static void set_rx_mode(struct net_device *dev)
{
	struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
	long ioaddr = dev->base_addr;
	u16 cfg_value = YF_INW(ioaddr + Cnfg);

	/* Stop the Rx process to change any value. */
	YF_OUTW(cfg_value & ~0x1000, ioaddr + Cnfg);
	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
		/* Unconditionally log net taps. */
		printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
		YF_OUTW(0x000F, ioaddr + AddrMode);
	} else if ((dev->mc_count > 64) || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter well, or accept all multicasts. */
		YF_OUTW(0x000B, ioaddr + AddrMode);
	} else if (dev->mc_count > 0) { /* Must use the multicast hash table. */
		struct dev_mc_list *mclist;
		u16 hash_table[4];	/* 64-bit hash filter, written as 4 u16s. */
		int i;
		memset(hash_table, 0, sizeof(hash_table));
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
			 i++, mclist = mclist->next) {
			/* Due to a bug in the early chip versions, multiple filter
			   slots must be set for each address: the CRC over the
			   3-, 4- and 5-byte prefixes as well as the full 6 bytes. */
			if (yp->flags & HasMulticastBug) {
				set_bit((ether_crc_le(3, mclist->dmi_addr) >> 3) & 0x3f,
						hash_table);
				set_bit((ether_crc_le(4, mclist->dmi_addr) >> 3) & 0x3f,
						hash_table);
				set_bit((ether_crc_le(5, mclist->dmi_addr) >> 3) & 0x3f,
						hash_table);
			}
			/* The normal slot, set for every chip revision. */
			set_bit((ether_crc_le(6, mclist->dmi_addr) >> 3) & 0x3f,
					hash_table);
		}
		/* Copy the hash table to the chip. */
		for (i = 0; i < 4; i++)
			YF_OUTW(hash_table[i], ioaddr + HashTbl + i*2);
		YF_OUTW(0x0003, ioaddr + AddrMode);
	} else {				/* Normal, unicast/broadcast-only mode. */
		YF_OUTW(0x0001, ioaddr + AddrMode);
	}
	/* Restart the Rx process. */
	YF_OUTW(cfg_value | 0x1000, ioaddr + Cnfg);
}
#ifdef HAVE_PRIVATE_IOCTL
/* Private ioctl interface for MII PHY access.
 * data[0] = PHY address, data[1] = register number,
 * data[2] = value to write, data[3] = value read back.
 * Note the deliberate fall-through: "get PHY" also returns a read of
 * the selected register.
 */
static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	long ioaddr = dev->base_addr;
	u16 *data = (u16 *)&rq->ifr_data;

	switch(cmd) {
	case SIOCDEVPRIVATE:		/* Get the address of the PHY in use. */
		data[0] = ((struct yellowfin_private *)dev->priv)->phys[0] & 0x1f;
		/* Fall Through */
	case SIOCDEVPRIVATE+1:		/* Read the specified MII register. */
		data[3] = mdio_read(ioaddr, data[0] & 0x1f, data[1] & 0x1f);
		return 0;
	case SIOCDEVPRIVATE+2:		/* Write the specified MII register */
		/* Writes can reconfigure the link; restrict to net admin. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		mdio_write(ioaddr, data[0] & 0x1f, data[1] & 0x1f, data[2]);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
#endif	/* HAVE_PRIVATE_IOCTL */
1220 static int __devinit yellowfin_init_one(struct pci_dev *pdev,
1221 const struct pci_device_id *ent)
1223 struct net_device *dev;
1224 struct yellowfin_private *yp;
1225 int option, i, irq;
1226 int flags, chip_idx;
1227 static int find_cnt = 0;
1228 long ioaddr, real_ioaddr;
1230 chip_idx = ent->driver_data;
1231 flags = chip_info[chip_idx].flags;
1233 dev = init_etherdev(NULL, 0);
1234 if (!dev) {
1235 printk (KERN_ERR PFX "cannot allocate ethernet device\n");
1236 return -ENOMEM;
1239 dev->priv = kmalloc(sizeof(*yp) + PRIV_ALIGN, GFP_KERNEL);
1240 if (!dev->priv)
1241 goto err_out_free_netdev;
1242 yp = (void *)(((long)dev->priv + PRIV_ALIGN) & ~PRIV_ALIGN);
1243 memset(yp, 0, sizeof(*yp));
1244 yp->priv_addr = dev->priv; /* store real addr for kfree */
1245 dev->priv = yp; /* use aligned addr */
1247 if (!request_region (pci_resource_start (pdev, 0),
1248 YELLOWFIN_SIZE, YELLOWFIN_MODULE_NAME)) {
1249 printk (KERN_ERR PFX "cannot obtain I/O port region\n");
1250 goto err_out_free_priv;
1252 if (!request_mem_region (pci_resource_start (pdev, 1),
1253 YELLOWFIN_SIZE, YELLOWFIN_MODULE_NAME)) {
1254 printk (KERN_ERR PFX "cannot obtain MMIO region\n");
1255 goto err_out_free_pio_region;
1258 pci_enable_device (pdev);
1259 pci_set_master (pdev);
1261 #ifdef USE_IO_OPS
1262 real_ioaddr = ioaddr = pci_resource_start (pdev, 0);
1263 #else
1264 real_ioaddr = ioaddr = pci_resource_start (pdev, 1);
1265 ioaddr = (long) ioremap(ioaddr, YELLOWFIN_SIZE);
1266 #endif
1267 irq = pdev->irq;
1269 printk(KERN_INFO "%s: %s type %8x at 0x%lx, ",
1270 dev->name, chip_info[chip_idx].name,
1271 YF_INL(ioaddr + ChipRev), real_ioaddr);
1273 if (flags & IsGigabit)
1274 for (i = 0; i < 6; i++)
1275 dev->dev_addr[i] = YF_INB(ioaddr + StnAddr + i);
1276 else {
1277 int ee_offset = (read_eeprom(ioaddr, 6) == 0xff ? 0x100 : 0);
1278 for (i = 0; i < 6; i++)
1279 dev->dev_addr[i] = read_eeprom(ioaddr, ee_offset + i);
1281 for (i = 0; i < 5; i++)
1282 printk("%2.2x:", dev->dev_addr[i]);
1283 printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
1285 /* Reset the chip. */
1286 YF_OUTL(0x80000000, ioaddr + DMACtrl);
1288 dev->base_addr = ioaddr;
1289 dev->irq = irq;
1291 pdev->driver_data = dev;
1292 yp->chip_id = chip_idx;
1293 yp->flags = flags;
1294 yp->lock = SPIN_LOCK_UNLOCKED;
1296 option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
1297 if (dev->mem_start)
1298 option = dev->mem_start;
1300 /* The lower four bits are the media type. */
1301 if (option > 0) {
1302 if (option & 0x200)
1303 yp->full_duplex = 1;
1304 yp->default_port = option & 15;
1305 if (yp->default_port)
1306 yp->medialock = 1;
1308 if (find_cnt < MAX_UNITS && full_duplex[find_cnt] > 0)
1309 yp->full_duplex = 1;
1311 if (yp->full_duplex)
1312 yp->duplex_lock = 1;
1314 /* The Yellowfin-specific entries in the device structure. */
1315 dev->open = &yellowfin_open;
1316 dev->hard_start_xmit = &yellowfin_start_xmit;
1317 dev->stop = &yellowfin_close;
1318 dev->get_stats = &yellowfin_get_stats;
1319 dev->set_multicast_list = &set_rx_mode;
1320 #ifdef HAVE_PRIVATE_IOCTL
1321 dev->do_ioctl = &mii_ioctl;
1322 #endif
1323 dev->tx_timeout = yellowfin_tx_timeout;
1324 dev->watchdog_timeo = TX_TIMEOUT;
1326 if (mtu)
1327 dev->mtu = mtu;
1329 if (yp->flags & HasMII) {
1330 int phy, phy_idx = 0;
1331 for (phy = 0; phy < 32 && phy_idx < 4; phy++) {
1332 int mii_status = mdio_read(ioaddr, phy, 1);
1333 if (mii_status != 0xffff &&
1334 mii_status != 0x0000) {
1335 yp->phys[phy_idx++] = phy;
1336 yp->advertising = mdio_read(ioaddr, phy, 4);
1337 printk(KERN_INFO "%s: MII PHY found at address %d, status "
1338 "0x%4.4x advertising %4.4x.\n",
1339 dev->name, phy, mii_status, yp->advertising);
1342 yp->mii_cnt = phy_idx;
1345 find_cnt++;
1347 return 0;
1349 err_out_free_pio_region:
1350 release_region (pci_resource_start (pdev, 0), YELLOWFIN_SIZE);
1351 err_out_free_priv:
1352 kfree (dev->priv);
1353 err_out_free_netdev:
1354 unregister_netdev (dev);
1355 kfree (dev);
1356 return -ENODEV;
1359 static void __devexit yellowfin_remove_one (struct pci_dev *pdev)
1361 struct net_device *dev = pdev->driver_data;
1362 struct yellowfin_private *np;
1364 if (!dev) {
1365 printk (KERN_ERR "remove non-existent device\n");
1366 return;
1368 np = (struct yellowfin_private *) dev->priv;
1370 unregister_netdev (dev);
1372 #ifdef USE_IO_OPS
1373 release_region (dev->base_addr, YELLOWFIN_SIZE);
1374 #else
1375 iounmap ((void *) dev->base_addr);
1376 release_mem_region (dev->base_addr, YELLOWFIN_SIZE);
1377 #endif
1379 if (np->priv_addr)
1380 kfree (np->priv_addr);
1382 kfree (dev);
/* PCI driver glue: device ID table and probe/remove entry points. */
static struct pci_driver yellowfin_driver = {
	name:		YELLOWFIN_MODULE_NAME,
	id_table:	yellowfin_pci_tbl,
	probe:		yellowfin_init_one,
	remove:		yellowfin_remove_one,
};
1394 static int __init yellowfin_init (void)
1396 if (debug) /* Emit version even if no cards detected. */
1397 printk(KERN_INFO "%s", version);
1399 if (pci_register_driver (&yellowfin_driver) > 0)
1400 return 0;
1402 pci_unregister_driver (&yellowfin_driver);
1403 return -ENODEV;
/* Module unload: detach the driver from the PCI core. */
static void __exit yellowfin_cleanup (void)
{
	pci_unregister_driver (&yellowfin_driver);
}
/* Standard module entry/exit hooks. */
module_init(yellowfin_init);
module_exit(yellowfin_cleanup);
1418 * Local variables:
1419 * compile-command: "gcc -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -c yellowfin.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
1420 * compile-command-alphaLX: "gcc -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O2 -c yellowfin.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS` -fomit-frame-pointer -fno-strength-reduce -mno-fp-regs -Wa,-m21164a -DBWX_USABLE -DBWIO_ENABLED"
1421 * SMP-compile-command: "gcc -D__SMP__ -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -c yellowfin.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
1422 * c-indent-level: 4
1423 * c-basic-offset: 4
1424 * tab-width: 4
1425 * End: