Ok. I didn't make 2.4.0 in 2000. Tough. I tried, but we had some
[davej-history.git] / drivers / net / yellowfin.c
blob9cda23333a6fdf8dde8213b546e9d18028b6b6ed
1 /* yellowfin.c: A Packet Engines G-NIC ethernet driver for linux. */
2 /*
3 Written 1997-1999 by Donald Becker.
5 This software may be used and distributed according to the terms
6 of the GNU Public License, incorporated herein by reference.
8 This driver is for the Packet Engines G-NIC PCI Gigabit Ethernet adapter.
9 It also supports the Symbios Logic version of the same chip core.
11 The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
12 Center of Excellence in Space Data and Information Sciences
13 Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
15 Support and updates available at
16 http://cesdis.gsfc.nasa.gov/linux/drivers/yellowfin.html
19 static const char *version =
20 "yellowfin.c:v1.03a 7/30/99 Written by Donald Becker, becker@cesdis.edu\n"
21 " http://cesdis.gsfc.nasa.gov/linux/drivers/yellowfin.html\n";
23 /* A few user-configurable values. */
25 static int debug = 1;
26 static int max_interrupt_work = 20;
27 static int mtu = 0;
28 #ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */
29 /* System-wide count of bogus-rx frames. */
30 static int bogus_rx = 0;
31 static int dma_ctrl = 0x004A0263; /* Constrained by errata */
32 static int fifo_cfg = 0x0020; /* Bypass external Tx FIFO. */
33 #elif YF_NEW /* A future perfect board :->. */
34 static int dma_ctrl = 0x00CAC277; /* Override when loading module! */
35 static int fifo_cfg = 0x0028;
36 #else
37 static int dma_ctrl = 0x004A0263; /* Constrained by errata */
38 static int fifo_cfg = 0x0020; /* Bypass external Tx FIFO. */
39 #endif
41 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
42 Setting to > 1514 effectively disables this feature. */
43 static int rx_copybreak = 0;
45 /* Used to pass the media type, etc.
46 No media types are currently defined. These exist for driver
47 interoperability.
49 #define MAX_UNITS 8 /* More are supported, limit only on options */
50 static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
51 static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
53 /* Do ugly workaround for GX server chipset errata. */
54 static int gx_fix = 0;
56 /* Operational parameters that are set at compile time. */
58 /* Keep the ring sizes a power of two for efficiency.
59 Making the Tx queue too long decreases the effectiveness of channel
60 bonding and packet priority.
61 There are no ill effects from too-large receive rings. */
62 #define TX_RING_SIZE 16
63 #define TX_QUEUE_SIZE 12 /* Must be > 4 && <= TX_RING_SIZE */
64 #define RX_RING_SIZE 64
66 /* Operational parameters that usually are not changed. */
67 /* Time in jiffies before concluding the transmitter is hung. */
68 #define TX_TIMEOUT (2*HZ)
70 #define yellowfin_debug debug
72 #if !defined(__OPTIMIZE__) || !defined(__KERNEL__)
73 #warning You must compile this file with the correct options!
74 #warning See the last lines of the source file.
75 #error You must compile this driver with "-O".
76 #endif
78 #include <linux/version.h>
79 #include <linux/module.h>
80 #include <linux/kernel.h>
81 #include <linux/string.h>
82 #include <linux/timer.h>
83 #include <linux/errno.h>
84 #include <linux/ioport.h>
85 #include <linux/malloc.h>
86 #include <linux/interrupt.h>
87 #include <linux/pci.h>
88 #include <linux/init.h>
89 #include <asm/processor.h> /* Processor type for cache alignment. */
90 #include <asm/unaligned.h>
91 #include <asm/bitops.h>
92 #include <asm/io.h>
94 #include <linux/netdevice.h>
95 #include <linux/etherdevice.h>
96 #include <linux/skbuff.h>
98 /* Condensed operations for readability.
99 Compatibility defines are now in drv_compat.h */
101 #define virt_to_le32desc(addr) cpu_to_le32(virt_to_bus(addr))
102 #define le32desc_to_virt(addr) bus_to_virt(le32_to_cpu(addr))
105 #ifdef USE_IO_OPS
106 #define YF_INB inb
107 #define YF_INW inw
108 #define YF_INL inl
109 #define YF_OUTB outb
110 #define YF_OUTW outw
111 #define YF_OUTL outl
112 #else
113 #define YF_INB readb
114 #define YF_INW readw
115 #define YF_INL readl
116 #define YF_OUTB writeb
117 #define YF_OUTW writew
118 #define YF_OUTL writel
119 #endif
122 Theory of Operation
124 I. Board Compatibility
126 This device driver is designed for the Packet Engines "Yellowfin" Gigabit
127 Ethernet adapter. The only PCA currently supported is the G-NIC 64-bit
128 PCI card.
130 II. Board-specific settings
132 PCI bus devices are configured by the system at boot time, so no jumpers
133 need to be set on the board. The system BIOS preferably should assign the
134 PCI INTA signal to an otherwise unused system IRQ line.
135 Note: Kernel versions earlier than 1.3.73 do not support shared PCI
136 interrupt lines.
138 III. Driver operation
140 IIIa. Ring buffers
142 The Yellowfin uses the Descriptor Based DMA Architecture specified by Apple.
143 This is a descriptor list scheme similar to that used by the EEPro100 and
144 Tulip. This driver uses two statically allocated fixed-size descriptor lists
145 formed into rings by a branch from the final descriptor to the beginning of
146 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
148 The driver allocates full frame size skbuffs for the Rx ring buffers at
149 open() time and passes the skb->data field to the Yellowfin as receive data
150 buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
151 a fresh skbuff is allocated and the frame is copied to the new skbuff.
152 When the incoming frame is larger, the skbuff is passed directly up the
153 protocol stack and replaced by a newly allocated skbuff.
155 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
156 using a full-sized skbuff for small frames vs. the copying costs of larger
157 frames. For small frames the copying cost is negligible (esp. considering
158 that we are pre-loading the cache with immediately useful header
159 information). For large frames the copying cost is non-trivial, and the
160 larger copy might flush the cache of useful data.
162 IIIC. Synchronization
164 The driver runs as two independent, single-threaded flows of control. One
165 is the send-packet routine, which enforces single-threaded use by the
166 dev->tbusy flag. The other thread is the interrupt handler, which is single
167 threaded by the hardware and other software.
169 The send packet thread has partial control over the Tx ring and 'dev->tbusy'
170 flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
171 queue slot is empty, it clears the tbusy flag when finished otherwise it sets
172 the 'yp->tx_full' flag.
174 The interrupt handler has exclusive control over the Rx ring and records stats
175 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
176 empty by incrementing the dirty_tx mark. Iff the 'yp->tx_full' flag is set, it
177 clears both the tx_full and tbusy flags.
179 IV. Notes
181 Thanks to Kim Stearns of Packet Engines for providing a pair of G-NIC boards.
182 Thanks to Bruce Faust of Digitalscape for providing both their SYM53C885 board
183 and an AlphaStation to verify the Alpha port!
185 IVb. References
187 Yellowfin Engineering Design Specification, 4/23/97 Preliminary/Confidential
188 Symbios SYM53C885 PCI-SCSI/Fast Ethernet Multifunction Controller Preliminary
189 Data Manual v3.0
190 http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
191 http://cesdis.gsfc.nasa.gov/linux/misc/100mbps.html
193 IVc. Errata
195 See Packet Engines confidential appendix (prototype chips only).
199 /* A few values that may be tweaked. */
200 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
202 /* The rest of these values should never change. */
204 enum capability_flags {
205 HasMII=1, FullTxStatus=2, IsGigabit=4, HasMulticastBug=8, FullRxStatus=16,
206 HasMACAddrBug=32, /* Really only on early revs. */
210 /* The PCI I/O space extent. */
211 #define YELLOWFIN_SIZE 0x100
213 #define YELLOWFIN_MODULE_NAME "yellowfin"
214 #define PFX YELLOWFIN_MODULE_NAME ": "
217 typedef enum {
218 YELLOWFIN_GNIC,
219 SYM83C885,
220 } chip_t;
223 struct chip_info {
224 const char *name;
225 int flags;
229 /* index by chip_t */
230 static struct chip_info chip_info[] = {
231 {"Yellowfin G-NIC Gigabit Ethernet",
232 FullTxStatus | IsGigabit | HasMulticastBug | HasMACAddrBug},
233 {"Symbios SYM83C885", HasMII },
237 static struct pci_device_id yellowfin_pci_tbl[] __devinitdata = {
238 { 0x1000, 0x0702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, YELLOWFIN_GNIC },
239 { 0x1000, 0x0701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SYM83C885 },
240 { 0, }
242 MODULE_DEVICE_TABLE (pci, yellowfin_pci_tbl);
245 /* Offsets to the Yellowfin registers. Various sizes and alignments. */
246 enum yellowfin_offsets {
247 TxCtrl=0x00, TxStatus=0x04, TxPtr=0x0C,
248 TxIntrSel=0x10, TxBranchSel=0x14, TxWaitSel=0x18,
249 RxCtrl=0x40, RxStatus=0x44, RxPtr=0x4C,
250 RxIntrSel=0x50, RxBranchSel=0x54, RxWaitSel=0x58,
251 EventStatus=0x80, IntrEnb=0x82, IntrClear=0x84, IntrStatus=0x86,
252 ChipRev=0x8C, DMACtrl=0x90, TxThreshold=0x94,
253 Cnfg=0xA0, FrameGap0=0xA2, FrameGap1=0xA4,
254 MII_Cmd=0xA6, MII_Addr=0xA8, MII_Wr_Data=0xAA, MII_Rd_Data=0xAC,
255 MII_Status=0xAE,
256 RxDepth=0xB8, FlowCtrl=0xBC,
257 AddrMode=0xD0, StnAddr=0xD2, HashTbl=0xD8, FIFOcfg=0xF8,
258 EEStatus=0xF0, EECtrl=0xF1, EEAddr=0xF2, EERead=0xF3, EEWrite=0xF4,
259 EEFeature=0xF5,
262 /* The Yellowfin Rx and Tx buffer descriptors.
263 Elements are written as 32 bit for endian portability. */
264 struct yellowfin_desc {
265 u32 dbdma_cmd;
266 u32 addr;
267 u32 branch_addr;
268 u32 result_status;
271 struct tx_status_words {
272 #if defined(__powerpc__)
273 u16 tx_errs;
274 u16 tx_cnt;
275 u16 paused;
276 u16 total_tx_cnt;
277 #else /* Little endian chips. */
278 u16 tx_cnt;
279 u16 tx_errs;
280 u16 total_tx_cnt;
281 u16 paused;
282 #endif
285 /* Bits in yellowfin_desc.cmd */
286 enum desc_cmd_bits {
287 CMD_TX_PKT=0x10000000, CMD_RX_BUF=0x20000000, CMD_TXSTATUS=0x30000000,
288 CMD_NOP=0x60000000, CMD_STOP=0x70000000,
289 BRANCH_ALWAYS=0x0C0000, INTR_ALWAYS=0x300000, WAIT_ALWAYS=0x030000,
290 BRANCH_IFTRUE=0x040000,
293 /* Bits in yellowfin_desc.status */
294 enum desc_status_bits { RX_EOP=0x0040, };
296 /* Bits in the interrupt status/mask registers. */
297 enum intr_status_bits {
298 IntrRxDone=0x01, IntrRxInvalid=0x02, IntrRxPCIFault=0x04,IntrRxPCIErr=0x08,
299 IntrTxDone=0x10, IntrTxInvalid=0x20, IntrTxPCIFault=0x40,IntrTxPCIErr=0x80,
300 IntrEarlyRx=0x100, IntrWakeup=0x200, };
302 #define PRIV_ALIGN 31 /* Required alignment mask */
303 struct yellowfin_private {
304 /* Descriptor rings first for alignment. Tx requires a second descriptor
305 for status. */
306 struct yellowfin_desc rx_ring[RX_RING_SIZE];
307 struct yellowfin_desc tx_ring[TX_RING_SIZE*2];
308 /* The addresses of receive-in-place skbuffs. */
309 struct sk_buff* rx_skbuff[RX_RING_SIZE];
310 /* The saved address of a sent-in-place packet/buffer, for skfree(). */
311 struct sk_buff* tx_skbuff[TX_RING_SIZE];
312 struct tx_status_words tx_status[TX_RING_SIZE];
313 struct timer_list timer; /* Media selection timer. */
314 struct net_device_stats stats;
315 /* Frequently used and paired value: keep adjacent for cache effect. */
316 struct pci_dev *pci_dev;
317 int chip_id, flags;
318 struct yellowfin_desc *rx_head_desc;
319 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
320 unsigned int rx_buf_sz; /* Based on MTU+slack. */
321 struct tx_status_words *tx_tail_desc;
322 unsigned int cur_tx, dirty_tx;
323 int tx_threshold;
324 unsigned int tx_full:1; /* The Tx queue is full. */
325 unsigned int full_duplex:1; /* Full-duplex operation requested. */
326 unsigned int duplex_lock:1;
327 unsigned int medialock:1; /* Do not sense media. */
328 unsigned int default_port:4; /* Last dev->if_port value. */
329 /* MII transceiver section. */
330 int mii_cnt; /* MII device addresses. */
331 u16 advertising; /* NWay media advertisement */
332 unsigned char phys[2]; /* MII device addresses. */
333 u32 pad[4]; /* Used for 32-byte alignment */
334 spinlock_t lock;
338 MODULE_AUTHOR("Donald Becker <becker@cesdis.gsfc.nasa.gov>");
339 MODULE_DESCRIPTION("Packet Engines Yellowfin G-NIC Gigabit Ethernet driver");
340 MODULE_PARM(max_interrupt_work, "i");
341 MODULE_PARM(mtu, "i");
342 MODULE_PARM(debug, "i");
343 MODULE_PARM(rx_copybreak, "i");
344 MODULE_PARM(gx_fix, "i");
345 MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
346 MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
349 static int read_eeprom(long ioaddr, int location);
350 static int mdio_read(long ioaddr, int phy_id, int location);
351 static void mdio_write(long ioaddr, int phy_id, int location, int value);
352 #ifdef HAVE_PRIVATE_IOCTL
353 static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
354 #endif
355 static int yellowfin_open(struct net_device *dev);
356 static void yellowfin_timer(unsigned long data);
357 static void yellowfin_tx_timeout(struct net_device *dev);
358 static void yellowfin_init_ring(struct net_device *dev);
359 static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev);
360 static void yellowfin_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
361 static int yellowfin_rx(struct net_device *dev);
362 static void yellowfin_error(struct net_device *dev, int intr_status);
363 static int yellowfin_close(struct net_device *dev);
364 static struct net_device_stats *yellowfin_get_stats(struct net_device *dev);
365 static void set_rx_mode(struct net_device *dev);
368 static int __devinit read_eeprom(long ioaddr, int location)
370 int bogus_cnt = 10000; /* Typical 33Mhz: 1050 ticks */
372 YF_OUTB(location, ioaddr + EEAddr);
373 YF_OUTB(0x30 | ((location >> 8) & 7), ioaddr + EECtrl);
374 while ((YF_INB(ioaddr + EEStatus) & 0x80) && --bogus_cnt > 0)
376 return YF_INB(ioaddr + EERead);
379 /* MII Management Data I/O accesses.
380 These routines assume the MDIO controller is idle, and do not exit until
381 the command is finished. */
383 static int mdio_read(long ioaddr, int phy_id, int location)
385 int i;
387 YF_OUTW((phy_id<<8) + location, ioaddr + MII_Addr);
388 YF_OUTW(1, ioaddr + MII_Cmd);
389 for (i = 10000; i >= 0; i--)
390 if ((YF_INW(ioaddr + MII_Status) & 1) == 0)
391 break;
392 return YF_INW(ioaddr + MII_Rd_Data);
395 static void mdio_write(long ioaddr, int phy_id, int location, int value)
397 int i;
399 YF_OUTW((phy_id<<8) + location, ioaddr + MII_Addr);
400 YF_OUTW(value, ioaddr + MII_Wr_Data);
402 /* Wait for the command to finish. */
403 for (i = 10000; i >= 0; i--)
404 if ((YF_INW(ioaddr + MII_Status) & 1) == 0)
405 break;
406 return;
/* Bring the interface up: reset the chip, grab the IRQ, build the Rx/Tx
 descriptor rings, program the DMA/select/flow-control registers, start
 both DMA channels, and arm the media-watch timer.
 Returns 0 on success or -EAGAIN if the IRQ could not be acquired. */
410 static int yellowfin_open(struct net_device *dev)
412 struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
413 long ioaddr = dev->base_addr;
414 int i;
416 /* Reset the chip. */
417 YF_OUTL(0x80000000, ioaddr + DMACtrl);
419 if (request_irq(dev->irq, &yellowfin_interrupt, SA_SHIRQ, dev->name, dev))
420 return -EAGAIN;
422 if (yellowfin_debug > 1)
423 printk(KERN_DEBUG "%s: yellowfin_open() irq %d.\n",
424 dev->name, dev->irq);
426 yellowfin_init_ring(dev);
/* Hand the physical (bus) addresses of both rings to the chip. */
428 YF_OUTL(virt_to_bus(yp->rx_ring), ioaddr + RxPtr);
429 YF_OUTL(virt_to_bus(yp->tx_ring), ioaddr + TxPtr);
/* Load the 6-byte station (MAC) address, one byte per register slot. */
431 for (i = 0; i < 6; i++)
432 YF_OUTB(dev->dev_addr[i], ioaddr + StnAddr + i);
434 /* Set up various condition 'select' registers.
435 There are no options here. */
436 YF_OUTL(0x00800080, ioaddr + TxIntrSel); /* Interrupt on Tx abort */
437 YF_OUTL(0x00800080, ioaddr + TxBranchSel); /* Branch on Tx abort */
438 YF_OUTL(0x00400040, ioaddr + TxWaitSel); /* Wait on Tx status */
439 YF_OUTL(0x00400040, ioaddr + RxIntrSel); /* Interrupt on Rx done */
440 YF_OUTL(0x00400040, ioaddr + RxBranchSel); /* Branch on Rx error */
441 YF_OUTL(0x00400040, ioaddr + RxWaitSel); /* Wait on Rx done */
443 /* Initialize other registers: with so many, this will eventually be
444 converted to an offset/value list. */
445 YF_OUTL(dma_ctrl, ioaddr + DMACtrl);
446 YF_OUTW(fifo_cfg, ioaddr + FIFOcfg);
447 /* Enable automatic generation of flow control frames, period 0xffff. */
448 YF_OUTL(0x0030FFFF, ioaddr + FlowCtrl);
450 yp->tx_threshold = 32;
451 YF_OUTL(yp->tx_threshold, ioaddr + TxThreshold);
453 if (dev->if_port == 0)
454 dev->if_port = yp->default_port;
456 netif_start_queue (dev);
458 /* Setting the Rx mode will start the Rx process. */
459 if (yp->flags & IsGigabit) {
460 /* We are always in full-duplex mode with gigabit! */
461 yp->full_duplex = 1;
462 YF_OUTW(0x01CF, ioaddr + Cnfg);
463 } else {
464 YF_OUTW(0x0018, ioaddr + FrameGap0); /* 0060/4060 for non-MII 10baseT */
465 YF_OUTW(0x1018, ioaddr + FrameGap1);
466 YF_OUTW(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
468 set_rx_mode(dev);
470 /* Enable interrupts by setting the interrupt mask. */
471 YF_OUTW(0x81ff, ioaddr + IntrEnb); /* See enum intr_status_bits */
472 YF_OUTW(0x0000, ioaddr + EventStatus); /* Clear non-interrupting events */
473 YF_OUTL(0x80008000, ioaddr + RxCtrl); /* Start Rx and Tx channels. */
474 YF_OUTL(0x80008000, ioaddr + TxCtrl);
476 if (yellowfin_debug > 2) {
477 printk(KERN_DEBUG "%s: Done yellowfin_open().\n",
478 dev->name);
480 /* Set the timer to check for link beat. */
481 init_timer(&yp->timer);
482 yp->timer.expires = jiffies + 3*HZ;
483 yp->timer.data = (unsigned long)dev;
484 yp->timer.function = &yellowfin_timer; /* timer handler */
485 add_timer(&yp->timer);
487 return 0;
/* Periodic media-watch timer (set up in yellowfin_open).
 Reads the MII status (reg 1) and link-partner ability (reg 5); if
 autonegotiation resolved to 100FD or 10FD and duplex is not locked,
 switches to full duplex and rewrites the Cnfg register.
 Re-arms itself: 60s when link is up (reg1 bit 0x0004), 3s otherwise. */
490 static void yellowfin_timer(unsigned long data)
492 struct net_device *dev = (struct net_device *)data;
493 struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
494 long ioaddr = dev->base_addr;
495 int next_tick = 60*HZ;
497 if (yellowfin_debug > 3) {
498 printk(KERN_DEBUG "%s: Yellowfin timer tick, status %8.8x.\n",
499 dev->name, YF_INW(ioaddr + IntrStatus));
/* Only poll when an MII transceiver was found at probe time. */
502 if (yp->mii_cnt) {
503 int mii_reg1 = mdio_read(ioaddr, yp->phys[0], 1);
504 int mii_reg5 = mdio_read(ioaddr, yp->phys[0], 5);
505 int negotiated = mii_reg5 & yp->advertising;
506 if (yellowfin_debug > 1)
507 printk(KERN_DEBUG "%s: MII #%d status register is %4.4x, "
508 "link partner capability %4.4x.\n",
509 dev->name, yp->phys[0], mii_reg1, mii_reg5);
/* 0x0100 = 100baseTx-FD, 0x0040 = 10baseT-FD in the negotiated mask. */
511 if ( ! yp->duplex_lock &&
512 ((negotiated & 0x0300) == 0x0100
513 || (negotiated & 0x00C0) == 0x0040)) {
514 yp->full_duplex = 1;
516 YF_OUTW(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
518 if (mii_reg1 & 0x0004)
519 next_tick = 60*HZ;
520 else
521 next_tick = 3*HZ;
524 yp->timer.expires = jiffies + next_tick;
525 add_timer(&yp->timer);
/* Transmit-watchdog handler: the stack decided a Tx has been pending
 longer than TX_TIMEOUT.  Logs the chip and ring state, pokes the Tx
 DMA channel awake, and re-enables the queue if there is room.  Does
 not reset the chip (see the comment at internal line 551). */
528 static void yellowfin_tx_timeout(struct net_device *dev)
530 struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
531 long ioaddr = dev->base_addr;
533 printk(KERN_WARNING "%s: Yellowfin transmit timed out at %d/%d Tx "
534 "status %4.4x, Rx status %4.4x, resetting...\n",
535 dev->name, yp->cur_tx, yp->dirty_tx,
536 YF_INL(ioaddr + TxStatus), YF_INL(ioaddr + RxStatus));
538 /* Note: these should be KERN_DEBUG. */
539 if (yellowfin_debug) {
540 int i;
541 printk(KERN_WARNING " Rx ring %p: ", yp->rx_ring);
542 for (i = 0; i < RX_RING_SIZE; i++)
543 printk(" %8.8x", yp->rx_ring[i].result_status);
544 printk("\n"KERN_WARNING" Tx ring %p: ", yp->tx_ring);
545 for (i = 0; i < TX_RING_SIZE; i++)
546 printk(" %4.4x /%8.8x", yp->tx_status[i].tx_errs,
547 yp->tx_ring[i].result_status);
548 printk("\n");
551 /* If the hardware is found to hang regularly, we will update the code
552 to reinitialize the chip here. */
553 dev->if_port = 0;
555 /* Wake the potentially-idle transmit channel. */
556 YF_OUTL(0x10001000, dev->base_addr + TxCtrl);
557 if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
558 netif_wake_queue (dev); /* Typical path */
/* Reset the watchdog clock and count this as a Tx error. */
560 dev->trans_start = jiffies;
561 yp->stats.tx_errors++;
562 return;
565 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
/* Initialize the Rx and Tx descriptor rings.
 Rx: every descriptor gets a CMD_RX_BUF command and a freshly allocated
 full-size skbuff; the last descriptor branches back to entry 0.
 Tx: in the default NO_TXSTATS mode each entry is a single descriptor
 pre-set to CMD_STOP; the #else path (compiled out) would pair each data
 descriptor with a status-write descriptor. */
566 static void yellowfin_init_ring(struct net_device *dev)
568 struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
569 int i;
571 yp->tx_full = 0;
572 yp->cur_rx = yp->cur_tx = 0;
573 yp->dirty_tx = 0;
575 yp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
576 yp->rx_head_desc = &yp->rx_ring[0];
578 for (i = 0; i < RX_RING_SIZE; i++) {
579 yp->rx_ring[i].dbdma_cmd =
580 cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
581 yp->rx_ring[i].branch_addr = virt_to_le32desc(&yp->rx_ring[i+1]);
583 /* Mark the last entry as wrapping the ring. */
/* Here i == RX_RING_SIZE after the loop, so [i-1] is the final entry. */
584 yp->rx_ring[i-1].branch_addr = virt_to_le32desc(&yp->rx_ring[0]);
586 for (i = 0; i < RX_RING_SIZE; i++) {
587 struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz);
588 yp->rx_skbuff[i] = skb;
589 if (skb == NULL)
590 break;
591 skb->dev = dev; /* Mark as being used by this device. */
592 skb_reserve(skb, 2); /* 16 byte align the IP header. */
593 yp->rx_ring[i].addr = virt_to_le32desc(skb->tail);
/* NOTE(review): if the very first dev_alloc_skb() fails, i == 0 here
 and [i-1] indexes out of bounds -- worth a guard. */
595 yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP);
596 yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
598 #define NO_TXSTATS
599 #ifdef NO_TXSTATS
600 /* In this mode the Tx ring needs only a single descriptor. */
601 for (i = 0; i < TX_RING_SIZE; i++) {
602 yp->tx_skbuff[i] = 0;
603 yp->tx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
604 yp->tx_ring[i].branch_addr = virt_to_le32desc(&yp->tx_ring[i+1]);
606 /* Wrap ring */
607 yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS);
608 yp->tx_ring[i].branch_addr = virt_to_le32desc(&yp->tx_ring[0]);
609 #else
610 /* Tx ring needs a pair of descriptors, the second for the status. */
/* NOTE(review): this compiled-out branch uses a 'request_cnt' member
 that the visible struct yellowfin_desc does not declare -- verify
 against the descriptor definition before ever enabling it. */
611 for (i = 0; i < TX_RING_SIZE*2; i++) {
612 yp->tx_skbuff[i/2] = 0;
613 /* Branch on Tx error. */
614 yp->tx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
615 yp->tx_ring[i].branch_addr = virt_to_le32desc(&yp->tx_ring[i+1]);
616 i++;
617 if (yp->flags & FullTxStatus) {
618 yp->tx_ring[i].dbdma_cmd =
619 cpu_to_le32(CMD_TXSTATUS | sizeof(yp->tx_status[i]));
620 yp->tx_ring[i].request_cnt = sizeof(yp->tx_status[i]);
621 yp->tx_ring[i].addr = virt_to_le32desc(&yp->tx_status[i/2]);
622 } else { /* Symbios chips write only tx_errs word. */
623 yp->tx_ring[i].dbdma_cmd =
624 cpu_to_le32(CMD_TXSTATUS | INTR_ALWAYS | 2);
625 yp->tx_ring[i].request_cnt = 2;
626 yp->tx_ring[i].addr = virt_to_le32desc(&yp->tx_status[i/2].tx_errs);
628 yp->tx_ring[i].branch_addr = virt_to_le32desc(&yp->tx_ring[i+1]);
630 /* Wrap ring */
631 yp->tx_ring[--i].dbdma_cmd |= cpu_to_le32(BRANCH_ALWAYS | INTR_ALWAYS);
632 yp->tx_ring[i].branch_addr = virt_to_le32desc(&yp->tx_ring[0]);
633 #endif
634 yp->tx_tail_desc = &yp->tx_status[0];
635 return;
/* Queue one skb for transmission.
 Stops the queue first, fills in the next Tx descriptor, converts the
 previous CMD_STOP into a live CMD_TX_PKT last (so the chip never races
 past an unfinished entry), then kicks the Tx DMA channel.  The queue is
 restarted unless the ring is within TX_QUEUE_SIZE of full.
 Always returns 0 (packet accepted). */
638 static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev)
640 struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
641 unsigned entry;
643 netif_stop_queue (dev);
645 /* Caution: the write order is important here, set the base address
646 with the "ownership" bits last. */
648 /* Calculate the next Tx descriptor entry. */
649 entry = yp->cur_tx % TX_RING_SIZE;
651 yp->tx_skbuff[entry] = skb;
653 if (gx_fix) { /* Note: only works for paddable protocols e.g. IP. */
654 int cacheline_end = (virt_to_bus(skb->data) + skb->len) % 32;
655 /* Fix GX chipset errata. */
656 if (cacheline_end > 24 || cacheline_end == 0)
657 skb->len += 32 - cacheline_end + 1;
659 #ifdef NO_TXSTATS
660 yp->tx_ring[entry].addr = virt_to_le32desc(skb->data);
661 yp->tx_ring[entry].result_status = 0;
662 if (entry >= TX_RING_SIZE-1) {
663 /* New stop command. */
/* Wrap case: arm entry 0 as the new stop, then activate the last slot. */
664 yp->tx_ring[0].dbdma_cmd = cpu_to_le32(CMD_STOP);
665 yp->tx_ring[TX_RING_SIZE-1].dbdma_cmd =
666 cpu_to_le32(CMD_TX_PKT|BRANCH_ALWAYS | skb->len);
667 } else {
668 yp->tx_ring[entry+1].dbdma_cmd = cpu_to_le32(CMD_STOP);
669 yp->tx_ring[entry].dbdma_cmd =
670 cpu_to_le32(CMD_TX_PKT | BRANCH_IFTRUE | skb->len);
672 yp->cur_tx++;
673 #else
674 yp->tx_ring[entry<<1].request_cnt = skb->len;
675 yp->tx_ring[entry<<1].addr = virt_to_le32desc(skb->data);
676 /* The input_last (status-write) command is constant, but we must rewrite
677 the subsequent 'stop' command. */
679 yp->cur_tx++;
681 unsigned next_entry = yp->cur_tx % TX_RING_SIZE;
682 yp->tx_ring[next_entry<<1].dbdma_cmd = cpu_to_le32(CMD_STOP);
684 /* Final step -- overwrite the old 'stop' command. */
/* Interrupt only on every 6th packet to cut Tx interrupt load. */
686 yp->tx_ring[entry<<1].dbdma_cmd =
687 cpu_to_le32( ((entry % 6) == 0 ? CMD_TX_PKT|INTR_ALWAYS|BRANCH_IFTRUE :
688 CMD_TX_PKT | BRANCH_IFTRUE) | skb->len);
689 #endif
691 /* Non-x86 Todo: explicitly flush cache lines here. */
693 /* Wake the potentially-idle transmit channel. */
694 YF_OUTL(0x10001000, dev->base_addr + TxCtrl);
696 if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
697 netif_start_queue (dev); /* Typical path */
698 else
699 yp->tx_full = 1;
700 dev->trans_start = jiffies;
702 if (yellowfin_debug > 4) {
703 printk(KERN_DEBUG "%s: Yellowfin transmit frame #%d queued in slot %d.\n",
704 dev->name, yp->cur_tx, entry);
706 return 0;
709 /* The interrupt handler does all of the Rx thread work and cleans up
710 after the Tx thread. */
/* Interrupt service routine.
 Loops reading (and thereby acknowledging, via IntrClear) the interrupt
 status: dispatches Rx work to yellowfin_rx(), reaps completed Tx
 descriptors and frees their skbs, restarts the queue when the ring
 drains, and hands abnormal events to yellowfin_error().  Bails out
 after max_interrupt_work iterations to avoid livelock. */
711 static void yellowfin_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
713 struct net_device *dev = (struct net_device *)dev_instance;
714 struct yellowfin_private *yp;
715 long ioaddr, boguscnt = max_interrupt_work;
717 #ifndef final_version /* Can never occur. */
718 if (dev == NULL) {
719 printk (KERN_ERR "yellowfin_interrupt(): irq %d for unknown device.\n", irq);
720 return;
722 #endif
724 ioaddr = dev->base_addr;
725 yp = (struct yellowfin_private *)dev->priv;
727 spin_lock (&yp->lock);
729 do {
/* Reading IntrClear returns and acknowledges the pending bits. */
730 u16 intr_status = YF_INW(ioaddr + IntrClear);
732 if (yellowfin_debug > 4)
733 printk(KERN_DEBUG "%s: Yellowfin interrupt, status %4.4x.\n",
734 dev->name, intr_status);
736 if (intr_status == 0)
737 break;
739 if (intr_status & (IntrRxDone | IntrEarlyRx)) {
740 yellowfin_rx(dev);
741 YF_OUTL(0x10001000, ioaddr + RxCtrl); /* Wake Rx engine. */
744 #ifdef NO_TXSTATS
/* Reap finished Tx entries: result_status != 0 means the chip is done. */
745 for (; yp->cur_tx - yp->dirty_tx > 0; yp->dirty_tx++) {
746 int entry = yp->dirty_tx % TX_RING_SIZE;
747 if (yp->tx_ring[entry].result_status == 0)
748 break;
749 yp->stats.tx_bytes += yp->tx_skbuff[entry]->len;
750 yp->stats.tx_packets++;
751 /* Free the original skb. */
752 dev_kfree_skb_irq(yp->tx_skbuff[entry]);
753 yp->tx_skbuff[entry] = 0;
755 if (yp->tx_full
756 && yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4) {
757 /* The ring is no longer full, clear tbusy. */
758 yp->tx_full = 0;
760 if (yp->tx_full)
761 netif_stop_queue(dev);
762 else
763 netif_wake_queue(dev);
764 #else
765 if (intr_status & IntrTxDone
766 || yp->tx_tail_desc->tx_errs) {
767 unsigned dirty_tx = yp->dirty_tx;
769 for (dirty_tx = yp->dirty_tx; yp->cur_tx - dirty_tx > 0;
770 dirty_tx++) {
771 /* Todo: optimize this. */
772 int entry = dirty_tx % TX_RING_SIZE;
773 u16 tx_errs = yp->tx_status[entry].tx_errs;
775 #ifndef final_version
776 if (yellowfin_debug > 5)
777 printk(KERN_DEBUG "%s: Tx queue %d check, Tx status "
778 "%4.4x %4.4x %4.4x %4.4x.\n",
779 dev->name, entry,
780 yp->tx_status[entry].tx_cnt,
781 yp->tx_status[entry].tx_errs,
782 yp->tx_status[entry].total_tx_cnt,
783 yp->tx_status[entry].paused);
784 #endif
785 if (tx_errs == 0)
786 break; /* It still hasn't been Txed */
787 if (tx_errs & 0xF810) {
788 /* There was an major error, log it. */
789 #ifndef final_version
790 if (yellowfin_debug > 1)
791 printk(KERN_DEBUG "%s: Transmit error, Tx status %4.4x.\n",
792 dev->name, tx_errs);
793 #endif
794 yp->stats.tx_errors++;
795 if (tx_errs & 0xF800) yp->stats.tx_aborted_errors++;
796 if (tx_errs & 0x0800) yp->stats.tx_carrier_errors++;
797 if (tx_errs & 0x2000) yp->stats.tx_window_errors++;
798 if (tx_errs & 0x8000) yp->stats.tx_fifo_errors++;
799 #ifdef ETHER_STATS
800 if (tx_errs & 0x1000) yp->stats.collisions16++;
801 #endif
802 } else {
803 #ifndef final_version
804 if (yellowfin_debug > 4)
805 printk(KERN_DEBUG "%s: Normal transmit, Tx status %4.4x.\n",
806 dev->name, tx_errs);
807 #endif
808 #ifdef ETHER_STATS
809 if (tx_errs & 0x0400) yp->stats.tx_deferred++;
810 #endif
811 yp->stats.tx_bytes += yp->tx_skbuff[entry]->len;
812 yp->stats.collisions += tx_errs & 15;
813 yp->stats.tx_packets++;
815 /* Free the original skb. */
816 dev_kfree_skb_irq(yp->tx_skbuff[entry]);
817 yp->tx_skbuff[entry] = 0;
818 /* Mark status as empty. */
819 yp->tx_status[entry].tx_errs = 0;
822 #ifndef final_version
823 if (yp->cur_tx - dirty_tx > TX_RING_SIZE) {
824 printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
825 dev->name, dirty_tx, yp->cur_tx, yp->tx_full);
826 dirty_tx += TX_RING_SIZE;
828 #endif
830 if (yp->tx_full
831 && yp->cur_tx - dirty_tx < TX_QUEUE_SIZE - 2) {
832 /* The ring is no longer full, clear tbusy. */
833 yp->tx_full = 0;
835 if (yp->tx_full)
836 netif_stop_queue(dev);
837 else
838 netif_wake_queue(dev);
840 yp->dirty_tx = dirty_tx;
841 yp->tx_tail_desc = &yp->tx_status[dirty_tx % TX_RING_SIZE];
843 #endif
845 /* Log errors and other uncommon events. */
846 if (intr_status & 0x2ee) /* Abnormal error summary. */
847 yellowfin_error(dev, intr_status);
849 if (--boguscnt < 0) {
850 printk(KERN_WARNING "%s: Too much work at interrupt, status=0x%4.4x.\n",
851 dev->name, intr_status);
852 break;
854 } while (1);
856 if (yellowfin_debug > 3)
857 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
858 dev->name, YF_INW(ioaddr + IntrStatus));
860 /* Code that should never be run! Perhaps remove after testing.. */
/* Safety valve: if interrupts keep arriving while the device is down,
 give up the IRQ so the machine stays usable. */
862 static int stopit = 10;
863 if ((!(netif_running(dev))) && --stopit < 0) {
864 printk(KERN_ERR "%s: Emergency stop, looping startup interrupt.\n",
865 dev->name);
866 free_irq(irq, dev);
870 spin_unlock (&yp->lock);
873 /* This routine is logically part of the interrupt handler, but separated
874 for clarity and better register allocation. */
875 static int yellowfin_rx(struct net_device *dev)
877 struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
878 int entry = yp->cur_rx % RX_RING_SIZE;
879 int boguscnt = 20;
881 if (yellowfin_debug > 4) {
882 printk(KERN_DEBUG " In yellowfin_rx(), entry %d status %8.8x.\n",
883 entry, yp->rx_ring[entry].result_status);
884 printk(KERN_DEBUG " #%d desc. %8.8x %8.8x %8.8x.\n",
885 entry, yp->rx_ring[entry].dbdma_cmd, yp->rx_ring[entry].addr,
886 yp->rx_ring[entry].result_status);
889 /* If EOP is set on the next entry, it's a new packet. Send it up. */
890 while (yp->rx_head_desc->result_status) {
891 struct yellowfin_desc *desc = yp->rx_head_desc;
892 u16 desc_status = le32_to_cpu(desc->result_status) >> 16;
893 int data_size =
894 (le32_to_cpu(desc->dbdma_cmd) - le32_to_cpu(desc->result_status))
895 & 0xffff;
896 u8 *buf_addr = le32desc_to_virt(desc->addr);
897 s16 frame_status = get_unaligned((s16*)&(buf_addr[data_size - 2]));
899 if (yellowfin_debug > 4)
900 printk(KERN_DEBUG " yellowfin_rx() status was %4.4x.\n",
901 frame_status);
902 if (--boguscnt < 0)
903 break;
904 if ( ! (desc_status & RX_EOP)) {
905 printk(KERN_WARNING "%s: Oversized Ethernet frame spanned multiple buffers,"
906 " status %4.4x!\n", dev->name, desc_status);
907 yp->stats.rx_length_errors++;
908 } else if ((yp->flags & IsGigabit) && (frame_status & 0x0038)) {
909 /* There was a error. */
910 if (yellowfin_debug > 3)
911 printk(KERN_DEBUG " yellowfin_rx() Rx error was %4.4x.\n",
912 frame_status);
913 yp->stats.rx_errors++;
914 if (frame_status & 0x0060) yp->stats.rx_length_errors++;
915 if (frame_status & 0x0008) yp->stats.rx_frame_errors++;
916 if (frame_status & 0x0010) yp->stats.rx_crc_errors++;
917 if (frame_status < 0) yp->stats.rx_dropped++;
918 } else if ( !(yp->flags & IsGigabit) &&
919 ((buf_addr[data_size-1] & 0x85) || buf_addr[data_size-2] & 0xC0)) {
920 u8 status1 = buf_addr[data_size-2];
921 u8 status2 = buf_addr[data_size-1];
922 yp->stats.rx_errors++;
923 if (status1 & 0xC0) yp->stats.rx_length_errors++;
924 if (status2 & 0x03) yp->stats.rx_frame_errors++;
925 if (status2 & 0x04) yp->stats.rx_crc_errors++;
926 if (status2 & 0x80) yp->stats.rx_dropped++;
927 #ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */
928 } else if ((yp->flags & HasMACAddrBug) &&
929 memcmp(le32desc_to_virt(yp->rx_ring[entry].addr),
930 dev->dev_addr, 6) != 0
931 && memcmp(le32desc_to_virt(yp->rx_ring[entry].addr),
932 "\377\377\377\377\377\377", 6) != 0) {
933 if (bogus_rx++ == 0)
934 printk(KERN_WARNING "%s: Bad frame to %2.2x:%2.2x:%2.2x:%2.2x:"
935 "%2.2x:%2.2x.\n",
936 dev->name, buf_addr[0], buf_addr[1], buf_addr[2],
937 buf_addr[3], buf_addr[4], buf_addr[5]);
938 #endif
939 } else {
940 struct sk_buff *skb;
941 int pkt_len = data_size -
942 (yp->chip_id ? 7 : 8 + buf_addr[data_size - 8]);
943 /* To verify: Yellowfin Length should omit the CRC! */
945 #ifndef final_version
946 if (yellowfin_debug > 4)
947 printk(KERN_DEBUG " yellowfin_rx() normal Rx pkt length %d"
948 " of %d, bogus_cnt %d.\n",
949 pkt_len, data_size, boguscnt);
950 #endif
951 /* Check if the packet is long enough to just pass up the skbuff
952 without copying to a properly sized skbuff. */
953 if (pkt_len > rx_copybreak) {
954 char *temp = skb_put(skb = yp->rx_skbuff[entry], pkt_len);
955 #ifndef final_verison /* Remove after testing. */
956 if (le32desc_to_virt(yp->rx_ring[entry].addr) != temp)
957 printk(KERN_WARNING "%s: Warning -- the skbuff addresses "
958 "do not match in yellowfin_rx: %p vs. %p / %p.\n",
959 dev->name, le32desc_to_virt(yp->rx_ring[entry].addr),
960 skb->head, temp);
961 #endif
962 yp->rx_skbuff[entry] = NULL;
963 } else {
964 skb = dev_alloc_skb(pkt_len + 2);
965 if (skb == NULL)
966 break;
967 skb->dev = dev;
968 skb_reserve(skb, 2); /* 16 byte align the data fields */
969 #if 1 || USE_IP_CSUM
970 eth_copy_and_sum(skb, yp->rx_skbuff[entry]->tail, pkt_len, 0);
971 skb_put(skb, pkt_len);
972 #else
973 memcpy(skb_put(skb, pkt_len), yp->rx_skbuff[entry]->tail,
974 pkt_len);
975 #endif
977 skb->protocol = eth_type_trans(skb, dev);
978 netif_rx(skb);
979 dev->last_rx = jiffies;
980 yp->stats.rx_packets++;
981 yp->stats.rx_bytes += pkt_len;
983 entry = (++yp->cur_rx) % RX_RING_SIZE;
984 yp->rx_head_desc = &yp->rx_ring[entry];
987 /* Refill the Rx ring buffers. */
988 for (; yp->cur_rx - yp->dirty_rx > 0; yp->dirty_rx++) {
989 entry = yp->dirty_rx % RX_RING_SIZE;
990 if (yp->rx_skbuff[entry] == NULL) {
991 struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz);
992 if (skb == NULL)
993 break; /* Better luck next round. */
994 yp->rx_skbuff[entry] = skb;
995 skb->dev = dev; /* Mark as being used by this device. */
996 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
997 yp->rx_ring[entry].addr = virt_to_le32desc(skb->tail);
999 yp->rx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_STOP);
1000 yp->rx_ring[entry].result_status = 0; /* Clear complete bit. */
1001 if (entry != 0)
1002 yp->rx_ring[entry - 1].dbdma_cmd =
1003 cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
1004 else
1005 yp->rx_ring[RX_RING_SIZE - 1].dbdma_cmd =
1006 cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | BRANCH_ALWAYS
1007 | yp->rx_buf_sz);
1010 return 0;
1013 static void yellowfin_error(struct net_device *dev, int intr_status)
1015 struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
1017 printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
1018 dev->name, intr_status);
1019 /* Hmmmmm, it's not clear what to do here. */
1020 if (intr_status & (IntrTxPCIErr | IntrTxPCIFault))
1021 yp->stats.tx_errors++;
1022 if (intr_status & (IntrRxPCIErr | IntrRxPCIFault))
1023 yp->stats.rx_errors++;
/* Shut the interface down: quiesce the chip, release the IRQ, and free all
   ring buffers.  Counterpart of yellowfin_open().  Always returns 0. */
static int yellowfin_close(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
	int i;

	/* Stop the transmit queue before touching the hardware. */
	netif_stop_queue (dev);

	if (yellowfin_debug > 1) {
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %4.4x Rx %4.4x Int %2.2x.\n",
			   dev->name, YF_INW(ioaddr + TxStatus),
			   YF_INW(ioaddr + RxStatus),
			   YF_INW(ioaddr + IntrStatus));
		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
			   dev->name, yp->cur_tx, yp->dirty_tx, yp->cur_rx, yp->dirty_rx);
	}

	/* Disable interrupts by clearing the interrupt mask. */
	YF_OUTW(0x0000, ioaddr + IntrEnb);

	/* Stop the chip's Tx and Rx processes. */
	YF_OUTL(0x80000000, ioaddr + RxCtrl);
	YF_OUTL(0x80000000, ioaddr + TxCtrl);

	/* Stop the media-watch timer started by yellowfin_open(). */
	del_timer(&yp->timer);

#if !defined(final_version) && defined(__i386__)
	/* Debug-only ring dumps; x86-only because of the virt_to_bus casts. */
	if (yellowfin_debug > 2) {
		printk("\n"KERN_DEBUG"  Tx ring at %8.8x:\n", (int)virt_to_bus(yp->tx_ring));
		for (i = 0; i < TX_RING_SIZE*2; i++)
			printk(" %c #%d desc. %8.8x %8.8x %8.8x %8.8x.\n",
				   YF_INL(ioaddr + TxPtr) == (long)&yp->tx_ring[i] ? '>' : ' ',
				   i, yp->tx_ring[i].dbdma_cmd, yp->tx_ring[i].addr,
				   yp->tx_ring[i].branch_addr, yp->tx_ring[i].result_status);
		printk(KERN_DEBUG "  Tx status %p:\n", yp->tx_status);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk("   #%d status %4.4x %4.4x %4.4x %4.4x.\n",
				   i, yp->tx_status[i].tx_cnt, yp->tx_status[i].tx_errs,
				   yp->tx_status[i].total_tx_cnt, yp->tx_status[i].paused);

		printk("\n"KERN_DEBUG "  Rx ring %8.8x:\n", (int)virt_to_bus(yp->rx_ring));
		for (i = 0; i < RX_RING_SIZE; i++) {
			printk(KERN_DEBUG " %c #%d desc. %8.8x %8.8x %8.8x\n",
				   YF_INL(ioaddr + RxPtr) == (long)&yp->rx_ring[i] ? '>' : ' ',
				   i, yp->rx_ring[i].dbdma_cmd, yp->rx_ring[i].addr,
				   yp->rx_ring[i].result_status);
			if (yellowfin_debug > 6) {
				/* 0x69 appears to be a fill/poison byte marking an
				   unwritten buffer -- NOTE(review): confirm. */
				if (get_unaligned((u8*)yp->rx_ring[i].addr) != 0x69) {
					int j;
					for (j = 0; j < 0x50; j++)
						printk(" %4.4x",
							   get_unaligned(((u16*)yp->rx_ring[i].addr) + j));
					printk("\n");
				}
			}
		}
	}
#endif /* __i386__ debugging only */

	free_irq(dev->irq, dev);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		yp->rx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
		yp->rx_ring[i].addr = 0xBADF00D0; /* An invalid address. */
		if (yp->rx_skbuff[i]) {
			dev_kfree_skb(yp->rx_skbuff[i]);
		}
		yp->rx_skbuff[i] = 0;
	}
	/* Free any skbuffs still pending on the Tx ring. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		if (yp->tx_skbuff[i])
			dev_kfree_skb(yp->tx_skbuff[i]);
		yp->tx_skbuff[i] = 0;
	}

#ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */
	if (yellowfin_debug > 0) {
		printk(KERN_DEBUG "%s: Received %d frames that we should not have.\n",
			   dev->name, bogus_rx);
	}
#endif

	return 0;
}
1111 static struct net_device_stats *yellowfin_get_stats(struct net_device *dev)
1113 struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
1114 return &yp->stats;
1117 /* Set or clear the multicast filter for this adaptor. */
1119 /* The little-endian AUTODIN32 ethernet CRC calculation.
1120 N.B. Do not use for bulk data, use a table-based routine instead.
1121 This is common code and should be moved to net/core/crc.c */
static unsigned const ethernet_polynomial_le = 0xedb88320U;
/* Bitwise little-endian (reflected) AUTODIN-II CRC-32, initialized to all
   ones and returned WITHOUT the final inversion.  length <= 0 yields the
   initial value 0xffffffff.  Per the note above, only meant for short
   inputs such as multicast addresses. */
static inline unsigned ether_crc_le(int length, unsigned char *data)
{
	unsigned int crc = 0xffffffff;	/* Initial value. */
	int i;

	for (i = 0; i < length; i++) {
		unsigned char octet = data[i];
		int bit;

		/* Fold in one byte, LSB first. */
		for (bit = 0; bit < 8; bit++) {
			unsigned int lsb_differs = (crc ^ octet) & 1;

			crc >>= 1;
			if (lsb_differs)
				crc ^= ethernet_polynomial_le;
			octet >>= 1;
		}
	}
	return crc;
}
1142 static void set_rx_mode(struct net_device *dev)
1144 struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
1145 long ioaddr = dev->base_addr;
1146 u16 cfg_value = YF_INW(ioaddr + Cnfg);
1148 /* Stop the Rx process to change any value. */
1149 YF_OUTW(cfg_value & ~0x1000, ioaddr + Cnfg);
1150 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1151 /* Unconditionally log net taps. */
1152 printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
1153 YF_OUTW(0x000F, ioaddr + AddrMode);
1154 } else if ((dev->mc_count > 64) || (dev->flags & IFF_ALLMULTI)) {
1155 /* Too many to filter well, or accept all multicasts. */
1156 YF_OUTW(0x000B, ioaddr + AddrMode);
1157 } else if (dev->mc_count > 0) { /* Must use the multicast hash table. */
1158 struct dev_mc_list *mclist;
1159 u16 hash_table[4];
1160 int i;
1161 memset(hash_table, 0, sizeof(hash_table));
1162 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1163 i++, mclist = mclist->next) {
1164 /* Due to a bug in the early chip versions, multiple filter
1165 slots must be set for each address. */
1166 if (yp->flags & HasMulticastBug) {
1167 set_bit((ether_crc_le(3, mclist->dmi_addr) >> 3) & 0x3f,
1168 hash_table);
1169 set_bit((ether_crc_le(4, mclist->dmi_addr) >> 3) & 0x3f,
1170 hash_table);
1171 set_bit((ether_crc_le(5, mclist->dmi_addr) >> 3) & 0x3f,
1172 hash_table);
1174 set_bit((ether_crc_le(6, mclist->dmi_addr) >> 3) & 0x3f,
1175 hash_table);
1177 /* Copy the hash table to the chip. */
1178 for (i = 0; i < 4; i++)
1179 YF_OUTW(hash_table[i], ioaddr + HashTbl + i*2);
1180 YF_OUTW(0x0003, ioaddr + AddrMode);
1181 } else { /* Normal, unicast/broadcast-only mode. */
1182 YF_OUTW(0x0001, ioaddr + AddrMode);
1184 /* Restart the Rx process. */
1185 YF_OUTW(cfg_value | 0x1000, ioaddr + Cnfg);
#ifdef HAVE_PRIVATE_IOCTL
/* Private ioctl handler giving userspace raw access to the MII (PHY)
   management registers.  data[0] = PHY address, data[1] = register number,
   data[2] = value to write, data[3] = value read back. */
static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	long ioaddr = dev->base_addr;
	u16 *data = (u16 *)&rq->ifr_data;

	switch(cmd) {
	case SIOCDEVPRIVATE:		/* Get the address of the PHY in use. */
		data[0] = ((struct yellowfin_private *)dev->priv)->phys[0] & 0x1f;
		/* Fall Through -- also reads the register after fetching the PHY. */
	case SIOCDEVPRIVATE+1:		/* Read the specified MII register. */
		data[3] = mdio_read(ioaddr, data[0] & 0x1f, data[1] & 0x1f);
		return 0;
	case SIOCDEVPRIVATE+2:		/* Write the specified MII register */
		/* Writes can disrupt the link; require net-admin capability. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		mdio_write(ioaddr, data[0] & 0x1f, data[1] & 0x1f, data[2]);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
#endif /* HAVE_PRIVATE_IOCTL */
/* PCI probe: allocate and register a net_device for one Yellowfin board,
   claim its I/O and MMIO regions, read the station address, and wire up
   the net_device method pointers.  Returns 0 on success, -ENOMEM if the
   device cannot be allocated, -ENODEV on any later failure (resources are
   unwound through the goto labels at the bottom). */
static int __devinit yellowfin_init_one(struct pci_dev *pdev,
					const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct yellowfin_private *yp;
	int option, i, irq;
	int flags, chip_idx;
	static int find_cnt = 0;	/* Index into the module's options[]/full_duplex[]. */
	long ioaddr, real_ioaddr;

	chip_idx = ent->driver_data;
	flags = chip_info[chip_idx].flags;

	/* init_etherdev() both allocates and registers the device. */
	dev = init_etherdev(NULL, sizeof(*yp));
	if (!dev) {
		printk (KERN_ERR PFX "cannot allocate ethernet device\n");
		return -ENOMEM;
	}
	SET_MODULE_OWNER(dev);

	yp = dev->priv;

	/* BAR 0 is the port-I/O window, BAR 1 the memory-mapped window;
	   both are claimed regardless of which access mode is compiled in. */
	if (!request_region (pci_resource_start (pdev, 0),
			     YELLOWFIN_SIZE, YELLOWFIN_MODULE_NAME)) {
		printk (KERN_ERR PFX "cannot obtain I/O port region\n");
		goto err_out_free_netdev;
	}
	if (!request_mem_region (pci_resource_start (pdev, 1),
				 YELLOWFIN_SIZE, YELLOWFIN_MODULE_NAME)) {
		printk (KERN_ERR PFX "cannot obtain MMIO region\n");
		goto err_out_free_pio_region;
	}

	/* NOTE(review): pci_enable_device() runs after the BARs are read and
	   the regions claimed; modern practice enables the device first --
	   confirm ordering is safe on the targeted kernels. */
	if (pci_enable_device (pdev))
		goto err_out_free_mmio_region;
	pci_set_master (pdev);

#ifdef USE_IO_OPS
	real_ioaddr = ioaddr = pci_resource_start (pdev, 0);
#else
	/* MMIO mode: ioaddr becomes the remapped virtual address while
	   real_ioaddr keeps the bus address for the banner printk. */
	real_ioaddr = ioaddr = pci_resource_start (pdev, 1);
	ioaddr = (long) ioremap(ioaddr, YELLOWFIN_SIZE);
	if (!ioaddr)
		goto err_out_free_mmio_region;
#endif
	irq = pdev->irq;

	printk(KERN_INFO "%s: %s type %8x at 0x%lx, ",
		   dev->name, chip_info[chip_idx].name,
		   YF_INL(ioaddr + ChipRev), real_ioaddr);

	/* Station address: gigabit parts expose it in registers, others in
	   the EEPROM (at offset 0x100 when offset 6 reads back 0xff). */
	if (flags & IsGigabit)
		for (i = 0; i < 6; i++)
			dev->dev_addr[i] = YF_INB(ioaddr + StnAddr + i);
	else {
		int ee_offset = (read_eeprom(ioaddr, 6) == 0xff ? 0x100 : 0);
		for (i = 0; i < 6; i++)
			dev->dev_addr[i] = read_eeprom(ioaddr, ee_offset + i);
	}
	for (i = 0; i < 5; i++)
		printk("%2.2x:", dev->dev_addr[i]);
	printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);	/* i == 5 here. */

	/* Reset the chip. */
	YF_OUTL(0x80000000, ioaddr + DMACtrl);

	dev->base_addr = ioaddr;
	dev->irq = irq;

	pdev->driver_data = dev;
	yp->chip_id = chip_idx;
	yp->flags = flags;
	yp->lock = SPIN_LOCK_UNLOCKED;

	/* Per-unit module option; dev->mem_start (boot parameter) overrides. */
	option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
	if (dev->mem_start)
		option = dev->mem_start;

	/* The lower four bits are the media type. */
	if (option > 0) {
		if (option & 0x200)
			yp->full_duplex = 1;	/* Bit 9 forces full duplex. */
		yp->default_port = option & 15;
		if (yp->default_port)
			yp->medialock = 1;
	}
	if (find_cnt < MAX_UNITS && full_duplex[find_cnt] > 0)
		yp->full_duplex = 1;

	if (yp->full_duplex)
		yp->duplex_lock = 1;

	/* The Yellowfin-specific entries in the device structure. */
	dev->open = &yellowfin_open;
	dev->hard_start_xmit = &yellowfin_start_xmit;
	dev->stop = &yellowfin_close;
	dev->get_stats = &yellowfin_get_stats;
	dev->set_multicast_list = &set_rx_mode;
#ifdef HAVE_PRIVATE_IOCTL
	dev->do_ioctl = &mii_ioctl;
#endif
	dev->tx_timeout = yellowfin_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;

	if (mtu)
		dev->mtu = mtu;

	/* Probe for up to four MII PHYs (status register != all-ones/zeros). */
	if (yp->flags & HasMII) {
		int phy, phy_idx = 0;
		for (phy = 0; phy < 32 && phy_idx < 4; phy++) {
			int mii_status = mdio_read(ioaddr, phy, 1);
			if (mii_status != 0xffff &&
				mii_status != 0x0000) {
				yp->phys[phy_idx++] = phy;
				yp->advertising = mdio_read(ioaddr, phy, 4);
				printk(KERN_INFO "%s: MII PHY found at address %d, status "
					   "0x%4.4x advertising %4.4x.\n",
					   dev->name, phy, mii_status, yp->advertising);
			}
		}
		yp->mii_cnt = phy_idx;
	}

	find_cnt++;

	return 0;

/* Error unwinding: release resources in reverse order of acquisition. */
err_out_free_mmio_region:
	release_mem_region (pci_resource_start (pdev, 1), YELLOWFIN_SIZE);
err_out_free_pio_region:
	release_region (pci_resource_start (pdev, 0), YELLOWFIN_SIZE);
err_out_free_netdev:
	unregister_netdev (dev);
	kfree (dev);
	return -ENODEV;
}
1350 static void __devexit yellowfin_remove_one (struct pci_dev *pdev)
1352 struct net_device *dev = pdev->driver_data;
1353 struct yellowfin_private *np;
1355 if (!dev)
1356 BUG();
1357 np = dev->priv;
1359 unregister_netdev (dev);
1361 release_region (dev->base_addr, YELLOWFIN_SIZE);
1362 release_mem_region (dev->base_addr, YELLOWFIN_SIZE);
1364 #ifndef USE_IO_OPS
1365 iounmap ((void *) dev->base_addr);
1366 #endif
1368 kfree (dev);
1372 static struct pci_driver yellowfin_driver = {
1373 name: YELLOWFIN_MODULE_NAME,
1374 id_table: yellowfin_pci_tbl,
1375 probe: yellowfin_init_one,
1376 remove: yellowfin_remove_one,
1380 static int __init yellowfin_init (void)
1382 if (debug) /* Emit version even if no cards detected. */
1383 printk(KERN_INFO "%s", version);
1385 return pci_module_init (&yellowfin_driver);
/* Module exit point: unregister the PCI driver, which in turn invokes
   yellowfin_remove_one() for each bound device. */
static void __exit yellowfin_cleanup (void)
{
	pci_unregister_driver (&yellowfin_driver);
}
1395 module_init(yellowfin_init);
1396 module_exit(yellowfin_cleanup);
1400 * Local variables:
1401 * compile-command: "gcc -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -c yellowfin.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
1402 * compile-command-alphaLX: "gcc -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O2 -c yellowfin.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS` -fomit-frame-pointer -fno-strength-reduce -mno-fp-regs -Wa,-m21164a -DBWX_USABLE -DBWIO_ENABLED"
1403 * c-indent-level: 4
1404 * c-basic-offset: 4
1405 * tab-width: 4
1406 * End: