1 /* yellowfin.c: A Packet Engines G-NIC ethernet driver for linux. */
2 /*
3 Written 1997-1998 by Donald Becker.
5 This software may be used and distributed according to the terms
6 of the GNU Public License, incorporated herein by reference.
8 This driver is for the Packet Engines G-NIC PCI Gigabit Ethernet adapter.
9 It also supports the Symbios Logic version of the same chip core.
11 The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
12 Center of Excellence in Space Data and Information Sciences
13 Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
15 Support and updates available at
16 http://cesdis.gsfc.nasa.gov/linux/drivers/yellowfin.html
19 static const char *version =
20 "yellowfin.c:v1.02 7/26/98 Written by Donald Becker, becker@cesdis.edu\n"
21 " http://cesdis.gsfc.nasa.gov/linux/drivers/yellowfin.html\n";
23 /* A few user-configurable values. */
25 static int max_interrupt_work = 20;
26 static int min_pci_latency = 64;
27 static int mtu = 0;
28 #ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */
29 /* System-wide count of bogus-rx frames. */
30 static int bogus_rx = 0;
31 static int dma_ctrl = 0x004A0263; /* Constrained by errata */
32 static int fifo_cfg = 0x0020; /* Bypass external Tx FIFO. */
33 #elif defined(YF_NEW) /* A future perfect board :->. */
34 static int dma_ctrl = 0x00CAC277; /* Override when loading module! */
35 static int fifo_cfg = 0x0028;
36 #else
37 static int dma_ctrl = 0x004A0263; /* Constrained by errata */
38 static int fifo_cfg = 0x0020; /* Bypass external Tx FIFO. */
39 #endif
41 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
42 Setting to > 1514 effectively disables this feature. */
43 static int rx_copybreak = 0;
45 /* Used to pass the media type, etc.
46 No media types are currently defined. These exist for driver
47 interoperability.
49 #define MAX_UNITS 8 /* More are supported, limit only on options */
50 static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
51 static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
53 /* Operational parameters that are set at compile time. */
55 /* Keep the ring sizes a power of two for efficiency.
56 Making the Tx ring too large decreases the effectiveness of channel
57 bonding and packet priority.
58 There are no ill effects from too-large receive rings. */
59 #define TX_RING_SIZE 16
60 #define RX_RING_SIZE 32
62 /* Operational parameters that usually are not changed. */
63 /* Time in jiffies before concluding the transmitter is hung. */
64 #define TX_TIMEOUT ((2000*HZ)/1000)
66 #include <linux/module.h>
67 #include <linux/kernel.h>
68 #include <linux/sched.h>
69 #include <linux/string.h>
70 #include <linux/timer.h>
71 #include <linux/ptrace.h>
72 #include <linux/errno.h>
73 #include <linux/ioport.h>
74 #include <linux/malloc.h>
75 #include <linux/interrupt.h>
76 #include <linux/pci.h>
77 #include <asm/processor.h> /* Processor type for cache alignment. */
78 #include <asm/bitops.h>
79 #include <asm/unaligned.h>
80 #include <asm/io.h>
82 #include <linux/netdevice.h>
83 #include <linux/etherdevice.h>
84 #include <linux/skbuff.h>
86 /* Kernel compatibility defines, most common to the PCCard package. */
87 #include <linux/version.h> /* Evil and unnecessary */
89 #define RUN_AT(x) (jiffies + (x))
91 #if (LINUX_VERSION_CODE < 0x20123)
92 #define test_and_set_bit(val, addr) set_bit(val, addr)
93 #endif
94 #if LINUX_VERSION_CODE <= 0x20139
95 #define net_device_stats enet_statistics
96 #define NETSTATS_VER2
97 #endif
98 #if LINUX_VERSION_CODE < 0x20155
99 #define PCI_SUPPORT_VER1
100 #define pci_present pcibios_present
101 #endif
102 #if LINUX_VERSION_CODE < 0x20159
103 #define DEV_FREE_SKB(skb) dev_kfree_skb(skb, FREE_WRITE)
104 #else
105 #define DEV_FREE_SKB(skb) dev_kfree_skb(skb)
106 #endif
108 /* The PCI I/O space extent. */
109 #define YELLOWFIN_TOTAL_SIZE 0x100
111 int yellowfin_debug = 1;
114 Theory of Operation
116 I. Board Compatibility
118 This device driver is designed for the Packet Engines "Yellowfin" Gigabit
119 Ethernet adapter. The only PCA currently supported is the G-NIC 64-bit
120 PCI card.
122 II. Board-specific settings
124 PCI bus devices are configured by the system at boot time, so no jumpers
125 need to be set on the board. The system BIOS preferably should assign the
126 PCI INTA signal to an otherwise unused system IRQ line.
127 Note: Kernel versions earlier than 1.3.73 do not support shared PCI
128 interrupt lines.
130 III. Driver operation
132 IIIa. Ring buffers
134 The Yellowfin uses the Descriptor Based DMA Architecture specified by Apple.
135 This is a descriptor list scheme similar to that used by the EEPro100 and
136 Tulip. This driver uses two statically allocated fixed-size descriptor lists
137 formed into rings by a branch from the final descriptor to the beginning of
138 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
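
As an editorial illustration (not part of the driver), the wrap is built by
pointing each descriptor's branch_addr at the next entry and the last entry
back at the first, assuming the yellowfin_desc layout defined further below:

	for (i = 0; i < RX_RING_SIZE - 1; i++)
		yp->rx_ring[i].branch_addr = virt_to_bus(&yp->rx_ring[i + 1]);
	yp->rx_ring[RX_RING_SIZE - 1].branch_addr = virt_to_bus(&yp->rx_ring[0]);

yellowfin_init_ring() below does exactly this, and additionally fills in the
command, length and buffer-address fields of every descriptor.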
140 The driver allocates full frame size skbuffs for the Rx ring buffers at
141 open() time and passes the skb->data field to the Yellowfin as receive data
142 buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
143 a fresh skbuff is allocated and the frame is copied to the new skbuff.
144 When the incoming frame is larger, the skbuff is passed directly up the
145 protocol stack and replaced by a newly allocated skbuff.
147 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
148 using a full-sized skbuff for small frames vs. the copying costs of larger
149 frames. For small frames the copying cost is negligible (esp. considering
150 that we are pre-loading the cache with immediately useful header
151 information). For large frames the copying cost is non-trivial, and the
152 larger copy might flush the cache of useful data.
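
A condensed, editorial sketch of that decision as it appears in yellowfin_rx()
below; "rx_buf" here is only a placeholder for the descriptor's DMA buffer:

	if (pkt_len > rx_copybreak) {
		skb = yp->rx_skbuff[entry];		pass the full-size skbuff up as-is
		skb_put(skb, pkt_len);
		yp->rx_skbuff[entry] = NULL;		the refill loop allocates a fresh one
	} else {
		skb = dev_alloc_skb(pkt_len + 2);	small frame: copy into a snug skbuff
		skb->dev = dev;
		skb_reserve(skb, 2);			16 byte align the IP header
		memcpy(skb_put(skb, pkt_len), rx_buf, pkt_len);
	}
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);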
154 IIIc. Synchronization
156 The driver runs as two independent, single-threaded flows of control. One
157 is the send-packet routine, which enforces single-threaded use by the
158 dev->tbusy flag. The other thread is the interrupt handler, which is single
159 threaded by the hardware and other software.
161 The send packet thread has partial control over the Tx ring and 'dev->tbusy'
162 flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
163 queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
164 the 'yp->tx_full' flag.
166 The interrupt handler has exclusive control over the Rx ring and records stats
167 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
168 empty by incrementing the dirty_tx mark. Iff the 'yp->tx_full' flag is set, it
169 clears both the tx_full and tbusy flags.
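
A condensed sketch of that handshake (editorial; the complete versions live in
yellowfin_start_xmit() and yellowfin_interrupt() below):

	sender, after queueing a packet in slot 'entry':
		if (yp->cur_tx - yp->dirty_tx < TX_RING_SIZE - 1)
			clear_bit(0, (void*)&dev->tbusy);	typical path, ring not full
		else
			yp->tx_full = 1;			leave tbusy set

	interrupt handler, after advancing dirty_tx past the reaped entries:
		if (yp->tx_full && dev->tbusy
			&& yp->cur_tx - yp->dirty_tx < TX_RING_SIZE - 2) {
			yp->tx_full = 0;			the ring has drained enough
			clear_bit(0, (void*)&dev->tbusy);
			mark_bh(NET_BH);			let queued transmits resume
		}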
171 IV. Notes
173 Thanks to Kim Stearns of Packet Engines for providing a pair of G-NIC boards.
174 Thanks to Bruce Faust of Digitalscape for providing both their SYM53C885 board
175 and an AlphaStation to verify the Alpha port!
177 IVb. References
179 Yellowfin Engineering Design Specification, 4/23/97 Preliminary/Confidential
180 Symbios SYM53C885 PCI-SCSI/Fast Ethernet Multifunction Controller Preliminary
181 Data Manual v3.0
182 http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
183 http://cesdis.gsfc.nasa.gov/linux/misc/100mbps.html
185 IVc. Errata
187 See Packet Engines confidential appendix (prototype chips only).
191 /* A few values that may be tweaked. */
192 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
194 #ifndef PCI_VENDOR_ID_PKT_ENG /* To be defined in linux/pci.h */
195 #define PCI_VENDOR_ID_PKT_ENG 0x1000 /* Hmm, likely number.. */
196 #define PCI_DEVICE_ID_SYM58C885 0x0701
197 #define PCI_DEVICE_ID_YELLOWFIN 0x0702
198 #endif
200 /* The rest of these values should never change. */
202 static void yellowfin_timer(unsigned long data);
204 enum capability_flags {HasMII=1, FullTxStatus=2};
205 static struct chip_info {
206 u16 vendor_id, device_id, device_id_mask, pci_flags;
207 const char *name;
208 void (*media_timer)(unsigned long data);
209 u32 chip_rev; /* As read from ChipRev, not PCI dev ID. */
210 int flags;
211 } chip_tbl[] = {
212 {0x1000, 0x0702, 0xffff, 0, "Yellowfin G-NIC Gbit Ethernet",
213 yellowfin_timer, 0x0702, FullTxStatus},
214 {0x1000, 0x0701, 0xffff, 0, "Symbios SYM53C885",
215 yellowfin_timer, 0x0701, HasMII},
216 {0,},
219 /* Offsets to the Yellowfin registers. Various sizes and alignments. */
220 enum yellowfin_offsets {
221 TxCtrl=0x00, TxStatus=0x04, TxPtr=0x0C,
222 TxIntrSel=0x10, TxBranchSel=0x14, TxWaitSel=0x18,
223 RxCtrl=0x40, RxStatus=0x44, RxPtr=0x4C,
224 RxIntrSel=0x50, RxBranchSel=0x54, RxWaitSel=0x58,
225 EventStatus=0x80, IntrEnb=0x82, IntrClear=0x84, IntrStatus=0x86,
226 ChipRev=0x8C, DMACtrl=0x90, Cnfg=0xA0, FrameGap0=0xA2, FrameGap1=0xA4,
227 MII_Cmd=0xA6, MII_Addr=0xA8, MII_Wr_Data=0xAA, MII_Rd_Data=0xAC,
228 MII_Status=0xAE,
229 RxDepth=0xB8, FlowCtrl=0xBC,
230 AddrMode=0xD0, StnAddr=0xD2, HashTbl=0xD8, FIFOcfg=0xF8,
231 EEStatus=0xF0, EECtrl=0xF1, EEAddr=0xF2, EERead=0xF3, EEWrite=0xF4,
232 EEFeature=0xF5,
235 /* The Yellowfin Rx and Tx buffer descriptors. */
236 struct yellowfin_desc {
237 u16 request_cnt;
238 u16 cmd;
239 u32 addr;
240 u32 branch_addr;
241 u16 result_cnt;
242 u16 status;
245 struct tx_status_words {
246 u16 tx_cnt;
247 u16 tx_errs;
248 u16 total_tx_cnt;
249 u16 paused;
252 /* Bits in yellowfin_desc.cmd */
253 enum desc_cmd_bits {
254 CMD_TX_PKT=0x1000, CMD_RX_BUF=0x2000, CMD_TXSTATUS=0x3000,
255 CMD_NOP=0x6000, CMD_STOP=0x7000,
256 BRANCH_ALWAYS=0x0C, INTR_ALWAYS=0x30, WAIT_ALWAYS=0x03,
257 BRANCH_IFTRUE=0x04,
260 /* Bits in yellowfin_desc.status */
261 enum desc_status_bits { RX_EOP=0x0040, };
263 /* Bits in the interrupt status/mask registers. */
264 enum intr_status_bits {
265 IntrRxDone=0x01, IntrRxInvalid=0x02, IntrRxPCIFault=0x04,IntrRxPCIErr=0x08,
266 IntrTxDone=0x10, IntrTxInvalid=0x20, IntrTxPCIFault=0x40,IntrTxPCIErr=0x80,
267 IntrEarlyRx=0x100, IntrWakeup=0x200, };
269 struct yellowfin_private {
270 /* Descriptor rings first for alignment. Tx requires a second descriptor
271 for status. */
272 struct yellowfin_desc rx_ring[RX_RING_SIZE];
273 struct yellowfin_desc tx_ring[TX_RING_SIZE*2];
274 const char *product_name;
275 struct net_device *next_module;
276 /* The addresses of receive-in-place skbuffs. */
277 struct sk_buff* rx_skbuff[RX_RING_SIZE];
279 /* The saved address of a sent-in-place packet/buffer, for later dev_kfree_skb(). */
279 struct sk_buff* tx_skbuff[TX_RING_SIZE];
280 struct tx_status_words tx_status[TX_RING_SIZE];
281 struct timer_list timer; /* Media selection timer. */
282 struct enet_statistics stats;
283 /* Frequently used and paired value: keep adjacent for cache effect. */
284 int chip_id;
285 int in_interrupt;
286 struct yellowfin_desc *rx_head_desc;
287 struct tx_status_words *tx_tail_desc;
288 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
289 unsigned int cur_tx, dirty_tx;
290 unsigned int rx_buf_sz; /* Based on MTU+slack. */
291 unsigned int tx_full:1; /* The Tx queue is full. */
292 unsigned int full_duplex:1; /* Full-duplex operation requested. */
293 unsigned int duplex_lock:1;
294 unsigned int medialock:1; /* Do not sense media. */
295 unsigned int default_port:4; /* Last dev->if_port value. */
296 /* MII transceiver section. */
297 int mii_cnt; /* Number of MII PHYs found. */
298 u16 advertising; /* NWay media advertisement */
299 unsigned char phys[2]; /* MII device addresses. */
300 u32 pad[4]; /* Used for 32-byte alignment */
303 #ifdef MODULE
305 #if LINUX_VERSION_CODE > 0x20115
306 MODULE_AUTHOR("Donald Becker <becker@cesdis.gsfc.nasa.gov>");
307 MODULE_DESCRIPTION("Packet Engines Yellowfin G-NIC Gigabit Ethernet driver");
308 MODULE_PARM(max_interrupt_work, "i");
309 MODULE_PARM(min_pci_latency, "i");
310 MODULE_PARM(mtu, "i");
311 MODULE_PARM(debug, "i");
312 MODULE_PARM(rx_copybreak, "i");
313 MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
314 MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
315 #endif
317 #endif
319 static struct net_device *yellowfin_probe1(long ioaddr, int irq, int chip_id, int options);
320 static int read_eeprom(long ioaddr, int location);
321 static int mdio_read(long ioaddr, int phy_id, int location);
322 static void mdio_write(long ioaddr, int phy_id, int location, int value);
323 #ifdef HAVE_PRIVATE_IOCTL
324 static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
325 #endif
326 static int yellowfin_open(struct net_device *dev);
327 static void yellowfin_timer(unsigned long data);
328 static void yellowfin_tx_timeout(struct net_device *dev);
329 static void yellowfin_init_ring(struct net_device *dev);
330 static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev);
331 static void yellowfin_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
332 static int yellowfin_rx(struct net_device *dev);
333 static void yellowfin_error(struct net_device *dev, int intr_status);
334 static int yellowfin_close(struct net_device *dev);
335 static struct enet_statistics *yellowfin_get_stats(struct net_device *dev);
336 static void set_rx_mode(struct net_device *dev);
340 /* A list of all installed Yellowfin devices, for removing the driver module. */
341 static struct net_device *root_yellowfin_dev = NULL;
343 int yellowfin_probe(void)
345 int cards_found = 0;
346 int pci_index = 0;
347 unsigned char pci_bus, pci_device_fn;
349 if ( ! pci_present())
350 return -ENODEV;
352 for (;pci_index < 0xff; pci_index++) {
353 u8 pci_latency;
354 u16 pci_command, new_command, vendor, device;
355 int chip_idx;
356 int irq;
357 long ioaddr;
359 if (pcibios_find_class (PCI_CLASS_NETWORK_ETHERNET << 8,
360 pci_index,
361 &pci_bus, &pci_device_fn)
362 != PCIBIOS_SUCCESSFUL)
363 break;
365 pcibios_read_config_word(pci_bus, pci_device_fn,
366 PCI_VENDOR_ID, &vendor);
367 pcibios_read_config_word(pci_bus, pci_device_fn,
368 PCI_DEVICE_ID, &device);
370 for (chip_idx = 0; chip_tbl[chip_idx].vendor_id; chip_idx++)
371 if (vendor == chip_tbl[chip_idx].vendor_id
372 && (device & chip_tbl[chip_idx].device_id_mask) ==
373 chip_tbl[chip_idx].device_id)
374 break;
375 if (chip_tbl[chip_idx].vendor_id == 0) /* Compiled out! */
376 continue;
379 struct pci_dev *pdev = pci_find_slot(pci_bus, pci_device_fn);
380 ioaddr = pdev->resource[0].start;
381 irq = pdev->irq;
384 if (yellowfin_debug > 2)
385 printk(KERN_INFO "Found %s at I/O %#lx, IRQ %d.\n",
386 chip_tbl[chip_idx].name, ioaddr, irq);
388 if (check_region(ioaddr, YELLOWFIN_TOTAL_SIZE))
389 continue;
391 pcibios_read_config_word(pci_bus, pci_device_fn,
392 PCI_COMMAND, &pci_command);
393 new_command = pci_command | PCI_COMMAND_MASTER|PCI_COMMAND_IO;
394 if (pci_command != new_command) {
395 printk(KERN_INFO " The PCI BIOS has not enabled the"
396 " device at %d/%d! Updating PCI command %4.4x->%4.4x.\n",
397 pci_bus, pci_device_fn, pci_command, new_command);
398 pcibios_write_config_word(pci_bus, pci_device_fn,
399 PCI_COMMAND, new_command);
402 if(yellowfin_probe1(ioaddr, irq, chip_idx, cards_found))
404 /* Get and check the bus-master and latency values. */
405 pcibios_read_config_byte(pci_bus, pci_device_fn,
406 PCI_LATENCY_TIMER, &pci_latency);
407 if (pci_latency < min_pci_latency) {
408 printk(KERN_INFO " PCI latency timer (CFLT) is "
409 "unreasonably low at %d. Setting to %d clocks.\n",
410 pci_latency, min_pci_latency);
411 pcibios_write_config_byte(pci_bus, pci_device_fn,
412 PCI_LATENCY_TIMER, min_pci_latency);
413 } else if (yellowfin_debug > 1)
414 printk(KERN_INFO " PCI latency timer (CFLT) is %#x.\n",
415 pci_latency);
416 cards_found++;
420 return cards_found ? 0 : -ENODEV;
423 static struct net_device *yellowfin_probe1(long ioaddr, int irq, int chip_id, int card_idx)
425 static int did_version = 0; /* Already printed version info. */
426 struct yellowfin_private *yp;
427 int option, i;
428 struct net_device *dev;
430 if (yellowfin_debug > 0 && did_version++ == 0)
431 printk(version);
433 dev = init_etherdev(NULL, sizeof(struct yellowfin_private));
435 printk(KERN_INFO "%s: %s type %8x at 0x%lx, ",
436 dev->name, chip_tbl[chip_id].name, inl(ioaddr + ChipRev), ioaddr);
438 if (inw(ioaddr + ChipRev) == 0x0702)
439 for (i = 0; i < 6; i++)
440 dev->dev_addr[i] = inb(ioaddr + StnAddr + i);
441 else {
442 int ee_offset = (read_eeprom(ioaddr, 6) == 0xff ? 0x100 : 0);
443 for (i = 0; i < 6; i++)
444 dev->dev_addr[i] = read_eeprom(ioaddr, ee_offset + i);
446 for (i = 0; i < 5; i++)
447 printk("%2.2x:", dev->dev_addr[i]);
448 printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
450 /* Reset the chip. */
451 outl(0x80000000, ioaddr + DMACtrl);
453 /* We do a request_region() only to register /proc/ioports info. */
454 request_region(ioaddr, YELLOWFIN_TOTAL_SIZE, dev->name);
456 dev->base_addr = ioaddr;
457 dev->irq = irq;
459 /* Make certain the descriptor lists are aligned. */
460 yp = (void *)(((long)kmalloc(sizeof(*yp) + 31, GFP_KERNEL) + 31) & ~31);
461 memset(yp, 0, sizeof(*yp));
462 dev->priv = yp;
464 yp->next_module = root_yellowfin_dev;
465 root_yellowfin_dev = dev;
467 yp->chip_id = chip_id;
469 option = card_idx < MAX_UNITS ? options[card_idx] : 0;
470 if (dev->mem_start)
471 option = dev->mem_start;
473 /* The lower four bits are the media type. */
474 if (option > 0) {
475 if (option & 0x200)
476 yp->full_duplex = 1;
477 yp->default_port = option & 15;
478 if (yp->default_port)
479 yp->medialock = 1;
481 if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
482 yp->full_duplex = 1;
484 if (yp->full_duplex)
485 yp->duplex_lock = 1;
487 /* The Yellowfin-specific entries in the device structure. */
488 dev->open = &yellowfin_open;
489 dev->hard_start_xmit = &yellowfin_start_xmit;
490 dev->stop = &yellowfin_close;
491 dev->get_stats = &yellowfin_get_stats;
492 dev->set_multicast_list = &set_rx_mode;
493 #ifdef HAVE_PRIVATE_IOCTL
494 dev->do_ioctl = &mii_ioctl;
495 #endif
496 if (mtu)
497 dev->mtu = mtu;
499 if (chip_tbl[yp->chip_id].flags & HasMII) {
500 int phy, phy_idx = 0;
501 for (phy = 0; phy < 32 && phy_idx < 4; phy++) {
502 int mii_status = mdio_read(ioaddr, phy, 1);
503 if (mii_status != 0xffff &&
504 mii_status != 0x0000) {
505 yp->phys[phy_idx++] = phy;
506 yp->advertising = mdio_read(ioaddr, phy, 4);
507 printk(KERN_INFO "%s: MII PHY found at address %d, status "
508 "0x%4.4x advertising %4.4x.\n",
509 dev->name, phy, mii_status, yp->advertising);
512 yp->mii_cnt = phy_idx;
515 return dev;
518 static int read_eeprom(long ioaddr, int location)
520 int bogus_cnt = 1000;
522 outb(location, ioaddr + EEAddr);
523 outb(0x30 | ((location >> 8) & 7), ioaddr + EECtrl);
524 while ((inb(ioaddr + EEStatus) & 0x80) && --bogus_cnt > 0)
525 ; /* Busy-wait until the EEPROM read completes or the count expires. */
526 return inb(ioaddr + EERead);
529 /* MII Management Data I/O accesses.
530 These routines assume the MDIO controller is idle, and do not exit until
531 the command is finished. */
533 static int mdio_read(long ioaddr, int phy_id, int location)
535 int i;
537 outw((phy_id<<8) + location, ioaddr + MII_Addr);
538 outw(1, ioaddr + MII_Cmd);
539 for (i = 10000; i >= 0; i--)
540 if ((inw(ioaddr + MII_Status) & 1) == 0)
541 break;
542 return inw(ioaddr + MII_Rd_Data);
545 static void mdio_write(long ioaddr, int phy_id, int location, int value)
547 int i;
549 outw((phy_id<<8) + location, ioaddr + MII_Addr);
550 outw(value, ioaddr + MII_Wr_Data);
552 /* Wait for the command to finish. */
553 for (i = 10000; i >= 0; i--)
554 if ((inw(ioaddr + MII_Status) & 1) == 0)
555 break;
556 return;
560 static int yellowfin_open(struct net_device *dev)
562 struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
563 long ioaddr = dev->base_addr;
564 int i;
566 /* Reset the chip. */
567 outl(0x80000000, ioaddr + DMACtrl);
569 if (request_irq(dev->irq, &yellowfin_interrupt, SA_SHIRQ, dev->name, dev))
570 return -EAGAIN;
572 if (yellowfin_debug > 1)
573 printk(KERN_DEBUG "%s: yellowfin_open() irq %d.\n",
574 dev->name, dev->irq);
576 MOD_INC_USE_COUNT;
578 yellowfin_init_ring(dev);
580 outl(virt_to_bus(yp->rx_ring), ioaddr + RxPtr);
581 outl(virt_to_bus(yp->tx_ring), ioaddr + TxPtr);
583 for (i = 0; i < 6; i++)
584 outb(dev->dev_addr[i], ioaddr + StnAddr + i);
586 /* Set up various condition 'select' registers.
587 There are no options here. */
588 outl(0x00800080, ioaddr + TxIntrSel); /* Interrupt on Tx abort */
589 outl(0x00800080, ioaddr + TxBranchSel); /* Branch on Tx abort */
590 outl(0x00400040, ioaddr + TxWaitSel); /* Wait on Tx status */
591 outl(0x00400040, ioaddr + RxIntrSel); /* Interrupt on Rx done */
592 outl(0x00400040, ioaddr + RxBranchSel); /* Branch on Rx error */
593 outl(0x00400040, ioaddr + RxWaitSel); /* Wait on Rx done */
595 /* Initialize other registers: with so many, this will eventually be
596 converted to an offset/value list. */
597 outl(dma_ctrl, ioaddr + DMACtrl);
598 outw(fifo_cfg, ioaddr + FIFOcfg);
599 /* Enable automatic generation of flow control frames, period 0xffff. */
600 outl(0x0030FFFF, ioaddr + FlowCtrl);
602 if (dev->if_port == 0)
603 dev->if_port = yp->default_port;
605 dev->tbusy = 0;
606 dev->interrupt = 0;
607 yp->in_interrupt = 0;
609 /* Setting the Rx mode will start the Rx process. */
610 if (yp->chip_id == 0) {
611 /* We are always in full-duplex mode with gigabit! */
612 yp->full_duplex = 1;
613 outw(0x01CF, ioaddr + Cnfg);
614 } else {
615 outw(0x0018, ioaddr + FrameGap0); /* 0060/4060 for non-MII 10baseT */
616 outw(0x1018, ioaddr + FrameGap1);
617 outw(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
619 set_rx_mode(dev);
621 dev->start = 1;
623 /* Enable interrupts by setting the interrupt mask. */
624 outw(0x81ff, ioaddr + IntrEnb); /* See enum intr_status_bits */
625 outw(0x0000, ioaddr + EventStatus); /* Clear non-interrupting events */
626 outl(0x80008000, ioaddr + RxCtrl); /* Start Rx and Tx channels. */
627 outl(0x80008000, ioaddr + TxCtrl);
629 if (yellowfin_debug > 2) {
630 printk(KERN_DEBUG "%s: Done yellowfin_open().\n",
631 dev->name);
633 /* Set the timer to check for link beat. */
634 init_timer(&yp->timer);
635 yp->timer.expires = RUN_AT((24*HZ)/10); /* 2.4 sec. */
636 yp->timer.data = (unsigned long)dev;
637 yp->timer.function = &yellowfin_timer; /* timer handler */
638 add_timer(&yp->timer);
640 return 0;
643 static void yellowfin_timer(unsigned long data)
645 struct net_device *dev = (struct net_device *)data;
646 struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
647 long ioaddr = dev->base_addr;
648 int next_tick = 0;
650 if (yellowfin_debug > 3) {
651 printk(KERN_DEBUG "%s: Yellowfin timer tick, status %8.8x.\n",
652 dev->name, inw(ioaddr + IntrStatus));
655 if (yp->mii_cnt) {
656 int mii_reg1 = mdio_read(ioaddr, yp->phys[0], 1);
657 int mii_reg5 = mdio_read(ioaddr, yp->phys[0], 5);
658 int negotiated = mii_reg5 & yp->advertising;
659 if (yellowfin_debug > 1)
660 printk(KERN_DEBUG "%s: MII #%d status register is %4.4x, "
661 "link partner capability %4.4x.\n",
662 dev->name, yp->phys[0], mii_reg1, mii_reg5);
664 if ( ! yp->duplex_lock &&
665 ((negotiated & 0x0300) == 0x0100
666 || (negotiated & 0x00C0) == 0x0040)) {
667 yp->full_duplex = 1;
669 outw(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
671 if (mii_reg1 & 0x0004)
672 next_tick = 60*HZ;
673 else
674 next_tick = 3*HZ;
677 if (next_tick) {
678 yp->timer.expires = RUN_AT(next_tick);
679 add_timer(&yp->timer);
683 static void yellowfin_tx_timeout(struct net_device *dev)
685 struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
686 long ioaddr = dev->base_addr;
688 printk(KERN_WARNING "%s: Yellowfin transmit timed out, status %8.8x, resetting...\n",
689 dev->name, inl(ioaddr));
691 #ifndef __alpha__
693 int i;
694 printk(KERN_DEBUG " Rx ring %8.8x: ", (int)yp->rx_ring);
695 for (i = 0; i < RX_RING_SIZE; i++)
696 printk(" %8.8x", (unsigned int)yp->rx_ring[i].status);
697 printk("\n"KERN_DEBUG" Tx ring %8.8x: ", (int)yp->tx_ring);
698 for (i = 0; i < TX_RING_SIZE; i++)
699 printk(" %4.4x /%4.4x", yp->tx_status[i].tx_errs, yp->tx_ring[i].status);
700 printk("\n");
702 #endif
704 /* Perhaps we should reinitialize the hardware here. */
705 dev->if_port = 0;
706 /* Stop and restart the chip's Tx processes. */
708 /* Trigger an immediate transmit demand. */
710 dev->trans_start = jiffies;
711 yp->stats.tx_errors++;
712 return;
716 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
717 static void yellowfin_init_ring(struct net_device *dev)
719 struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
720 int i;
722 yp->tx_full = 0;
723 yp->cur_rx = yp->cur_tx = 0;
724 yp->dirty_rx = yp->dirty_tx = 0;
726 yp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
727 yp->rx_head_desc = &yp->rx_ring[0];
729 for (i = 0; i < RX_RING_SIZE; i++) {
730 struct sk_buff *skb;
732 yp->rx_ring[i].request_cnt = yp->rx_buf_sz;
733 yp->rx_ring[i].cmd = CMD_RX_BUF | INTR_ALWAYS;
735 skb = dev_alloc_skb(yp->rx_buf_sz);
736 yp->rx_skbuff[i] = skb;
737 if (skb) {
738 skb->dev = dev; /* Mark as being used by this device. */
739 skb_reserve(skb, 2); /* 16 byte align the IP header. */
740 yp->rx_ring[i].addr = virt_to_bus(skb->tail);
741 } else if (yp->dirty_rx == 0)
742 yp->dirty_rx = (unsigned int)(0 - RX_RING_SIZE);
743 yp->rx_ring[i].branch_addr = virt_to_bus(&yp->rx_ring[i+1]);
745 /* Mark the last entry as wrapping the ring. */
746 yp->rx_ring[i-1].cmd = CMD_RX_BUF | INTR_ALWAYS | BRANCH_ALWAYS;
747 yp->rx_ring[i-1].branch_addr = virt_to_bus(&yp->rx_ring[0]);
749 /*#define NO_TXSTATS*/
750 #ifdef NO_TXSTATS
751 /* In this mode the Tx ring needs only a single descriptor. */
752 for (i = 0; i < TX_RING_SIZE; i++) {
753 yp->tx_skbuff[i] = 0;
754 yp->tx_ring[i].cmd = CMD_STOP;
755 yp->tx_ring[i].branch_addr = virt_to_bus(&yp->tx_ring[i+1]);
757 yp->tx_ring[--i].cmd = CMD_STOP | BRANCH_ALWAYS; /* Wrap ring */
758 yp->tx_ring[i].branch_addr = virt_to_bus(&yp->tx_ring[0]);
759 #else
760 /* Tx ring needs a pair of descriptors, the second for the status. */
761 for (i = 0; i < TX_RING_SIZE*2; i++) {
762 yp->tx_skbuff[i/2] = 0;
763 yp->tx_ring[i].cmd = CMD_STOP; /* Branch on Tx error. */
764 yp->tx_ring[i].branch_addr = virt_to_bus(&yp->tx_ring[i+1]);
765 i++;
766 if (chip_tbl[yp->chip_id].flags & FullTxStatus) {
767 yp->tx_ring[i].cmd = CMD_TXSTATUS;
768 yp->tx_ring[i].request_cnt = sizeof(yp->tx_status[i]);
769 yp->tx_ring[i].addr = virt_to_bus(&yp->tx_status[i/2]);
770 } else { /* Symbios chips write only tx_errs word. */
771 yp->tx_ring[i].cmd = CMD_TXSTATUS | INTR_ALWAYS;
772 yp->tx_ring[i].request_cnt = 2;
773 yp->tx_ring[i].addr = virt_to_bus(&yp->tx_status[i/2].tx_errs);
775 yp->tx_ring[i].branch_addr = virt_to_bus(&yp->tx_ring[i+1]);
777 /* Wrap ring */
778 yp->tx_ring[--i].cmd = CMD_TXSTATUS | BRANCH_ALWAYS | INTR_ALWAYS;
779 yp->tx_ring[i].branch_addr = virt_to_bus(&yp->tx_ring[0]);
780 #endif
781 yp->tx_tail_desc = &yp->tx_status[0];
782 return;
785 static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev)
787 struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
788 unsigned entry;
790 /* Block a timer-based transmit from overlapping. This could better be
791 done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
792 if (test_and_set_bit(0, (void*)&dev->tbusy) != 0) {
793 if (jiffies - dev->trans_start < TX_TIMEOUT)
794 return 1;
795 yellowfin_tx_timeout(dev);
796 return 1;
799 /* Caution: the write order is important here, set the base address
800 with the "ownership" bits last. */
802 /* Calculate the next Tx descriptor entry. */
803 entry = yp->cur_tx % TX_RING_SIZE;
805 yp->tx_skbuff[entry] = skb;
807 #ifdef NO_TXSTATS
808 yp->tx_ring[entry].request_cnt = skb->len;
809 yp->tx_ring[entry].addr = virt_to_bus(skb->data);
810 yp->tx_ring[entry].status = 0;
811 if (entry >= TX_RING_SIZE-1) {
812 yp->tx_ring[0].cmd = CMD_STOP; /* New stop command. */
813 yp->tx_ring[TX_RING_SIZE-1].cmd = CMD_TX_PKT | BRANCH_ALWAYS;
814 } else {
815 yp->tx_ring[entry+1].cmd = CMD_STOP; /* New stop command. */
816 yp->tx_ring[entry].cmd = CMD_TX_PKT | BRANCH_IFTRUE;
818 yp->cur_tx++;
819 #else
820 yp->tx_ring[entry<<1].request_cnt = skb->len;
821 yp->tx_ring[entry<<1].addr = virt_to_bus(skb->data);
822 /* The input_last (status-write) command is constant, but we must rewrite
823 the subsequent 'stop' command. */
825 yp->cur_tx++;
827 unsigned next_entry = yp->cur_tx % TX_RING_SIZE;
828 yp->tx_ring[next_entry<<1].cmd = CMD_STOP;
830 /* Final step -- overwrite the old 'stop' command. */
832 yp->tx_ring[entry<<1].cmd =
833 (entry % 6) == 0 ? CMD_TX_PKT | INTR_ALWAYS | BRANCH_IFTRUE :
834 CMD_TX_PKT | BRANCH_IFTRUE;
835 #endif
837 /* Non-x86 Todo: explicitly flush cache lines here. */
839 /* Wake the potentially-idle transmit channel. */
840 outl(0x10001000, dev->base_addr + TxCtrl);
842 if (yp->cur_tx - yp->dirty_tx < TX_RING_SIZE - 1)
843 clear_bit(0, (void*)&dev->tbusy); /* Typical path */
844 else
845 yp->tx_full = 1;
846 dev->trans_start = jiffies;
848 if (yellowfin_debug > 4) {
849 printk(KERN_DEBUG "%s: Yellowfin transmit frame #%d queued in slot %d.\n",
850 dev->name, yp->cur_tx, entry);
852 return 0;
855 /* The interrupt handler does all of the Rx thread work and cleans up
856 after the Tx thread. */
857 static void yellowfin_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
859 struct net_device *dev = (struct net_device *)dev_instance;
860 struct yellowfin_private *yp;
861 long ioaddr, boguscnt = max_interrupt_work;
863 #ifndef final_version /* Can never occur. */
864 if (dev == NULL) {
865 printk (KERN_ERR "yellowfin_interrupt(): irq %d for unknown device.\n", irq);
866 return;
868 #endif
870 ioaddr = dev->base_addr;
871 yp = (struct yellowfin_private *)dev->priv;
872 if (test_and_set_bit(0, (void*)&yp->in_interrupt)) {
873 dev->interrupt = 1;
874 printk(KERN_ERR "%s: Re-entering the interrupt handler.\n", dev->name);
875 return;
878 do {
879 u16 intr_status = inw(ioaddr + IntrClear);
881 if (yellowfin_debug > 4)
882 printk(KERN_DEBUG "%s: Yellowfin interrupt, status %4.4x.\n",
883 dev->name, intr_status);
885 if (intr_status == 0)
886 break;
888 if (intr_status & (IntrRxDone | IntrEarlyRx)) {
889 yellowfin_rx(dev);
890 outl(0x10001000, ioaddr + RxCtrl); /* Wake Rx engine. */
893 #ifdef NO_TXSTATS
894 for (; yp->cur_tx - yp->dirty_tx > 0; yp->dirty_tx++) {
895 int entry = yp->dirty_tx % TX_RING_SIZE;
896 if (yp->tx_ring[entry].status == 0)
897 break;
898 /* Free the original skb. */
899 DEV_FREE_SKB(yp->tx_skbuff[entry]);
900 yp->tx_skbuff[entry] = 0;
901 yp->stats.tx_packets++;
903 if (yp->tx_full && dev->tbusy
904 && yp->cur_tx - yp->dirty_tx < TX_RING_SIZE - 4) {
905 /* The ring is no longer full, clear tbusy. */
906 yp->tx_full = 0;
907 clear_bit(0, (void*)&dev->tbusy);
908 mark_bh(NET_BH);
910 #else
911 if (intr_status & IntrTxDone
912 || yp->tx_tail_desc->tx_errs) {
913 unsigned dirty_tx = yp->dirty_tx;
915 for (dirty_tx = yp->dirty_tx; yp->cur_tx - dirty_tx > 0;
916 dirty_tx++) {
917 /* Todo: optimize this. */
918 int entry = dirty_tx % TX_RING_SIZE;
919 u16 tx_errs = yp->tx_status[entry].tx_errs;
921 #ifndef final_version
922 if (yellowfin_debug > 5)
923 printk(KERN_DEBUG "%s: Tx queue %d check, Tx status "
924 "%4.4x %4.4x %4.4x %4.4x.\n",
925 dev->name, entry,
926 yp->tx_status[entry].tx_cnt,
927 yp->tx_status[entry].tx_errs,
928 yp->tx_status[entry].total_tx_cnt,
929 yp->tx_status[entry].paused);
930 #endif
931 if (tx_errs == 0)
932 break; /* It still hasn't been Txed */
933 if (tx_errs & 0xF810) {
934 /* There was a major error, log it. */
935 #ifndef final_version
936 if (yellowfin_debug > 1)
937 printk(KERN_DEBUG "%s: Transmit error, Tx status %4.4x.\n",
938 dev->name, tx_errs);
939 #endif
940 yp->stats.tx_errors++;
941 if (tx_errs & 0xF800) yp->stats.tx_aborted_errors++;
942 if (tx_errs & 0x0800) yp->stats.tx_carrier_errors++;
943 if (tx_errs & 0x2000) yp->stats.tx_window_errors++;
944 if (tx_errs & 0x8000) yp->stats.tx_fifo_errors++;
945 #ifdef ETHER_STATS
946 if (tx_errs & 0x1000) yp->stats.collisions16++;
947 #endif
948 } else {
949 #ifndef final_version
950 if (yellowfin_debug > 4)
951 printk(KERN_DEBUG "%s: Normal transmit, Tx status %4.4x.\n",
952 dev->name, tx_errs);
953 #endif
954 #ifdef ETHER_STATS
955 if (tx_errs & 0x0400) yp->stats.tx_deferred++;
956 #endif
957 yp->stats.collisions += tx_errs & 15;
958 yp->stats.tx_packets++;
961 /* Free the original skb. */
962 DEV_FREE_SKB(yp->tx_skbuff[entry]);
963 yp->tx_skbuff[entry] = 0;
964 /* Mark status as empty. */
965 yp->tx_status[entry].tx_errs = 0;
968 #ifndef final_version
969 if (yp->cur_tx - dirty_tx > TX_RING_SIZE) {
970 printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
971 dev->name, dirty_tx, yp->cur_tx, yp->tx_full);
972 dirty_tx += TX_RING_SIZE;
974 #endif
976 if (yp->tx_full && dev->tbusy
977 && yp->cur_tx - dirty_tx < TX_RING_SIZE - 2) {
978 /* The ring is no longer full, clear tbusy. */
979 yp->tx_full = 0;
980 clear_bit(0, (void*)&dev->tbusy);
981 mark_bh(NET_BH);
984 yp->dirty_tx = dirty_tx;
985 yp->tx_tail_desc = &yp->tx_status[dirty_tx % TX_RING_SIZE];
987 #endif
989 /* Log errors and other uncommon events. */
990 if (intr_status & 0x2ee) /* Abnormal error summary. */
991 yellowfin_error(dev, intr_status);
993 if (--boguscnt < 0) {
994 printk(KERN_WARNING "%s: Too much work at interrupt, status=0x%4.4x.\n",
995 dev->name, intr_status);
996 break;
998 } while (1);
1000 if (yellowfin_debug > 3)
1001 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1002 dev->name, inw(ioaddr + IntrStatus));
1004 /* Code that should never be run! Perhaps remove after testing.. */
1006 static int stopit = 10;
1007 if (dev->start == 0 && --stopit < 0) {
1008 printk(KERN_ERR "%s: Emergency stop, looping startup interrupt.\n",
1009 dev->name);
1010 free_irq(irq, dev);
1014 dev->interrupt = 0;
1015 clear_bit(0, (void*)&yp->in_interrupt);
1016 return;
1019 /* This routine is logically part of the interrupt handler, but separated
1020 for clarity and better register allocation. */
1021 static int yellowfin_rx(struct net_device *dev)
1023 struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
1024 int entry = yp->cur_rx % RX_RING_SIZE;
1025 int boguscnt = 20;
1027 if (yellowfin_debug > 4) {
1028 printk(KERN_DEBUG " In yellowfin_rx(), entry %d status %4.4x.\n",
1029 entry, yp->rx_ring[entry].status);
1030 printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x %4.4x %4.4x.\n",
1031 entry, yp->rx_ring[entry].cmd,
1032 yp->rx_ring[entry].request_cnt, yp->rx_ring[entry].addr,
1033 yp->rx_ring[entry].result_cnt, yp->rx_ring[entry].status);
1036 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1037 while (yp->rx_head_desc->status) {
1038 struct yellowfin_desc *desc = yp->rx_head_desc;
1039 u16 desc_status = desc->status;
1040 int data_size = desc->request_cnt - desc->result_cnt;
1041 u8 *buf_addr = bus_to_virt(desc->addr);
1042 s16 frame_status = get_unaligned((s16*)(buf_addr+data_size-2));
1044 if (yellowfin_debug > 4)
1045 printk(KERN_DEBUG " yellowfin_rx() status was %4.4x.\n",
1046 frame_status);
1047 if (--boguscnt < 0)
1048 break;
1049 if ( ! (desc_status & RX_EOP)) {
1050 printk(KERN_WARNING "%s: Oversized Ethernet frame spanned multiple buffers,"
1051 " status %4.4x!\n", dev->name, desc_status);
1052 yp->stats.rx_length_errors++;
1053 } else if (yp->chip_id == 0 && (frame_status & 0x0038)) {
1054 /* There was an error. */
1055 if (yellowfin_debug > 3)
1056 printk(KERN_DEBUG " yellowfin_rx() Rx error was %4.4x.\n",
1057 frame_status);
1058 yp->stats.rx_errors++;
1059 if (frame_status & 0x0060) yp->stats.rx_length_errors++;
1060 if (frame_status & 0x0008) yp->stats.rx_frame_errors++;
1061 if (frame_status & 0x0010) yp->stats.rx_crc_errors++;
1062 if (frame_status < 0) yp->stats.rx_dropped++;
1063 } else if (yp->chip_id != 0 &&
1064 ((buf_addr[data_size-1] & 0x85) || buf_addr[data_size-2] & 0xC0)) {
1065 u8 status1 = buf_addr[data_size-2];
1066 u8 status2 = buf_addr[data_size-1];
1067 yp->stats.rx_errors++;
1068 if (status1 & 0xC0) yp->stats.rx_length_errors++;
1069 if (status2 & 0x03) yp->stats.rx_frame_errors++;
1070 if (status2 & 0x04) yp->stats.rx_crc_errors++;
1071 if (status2 & 0x80) yp->stats.rx_dropped++;
1072 #ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */
1073 } else if (memcmp(bus_to_virt(yp->rx_ring[entry].addr),
1074 dev->dev_addr, 6) != 0
1075 && memcmp(bus_to_virt(yp->rx_ring[entry].addr),
1076 "\377\377\377\377\377\377", 6) != 0) {
1077 printk(KERN_WARNING "%s: Bad frame to %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x.\n",
1078 dev->name, buf_addr[0], buf_addr[1], buf_addr[2],
1079 buf_addr[3], buf_addr[4], buf_addr[5]);
1080 bogus_rx++;
1081 #endif
1082 } else {
1083 struct sk_buff *skb;
1084 int pkt_len = data_size -
1085 (yp->chip_id ? 7 : 8 + buf_addr[data_size - 8]);
1086 /* To verify: Yellowfin Length should omit the CRC! */
1088 #ifndef final_version
1089 if (yellowfin_debug > 4)
1090 printk(KERN_DEBUG " yellowfin_rx() normal Rx pkt length %d"
1091 " of %d, bogus_cnt %d.\n",
1092 pkt_len, data_size, boguscnt);
1093 #endif
1094 /* Check if the packet is long enough to just pass up the skbuff
1095 without copying to a properly sized skbuff. */
1096 if (pkt_len > rx_copybreak) {
1097 char *temp = skb_put(skb = yp->rx_skbuff[entry], pkt_len);
1098 #ifndef final_version /* Remove after testing. */
1099 if (bus_to_virt(yp->rx_ring[entry].addr) != temp)
1100 printk(KERN_WARNING "%s: Warning -- the skbuff addresses "
1101 "do not match in yellowfin_rx: %p vs. %p / %p.\n",
1102 dev->name, bus_to_virt(yp->rx_ring[entry].addr),
1103 skb->head, temp);
1104 #endif
1105 yp->rx_skbuff[entry] = NULL;
1106 } else {
1107 skb = dev_alloc_skb(pkt_len + 2);
1108 if (skb == NULL)
1109 break;
1110 skb->dev = dev;
1111 skb_reserve(skb, 2); /* 16 byte align the data fields */
1112 #if 1
1113 eth_copy_and_sum(skb, bus_to_virt(yp->rx_ring[entry].addr),
1114 pkt_len, 0);
1115 skb_put(skb, pkt_len);
1116 #else
1117 memcpy(skb_put(skb, pkt_len),
1118 bus_to_virt(yp->rx_ring[entry].addr), pkt_len);
1119 #endif
1121 skb->protocol = eth_type_trans(skb, dev);
1122 netif_rx(skb);
1123 dev->last_rx = jiffies;
1124 yp->stats.rx_packets++;
1126 entry = (++yp->cur_rx) % RX_RING_SIZE;
1127 yp->rx_head_desc = &yp->rx_ring[entry];
1130 /* Refill the Rx ring buffers. */
1131 for (; yp->cur_rx - yp->dirty_rx > 0; yp->dirty_rx++) {
1132 struct sk_buff *skb;
1133 entry = yp->dirty_rx % RX_RING_SIZE;
1134 if (yp->rx_skbuff[entry] == NULL) {
1135 skb = dev_alloc_skb(yp->rx_buf_sz);
1136 if (skb == NULL)
1137 break; /* Better luck next round. */
1138 skb->dev = dev; /* Mark as being used by this device. */
1139 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1140 yp->rx_ring[entry].addr = virt_to_bus(skb->tail);
1141 yp->rx_skbuff[entry] = skb;
1143 yp->rx_ring[entry].cmd = CMD_STOP;
1144 yp->rx_ring[entry].status = 0; /* Clear complete bit. */
1145 if (entry != 0)
1146 yp->rx_ring[entry - 1].cmd = CMD_RX_BUF | INTR_ALWAYS;
1147 else
1148 yp->rx_ring[RX_RING_SIZE - 1].cmd =
1149 CMD_RX_BUF | INTR_ALWAYS | BRANCH_ALWAYS;
1152 return 0;
1155 static void yellowfin_error(struct net_device *dev, int intr_status)
1157 struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
1159 printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
1160 dev->name, intr_status);
1161 /* Hmmmmm, it's not clear what to do here. */
1162 if (intr_status & (IntrTxPCIErr | IntrTxPCIFault))
1163 yp->stats.tx_errors++;
1164 if (intr_status & (IntrRxPCIErr | IntrRxPCIFault))
1165 yp->stats.rx_errors++;
1168 static int yellowfin_close(struct net_device *dev)
1170 long ioaddr = dev->base_addr;
1171 struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
1172 int i;
1174 dev->start = 0;
1175 dev->tbusy = 1;
1177 if (yellowfin_debug > 1) {
1178 printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %4.4x Rx %4.4x Int %2.2x.\n",
1179 dev->name, inw(ioaddr + TxStatus),
1180 inw(ioaddr + RxStatus), inw(ioaddr + IntrStatus));
1181 printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
1182 dev->name, yp->cur_tx, yp->dirty_tx, yp->cur_rx, yp->dirty_rx);
1185 /* Disable interrupts by clearing the interrupt mask. */
1186 outw(0x0000, ioaddr + IntrEnb);
1188 /* Stop the chip's Tx and Rx processes. */
1189 outl(0x80000000, ioaddr + RxCtrl);
1190 outl(0x80000000, ioaddr + TxCtrl);
1192 del_timer(&yp->timer);
1194 #ifdef __i386__
1195 if (yellowfin_debug > 2) {
1196 printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n", (int)virt_to_bus(yp->tx_ring));
1197 for (i = 0; i < TX_RING_SIZE*2; i++)
1198 printk(" %c #%d desc. %4.4x %4.4x %8.8x %8.8x %4.4x %4.4x.\n",
1199 inl(ioaddr + TxPtr) == (long)&yp->tx_ring[i] ? '>' : ' ',
1200 i, yp->tx_ring[i].cmd,
1201 yp->tx_ring[i].request_cnt, yp->tx_ring[i].addr,
1202 yp->tx_ring[i].branch_addr,
1203 yp->tx_ring[i].result_cnt, yp->tx_ring[i].status);
1204 printk(KERN_DEBUG " Tx status %p:\n", yp->tx_status);
1205 for (i = 0; i < TX_RING_SIZE; i++)
1206 printk(" #%d status %4.4x %4.4x %4.4x %4.4x.\n",
1207 i, yp->tx_status[i].tx_cnt, yp->tx_status[i].tx_errs,
1208 yp->tx_status[i].total_tx_cnt, yp->tx_status[i].paused);
1210 printk("\n"KERN_DEBUG " Rx ring %8.8x:\n", (int)virt_to_bus(yp->rx_ring));
1211 for (i = 0; i < RX_RING_SIZE; i++) {
1212 printk(KERN_DEBUG " %c #%d desc. %4.4x %4.4x %8.8x %4.4x %4.4x\n",
1213 inl(ioaddr + RxPtr) == (long)&yp->rx_ring[i] ? '>' : ' ',
1214 i, yp->rx_ring[i].cmd,
1215 yp->rx_ring[i].request_cnt, yp->rx_ring[i].addr,
1216 yp->rx_ring[i].result_cnt, yp->rx_ring[i].status);
1217 if (yellowfin_debug > 6) {
1218 if (*(u8*)yp->rx_ring[i].addr != 0x69) {
1219 int j;
1220 for (j = 0; j < 0x50; j++)
1221 printk(" %4.4x", ((u16*)yp->rx_ring[i].addr)[j]);
1222 printk("\n");
1227 #endif /* __i386__ debugging only */
1229 free_irq(dev->irq, dev);
1231 /* Free all the skbuffs in the Rx queue. */
1232 for (i = 0; i < RX_RING_SIZE; i++) {
1233 yp->rx_ring[i].cmd = CMD_STOP;
1234 yp->rx_ring[i].addr = 0xBADF00D0; /* An invalid address. */
1235 if (yp->rx_skbuff[i]) {
1236 #if LINUX_VERSION_CODE < 0x20100
1237 yp->rx_skbuff[i]->free = 1;
1238 #endif
1239 DEV_FREE_SKB(yp->rx_skbuff[i]);
1241 yp->rx_skbuff[i] = 0;
1243 for (i = 0; i < TX_RING_SIZE; i++) {
1244 if (yp->tx_skbuff[i])
1245 DEV_FREE_SKB(yp->tx_skbuff[i]);
1246 yp->tx_skbuff[i] = 0;
1249 #ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */
1250 if (yellowfin_debug > 0) {
1251 printk(KERN_DEBUG "%s: Received %d frames that we should not have.\n",
1252 dev->name, bogus_rx);
1254 #endif
1255 MOD_DEC_USE_COUNT;
1257 return 0;
1260 static struct enet_statistics *yellowfin_get_stats(struct net_device *dev)
1262 struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
1263 return &yp->stats;
1266 /* Set or clear the multicast filter for this adaptor. */
1268 /* The little-endian AUTODIN32 ethernet CRC calculation.
1269 N.B. Do not use for bulk data, use a table-based routine instead.
1270 This is common code and should be moved to net/core/crc.c */
1271 static unsigned const ethernet_polynomial_le = 0xedb88320U;
1273 static inline unsigned ether_crc_le(int length, unsigned char *data)
1275 unsigned int crc = 0xffffffff; /* Initial value. */
1276 while(--length >= 0) {
1277 unsigned char current_octet = *data++;
1278 int bit;
1279 for (bit = 8; --bit >= 0; current_octet >>= 1) {
1280 if ((crc ^ current_octet) & 1) {
1281 crc >>= 1;
1282 crc ^= ethernet_polynomial_le;
1283 } else
1284 crc >>= 1;
1287 return crc;
1291 static void set_rx_mode(struct net_device *dev)
1293 struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
1294 long ioaddr = dev->base_addr;
1295 u16 cfg_value = inw(ioaddr + Cnfg);
1297 /* Stop the Rx process to change any value. */
1298 outw(cfg_value & ~0x1000, ioaddr + Cnfg);
1299 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1300 /* Unconditionally log net taps. */
1301 printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
1302 outw(0x000F, ioaddr + AddrMode);
1303 } else if ((dev->mc_count > 64) || (dev->flags & IFF_ALLMULTI)) {
1304 /* Too many to filter well, or accept all multicasts. */
1305 outw(0x000B, ioaddr + AddrMode);
1306 } else if (dev->mc_count > 0) { /* Must use the multicast hash table. */
1307 struct dev_mc_list *mclist;
1308 u16 hash_table[4];
1309 int i;
1310 memset(hash_table, 0, sizeof(hash_table));
1311 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1312 i++, mclist = mclist->next) {
1313 /* Due to a bug in the early chip versions, multiple filter
1314 slots must be set for each address. */
1315 if (yp->chip_id == 0) {
1316 set_bit((ether_crc_le(3, mclist->dmi_addr) >> 3) & 0x3f,
1317 hash_table);
1318 set_bit((ether_crc_le(4, mclist->dmi_addr) >> 3) & 0x3f,
1319 hash_table);
1320 set_bit((ether_crc_le(5, mclist->dmi_addr) >> 3) & 0x3f,
1321 hash_table);
1323 set_bit((ether_crc_le(6, mclist->dmi_addr) >> 3) & 0x3f,
1324 hash_table);
1326 /* Copy the hash table to the chip. */
1327 for (i = 0; i < 4; i++)
1328 outw(hash_table[i], ioaddr + HashTbl + i*2);
1329 outw(0x0003, ioaddr + AddrMode);
1330 } else { /* Normal, unicast/broadcast-only mode. */
1331 outw(0x0001, ioaddr + AddrMode);
1333 /* Restart the Rx process. */
1334 outw(cfg_value | 0x1000, ioaddr + Cnfg);
1337 #ifdef HAVE_PRIVATE_IOCTL
1338 static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1340 long ioaddr = dev->base_addr;
1341 u16 *data = (u16 *)&rq->ifr_data;
1343 switch(cmd) {
1344 case SIOCDEVPRIVATE: /* Get the address of the PHY in use. */
1345 data[0] = ((struct yellowfin_private *)dev->priv)->phys[0] & 0x1f;
1346 /* Fall Through */
1347 case SIOCDEVPRIVATE+1: /* Read the specified MII register. */
1348 data[3] = mdio_read(ioaddr, data[0] & 0x1f, data[1] & 0x1f);
1349 return 0;
1350 case SIOCDEVPRIVATE+2: /* Write the specified MII register */
1351 if (!suser())
1352 return -EPERM;
1353 mdio_write(ioaddr, data[0] & 0x1f, data[1] & 0x1f, data[2]);
1354 return 0;
1355 default:
1356 return -EOPNOTSUPP;
1359 #endif /* HAVE_PRIVATE_IOCTL */
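
/* For reference, a hedged user-space sketch of exercising the private MII
   ioctls above.  Editorial illustration only: the interface name "eth0" is
   assumed, and data[] reuses the u16-array-over-ifr_data convention that
   mii_ioctl() expects (data[0]=PHY address, data[1]=register, data[3]=value).

	int skfd = socket(AF_INET, SOCK_DGRAM, 0);
	struct ifreq ifr;
	unsigned short *data = (unsigned short *)&ifr.ifr_data;

	strcpy(ifr.ifr_name, "eth0");
	data[1] = 1;				MII register 1 is the status register
	ioctl(skfd, SIOCDEVPRIVATE, &ifr);	fills data[0] with the PHY address,
						then falls through and reads the
						register into data[3]
*/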
1362 #ifdef MODULE
1364 /* An additional parameter that may be passed in... */
1365 static int debug = -1;
1367 int init_module(void)
1369 if (debug >= 0)
1370 yellowfin_debug = debug;
1372 return yellowfin_probe();
1375 void cleanup_module(void)
1377 struct net_device *next_dev;
1379 /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
1380 while (root_yellowfin_dev) {
1381 next_dev = ((struct yellowfin_private *)root_yellowfin_dev->priv)->next_module;
1382 unregister_netdev(root_yellowfin_dev);
1383 release_region(root_yellowfin_dev->base_addr, YELLOWFIN_TOTAL_SIZE);
1384 kfree(root_yellowfin_dev);
1385 root_yellowfin_dev = next_dev;
1389 #endif /* MODULE */
1392 * Local variables:
1393 * compile-command: "gcc -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c yellowfin.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
1394 * compile-command-alphaLX: "gcc -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O2 -c yellowfin.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS` -fomit-frame-pointer -fno-strength-reduce -mno-fp-regs -Wa,-m21164a -DBWX_USABLE -DBWIO_ENABLED"
1395 * SMP-compile-command: "gcc -D__SMP__ -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c yellowfin.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
1396 * c-indent-level: 4
1397 * c-basic-offset: 4
1398 * tab-width: 4
1399 * End: