MOXA linux-2.6.x / linux-2.6.9-uc0 from sdlinux-moxaart.tgz
[linux-2.6.9-moxart.git] / drivers / net / yellowfin.c
/* yellowfin.c: A Packet Engines G-NIC ethernet driver for linux. */
/*
	Written 1997-2001 by Donald Becker.

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	This driver is for the Packet Engines G-NIC PCI Gigabit Ethernet adapter.
	It also supports the Symbios Logic version of the same chip core.

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403

	Support and updates available at
	http://www.scyld.com/network/yellowfin.html

	Linux kernel changelog:
	-----------------------

	LK1.1.1 (jgarzik): Port to 2.4 kernel

	LK1.1.2 (jgarzik):
	* Merge in becker version 1.05

	LK1.1.3 (jgarzik):
	* Various cleanups
	* Update yellowfin_timer to correctly calculate duplex.
	  (suggested by Manfred Spraul)

	LK1.1.4 (val@nmt.edu):
	* Fix three endian-ness bugs
	* Support dual function SYM53C885E ethernet chip

	LK1.1.5 (val@nmt.edu):
	* Fix forced full-duplex bug I introduced

	LK1.1.6 (val@nmt.edu):
	* Only print warning on truly "oversized" packets
	* Fix theoretical bug on gigabit cards - return to 1.1.3 behavior

*/

#define DRV_NAME	"yellowfin"
#define DRV_VERSION	"1.05+LK1.1.6"
#define DRV_RELDATE	"Feb 11, 2002"

#define PFX DRV_NAME ": "
/* The user-configurable values.
   These may be modified when a driver module is loaded.*/

static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static int max_interrupt_work = 20;
static int mtu;
#ifdef YF_PROTOTYPE			/* Support for prototype hardware errata. */
/* System-wide count of bogus-rx frames. */
static int bogus_rx;
static int dma_ctrl = 0x004A0263;	/* Constrained by errata */
static int fifo_cfg = 0x0020;		/* Bypass external Tx FIFO. */
#elif defined(YF_NEW)			/* A future perfect board :->. */
static int dma_ctrl = 0x00CAC277;	/* Override when loading module! */
static int fifo_cfg = 0x0028;
#else
static int dma_ctrl = 0x004A0263;	/* Constrained by errata */
static int fifo_cfg = 0x0020;		/* Bypass external Tx FIFO. */
#endif

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1514 effectively disables this feature. */
static int rx_copybreak;

/* Used to pass the media type, etc.
   No media types are currently defined.  These exist for driver
   interoperability.
*/
#define MAX_UNITS 8				/* More are supported, limit only on options */
static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};

/* Do ugly workaround for GX server chipset errata. */
static int gx_fix;
/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for efficiency.
   Making the Tx ring too long decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE	16
#define TX_QUEUE_SIZE	12		/* Must be > 4 && <= TX_RING_SIZE */
#define RX_RING_SIZE	64
#define STATUS_TOTAL_SIZE	TX_RING_SIZE*sizeof(struct tx_status_words)
#define TX_TOTAL_SIZE		2*TX_RING_SIZE*sizeof(struct yellowfin_desc)
#define RX_TOTAL_SIZE		RX_RING_SIZE*sizeof(struct yellowfin_desc)
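/* A worked example of what those macros come to, given the structures
   defined below: struct yellowfin_desc is four u32s (16 bytes) and struct
   tx_status_words is four u16s (8 bytes), so TX_TOTAL_SIZE = 2*16*16 = 512
   bytes, RX_TOTAL_SIZE = 64*16 = 1024 bytes and STATUS_TOTAL_SIZE =
   16*8 = 128 bytes of PCI-consistent memory per board. */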

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (2*HZ)
#define PKT_BUF_SZ		1536			/* Size of each temporary Rx buffer.*/

#define yellowfin_debug debug
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/mii.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <asm/uaccess.h>
#include <asm/processor.h>		/* Processor type for cache alignment. */
#include <asm/unaligned.h>
#include <asm/bitops.h>
#include <asm/io.h>

/* These identify the driver base version and may not be removed. */
static char version[] __devinitdata =
KERN_INFO DRV_NAME ".c:v1.05  1/09/2001  Written by Donald Becker <becker@scyld.com>\n"
KERN_INFO "  http://www.scyld.com/network/yellowfin.html\n"
KERN_INFO "  (unofficial 2.4.x port, " DRV_VERSION ", " DRV_RELDATE ")\n";
#ifndef USE_IO_OPS
#undef inb
#undef inw
#undef inl
#undef outb
#undef outw
#undef outl
#define inb readb
#define inw readw
#define inl readl
#define outb writeb
#define outw writew
#define outl writel
#endif /* !USE_IO_OPS */

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Packet Engines Yellowfin G-NIC Gigabit Ethernet driver");
MODULE_LICENSE("GPL");

MODULE_PARM(max_interrupt_work, "i");
MODULE_PARM(mtu, "i");
MODULE_PARM(debug, "i");
MODULE_PARM(rx_copybreak, "i");
MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(gx_fix, "i");
MODULE_PARM_DESC(max_interrupt_work, "G-NIC maximum events handled per interrupt");
MODULE_PARM_DESC(mtu, "G-NIC MTU (all boards)");
MODULE_PARM_DESC(debug, "G-NIC debug level (0-7)");
MODULE_PARM_DESC(rx_copybreak, "G-NIC copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(options, "G-NIC: Bits 0-3: media type, bit 17: full duplex");
MODULE_PARM_DESC(full_duplex, "G-NIC full duplex setting(s) (1)");
MODULE_PARM_DESC(gx_fix, "G-NIC: enable GX server chipset bug workaround (0-1)");
/*
				Theory of Operation

I. Board Compatibility

This device driver is designed for the Packet Engines "Yellowfin" Gigabit
Ethernet adapter.  The G-NIC 64-bit PCI card is supported, as well as the
Symbios 53C885E dual function chip.

II. Board-specific settings

PCI bus devices are configured by the system at boot time, so no jumpers
need to be set on the board.  The system BIOS preferably should assign the
PCI INTA signal to an otherwise unused system IRQ line.
Note: Kernel versions earlier than 1.3.73 do not support shared PCI
interrupt lines.

III. Driver operation

IIIa. Ring buffers

The Yellowfin uses the Descriptor Based DMA Architecture specified by Apple.
This is a descriptor list scheme similar to that used by the EEPro100 and
Tulip.  This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.

The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the Yellowfin as receive data
buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack and replaced by a newly allocated skbuff.

The RX_COPYBREAK value is chosen to trade off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames.  For small frames the copying cost is negligible (esp. considering
that we are pre-loading the cache with immediately useful header
information).  For large frames the copying cost is non-trivial, and the
larger copy might flush the cache of useful data.
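
As a sketch of that decision (simplified from yellowfin_rx() below, which
also handles errors and descriptor refill):

	if (pkt_len > rx_copybreak) {
		skb_put(skb = rx_skb, pkt_len);		/* hand the big skbuff up */
		yp->rx_skbuff[entry] = NULL;		/* slot refilled later */
	} else {
		skb = dev_alloc_skb(pkt_len + 2);	/* copy the tiny frame */
		skb_reserve(skb, 2);			/* 16 byte align the IP header */
		eth_copy_and_sum(skb, rx_skb->tail, pkt_len, 0);
		skb_put(skb, pkt_len);
	}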

IIIC. Synchronization

The driver runs as two independent, single-threaded flows of control.  One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag.  The other thread is the interrupt handler, which is single
threaded by the hardware and other software.

The send packet thread has partial control over the Tx ring and 'dev->tbusy'
flag.  It sets the tbusy flag whenever it's queuing a Tx packet.  If the next
queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
the 'yp->tx_full' flag.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark.  Iff the 'yp->tx_full' flag is set, it
clears both the tx_full and tbusy flags.
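
In outline (a simplified sketch; the real code is in yellowfin_start_xmit()
and yellowfin_interrupt() below), the two threads coordinate through the
free-running ring indices alone, relying on unsigned wraparound:

	/* producer side, after queueing a packet */
	if (yp->cur_tx - yp->dirty_tx >= TX_QUEUE_SIZE)
		yp->tx_full = 1;		/* and leave the queue stopped */

	/* consumer side, after reaping Tx status */
	if (yp->tx_full && yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4) {
		yp->tx_full = 0;
		netif_wake_queue(dev);
	}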

IV. Notes

Thanks to Kim Stearns of Packet Engines for providing a pair of G-NIC boards.
Thanks to Bruce Faust of Digitalscape for providing both their SYM53C885 board
and an AlphaStation to verify the Alpha port!

IVb. References

Yellowfin Engineering Design Specification, 4/23/97 Preliminary/Confidential
Symbios SYM53C885 PCI-SCSI/Fast Ethernet Multifunction Controller Preliminary
   Data Manual v3.0
http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
http://cesdis.gsfc.nasa.gov/linux/misc/100mbps.html

IVc. Errata

See Packet Engines confidential appendix (prototype chips only).

*/
enum pci_id_flags_bits {
	/* Set PCI command register bits before calling probe1(). */
	PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
	/* Read and map the single following PCI BAR. */
	PCI_ADDR0=0<<4, PCI_ADDR1=1<<4, PCI_ADDR2=2<<4, PCI_ADDR3=3<<4,
	PCI_ADDR_64BITS=0x100, PCI_NO_ACPI_WAKE=0x200, PCI_NO_MIN_LATENCY=0x400,
	PCI_UNUSED_IRQ=0x800,
};
enum capability_flags {
	HasMII=1, FullTxStatus=2, IsGigabit=4, HasMulticastBug=8, FullRxStatus=16,
	HasMACAddrBug=32, /* Only on early revs. */
	DontUseEeprom=64, /* Don't read the MAC from the EEPROM. */
};
/* The PCI I/O space extent. */
#define YELLOWFIN_SIZE 0x100
#ifdef USE_IO_OPS
#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_IO  | PCI_ADDR0)
#else
#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR1)
#endif

struct pci_id_info {
	const char *name;
	struct match_info {
		int pci, pci_mask, subsystem, subsystem_mask;
		int revision, revision_mask;		/* Only 8 bits. */
	} id;
	enum pci_id_flags_bits pci_flags;
	int io_size;		/* Needed for I/O region check or ioremap(). */
	int drv_flags;		/* Driver use, intended as capability flags. */
};

static struct pci_id_info pci_id_tbl[] = {
	{"Yellowfin G-NIC Gigabit Ethernet", { 0x07021000, 0xffffffff},
	 PCI_IOTYPE, YELLOWFIN_SIZE,
	 FullTxStatus | IsGigabit | HasMulticastBug | HasMACAddrBug | DontUseEeprom},
	{"Symbios SYM83C885", { 0x07011000, 0xffffffff},
	 PCI_IOTYPE, YELLOWFIN_SIZE, HasMII | DontUseEeprom },
	{NULL,},
};

static struct pci_device_id yellowfin_pci_tbl[] = {
	{ 0x1000, 0x0702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ 0x1000, 0x0701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
	{ 0, }
};
MODULE_DEVICE_TABLE (pci, yellowfin_pci_tbl);

/* Offsets to the Yellowfin registers.  Various sizes and alignments. */
enum yellowfin_offsets {
	TxCtrl=0x00, TxStatus=0x04, TxPtr=0x0C,
	TxIntrSel=0x10, TxBranchSel=0x14, TxWaitSel=0x18,
	RxCtrl=0x40, RxStatus=0x44, RxPtr=0x4C,
	RxIntrSel=0x50, RxBranchSel=0x54, RxWaitSel=0x58,
	EventStatus=0x80, IntrEnb=0x82, IntrClear=0x84, IntrStatus=0x86,
	ChipRev=0x8C, DMACtrl=0x90, TxThreshold=0x94,
	Cnfg=0xA0, FrameGap0=0xA2, FrameGap1=0xA4,
	MII_Cmd=0xA6, MII_Addr=0xA8, MII_Wr_Data=0xAA, MII_Rd_Data=0xAC,
	MII_Status=0xAE,
	RxDepth=0xB8, FlowCtrl=0xBC,
	AddrMode=0xD0, StnAddr=0xD2, HashTbl=0xD8, FIFOcfg=0xF8,
	EEStatus=0xF0, EECtrl=0xF1, EEAddr=0xF2, EERead=0xF3, EEWrite=0xF4,
	EEFeature=0xF5,
};

/* The Yellowfin Rx and Tx buffer descriptors.
   Elements are written as 32 bit for endian portability. */
struct yellowfin_desc {
	u32 dbdma_cmd;
	u32 addr;
	u32 branch_addr;
	u32 result_status;
};

struct tx_status_words {
#ifdef __BIG_ENDIAN
	u16 tx_errs;
	u16 tx_cnt;
	u16 paused;
	u16 total_tx_cnt;
#else  /* Little endian chips. */
	u16 tx_cnt;
	u16 tx_errs;
	u16 total_tx_cnt;
	u16 paused;
#endif /* __BIG_ENDIAN */
};

/* Bits in yellowfin_desc.cmd */
enum desc_cmd_bits {
	CMD_TX_PKT=0x10000000, CMD_RX_BUF=0x20000000, CMD_TXSTATUS=0x30000000,
	CMD_NOP=0x60000000, CMD_STOP=0x70000000,
	BRANCH_ALWAYS=0x0C0000, INTR_ALWAYS=0x300000, WAIT_ALWAYS=0x030000,
	BRANCH_IFTRUE=0x040000,
};

/* Bits in yellowfin_desc.status */
enum desc_status_bits { RX_EOP=0x0040, };

/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
	IntrRxDone=0x01, IntrRxInvalid=0x02, IntrRxPCIFault=0x04,IntrRxPCIErr=0x08,
	IntrTxDone=0x10, IntrTxInvalid=0x20, IntrTxPCIFault=0x40,IntrTxPCIErr=0x80,
	IntrEarlyRx=0x100, IntrWakeup=0x200, };

#define PRIV_ALIGN	31	/* Required alignment mask */
#define MII_CNT		4
struct yellowfin_private {
	/* Descriptor rings first for alignment.
	   Tx requires a second descriptor for status. */
	struct yellowfin_desc *rx_ring;
	struct yellowfin_desc *tx_ring;
	struct sk_buff* rx_skbuff[RX_RING_SIZE];
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;

	struct tx_status_words *tx_status;
	dma_addr_t tx_status_dma;

	struct timer_list timer;	/* Media selection timer. */
	struct net_device_stats stats;
	/* Frequently used and paired value: keep adjacent for cache effect. */
	int chip_id, drv_flags;
	struct pci_dev *pci_dev;
	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
	struct tx_status_words *tx_tail_desc;
	unsigned int cur_tx, dirty_tx;
	int tx_threshold;
	unsigned int tx_full:1;		/* The Tx queue is full. */
	unsigned int full_duplex:1;	/* Full-duplex operation requested. */
	unsigned int duplex_lock:1;
	unsigned int medialock:1;	/* Do not sense media. */
	unsigned int default_port:4;	/* Last dev->if_port value. */
	/* MII transceiver section. */
	int mii_cnt;			/* MII device addresses. */
	u16 advertising;		/* NWay media advertisement */
	unsigned char phys[MII_CNT];	/* MII device addresses, only first one used */
	spinlock_t lock;
};

static int read_eeprom(long ioaddr, int location);
static int mdio_read(long ioaddr, int phy_id, int location);
static void mdio_write(long ioaddr, int phy_id, int location, int value);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int yellowfin_open(struct net_device *dev);
static void yellowfin_timer(unsigned long data);
static void yellowfin_tx_timeout(struct net_device *dev);
static void yellowfin_init_ring(struct net_device *dev);
static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
static int yellowfin_rx(struct net_device *dev);
static void yellowfin_error(struct net_device *dev, int intr_status);
static int yellowfin_close(struct net_device *dev);
static struct net_device_stats *yellowfin_get_stats(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);

static int __devinit yellowfin_init_one(struct pci_dev *pdev,
					const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct yellowfin_private *np;
	int irq;
	int chip_idx = ent->driver_data;
	static int find_cnt;
	long ioaddr, real_ioaddr;
	int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
	int drv_flags = pci_id_tbl[chip_idx].drv_flags;
	void *ring_space;
	dma_addr_t ring_dma;

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	i = pci_enable_device(pdev);
	if (i) return i;

	dev = alloc_etherdev(sizeof(*np));
	if (!dev) {
		printk (KERN_ERR PFX "cannot allocate ethernet device\n");
		return -ENOMEM;
	}
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	np = dev->priv;

	if (pci_request_regions(pdev, DRV_NAME))
		goto err_out_free_netdev;

	pci_set_master (pdev);

#ifdef USE_IO_OPS
	real_ioaddr = ioaddr = pci_resource_start (pdev, 0);
#else
	real_ioaddr = ioaddr = pci_resource_start (pdev, 1);
	ioaddr = (long) ioremap(ioaddr, YELLOWFIN_SIZE);
	if (!ioaddr)
		goto err_out_free_res;
#endif
	irq = pdev->irq;

	if (drv_flags & DontUseEeprom)
		for (i = 0; i < 6; i++)
			dev->dev_addr[i] = inb(ioaddr + StnAddr + i);
	else {
		int ee_offset = (read_eeprom(ioaddr, 6) == 0xff ? 0x100 : 0);
		for (i = 0; i < 6; i++)
			dev->dev_addr[i] = read_eeprom(ioaddr, ee_offset + i);
	}

	/* Reset the chip. */
	outl(0x80000000, ioaddr + DMACtrl);

	dev->base_addr = ioaddr;
	dev->irq = irq;

	pci_set_drvdata(pdev, dev);
	spin_lock_init(&np->lock);

	np->pci_dev = pdev;
	np->chip_id = chip_idx;
	np->drv_flags = drv_flags;

	ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_cleardev;
	np->tx_ring = (struct yellowfin_desc *)ring_space;
	np->tx_ring_dma = ring_dma;

	ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_unmap_tx;
	np->rx_ring = (struct yellowfin_desc *)ring_space;
	np->rx_ring_dma = ring_dma;

	ring_space = pci_alloc_consistent(pdev, STATUS_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_unmap_rx;
	np->tx_status = (struct tx_status_words *)ring_space;
	np->tx_status_dma = ring_dma;

	if (dev->mem_start)
		option = dev->mem_start;

	/* The lower four bits are the media type. */
	if (option > 0) {
		if (option & 0x200)
			np->full_duplex = 1;
		np->default_port = option & 15;
		if (np->default_port)
			np->medialock = 1;
	}
	if (find_cnt < MAX_UNITS && full_duplex[find_cnt] > 0)
		np->full_duplex = 1;

	if (np->full_duplex)
		np->duplex_lock = 1;

	/* The Yellowfin-specific entries in the device structure. */
	dev->open = &yellowfin_open;
	dev->hard_start_xmit = &yellowfin_start_xmit;
	dev->stop = &yellowfin_close;
	dev->get_stats = &yellowfin_get_stats;
	dev->set_multicast_list = &set_rx_mode;
	dev->do_ioctl = &netdev_ioctl;
	dev->tx_timeout = yellowfin_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;

	if (mtu)
		dev->mtu = mtu;

	i = register_netdev(dev);
	if (i)
		goto err_out_unmap_status;

	printk(KERN_INFO "%s: %s type %8x at 0x%lx, ",
		   dev->name, pci_id_tbl[chip_idx].name, inl(ioaddr + ChipRev), ioaddr);
	for (i = 0; i < 5; i++)
		printk("%2.2x:", dev->dev_addr[i]);
	printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);

	if (np->drv_flags & HasMII) {
		int phy, phy_idx = 0;
		for (phy = 0; phy < 32 && phy_idx < MII_CNT; phy++) {
			int mii_status = mdio_read(ioaddr, phy, 1);
			if (mii_status != 0xffff && mii_status != 0x0000) {
				np->phys[phy_idx++] = phy;
				np->advertising = mdio_read(ioaddr, phy, 4);
				printk(KERN_INFO "%s: MII PHY found at address %d, status "
					   "0x%4.4x advertising %4.4x.\n",
					   dev->name, phy, mii_status, np->advertising);
			}
		}
		np->mii_cnt = phy_idx;
	}

	find_cnt++;

	return 0;

err_out_unmap_status:
	pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status,
		np->tx_status_dma);
err_out_unmap_rx:
	pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
err_out_unmap_tx:
	pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
err_out_cleardev:
	pci_set_drvdata(pdev, NULL);
#ifndef USE_IO_OPS
	iounmap((void *)ioaddr);
err_out_free_res:
#endif
	pci_release_regions(pdev);
err_out_free_netdev:
	free_netdev (dev);
	return -ENODEV;
}

static int __devinit read_eeprom(long ioaddr, int location)
{
	int bogus_cnt = 10000;		/* Typical 33 MHz: 1050 ticks */

	outb(location, ioaddr + EEAddr);
	outb(0x30 | ((location >> 8) & 7), ioaddr + EECtrl);
	while ((inb(ioaddr + EEStatus) & 0x80) && --bogus_cnt > 0)
		;
	return inb(ioaddr + EERead);
}

/* MII Management Data I/O accesses.
   These routines assume the MDIO controller is idle, and do not exit until
   the command is finished. */
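
#if 0
/* A minimal usage sketch (illustrative only, never called, and the function
   name is ours): read the link status of the first PHY found at probe time.
   Assumes 'dev' is one of this driver's devices with an MII transceiver. */
static int yellowfin_example_link_up(struct net_device *dev)
{
	struct yellowfin_private *yp = dev->priv;

	/* BMSR_LSTATUS is bit 2 of MII register 1 (from <linux/mii.h>). */
	return (mdio_read(dev->base_addr, yp->phys[0], MII_BMSR)
			& BMSR_LSTATUS) != 0;
}
#endif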

static int mdio_read(long ioaddr, int phy_id, int location)
{
	int i;

	outw((phy_id<<8) + location, ioaddr + MII_Addr);
	outw(1, ioaddr + MII_Cmd);
	for (i = 10000; i >= 0; i--)
		if ((inw(ioaddr + MII_Status) & 1) == 0)
			break;
	return inw(ioaddr + MII_Rd_Data);
}

static void mdio_write(long ioaddr, int phy_id, int location, int value)
{
	int i;

	outw((phy_id<<8) + location, ioaddr + MII_Addr);
	outw(value, ioaddr + MII_Wr_Data);

	/* Wait for the command to finish. */
	for (i = 10000; i >= 0; i--)
		if ((inw(ioaddr + MII_Status) & 1) == 0)
			break;
	return;
}

static int yellowfin_open(struct net_device *dev)
{
	struct yellowfin_private *yp = dev->priv;
	long ioaddr = dev->base_addr;
	int i;

	/* Reset the chip. */
	outl(0x80000000, ioaddr + DMACtrl);

	i = request_irq(dev->irq, &yellowfin_interrupt, SA_SHIRQ, dev->name, dev);
	if (i) return i;

	if (yellowfin_debug > 1)
		printk(KERN_DEBUG "%s: yellowfin_open() irq %d.\n",
			   dev->name, dev->irq);

	yellowfin_init_ring(dev);

	outl(yp->rx_ring_dma, ioaddr + RxPtr);
	outl(yp->tx_ring_dma, ioaddr + TxPtr);

	for (i = 0; i < 6; i++)
		outb(dev->dev_addr[i], ioaddr + StnAddr + i);

	/* Set up various condition 'select' registers.
	   There are no options here. */
	outl(0x00800080, ioaddr + TxIntrSel);	/* Interrupt on Tx abort */
	outl(0x00800080, ioaddr + TxBranchSel);	/* Branch on Tx abort */
	outl(0x00400040, ioaddr + TxWaitSel);	/* Wait on Tx status */
	outl(0x00400040, ioaddr + RxIntrSel);	/* Interrupt on Rx done */
	outl(0x00400040, ioaddr + RxBranchSel);	/* Branch on Rx error */
	outl(0x00400040, ioaddr + RxWaitSel);	/* Wait on Rx done */

	/* Initialize other registers: with so many, this will eventually be
	   converted to an offset/value list. */
	outl(dma_ctrl, ioaddr + DMACtrl);
	outw(fifo_cfg, ioaddr + FIFOcfg);
	/* Enable automatic generation of flow control frames, period 0xffff. */
	outl(0x0030FFFF, ioaddr + FlowCtrl);

	yp->tx_threshold = 32;
	outl(yp->tx_threshold, ioaddr + TxThreshold);

	if (dev->if_port == 0)
		dev->if_port = yp->default_port;

	netif_start_queue(dev);

	/* Setting the Rx mode will start the Rx process. */
	if (yp->drv_flags & IsGigabit) {
		/* We are always in full-duplex mode with gigabit! */
		yp->full_duplex = 1;
		outw(0x01CF, ioaddr + Cnfg);
	} else {
		outw(0x0018, ioaddr + FrameGap0); /* 0060/4060 for non-MII 10baseT */
		outw(0x1018, ioaddr + FrameGap1);
		outw(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
	}
	set_rx_mode(dev);

	/* Enable interrupts by setting the interrupt mask. */
	outw(0x81ff, ioaddr + IntrEnb);		/* See enum intr_status_bits */
	outw(0x0000, ioaddr + EventStatus);	/* Clear non-interrupting events */
	outl(0x80008000, ioaddr + RxCtrl);	/* Start Rx and Tx channels. */
	outl(0x80008000, ioaddr + TxCtrl);

	if (yellowfin_debug > 2) {
		printk(KERN_DEBUG "%s: Done yellowfin_open().\n",
			   dev->name);
	}

	/* Set the timer to check for link beat. */
	init_timer(&yp->timer);
	yp->timer.expires = jiffies + 3*HZ;
	yp->timer.data = (unsigned long)dev;
	yp->timer.function = &yellowfin_timer;	/* timer handler */
	add_timer(&yp->timer);

	return 0;
}

static void yellowfin_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct yellowfin_private *yp = dev->priv;
	long ioaddr = dev->base_addr;
	int next_tick = 60*HZ;

	if (yellowfin_debug > 3) {
		printk(KERN_DEBUG "%s: Yellowfin timer tick, status %8.8x.\n",
			   dev->name, inw(ioaddr + IntrStatus));
	}

	if (yp->mii_cnt) {
		int bmsr = mdio_read(ioaddr, yp->phys[0], MII_BMSR);
		int lpa = mdio_read(ioaddr, yp->phys[0], MII_LPA);
		int negotiated = lpa & yp->advertising;
		if (yellowfin_debug > 1)
			printk(KERN_DEBUG "%s: MII #%d status register is %4.4x, "
				   "link partner capability %4.4x.\n",
				   dev->name, yp->phys[0], bmsr, lpa);

		yp->full_duplex = mii_duplex(yp->duplex_lock, negotiated);

		outw(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);

		if (bmsr & BMSR_LSTATUS)
			next_tick = 60*HZ;
		else
			next_tick = 3*HZ;
	}

	yp->timer.expires = jiffies + next_tick;
	add_timer(&yp->timer);
}

static void yellowfin_tx_timeout(struct net_device *dev)
{
	struct yellowfin_private *yp = dev->priv;
	long ioaddr = dev->base_addr;

	printk(KERN_WARNING "%s: Yellowfin transmit timed out at %d/%d Tx "
		   "status %4.4x, Rx status %4.4x, resetting...\n",
		   dev->name, yp->cur_tx, yp->dirty_tx,
		   inl(ioaddr + TxStatus), inl(ioaddr + RxStatus));

	/* Note: these should be KERN_DEBUG. */
	if (yellowfin_debug) {
		int i;
		printk(KERN_WARNING "  Rx ring %p: ", yp->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++)
			printk(" %8.8x", yp->rx_ring[i].result_status);
		printk("\n"KERN_WARNING"  Tx ring %p: ", yp->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(" %4.4x /%8.8x", yp->tx_status[i].tx_errs,
				   yp->tx_ring[i].result_status);
		printk("\n");
	}

	/* If the hardware is found to hang regularly, we will update the code
	   to reinitialize the chip here. */
	dev->if_port = 0;

	/* Wake the potentially-idle transmit channel. */
	outl(0x10001000, dev->base_addr + TxCtrl);
	if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
		netif_wake_queue (dev);		/* Typical path */

	dev->trans_start = jiffies;
	yp->stats.tx_errors++;
}

/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void yellowfin_init_ring(struct net_device *dev)
{
	struct yellowfin_private *yp = dev->priv;
	int i;

	yp->tx_full = 0;
	yp->cur_rx = yp->cur_tx = 0;
	yp->dirty_tx = 0;

	yp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

	for (i = 0; i < RX_RING_SIZE; i++) {
		yp->rx_ring[i].dbdma_cmd =
			cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
		yp->rx_ring[i].branch_addr = cpu_to_le32(yp->rx_ring_dma +
			((i+1)%RX_RING_SIZE)*sizeof(struct yellowfin_desc));
	}

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz);
		yp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb->dev = dev;		/* Mark as being used by this device. */
		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
		yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
			skb->tail, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
	}
	yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP);
	yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

#define NO_TXSTATS
#ifdef NO_TXSTATS
	/* In this mode the Tx ring needs only a single descriptor. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		yp->tx_skbuff[i] = NULL;
		yp->tx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
		yp->tx_ring[i].branch_addr = cpu_to_le32(yp->tx_ring_dma +
			((i+1)%TX_RING_SIZE)*sizeof(struct yellowfin_desc));
	}
	/* Wrap ring */
	yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS);
#else
{
	int j;

	/* Tx ring needs a pair of descriptors, the second for the status. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		j = 2*i;
		yp->tx_skbuff[i] = 0;
		/* Branch on Tx error. */
		yp->tx_ring[j].dbdma_cmd = cpu_to_le32(CMD_STOP);
		yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
			(j+1)*sizeof(struct yellowfin_desc));
		j++;
		if (yp->drv_flags & FullTxStatus) {
			yp->tx_ring[j].dbdma_cmd =
				cpu_to_le32(CMD_TXSTATUS | sizeof(*yp->tx_status));
			yp->tx_ring[j].request_cnt = sizeof(*yp->tx_status);
			yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
				i*sizeof(struct tx_status_words));
		} else {
			/* Symbios chips write only tx_errs word. */
			yp->tx_ring[j].dbdma_cmd =
				cpu_to_le32(CMD_TXSTATUS | INTR_ALWAYS | 2);
			yp->tx_ring[j].request_cnt = 2;
			/* Om pade ummmmm... */
			yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
				i*sizeof(struct tx_status_words) +
				&(yp->tx_status[0].tx_errs) -
				&(yp->tx_status[0]));
		}
		yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
			((j+1)%(2*TX_RING_SIZE))*sizeof(struct yellowfin_desc));
	}
	/* Wrap ring */
	yp->tx_ring[++j].dbdma_cmd |= cpu_to_le32(BRANCH_ALWAYS | INTR_ALWAYS);
}
#endif
	yp->tx_tail_desc = &yp->tx_status[0];
	return;
}

static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct yellowfin_private *yp = dev->priv;
	unsigned entry;
	int len = skb->len;

	netif_stop_queue (dev);

	/* Note: Ordering is important here, set the field with the
	   "ownership" bit last, and only then increment cur_tx. */

	/* Calculate the next Tx descriptor entry. */
	entry = yp->cur_tx % TX_RING_SIZE;

	if (gx_fix) {	/* Note: only works for paddable protocols e.g. IP. */
		int cacheline_end = ((unsigned long)skb->data + skb->len) % 32;
		/* Fix GX chipset errata. */
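		/* A reading of the workaround (our note, not from the errata
		   sheet): trouble apparently occurs when a packet ends within
		   the last 8 bytes of a 32-byte cache line, so the pad below
		   moves the end to one byte past the next line boundary. */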
		if (cacheline_end > 24 || cacheline_end == 0) {
			len = skb->len + 32 - cacheline_end + 1;
			if (len != skb->len)
				skb = skb_padto(skb, len);
		}
		if (skb == NULL) {
			yp->tx_skbuff[entry] = NULL;
			netif_wake_queue(dev);
			return 0;
		}
	}
	yp->tx_skbuff[entry] = skb;

#ifdef NO_TXSTATS
	yp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
		skb->data, len, PCI_DMA_TODEVICE));
	yp->tx_ring[entry].result_status = 0;
	if (entry >= TX_RING_SIZE-1) {
		/* New stop command. */
		yp->tx_ring[0].dbdma_cmd = cpu_to_le32(CMD_STOP);
		yp->tx_ring[TX_RING_SIZE-1].dbdma_cmd =
			cpu_to_le32(CMD_TX_PKT|BRANCH_ALWAYS | len);
	} else {
		yp->tx_ring[entry+1].dbdma_cmd = cpu_to_le32(CMD_STOP);
		yp->tx_ring[entry].dbdma_cmd =
			cpu_to_le32(CMD_TX_PKT | BRANCH_IFTRUE | len);
	}
	yp->cur_tx++;
#else
	yp->tx_ring[entry<<1].request_cnt = len;
	yp->tx_ring[entry<<1].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
		skb->data, len, PCI_DMA_TODEVICE));
	/* The input_last (status-write) command is constant, but we must
	   rewrite the subsequent 'stop' command. */

	yp->cur_tx++;
	{
		unsigned next_entry = yp->cur_tx % TX_RING_SIZE;
		yp->tx_ring[next_entry<<1].dbdma_cmd = cpu_to_le32(CMD_STOP);
	}
	/* Final step -- overwrite the old 'stop' command. */

	yp->tx_ring[entry<<1].dbdma_cmd =
		cpu_to_le32( ((entry % 6) == 0 ? CMD_TX_PKT|INTR_ALWAYS|BRANCH_IFTRUE :
					  CMD_TX_PKT | BRANCH_IFTRUE) | len);
#endif

	/* Non-x86 Todo: explicitly flush cache lines here. */

	/* Wake the potentially-idle transmit channel. */
	outl(0x10001000, dev->base_addr + TxCtrl);

	if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
		netif_start_queue (dev);		/* Typical path */
	else
		yp->tx_full = 1;
	dev->trans_start = jiffies;

	if (yellowfin_debug > 4) {
		printk(KERN_DEBUG "%s: Yellowfin transmit frame #%d queued in slot %d.\n",
			   dev->name, yp->cur_tx, entry);
	}
	return 0;
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = dev_instance;
	struct yellowfin_private *yp;
	long ioaddr;
	int boguscnt = max_interrupt_work;
	unsigned int handled = 0;

#ifndef final_version			/* Can never occur. */
	if (dev == NULL) {
		printk (KERN_ERR "yellowfin_interrupt(): irq %d for unknown device.\n", irq);
		return IRQ_NONE;
	}
#endif

	ioaddr = dev->base_addr;
	yp = dev->priv;

	spin_lock (&yp->lock);

	do {
		u16 intr_status = inw(ioaddr + IntrClear);

		if (yellowfin_debug > 4)
			printk(KERN_DEBUG "%s: Yellowfin interrupt, status %4.4x.\n",
				   dev->name, intr_status);

		if (intr_status == 0)
			break;
		handled = 1;

		if (intr_status & (IntrRxDone | IntrEarlyRx)) {
			yellowfin_rx(dev);
			outl(0x10001000, ioaddr + RxCtrl);	/* Wake Rx engine. */
		}

#ifdef NO_TXSTATS
		for (; yp->cur_tx - yp->dirty_tx > 0; yp->dirty_tx++) {
			int entry = yp->dirty_tx % TX_RING_SIZE;
			struct sk_buff *skb;

			if (yp->tx_ring[entry].result_status == 0)
				break;
			skb = yp->tx_skbuff[entry];
			yp->stats.tx_packets++;
			yp->stats.tx_bytes += skb->len;
			/* Free the original skb. */
			pci_unmap_single(yp->pci_dev, yp->tx_ring[entry].addr,
				skb->len, PCI_DMA_TODEVICE);
			dev_kfree_skb_irq(skb);
			yp->tx_skbuff[entry] = NULL;
		}
		if (yp->tx_full
			&& yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4) {
			/* The ring is no longer full, clear tbusy. */
			yp->tx_full = 0;
			netif_wake_queue(dev);
		}
#else
		if ((intr_status & IntrTxDone) || (yp->tx_tail_desc->tx_errs)) {
			unsigned dirty_tx = yp->dirty_tx;

			for (dirty_tx = yp->dirty_tx; yp->cur_tx - dirty_tx > 0;
				 dirty_tx++) {
				/* Todo: optimize this. */
				int entry = dirty_tx % TX_RING_SIZE;
				u16 tx_errs = yp->tx_status[entry].tx_errs;
				struct sk_buff *skb;

#ifndef final_version
				if (yellowfin_debug > 5)
					printk(KERN_DEBUG "%s: Tx queue %d check, Tx status "
						   "%4.4x %4.4x %4.4x %4.4x.\n",
						   dev->name, entry,
						   yp->tx_status[entry].tx_cnt,
						   yp->tx_status[entry].tx_errs,
						   yp->tx_status[entry].total_tx_cnt,
						   yp->tx_status[entry].paused);
#endif
				if (tx_errs == 0)
					break;	/* It still hasn't been Txed */
				skb = yp->tx_skbuff[entry];
				if (tx_errs & 0xF810) {
					/* There was a major error, log it. */
#ifndef final_version
					if (yellowfin_debug > 1)
						printk(KERN_DEBUG "%s: Transmit error, Tx status %4.4x.\n",
							   dev->name, tx_errs);
#endif
					yp->stats.tx_errors++;
					if (tx_errs & 0xF800) yp->stats.tx_aborted_errors++;
					if (tx_errs & 0x0800) yp->stats.tx_carrier_errors++;
					if (tx_errs & 0x2000) yp->stats.tx_window_errors++;
					if (tx_errs & 0x8000) yp->stats.tx_fifo_errors++;
				} else {
#ifndef final_version
					if (yellowfin_debug > 4)
						printk(KERN_DEBUG "%s: Normal transmit, Tx status %4.4x.\n",
							   dev->name, tx_errs);
#endif
					yp->stats.tx_bytes += skb->len;
					yp->stats.collisions += tx_errs & 15;
					yp->stats.tx_packets++;
				}
				/* Free the original skb. */
				pci_unmap_single(yp->pci_dev,
					yp->tx_ring[entry<<1].addr, skb->len,
					PCI_DMA_TODEVICE);
				dev_kfree_skb_irq(skb);
				yp->tx_skbuff[entry] = 0;
				/* Mark status as empty. */
				yp->tx_status[entry].tx_errs = 0;
			}

#ifndef final_version
			if (yp->cur_tx - dirty_tx > TX_RING_SIZE) {
				printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
					   dev->name, dirty_tx, yp->cur_tx, yp->tx_full);
				dirty_tx += TX_RING_SIZE;
			}
#endif

			if (yp->tx_full
				&& yp->cur_tx - dirty_tx < TX_QUEUE_SIZE - 2) {
				/* The ring is no longer full, clear tbusy. */
				yp->tx_full = 0;
				netif_wake_queue(dev);
			}

			yp->dirty_tx = dirty_tx;
			yp->tx_tail_desc = &yp->tx_status[dirty_tx % TX_RING_SIZE];
		}
#endif

		/* Log errors and other uncommon events. */
		if (intr_status & 0x2ee)	/* Abnormal error summary. */
			yellowfin_error(dev, intr_status);

		if (--boguscnt < 0) {
			printk(KERN_WARNING "%s: Too much work at interrupt, "
				   "status=0x%4.4x.\n",
				   dev->name, intr_status);
			break;
		}
	} while (1);

	if (yellowfin_debug > 3)
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
			   dev->name, inw(ioaddr + IntrStatus));

	spin_unlock (&yp->lock);
	return IRQ_RETVAL(handled);
}

/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation. */
static int yellowfin_rx(struct net_device *dev)
{
	struct yellowfin_private *yp = dev->priv;
	int entry = yp->cur_rx % RX_RING_SIZE;
	int boguscnt = yp->dirty_rx + RX_RING_SIZE - yp->cur_rx;

	if (yellowfin_debug > 4) {
		printk(KERN_DEBUG " In yellowfin_rx(), entry %d status %8.8x.\n",
			   entry, yp->rx_ring[entry].result_status);
		printk(KERN_DEBUG "   #%d desc. %8.8x %8.8x %8.8x.\n",
			   entry, yp->rx_ring[entry].dbdma_cmd, yp->rx_ring[entry].addr,
			   yp->rx_ring[entry].result_status);
	}

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (1) {
		struct yellowfin_desc *desc = &yp->rx_ring[entry];
		struct sk_buff *rx_skb = yp->rx_skbuff[entry];
		s16 frame_status;
		u16 desc_status;
		int data_size;
		u8 *buf_addr;

		if(!desc->result_status)
			break;
		pci_dma_sync_single_for_cpu(yp->pci_dev, desc->addr,
			yp->rx_buf_sz, PCI_DMA_FROMDEVICE);
		desc_status = le32_to_cpu(desc->result_status) >> 16;
		buf_addr = rx_skb->tail;
		data_size = (le32_to_cpu(desc->dbdma_cmd) -
			le32_to_cpu(desc->result_status)) & 0xffff;
		frame_status = le16_to_cpu(get_unaligned((s16*)&(buf_addr[data_size - 2])));
		if (yellowfin_debug > 4)
			printk(KERN_DEBUG "  yellowfin_rx() status was %4.4x.\n",
				   frame_status);
		if (--boguscnt < 0)
			break;
		if ( ! (desc_status & RX_EOP)) {
			if (data_size != 0)
				printk(KERN_WARNING "%s: Oversized Ethernet frame spanned multiple buffers,"
					   " status %4.4x, data_size %d!\n", dev->name, desc_status, data_size);
			yp->stats.rx_length_errors++;
		} else if ((yp->drv_flags & IsGigabit) && (frame_status & 0x0038)) {
			/* There was an error. */
			if (yellowfin_debug > 3)
				printk(KERN_DEBUG "  yellowfin_rx() Rx error was %4.4x.\n",
					   frame_status);
			yp->stats.rx_errors++;
			if (frame_status & 0x0060) yp->stats.rx_length_errors++;
			if (frame_status & 0x0008) yp->stats.rx_frame_errors++;
			if (frame_status & 0x0010) yp->stats.rx_crc_errors++;
			if (frame_status < 0) yp->stats.rx_dropped++;
		} else if ( !(yp->drv_flags & IsGigabit) &&
				   ((buf_addr[data_size-1] & 0x85) || buf_addr[data_size-2] & 0xC0)) {
			u8 status1 = buf_addr[data_size-2];
			u8 status2 = buf_addr[data_size-1];
			yp->stats.rx_errors++;
			if (status1 & 0xC0) yp->stats.rx_length_errors++;
			if (status2 & 0x03) yp->stats.rx_frame_errors++;
			if (status2 & 0x04) yp->stats.rx_crc_errors++;
			if (status2 & 0x80) yp->stats.rx_dropped++;
#ifdef YF_PROTOTYPE		/* Support for prototype hardware errata. */
		} else if ((yp->drv_flags & HasMACAddrBug) &&
			memcmp(buf_addr, dev->dev_addr, 6) != 0 &&
			memcmp(buf_addr, "\377\377\377\377\377\377", 6) != 0) {
			if (bogus_rx++ == 0)
				printk(KERN_WARNING "%s: Bad frame to %2.2x:%2.2x:%2.2x:%2.2x:"
					   "%2.2x:%2.2x.\n",
					   dev->name, buf_addr[0], buf_addr[1], buf_addr[2],
					   buf_addr[3], buf_addr[4], buf_addr[5]);
#endif
		} else {
			struct sk_buff *skb;
			int pkt_len = data_size -
				(yp->chip_id ? 7 : 8 + buf_addr[data_size - 8]);
			/* To verify: Yellowfin Length should omit the CRC! */

#ifndef final_version
			if (yellowfin_debug > 4)
				printk(KERN_DEBUG "  yellowfin_rx() normal Rx pkt length %d"
					   " of %d, bogus_cnt %d.\n",
					   pkt_len, data_size, boguscnt);
#endif
			/* Check if the packet is long enough to just pass up the skbuff
			   without copying to a properly sized skbuff. */
			if (pkt_len > rx_copybreak) {
				skb_put(skb = rx_skb, pkt_len);
				pci_unmap_single(yp->pci_dev,
					yp->rx_ring[entry].addr,
					yp->rx_buf_sz,
					PCI_DMA_FROMDEVICE);
				yp->rx_skbuff[entry] = NULL;
			} else {
				skb = dev_alloc_skb(pkt_len + 2);
				if (skb == NULL)
					break;
				skb->dev = dev;
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				eth_copy_and_sum(skb, rx_skb->tail, pkt_len, 0);
				skb_put(skb, pkt_len);
				pci_dma_sync_single_for_device(yp->pci_dev, desc->addr,
					yp->rx_buf_sz,
					PCI_DMA_FROMDEVICE);
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			dev->last_rx = jiffies;
			yp->stats.rx_packets++;
			yp->stats.rx_bytes += pkt_len;
		}
		entry = (++yp->cur_rx) % RX_RING_SIZE;
	}

	/* Refill the Rx ring buffers. */
	for (; yp->cur_rx - yp->dirty_rx > 0; yp->dirty_rx++) {
		entry = yp->dirty_rx % RX_RING_SIZE;
		if (yp->rx_skbuff[entry] == NULL) {
			struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz);
			if (skb == NULL)
				break;		/* Better luck next round. */
			yp->rx_skbuff[entry] = skb;
			skb->dev = dev;		/* Mark as being used by this device. */
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
			yp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
				skb->tail, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
		}
		yp->rx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_STOP);
		yp->rx_ring[entry].result_status = 0;	/* Clear complete bit. */
		if (entry != 0)
			yp->rx_ring[entry - 1].dbdma_cmd =
				cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
		else
			yp->rx_ring[RX_RING_SIZE - 1].dbdma_cmd =
				cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | BRANCH_ALWAYS
							| yp->rx_buf_sz);
	}

	return 0;
}

static void yellowfin_error(struct net_device *dev, int intr_status)
{
	struct yellowfin_private *yp = dev->priv;

	printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
		   dev->name, intr_status);
	/* Hmmmmm, it's not clear what to do here. */
	if (intr_status & (IntrTxPCIErr | IntrTxPCIFault))
		yp->stats.tx_errors++;
	if (intr_status & (IntrRxPCIErr | IntrRxPCIFault))
		yp->stats.rx_errors++;
}

static int yellowfin_close(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct yellowfin_private *yp = dev->priv;
	int i;

	netif_stop_queue (dev);

	if (yellowfin_debug > 1) {
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %4.4x "
			   "Rx %4.4x Int %2.2x.\n",
			   dev->name, inw(ioaddr + TxStatus),
			   inw(ioaddr + RxStatus), inw(ioaddr + IntrStatus));
		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
			   dev->name, yp->cur_tx, yp->dirty_tx, yp->cur_rx, yp->dirty_rx);
	}

	/* Disable interrupts by clearing the interrupt mask. */
	outw(0x0000, ioaddr + IntrEnb);

	/* Stop the chip's Tx and Rx processes. */
	outl(0x80000000, ioaddr + RxCtrl);
	outl(0x80000000, ioaddr + TxCtrl);

	del_timer(&yp->timer);

#if defined(__i386__)
	if (yellowfin_debug > 2) {
		printk("\n"KERN_DEBUG"  Tx ring at %8.8llx:\n",
			   (unsigned long long)yp->tx_ring_dma);
		for (i = 0; i < TX_RING_SIZE*2; i++)
			printk(" %c #%d desc. %8.8x %8.8x %8.8x %8.8x.\n",
				   inl(ioaddr + TxPtr) == (long)&yp->tx_ring[i] ? '>' : ' ',
				   i, yp->tx_ring[i].dbdma_cmd, yp->tx_ring[i].addr,
				   yp->tx_ring[i].branch_addr, yp->tx_ring[i].result_status);
		printk(KERN_DEBUG "  Tx status %p:\n", yp->tx_status);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk("   #%d status %4.4x %4.4x %4.4x %4.4x.\n",
				   i, yp->tx_status[i].tx_cnt, yp->tx_status[i].tx_errs,
				   yp->tx_status[i].total_tx_cnt, yp->tx_status[i].paused);

		printk("\n"KERN_DEBUG "  Rx ring %8.8llx:\n",
			   (unsigned long long)yp->rx_ring_dma);
		for (i = 0; i < RX_RING_SIZE; i++) {
			printk(KERN_DEBUG " %c #%d desc. %8.8x %8.8x %8.8x\n",
				   inl(ioaddr + RxPtr) == (long)&yp->rx_ring[i] ? '>' : ' ',
				   i, yp->rx_ring[i].dbdma_cmd, yp->rx_ring[i].addr,
				   yp->rx_ring[i].result_status);
			if (yellowfin_debug > 6) {
				if (get_unaligned((u8*)yp->rx_ring[i].addr) != 0x69) {
					int j;
					for (j = 0; j < 0x50; j++)
						printk(" %4.4x",
							   get_unaligned(((u16*)yp->rx_ring[i].addr) + j));
					printk("\n");
				}
			}
		}
	}
#endif /* __i386__ debugging only */

	free_irq(dev->irq, dev);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		yp->rx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
		yp->rx_ring[i].addr = 0xBADF00D0; /* An invalid address. */
		if (yp->rx_skbuff[i]) {
			dev_kfree_skb(yp->rx_skbuff[i]);
		}
		yp->rx_skbuff[i] = NULL;
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		if (yp->tx_skbuff[i])
			dev_kfree_skb(yp->tx_skbuff[i]);
		yp->tx_skbuff[i] = NULL;
	}

#ifdef YF_PROTOTYPE		/* Support for prototype hardware errata. */
	if (yellowfin_debug > 0) {
		printk(KERN_DEBUG "%s: Received %d frames that we should not have.\n",
			   dev->name, bogus_rx);
	}
#endif

	return 0;
}

static struct net_device_stats *yellowfin_get_stats(struct net_device *dev)
{
	struct yellowfin_private *yp = dev->priv;
	return &yp->stats;
}

/* Set or clear the multicast filter for this adaptor. */

static void set_rx_mode(struct net_device *dev)
{
	struct yellowfin_private *yp = dev->priv;
	long ioaddr = dev->base_addr;
	u16 cfg_value = inw(ioaddr + Cnfg);

	/* Stop the Rx process to change any value. */
	outw(cfg_value & ~0x1000, ioaddr + Cnfg);
	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
		/* Unconditionally log net taps. */
		printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
		outw(0x000F, ioaddr + AddrMode);
	} else if ((dev->mc_count > 64) || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter well, or accept all multicasts. */
		outw(0x000B, ioaddr + AddrMode);
	} else if (dev->mc_count > 0) { /* Must use the multicast hash table. */
		struct dev_mc_list *mclist;
		u16 hash_table[4];
		int i;
		memset(hash_table, 0, sizeof(hash_table));
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
			 i++, mclist = mclist->next) {
			unsigned int bit;

			/* Due to a bug in the early chip versions, multiple filter
			   slots must be set for each address. */
			if (yp->drv_flags & HasMulticastBug) {
				bit = (ether_crc_le(3, mclist->dmi_addr) >> 3) & 0x3f;
				hash_table[bit >> 4] |= (1 << bit);
				bit = (ether_crc_le(4, mclist->dmi_addr) >> 3) & 0x3f;
				hash_table[bit >> 4] |= (1 << bit);
				bit = (ether_crc_le(5, mclist->dmi_addr) >> 3) & 0x3f;
				hash_table[bit >> 4] |= (1 << bit);
			}
			bit = (ether_crc_le(6, mclist->dmi_addr) >> 3) & 0x3f;
			hash_table[bit >> 4] |= (1 << bit);
		}
		/* Copy the hash table to the chip. */
		for (i = 0; i < 4; i++)
			outw(hash_table[i], ioaddr + HashTbl + i*2);
		outw(0x0003, ioaddr + AddrMode);
	} else {			/* Normal, unicast/broadcast-only mode. */
		outw(0x0001, ioaddr + AddrMode);
	}
	/* Restart the Rx process. */
	outw(cfg_value | 0x1000, ioaddr + Cnfg);
}

static int netdev_ethtool_ioctl(struct net_device *dev, void __user *useraddr)
{
	struct yellowfin_private *np = dev->priv;
	u32 ethcmd;

	if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
		return -EFAULT;

	switch (ethcmd) {
	case ETHTOOL_GDRVINFO: {
		struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO};
		strcpy(info.driver, DRV_NAME);
		strcpy(info.version, DRV_VERSION);
		strcpy(info.bus_info, pci_name(np->pci_dev));
		if (copy_to_user(useraddr, &info, sizeof(info)))
			return -EFAULT;
		return 0;
	}

	}

	return -EOPNOTSUPP;
}

static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct yellowfin_private *np = dev->priv;
	long ioaddr = dev->base_addr;
	struct mii_ioctl_data *data = if_mii(rq);

	switch(cmd) {
	case SIOCETHTOOL:
		return netdev_ethtool_ioctl(dev, rq->ifr_data);
	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
		data->phy_id = np->phys[0] & 0x1f;
		/* Fall Through */

	case SIOCGMIIREG:		/* Read MII PHY register. */
		data->val_out = mdio_read(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f);
		return 0;

	case SIOCSMIIREG:		/* Write MII PHY register. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (data->phy_id == np->phys[0]) {
			u16 value = data->val_in;
			switch (data->reg_num) {
			case 0:
				/* Check for autonegotiation on or reset. */
				np->medialock = (value & 0x9000) ? 0 : 1;
				if (np->medialock)
					np->full_duplex = (value & 0x0100) ? 1 : 0;
				break;
			case 4: np->advertising = value; break;
			}
			/* Perhaps check_duplex(dev), depending on chip semantics. */
		}
		mdio_write(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static void __devexit yellowfin_remove_one (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct yellowfin_private *np;

	if (!dev)
		BUG();
	np = dev->priv;

	pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status,
		np->tx_status_dma);
	pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
	pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
	unregister_netdev (dev);

	pci_release_regions (pdev);

#ifndef USE_IO_OPS
	iounmap ((void *) dev->base_addr);
#endif

	free_netdev (dev);
	pci_set_drvdata(pdev, NULL);
}

static struct pci_driver yellowfin_driver = {
	.name		= DRV_NAME,
	.id_table	= yellowfin_pci_tbl,
	.probe		= yellowfin_init_one,
	.remove		= __devexit_p(yellowfin_remove_one),
};


static int __init yellowfin_init (void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk(version);
#endif
	return pci_module_init (&yellowfin_driver);
}


static void __exit yellowfin_cleanup (void)
{
	pci_unregister_driver (&yellowfin_driver);
}


module_init(yellowfin_init);
module_exit(yellowfin_cleanup);

/*
 * Local variables:
 *  compile-command: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c yellowfin.c"
 *  compile-command-alphaLX: "gcc -DMODULE -Wall -Wstrict-prototypes -O2 -c yellowfin.c -fomit-frame-pointer -fno-strength-reduce -mno-fp-regs -Wa,-m21164a -DBWX_USABLE -DBWIO_ENABLED"
 *  simple-compile-command: "gcc -DMODULE -O6 -c yellowfin.c"
 *  c-indent-level: 4
 *  c-basic-offset: 4
 *  tab-width: 4
 * End:
 */