[NET]: Avoid allocating skb in skb_pad
[linux-2.6/libata-dev.git] / drivers / net / yellowfin.c
blobecec8e5db786e66dc4128edd216d8cb8a0072eb8
1 /* yellowfin.c: A Packet Engines G-NIC ethernet driver for linux. */
2 /*
3 Written 1997-2001 by Donald Becker.
5 This software may be used and distributed according to the terms of
6 the GNU General Public License (GPL), incorporated herein by reference.
7 Drivers based on or derived from this code fall under the GPL and must
8 retain the authorship, copyright and license notice. This file is not
9 a complete program and may only be used when the entire operating
10 system is licensed under the GPL.
12 This driver is for the Packet Engines G-NIC PCI Gigabit Ethernet adapter.
13 It also supports the Symbios Logic version of the same chip core.
15 The author may be reached as becker@scyld.com, or C/O
16 Scyld Computing Corporation
17 410 Severn Ave., Suite 210
18 Annapolis MD 21403
20 Support and updates available at
21 http://www.scyld.com/network/yellowfin.html
24 Linux kernel changelog:
25 -----------------------
27 LK1.1.1 (jgarzik): Port to 2.4 kernel
29 LK1.1.2 (jgarzik):
30 * Merge in becker version 1.05
32 LK1.1.3 (jgarzik):
33 * Various cleanups
34 * Update yellowfin_timer to correctly calculate duplex.
35 (suggested by Manfred Spraul)
37 LK1.1.4 (val@nmt.edu):
38 * Fix three endian-ness bugs
39 * Support dual function SYM53C885E ethernet chip
41 LK1.1.5 (val@nmt.edu):
42 * Fix forced full-duplex bug I introduced
44 LK1.1.6 (val@nmt.edu):
45 * Only print warning on truly "oversized" packets
46 * Fix theoretical bug on gigabit cards - return to 1.1.3 behavior
50 #define DRV_NAME "yellowfin"
51 #define DRV_VERSION "1.05+LK1.1.6"
52 #define DRV_RELDATE "Feb 11, 2002"
54 #define PFX DRV_NAME ": "
56 /* The user-configurable values.
57 These may be modified when a driver module is loaded.*/
59 static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
60 /* Maximum events (Rx packets, etc.) to handle at each interrupt. */
61 static int max_interrupt_work = 20;
62 static int mtu;
63 #ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */
64 /* System-wide count of bogus-rx frames. */
65 static int bogus_rx;
66 static int dma_ctrl = 0x004A0263; /* Constrained by errata */
67 static int fifo_cfg = 0x0020; /* Bypass external Tx FIFO. */
68 #elif defined(YF_NEW) /* A future perfect board :->. */
69 static int dma_ctrl = 0x00CAC277; /* Override when loading module! */
70 static int fifo_cfg = 0x0028;
71 #else
72 static const int dma_ctrl = 0x004A0263; /* Constrained by errata */
73 static const int fifo_cfg = 0x0020; /* Bypass external Tx FIFO. */
74 #endif
76 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
77 Setting to > 1514 effectively disables this feature. */
78 static int rx_copybreak;
80 /* Used to pass the media type, etc.
81 No media types are currently defined. These exist for driver
82 interoperability.
84 #define MAX_UNITS 8 /* More are supported, limit only on options */
85 static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
86 static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
88 /* Do ugly workaround for GX server chipset errata. */
89 static int gx_fix;
91 /* Operational parameters that are set at compile time. */
93 /* Keep the ring sizes a power of two for efficiency.
94 Making the Tx ring too long decreases the effectiveness of channel
95 bonding and packet priority.
96 There are no ill effects from too-large receive rings. */
97 #define TX_RING_SIZE 16
98 #define TX_QUEUE_SIZE 12 /* Must be > 4 && <= TX_RING_SIZE */
99 #define RX_RING_SIZE 64
100 #define STATUS_TOTAL_SIZE TX_RING_SIZE*sizeof(struct tx_status_words)
101 #define TX_TOTAL_SIZE 2*TX_RING_SIZE*sizeof(struct yellowfin_desc)
102 #define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct yellowfin_desc)
104 /* Operational parameters that usually are not changed. */
105 /* Time in jiffies before concluding the transmitter is hung. */
106 #define TX_TIMEOUT (2*HZ)
107 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
109 #define yellowfin_debug debug
111 #include <linux/module.h>
112 #include <linux/kernel.h>
113 #include <linux/string.h>
114 #include <linux/timer.h>
115 #include <linux/errno.h>
116 #include <linux/ioport.h>
117 #include <linux/slab.h>
118 #include <linux/interrupt.h>
119 #include <linux/pci.h>
120 #include <linux/init.h>
121 #include <linux/mii.h>
122 #include <linux/netdevice.h>
123 #include <linux/etherdevice.h>
124 #include <linux/skbuff.h>
125 #include <linux/ethtool.h>
126 #include <linux/crc32.h>
127 #include <linux/bitops.h>
128 #include <asm/uaccess.h>
129 #include <asm/processor.h> /* Processor type for cache alignment. */
130 #include <asm/unaligned.h>
131 #include <asm/io.h>
133 /* These identify the driver base version and may not be removed. */
134 static char version[] __devinitdata =
135 KERN_INFO DRV_NAME ".c:v1.05 1/09/2001 Written by Donald Becker <becker@scyld.com>\n"
136 KERN_INFO " http://www.scyld.com/network/yellowfin.html\n"
137 KERN_INFO " (unofficial 2.4.x port, " DRV_VERSION ", " DRV_RELDATE ")\n";
139 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
140 MODULE_DESCRIPTION("Packet Engines Yellowfin G-NIC Gigabit Ethernet driver");
141 MODULE_LICENSE("GPL");
143 module_param(max_interrupt_work, int, 0);
144 module_param(mtu, int, 0);
145 module_param(debug, int, 0);
146 module_param(rx_copybreak, int, 0);
147 module_param_array(options, int, NULL, 0);
148 module_param_array(full_duplex, int, NULL, 0);
149 module_param(gx_fix, int, 0);
150 MODULE_PARM_DESC(max_interrupt_work, "G-NIC maximum events handled per interrupt");
151 MODULE_PARM_DESC(mtu, "G-NIC MTU (all boards)");
152 MODULE_PARM_DESC(debug, "G-NIC debug level (0-7)");
153 MODULE_PARM_DESC(rx_copybreak, "G-NIC copy breakpoint for copy-only-tiny-frames");
154 MODULE_PARM_DESC(options, "G-NIC: Bits 0-3: media type, bit 17: full duplex");
155 MODULE_PARM_DESC(full_duplex, "G-NIC full duplex setting(s) (1)");
156 MODULE_PARM_DESC(gx_fix, "G-NIC: enable GX server chipset bug workaround (0-1)");
159 Theory of Operation
161 I. Board Compatibility
163 This device driver is designed for the Packet Engines "Yellowfin" Gigabit
164 Ethernet adapter. The G-NIC 64-bit PCI card is supported, as well as the
165 Symbios 53C885E dual function chip.
167 II. Board-specific settings
169 PCI bus devices are configured by the system at boot time, so no jumpers
170 need to be set on the board. The system BIOS preferably should assign the
171 PCI INTA signal to an otherwise unused system IRQ line.
172 Note: Kernel versions earlier than 1.3.73 do not support shared PCI
173 interrupt lines.
175 III. Driver operation
177 IIIa. Ring buffers
179 The Yellowfin uses the Descriptor Based DMA Architecture specified by Apple.
180 This is a descriptor list scheme similar to that used by the EEPro100 and
181 Tulip. This driver uses two statically allocated fixed-size descriptor lists
182 formed into rings by a branch from the final descriptor to the beginning of
183 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
185 The driver allocates full frame size skbuffs for the Rx ring buffers at
186 open() time and passes the skb->data field to the Yellowfin as receive data
187 buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
188 a fresh skbuff is allocated and the frame is copied to the new skbuff.
189 When the incoming frame is larger, the skbuff is passed directly up the
190 protocol stack and replaced by a newly allocated skbuff.
192 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
193 using a full-sized skbuff for small frames vs. the copying costs of larger
194 frames. For small frames the copying cost is negligible (esp. considering
195 that we are pre-loading the cache with immediately useful header
196 information). For large frames the copying cost is non-trivial, and the
197 larger copy might flush the cache of useful data.
199 IIIC. Synchronization
201 The driver runs as two independent, single-threaded flows of control. One
202 is the send-packet routine, which enforces single-threaded use by the
203 dev->tbusy flag. The other thread is the interrupt handler, which is single
204 threaded by the hardware and other software.
206 The send packet thread has partial control over the Tx ring and 'dev->tbusy'
207 flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
208 queue slot is empty, it clears the tbusy flag when finished otherwise it sets
209 the 'yp->tx_full' flag.
211 The interrupt handler has exclusive control over the Rx ring and records stats
212 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
213 empty by incrementing the dirty_tx mark. Iff the 'yp->tx_full' flag is set, it
214 clears both the tx_full and tbusy flags.
216 IV. Notes
218 Thanks to Kim Stearns of Packet Engines for providing a pair of G-NIC boards.
219 Thanks to Bruce Faust of Digitalscape for providing both their SYM53C885 board
220 and an AlphaStation to verifty the Alpha port!
222 IVb. References
224 Yellowfin Engineering Design Specification, 4/23/97 Preliminary/Confidential
225 Symbios SYM53C885 PCI-SCSI/Fast Ethernet Multifunction Controller Preliminary
226 Data Manual v3.0
227 http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
228 http://cesdis.gsfc.nasa.gov/linux/misc/100mbps.html
230 IVc. Errata
232 See Packet Engines confidential appendix (prototype chips only).
/* PCI probe-time setup flags, inherited from Becker's generic driver
   template.  NOTE(review): the closing "};" of both enums was lost in
   this mangled copy — confirm against the upstream file. */
237 enum pci_id_flags_bits {
238 /* Set PCI command register bits before calling probe1(). */
239 PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
240 /* Read and map the single following PCI BAR. */
241 PCI_ADDR0=0<<4, PCI_ADDR1=1<<4, PCI_ADDR2=2<<4, PCI_ADDR3=3<<4,
242 PCI_ADDR_64BITS=0x100, PCI_NO_ACPI_WAKE=0x200, PCI_NO_MIN_LATENCY=0x400,
243 PCI_UNUSED_IRQ=0x800,
/* Per-board capability bits; stored in yellowfin_private.drv_flags and
   tested throughout the driver (e.g. IsGigabit in yellowfin_open()). */
245 enum capability_flags {
246 HasMII=1, FullTxStatus=2, IsGigabit=4, HasMulticastBug=8, FullRxStatus=16,
247 HasMACAddrBug=32, /* Only on early revs. */
248 DontUseEeprom=64, /* Don't read the MAC from the EEPROM. */
250 /* The PCI I/O space extent. */
251 #define YELLOWFIN_SIZE 0x100
/* BAR selection: I/O-port access via BAR0 or MMIO via BAR1. */
252 #ifdef USE_IO_OPS
253 #define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_IO | PCI_ADDR0)
254 #else
255 #define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR1)
256 #endif
/* Per-chip description: human-readable name plus match/capability data.
   Indexed by pci_device_id.driver_data from yellowfin_pci_tbl below. */
258 struct pci_id_info {
259 const char *name;
260 struct match_info {
261 int pci, pci_mask, subsystem, subsystem_mask;
262 int revision, revision_mask; /* Only 8 bits. */
263 } id;
264 enum pci_id_flags_bits pci_flags;
265 int io_size; /* Needed for I/O region check or ioremap(). */
266 int drv_flags; /* Driver use, intended as capability flags. */
/* The two supported parts: the Packet Engines G-NIC and the Symbios
   SYM53C885 Ethernet function.  NULL-name entry terminates the table. */
269 static const struct pci_id_info pci_id_tbl[] = {
270 {"Yellowfin G-NIC Gigabit Ethernet", { 0x07021000, 0xffffffff},
271 PCI_IOTYPE, YELLOWFIN_SIZE,
272 FullTxStatus | IsGigabit | HasMulticastBug | HasMACAddrBug | DontUseEeprom},
273 {"Symbios SYM83C885", { 0x07011000, 0xffffffff},
274 PCI_IOTYPE, YELLOWFIN_SIZE, HasMII | DontUseEeprom },
275 {NULL,},
/* PCI hotplug match table; driver_data is the pci_id_tbl[] index. */
278 static struct pci_device_id yellowfin_pci_tbl[] = {
279 { 0x1000, 0x0702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
280 { 0x1000, 0x0701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
281 { 0, }
283 MODULE_DEVICE_TABLE (pci, yellowfin_pci_tbl);
286 /* Offsets to the Yellowfin registers. Various sizes and alignments. */
287 enum yellowfin_offsets {
/* Tx DBDMA channel control/status and descriptor-list pointer. */
288 TxCtrl=0x00, TxStatus=0x04, TxPtr=0x0C,
289 TxIntrSel=0x10, TxBranchSel=0x14, TxWaitSel=0x18,
/* Rx DBDMA channel, mirror layout of the Tx channel. */
290 RxCtrl=0x40, RxStatus=0x44, RxPtr=0x4C,
291 RxIntrSel=0x50, RxBranchSel=0x54, RxWaitSel=0x58,
/* Interrupt/event and chip-global registers. */
292 EventStatus=0x80, IntrEnb=0x82, IntrClear=0x84, IntrStatus=0x86,
293 ChipRev=0x8C, DMACtrl=0x90, TxThreshold=0x94,
294 Cnfg=0xA0, FrameGap0=0xA2, FrameGap1=0xA4,
/* MII management interface (only used when drv_flags has HasMII). */
295 MII_Cmd=0xA6, MII_Addr=0xA8, MII_Wr_Data=0xAA, MII_Rd_Data=0xAC,
296 MII_Status=0xAE,
297 RxDepth=0xB8, FlowCtrl=0xBC,
/* Address filtering and serial EEPROM access. */
298 AddrMode=0xD0, StnAddr=0xD2, HashTbl=0xD8, FIFOcfg=0xF8,
299 EEStatus=0xF0, EECtrl=0xF1, EEAddr=0xF2, EERead=0xF3, EEWrite=0xF4,
300 EEFeature=0xF5,
303 /* The Yellowfin Rx and Tx buffer descriptors.
304 Elements are written as 32 bit for endian portability. */
/* One DBDMA descriptor: command+length word, buffer bus address, branch
   target (next descriptor), and the hardware-written completion word. */
305 struct yellowfin_desc {
306 u32 dbdma_cmd;
307 u32 addr;
308 u32 branch_addr;
309 u32 result_status;
/* Tx status block written back by the chip (full-status mode); field
   order is swapped so each 16-bit pair lands identically on either
   endianness of the 32-bit bus writes. */
312 struct tx_status_words {
313 #ifdef __BIG_ENDIAN
314 u16 tx_errs;
315 u16 tx_cnt;
316 u16 paused;
317 u16 total_tx_cnt;
318 #else /* Little endian chips. */
319 u16 tx_cnt;
320 u16 tx_errs;
321 u16 total_tx_cnt;
322 u16 paused;
323 #endif /* __BIG_ENDIAN */
326 /* Bits in yellowfin_desc.cmd */
327 enum desc_cmd_bits {
/* Top nibble selects the DBDMA operation... */
328 CMD_TX_PKT=0x10000000, CMD_RX_BUF=0x20000000, CMD_TXSTATUS=0x30000000,
329 CMD_NOP=0x60000000, CMD_STOP=0x70000000,
/* ...while these modifier bits control branch/interrupt/wait behavior. */
330 BRANCH_ALWAYS=0x0C0000, INTR_ALWAYS=0x300000, WAIT_ALWAYS=0x030000,
331 BRANCH_IFTRUE=0x040000,
334 /* Bits in yellowfin_desc.status */
335 enum desc_status_bits { RX_EOP=0x0040, };
336 /* End-of-packet marker in the Rx result_status word. */
337 /* Bits in the interrupt status/mask registers. */
338 enum intr_status_bits {
339 IntrRxDone=0x01, IntrRxInvalid=0x02, IntrRxPCIFault=0x04,IntrRxPCIErr=0x08,
340 IntrTxDone=0x10, IntrTxInvalid=0x20, IntrTxPCIFault=0x40,IntrTxPCIErr=0x80,
341 IntrEarlyRx=0x100, IntrWakeup=0x200, };
343 #define PRIV_ALIGN 31 /* Required alignment mask */
344 #define MII_CNT 4
/* Per-device driver state, hung off the net_device via netdev_priv(). */
345 struct yellowfin_private {
346 /* Descriptor rings first for alignment.
347 Tx requires a second descriptor for status. */
348 struct yellowfin_desc *rx_ring;
349 struct yellowfin_desc *tx_ring;
350 struct sk_buff* rx_skbuff[RX_RING_SIZE];
351 struct sk_buff* tx_skbuff[TX_RING_SIZE];
/* Bus addresses of the rings, handed to the chip's RxPtr/TxPtr. */
352 dma_addr_t rx_ring_dma;
353 dma_addr_t tx_ring_dma;
355 struct tx_status_words *tx_status;
356 dma_addr_t tx_status_dma;
358 struct timer_list timer; /* Media selection timer. */
359 struct net_device_stats stats;
360 /* Frequently used and paired value: keep adjacent for cache effect. */
361 int chip_id, drv_flags;
362 struct pci_dev *pci_dev;
363 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
364 unsigned int rx_buf_sz; /* Based on MTU+slack. */
365 struct tx_status_words *tx_tail_desc;
366 unsigned int cur_tx, dirty_tx;
367 int tx_threshold;
368 unsigned int tx_full:1; /* The Tx queue is full. */
369 unsigned int full_duplex:1; /* Full-duplex operation requested. */
370 unsigned int duplex_lock:1;
371 unsigned int medialock:1; /* Do not sense media. */
372 unsigned int default_port:4; /* Last dev->if_port value. */
373 /* MII transceiver section. */
374 int mii_cnt; /* MII device addresses. */
375 u16 advertising; /* NWay media advertisement */
376 unsigned char phys[MII_CNT]; /* MII device addresses, only first one used */
377 spinlock_t lock;
378 void __iomem *base;
/* Forward declarations for the driver entry points defined below. */
381 static int read_eeprom(void __iomem *ioaddr, int location);
382 static int mdio_read(void __iomem *ioaddr, int phy_id, int location);
383 static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value);
384 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
385 static int yellowfin_open(struct net_device *dev);
386 static void yellowfin_timer(unsigned long data);
387 static void yellowfin_tx_timeout(struct net_device *dev);
388 static void yellowfin_init_ring(struct net_device *dev);
389 static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev);
390 static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
391 static int yellowfin_rx(struct net_device *dev);
392 static void yellowfin_error(struct net_device *dev, int intr_status);
393 static int yellowfin_close(struct net_device *dev);
394 static struct net_device_stats *yellowfin_get_stats(struct net_device *dev);
395 static void set_rx_mode(struct net_device *dev);
396 static struct ethtool_ops ethtool_ops;
399 static int __devinit yellowfin_init_one(struct pci_dev *pdev,
400 const struct pci_device_id *ent)
402 struct net_device *dev;
403 struct yellowfin_private *np;
404 int irq;
405 int chip_idx = ent->driver_data;
406 static int find_cnt;
407 void __iomem *ioaddr;
408 int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
409 int drv_flags = pci_id_tbl[chip_idx].drv_flags;
410 void *ring_space;
411 dma_addr_t ring_dma;
412 #ifdef USE_IO_OPS
413 int bar = 0;
414 #else
415 int bar = 1;
416 #endif
418 /* when built into the kernel, we only print version if device is found */
419 #ifndef MODULE
420 static int printed_version;
421 if (!printed_version++)
422 printk(version);
423 #endif
425 i = pci_enable_device(pdev);
426 if (i) return i;
428 dev = alloc_etherdev(sizeof(*np));
429 if (!dev) {
430 printk (KERN_ERR PFX "cannot allocate ethernet device\n");
431 return -ENOMEM;
433 SET_MODULE_OWNER(dev);
434 SET_NETDEV_DEV(dev, &pdev->dev);
436 np = netdev_priv(dev);
438 if (pci_request_regions(pdev, DRV_NAME))
439 goto err_out_free_netdev;
441 pci_set_master (pdev);
443 ioaddr = pci_iomap(pdev, bar, YELLOWFIN_SIZE);
444 if (!ioaddr)
445 goto err_out_free_res;
447 irq = pdev->irq;
449 if (drv_flags & DontUseEeprom)
450 for (i = 0; i < 6; i++)
451 dev->dev_addr[i] = ioread8(ioaddr + StnAddr + i);
452 else {
453 int ee_offset = (read_eeprom(ioaddr, 6) == 0xff ? 0x100 : 0);
454 for (i = 0; i < 6; i++)
455 dev->dev_addr[i] = read_eeprom(ioaddr, ee_offset + i);
458 /* Reset the chip. */
459 iowrite32(0x80000000, ioaddr + DMACtrl);
461 dev->base_addr = (unsigned long)ioaddr;
462 dev->irq = irq;
464 pci_set_drvdata(pdev, dev);
465 spin_lock_init(&np->lock);
467 np->pci_dev = pdev;
468 np->chip_id = chip_idx;
469 np->drv_flags = drv_flags;
470 np->base = ioaddr;
472 ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
473 if (!ring_space)
474 goto err_out_cleardev;
475 np->tx_ring = (struct yellowfin_desc *)ring_space;
476 np->tx_ring_dma = ring_dma;
478 ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
479 if (!ring_space)
480 goto err_out_unmap_tx;
481 np->rx_ring = (struct yellowfin_desc *)ring_space;
482 np->rx_ring_dma = ring_dma;
484 ring_space = pci_alloc_consistent(pdev, STATUS_TOTAL_SIZE, &ring_dma);
485 if (!ring_space)
486 goto err_out_unmap_rx;
487 np->tx_status = (struct tx_status_words *)ring_space;
488 np->tx_status_dma = ring_dma;
490 if (dev->mem_start)
491 option = dev->mem_start;
493 /* The lower four bits are the media type. */
494 if (option > 0) {
495 if (option & 0x200)
496 np->full_duplex = 1;
497 np->default_port = option & 15;
498 if (np->default_port)
499 np->medialock = 1;
501 if (find_cnt < MAX_UNITS && full_duplex[find_cnt] > 0)
502 np->full_duplex = 1;
504 if (np->full_duplex)
505 np->duplex_lock = 1;
507 /* The Yellowfin-specific entries in the device structure. */
508 dev->open = &yellowfin_open;
509 dev->hard_start_xmit = &yellowfin_start_xmit;
510 dev->stop = &yellowfin_close;
511 dev->get_stats = &yellowfin_get_stats;
512 dev->set_multicast_list = &set_rx_mode;
513 dev->do_ioctl = &netdev_ioctl;
514 SET_ETHTOOL_OPS(dev, &ethtool_ops);
515 dev->tx_timeout = yellowfin_tx_timeout;
516 dev->watchdog_timeo = TX_TIMEOUT;
518 if (mtu)
519 dev->mtu = mtu;
521 i = register_netdev(dev);
522 if (i)
523 goto err_out_unmap_status;
525 printk(KERN_INFO "%s: %s type %8x at %p, ",
526 dev->name, pci_id_tbl[chip_idx].name,
527 ioread32(ioaddr + ChipRev), ioaddr);
528 for (i = 0; i < 5; i++)
529 printk("%2.2x:", dev->dev_addr[i]);
530 printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
532 if (np->drv_flags & HasMII) {
533 int phy, phy_idx = 0;
534 for (phy = 0; phy < 32 && phy_idx < MII_CNT; phy++) {
535 int mii_status = mdio_read(ioaddr, phy, 1);
536 if (mii_status != 0xffff && mii_status != 0x0000) {
537 np->phys[phy_idx++] = phy;
538 np->advertising = mdio_read(ioaddr, phy, 4);
539 printk(KERN_INFO "%s: MII PHY found at address %d, status "
540 "0x%4.4x advertising %4.4x.\n",
541 dev->name, phy, mii_status, np->advertising);
544 np->mii_cnt = phy_idx;
547 find_cnt++;
549 return 0;
551 err_out_unmap_status:
552 pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status,
553 np->tx_status_dma);
554 err_out_unmap_rx:
555 pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
556 err_out_unmap_tx:
557 pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
558 err_out_cleardev:
559 pci_set_drvdata(pdev, NULL);
560 pci_iounmap(pdev, ioaddr);
561 err_out_free_res:
562 pci_release_regions(pdev);
563 err_out_free_netdev:
564 free_netdev (dev);
565 return -ENODEV;
568 static int __devinit read_eeprom(void __iomem *ioaddr, int location)
570 int bogus_cnt = 10000; /* Typical 33Mhz: 1050 ticks */
572 iowrite8(location, ioaddr + EEAddr);
573 iowrite8(0x30 | ((location >> 8) & 7), ioaddr + EECtrl);
574 while ((ioread8(ioaddr + EEStatus) & 0x80) && --bogus_cnt > 0)
576 return ioread8(ioaddr + EERead);
579 /* MII Management Data I/O accesses.
580 These routines assume the MDIO controller is idle, and do not exit until
581 the command is finished. */
583 static int mdio_read(void __iomem *ioaddr, int phy_id, int location)
585 int i;
587 iowrite16((phy_id<<8) + location, ioaddr + MII_Addr);
588 iowrite16(1, ioaddr + MII_Cmd);
589 for (i = 10000; i >= 0; i--)
590 if ((ioread16(ioaddr + MII_Status) & 1) == 0)
591 break;
592 return ioread16(ioaddr + MII_Rd_Data);
595 static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value)
597 int i;
599 iowrite16((phy_id<<8) + location, ioaddr + MII_Addr);
600 iowrite16(value, ioaddr + MII_Wr_Data);
602 /* Wait for the command to finish. */
603 for (i = 10000; i >= 0; i--)
604 if ((ioread16(ioaddr + MII_Status) & 1) == 0)
605 break;
606 return;
/* Bring the interface up: grab the IRQ, build the rings, program the
   chip's DMA pointers, station address and condition-select registers,
   then start both channels and arm the media timer.  The register write
   ORDER below matters — do not reorder. */
610 static int yellowfin_open(struct net_device *dev)
612 struct yellowfin_private *yp = netdev_priv(dev);
613 void __iomem *ioaddr = yp->base;
614 int i;
616 /* Reset the chip. */
617 iowrite32(0x80000000, ioaddr + DMACtrl);
/* Shared IRQ; failure is propagated to the caller untouched. */
619 i = request_irq(dev->irq, &yellowfin_interrupt, SA_SHIRQ, dev->name, dev);
620 if (i) return i;
622 if (yellowfin_debug > 1)
623 printk(KERN_DEBUG "%s: yellowfin_open() irq %d.\n",
624 dev->name, dev->irq);
/* NOTE(review): init_ring cannot report skb allocation failure here —
   a partially filled Rx ring is tolerated (dirty_rx tracks the gap). */
626 yellowfin_init_ring(dev);
628 iowrite32(yp->rx_ring_dma, ioaddr + RxPtr);
629 iowrite32(yp->tx_ring_dma, ioaddr + TxPtr);
/* Program the 6-byte station address one byte at a time. */
631 for (i = 0; i < 6; i++)
632 iowrite8(dev->dev_addr[i], ioaddr + StnAddr + i);
634 /* Set up various condition 'select' registers.
635 There are no options here. */
636 iowrite32(0x00800080, ioaddr + TxIntrSel); /* Interrupt on Tx abort */
637 iowrite32(0x00800080, ioaddr + TxBranchSel); /* Branch on Tx abort */
638 iowrite32(0x00400040, ioaddr + TxWaitSel); /* Wait on Tx status */
639 iowrite32(0x00400040, ioaddr + RxIntrSel); /* Interrupt on Rx done */
640 iowrite32(0x00400040, ioaddr + RxBranchSel); /* Branch on Rx error */
641 iowrite32(0x00400040, ioaddr + RxWaitSel); /* Wait on Rx done */
643 /* Initialize other registers: with so many, eventually this will be
644 converted to an offset/value list. */
645 iowrite32(dma_ctrl, ioaddr + DMACtrl);
646 iowrite16(fifo_cfg, ioaddr + FIFOcfg);
647 /* Enable automatic generation of flow control frames, period 0xffff. */
648 iowrite32(0x0030FFFF, ioaddr + FlowCtrl);
650 yp->tx_threshold = 32;
651 iowrite32(yp->tx_threshold, ioaddr + TxThreshold);
653 if (dev->if_port == 0)
654 dev->if_port = yp->default_port;
656 netif_start_queue(dev);
658 /* Setting the Rx mode will start the Rx process. */
659 if (yp->drv_flags & IsGigabit) {
660 /* We are always in full-duplex mode with gigabit! */
661 yp->full_duplex = 1;
662 iowrite16(0x01CF, ioaddr + Cnfg);
663 } else {
664 iowrite16(0x0018, ioaddr + FrameGap0); /* 0060/4060 for non-MII 10baseT */
665 iowrite16(0x1018, ioaddr + FrameGap1);
/* Bit 1 of Cnfg selects full duplex; same encoding as yellowfin_timer. */
666 iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
668 set_rx_mode(dev);
670 /* Enable interrupts by setting the interrupt mask. */
671 iowrite16(0x81ff, ioaddr + IntrEnb); /* See enum intr_status_bits */
672 iowrite16(0x0000, ioaddr + EventStatus); /* Clear non-interrupting events */
673 iowrite32(0x80008000, ioaddr + RxCtrl); /* Start Rx and Tx channels. */
674 iowrite32(0x80008000, ioaddr + TxCtrl);
676 if (yellowfin_debug > 2) {
677 printk(KERN_DEBUG "%s: Done yellowfin_open().\n",
678 dev->name);
681 /* Set the timer to check for link beat. */
682 init_timer(&yp->timer);
683 yp->timer.expires = jiffies + 3*HZ;
684 yp->timer.data = (unsigned long)dev;
685 yp->timer.function = &yellowfin_timer; /* timer handler */
686 add_timer(&yp->timer);
688 return 0;
691 static void yellowfin_timer(unsigned long data)
693 struct net_device *dev = (struct net_device *)data;
694 struct yellowfin_private *yp = netdev_priv(dev);
695 void __iomem *ioaddr = yp->base;
696 int next_tick = 60*HZ;
698 if (yellowfin_debug > 3) {
699 printk(KERN_DEBUG "%s: Yellowfin timer tick, status %8.8x.\n",
700 dev->name, ioread16(ioaddr + IntrStatus));
703 if (yp->mii_cnt) {
704 int bmsr = mdio_read(ioaddr, yp->phys[0], MII_BMSR);
705 int lpa = mdio_read(ioaddr, yp->phys[0], MII_LPA);
706 int negotiated = lpa & yp->advertising;
707 if (yellowfin_debug > 1)
708 printk(KERN_DEBUG "%s: MII #%d status register is %4.4x, "
709 "link partner capability %4.4x.\n",
710 dev->name, yp->phys[0], bmsr, lpa);
712 yp->full_duplex = mii_duplex(yp->duplex_lock, negotiated);
714 iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
716 if (bmsr & BMSR_LSTATUS)
717 next_tick = 60*HZ;
718 else
719 next_tick = 3*HZ;
722 yp->timer.expires = jiffies + next_tick;
723 add_timer(&yp->timer);
726 static void yellowfin_tx_timeout(struct net_device *dev)
728 struct yellowfin_private *yp = netdev_priv(dev);
729 void __iomem *ioaddr = yp->base;
731 printk(KERN_WARNING "%s: Yellowfin transmit timed out at %d/%d Tx "
732 "status %4.4x, Rx status %4.4x, resetting...\n",
733 dev->name, yp->cur_tx, yp->dirty_tx,
734 ioread32(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
736 /* Note: these should be KERN_DEBUG. */
737 if (yellowfin_debug) {
738 int i;
739 printk(KERN_WARNING " Rx ring %p: ", yp->rx_ring);
740 for (i = 0; i < RX_RING_SIZE; i++)
741 printk(" %8.8x", yp->rx_ring[i].result_status);
742 printk("\n"KERN_WARNING" Tx ring %p: ", yp->tx_ring);
743 for (i = 0; i < TX_RING_SIZE; i++)
744 printk(" %4.4x /%8.8x", yp->tx_status[i].tx_errs,
745 yp->tx_ring[i].result_status);
746 printk("\n");
749 /* If the hardware is found to hang regularly, we will update the code
750 to reinitialize the chip here. */
751 dev->if_port = 0;
753 /* Wake the potentially-idle transmit channel. */
754 iowrite32(0x10001000, yp->base + TxCtrl);
755 if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
756 netif_wake_queue (dev); /* Typical path */
758 dev->trans_start = jiffies;
759 yp->stats.tx_errors++;
762 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
763 static void yellowfin_init_ring(struct net_device *dev)
765 struct yellowfin_private *yp = netdev_priv(dev);
766 int i;
768 yp->tx_full = 0;
769 yp->cur_rx = yp->cur_tx = 0;
770 yp->dirty_tx = 0;
772 yp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
774 for (i = 0; i < RX_RING_SIZE; i++) {
775 yp->rx_ring[i].dbdma_cmd =
776 cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
777 yp->rx_ring[i].branch_addr = cpu_to_le32(yp->rx_ring_dma +
778 ((i+1)%RX_RING_SIZE)*sizeof(struct yellowfin_desc));
781 for (i = 0; i < RX_RING_SIZE; i++) {
782 struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz);
783 yp->rx_skbuff[i] = skb;
784 if (skb == NULL)
785 break;
786 skb->dev = dev; /* Mark as being used by this device. */
787 skb_reserve(skb, 2); /* 16 byte align the IP header. */
788 yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
789 skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
791 yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP);
792 yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
794 #define NO_TXSTATS
795 #ifdef NO_TXSTATS
796 /* In this mode the Tx ring needs only a single descriptor. */
797 for (i = 0; i < TX_RING_SIZE; i++) {
798 yp->tx_skbuff[i] = NULL;
799 yp->tx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
800 yp->tx_ring[i].branch_addr = cpu_to_le32(yp->tx_ring_dma +
801 ((i+1)%TX_RING_SIZE)*sizeof(struct yellowfin_desc));
803 /* Wrap ring */
804 yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS);
805 #else
807 int j;
809 /* Tx ring needs a pair of descriptors, the second for the status. */
810 for (i = 0; i < TX_RING_SIZE; i++) {
811 j = 2*i;
812 yp->tx_skbuff[i] = 0;
813 /* Branch on Tx error. */
814 yp->tx_ring[j].dbdma_cmd = cpu_to_le32(CMD_STOP);
815 yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
816 (j+1)*sizeof(struct yellowfin_desc);
817 j++;
818 if (yp->flags & FullTxStatus) {
819 yp->tx_ring[j].dbdma_cmd =
820 cpu_to_le32(CMD_TXSTATUS | sizeof(*yp->tx_status));
821 yp->tx_ring[j].request_cnt = sizeof(*yp->tx_status);
822 yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
823 i*sizeof(struct tx_status_words);
824 } else {
825 /* Symbios chips write only tx_errs word. */
826 yp->tx_ring[j].dbdma_cmd =
827 cpu_to_le32(CMD_TXSTATUS | INTR_ALWAYS | 2);
828 yp->tx_ring[j].request_cnt = 2;
829 /* Om pade ummmmm... */
830 yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
831 i*sizeof(struct tx_status_words) +
832 &(yp->tx_status[0].tx_errs) -
833 &(yp->tx_status[0]));
835 yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
836 ((j+1)%(2*TX_RING_SIZE))*sizeof(struct yellowfin_desc));
838 /* Wrap ring */
839 yp->tx_ring[++j].dbdma_cmd |= cpu_to_le32(BRANCH_ALWAYS | INTR_ALWAYS);
841 #endif
842 yp->tx_tail_desc = &yp->tx_status[0];
843 return;
/* Queue one frame for transmit.  The DBDMA ownership handoff is done by
   writing the descriptor fields in a strict order (ownership word last,
   then cur_tx) — do not reorder these stores. */
846 static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev)
848 struct yellowfin_private *yp = netdev_priv(dev);
849 unsigned entry;
850 int len = skb->len;
/* Stopped unconditionally; re-enabled below once we know there is room. */
852 netif_stop_queue (dev);
854 /* Note: Ordering is important here, set the field with the
855 "ownership" bit last, and only then increment cur_tx. */
857 /* Calculate the next Tx descriptor entry. */
858 entry = yp->cur_tx % TX_RING_SIZE;
860 if (gx_fix) { /* Note: only works for paddable protocols e.g. IP. */
861 int cacheline_end = ((unsigned long)skb->data + skb->len) % 32;
862 /* Fix GX chipset errata. */
863 if (cacheline_end > 24 || cacheline_end == 0) {
864 len = skb->len + 32 - cacheline_end + 1;
/* skb_padto() frees the skb itself on failure; just drop the slot. */
865 if (skb_padto(skb, len)) {
866 yp->tx_skbuff[entry] = NULL;
867 netif_wake_queue(dev);
868 return 0;
872 yp->tx_skbuff[entry] = skb;
874 #ifdef NO_TXSTATS
875 yp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
876 skb->data, len, PCI_DMA_TODEVICE));
877 yp->tx_ring[entry].result_status = 0;
/* Re-arm the next slot as the new STOP before exposing this one. */
878 if (entry >= TX_RING_SIZE-1) {
879 /* New stop command. */
880 yp->tx_ring[0].dbdma_cmd = cpu_to_le32(CMD_STOP);
881 yp->tx_ring[TX_RING_SIZE-1].dbdma_cmd =
882 cpu_to_le32(CMD_TX_PKT|BRANCH_ALWAYS | len);
883 } else {
884 yp->tx_ring[entry+1].dbdma_cmd = cpu_to_le32(CMD_STOP);
885 yp->tx_ring[entry].dbdma_cmd =
886 cpu_to_le32(CMD_TX_PKT | BRANCH_IFTRUE | len);
888 yp->cur_tx++;
889 #else
/* Full-status mode uses descriptor pairs: data at entry<<1, status next.
   NOTE(review): request_cnt is not a member of yellowfin_desc as declared
   above — this branch never compiles; see yellowfin_init_ring(). */
890 yp->tx_ring[entry<<1].request_cnt = len;
891 yp->tx_ring[entry<<1].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
892 skb->data, len, PCI_DMA_TODEVICE));
893 /* The input_last (status-write) command is constant, but we must
894 rewrite the subsequent 'stop' command. */
896 yp->cur_tx++;
898 unsigned next_entry = yp->cur_tx % TX_RING_SIZE;
899 yp->tx_ring[next_entry<<1].dbdma_cmd = cpu_to_le32(CMD_STOP);
901 /* Final step -- overwrite the old 'stop' command. */
/* Interrupt only every 6th frame to cut interrupt load. */
903 yp->tx_ring[entry<<1].dbdma_cmd =
904 cpu_to_le32( ((entry % 6) == 0 ? CMD_TX_PKT|INTR_ALWAYS|BRANCH_IFTRUE :
905 CMD_TX_PKT | BRANCH_IFTRUE) | len);
906 #endif
908 /* Non-x86 Todo: explicitly flush cache lines here. */
910 /* Wake the potentially-idle transmit channel. */
911 iowrite32(0x10001000, yp->base + TxCtrl);
913 if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
914 netif_start_queue (dev); /* Typical path */
915 else
916 yp->tx_full = 1;
917 dev->trans_start = jiffies;
919 if (yellowfin_debug > 4) {
920 printk(KERN_DEBUG "%s: Yellowfin transmit frame #%d queued in slot %d.\n",
921 dev->name, yp->cur_tx, entry);
923 return 0;
926 /* The interrupt handler does all of the Rx thread work and cleans up
927 after the Tx thread. */
928 static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
/* ISR: drains Rx work, reclaims completed Tx ring slots, and logs error
   events.  Loops until the chip reports no pending interrupt sources or
   the max_interrupt_work budget (boguscnt) is exhausted. */
930 struct net_device *dev = dev_instance;
931 struct yellowfin_private *yp;
932 void __iomem *ioaddr;
933 int boguscnt = max_interrupt_work;
934 unsigned int handled = 0;
936 #ifndef final_version /* Can never occur. */
937 if (dev == NULL) {
938 printk (KERN_ERR "yellowfin_interrupt(): irq %d for unknown device.\n", irq);
939 return IRQ_NONE;
941 #endif
943 yp = netdev_priv(dev);
944 ioaddr = yp->base;
/* Serialize against the transmit path and the timer, which share the
   Tx ring indices manipulated below. */
946 spin_lock (&yp->lock);
948 do {
/* NOTE(review): reading IntrClear appears to acknowledge the latched
   interrupt bits as a side effect of the read — confirm vs. chip docs. */
949 u16 intr_status = ioread16(ioaddr + IntrClear);
951 if (yellowfin_debug > 4)
952 printk(KERN_DEBUG "%s: Yellowfin interrupt, status %4.4x.\n",
953 dev->name, intr_status);
955 if (intr_status == 0)
956 break;
957 handled = 1;
959 if (intr_status & (IntrRxDone | IntrEarlyRx)) {
960 yellowfin_rx(dev);
961 iowrite32(0x10001000, ioaddr + RxCtrl); /* Wake Rx engine. */
/* Tx reclaim: the NO_TXSTATS build polls result_status in the descriptor;
   the default build reads the separate per-slot tx_status blocks. */
964 #ifdef NO_TXSTATS
965 for (; yp->cur_tx - yp->dirty_tx > 0; yp->dirty_tx++) {
966 int entry = yp->dirty_tx % TX_RING_SIZE;
967 struct sk_buff *skb;
969 if (yp->tx_ring[entry].result_status == 0)
970 break;
971 skb = yp->tx_skbuff[entry];
972 yp->stats.tx_packets++;
973 yp->stats.tx_bytes += skb->len;
974 /* Free the original skb. */
975 pci_unmap_single(yp->pci_dev, yp->tx_ring[entry].addr,
976 skb->len, PCI_DMA_TODEVICE);
977 dev_kfree_skb_irq(skb);
978 yp->tx_skbuff[entry] = NULL;
980 if (yp->tx_full
981 && yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4) {
982 /* The ring is no longer full, clear tbusy. */
983 yp->tx_full = 0;
984 netif_wake_queue(dev);
986 #else
987 if ((intr_status & IntrTxDone) || (yp->tx_tail_desc->tx_errs)) {
988 unsigned dirty_tx = yp->dirty_tx;
990 for (dirty_tx = yp->dirty_tx; yp->cur_tx - dirty_tx > 0;
991 dirty_tx++) {
992 /* Todo: optimize this. */
993 int entry = dirty_tx % TX_RING_SIZE;
994 u16 tx_errs = yp->tx_status[entry].tx_errs;
995 struct sk_buff *skb;
997 #ifndef final_version
998 if (yellowfin_debug > 5)
999 printk(KERN_DEBUG "%s: Tx queue %d check, Tx status "
1000 "%4.4x %4.4x %4.4x %4.4x.\n",
1001 dev->name, entry,
1002 yp->tx_status[entry].tx_cnt,
1003 yp->tx_status[entry].tx_errs,
1004 yp->tx_status[entry].total_tx_cnt,
1005 yp->tx_status[entry].paused);
1006 #endif
/* A zero status word means the chip has not completed this slot yet. */
1007 if (tx_errs == 0)
1008 break; /* It still hasn't been Txed */
1009 skb = yp->tx_skbuff[entry];
/* 0xF810 selects the "major error" bits of the status word; the
   individual bits are classified into the standard stats counters. */
1010 if (tx_errs & 0xF810) {
1011 /* There was an major error, log it. */
1012 #ifndef final_version
1013 if (yellowfin_debug > 1)
1014 printk(KERN_DEBUG "%s: Transmit error, Tx status %4.4x.\n",
1015 dev->name, tx_errs);
1016 #endif
1017 yp->stats.tx_errors++;
1018 if (tx_errs & 0xF800) yp->stats.tx_aborted_errors++;
1019 if (tx_errs & 0x0800) yp->stats.tx_carrier_errors++;
1020 if (tx_errs & 0x2000) yp->stats.tx_window_errors++;
1021 if (tx_errs & 0x8000) yp->stats.tx_fifo_errors++;
1022 } else {
1023 #ifndef final_version
1024 if (yellowfin_debug > 4)
1025 printk(KERN_DEBUG "%s: Normal transmit, Tx status %4.4x.\n",
1026 dev->name, tx_errs);
1027 #endif
1028 yp->stats.tx_bytes += skb->len;
1029 yp->stats.collisions += tx_errs & 15;
1030 yp->stats.tx_packets++;
1032 /* Free the original skb. */
1033 pci_unmap_single(yp->pci_dev,
1034 yp->tx_ring[entry<<1].addr, skb->len,
1035 PCI_DMA_TODEVICE);
1036 dev_kfree_skb_irq(skb);
1037 yp->tx_skbuff[entry] = 0;
1038 /* Mark status as empty. */
1039 yp->tx_status[entry].tx_errs = 0;
/* Sanity check: dirty_tx must never trail cur_tx by more than a ring. */
1042 #ifndef final_version
1043 if (yp->cur_tx - dirty_tx > TX_RING_SIZE) {
1044 printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
1045 dev->name, dirty_tx, yp->cur_tx, yp->tx_full);
1046 dirty_tx += TX_RING_SIZE;
1048 #endif
1050 if (yp->tx_full
1051 && yp->cur_tx - dirty_tx < TX_QUEUE_SIZE - 2) {
1052 /* The ring is no longer full, clear tbusy. */
1053 yp->tx_full = 0;
1054 netif_wake_queue(dev);
1057 yp->dirty_tx = dirty_tx;
1058 yp->tx_tail_desc = &yp->tx_status[dirty_tx % TX_RING_SIZE];
1060 #endif
1062 /* Log errors and other uncommon events. */
1063 if (intr_status & 0x2ee) /* Abnormal error summary. */
1064 yellowfin_error(dev, intr_status);
/* Interrupt-storm guard: bail out after max_interrupt_work iterations. */
1066 if (--boguscnt < 0) {
1067 printk(KERN_WARNING "%s: Too much work at interrupt, "
1068 "status=0x%4.4x.\n",
1069 dev->name, intr_status);
1070 break;
1072 } while (1);
1074 if (yellowfin_debug > 3)
1075 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1076 dev->name, ioread16(ioaddr + IntrStatus));
1078 spin_unlock (&yp->lock);
1079 return IRQ_RETVAL(handled);
1082 /* This routine is logically part of the interrupt handler, but separated
1083 for clarity and better register allocation. */
1084 static int yellowfin_rx(struct net_device *dev)
/* Rx handler: walks the ring from cur_rx, classifying each completed
   descriptor as an error or a good frame, passes good frames up the
   stack (copying small ones per rx_copybreak), then refills the ring. */
1086 struct yellowfin_private *yp = netdev_priv(dev);
1087 int entry = yp->cur_rx % RX_RING_SIZE;
/* Budget: at most one full ring's worth of packets per call. */
1088 int boguscnt = yp->dirty_rx + RX_RING_SIZE - yp->cur_rx;
1090 if (yellowfin_debug > 4) {
1091 printk(KERN_DEBUG " In yellowfin_rx(), entry %d status %8.8x.\n",
1092 entry, yp->rx_ring[entry].result_status);
1093 printk(KERN_DEBUG " #%d desc. %8.8x %8.8x %8.8x.\n",
1094 entry, yp->rx_ring[entry].dbdma_cmd, yp->rx_ring[entry].addr,
1095 yp->rx_ring[entry].result_status);
1098 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1099 while (1) {
1100 struct yellowfin_desc *desc = &yp->rx_ring[entry];
1101 struct sk_buff *rx_skb = yp->rx_skbuff[entry];
1102 s16 frame_status;
1103 u16 desc_status;
1104 int data_size;
1105 u8 *buf_addr;
/* result_status == 0 means the chip has not filled this slot yet. */
1107 if(!desc->result_status)
1108 break;
1109 pci_dma_sync_single_for_cpu(yp->pci_dev, desc->addr,
1110 yp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1111 desc_status = le32_to_cpu(desc->result_status) >> 16;
1112 buf_addr = rx_skb->data;
/* Received length = requested count minus residual count, both kept in
   the low 16 bits of the respective descriptor words. */
1113 data_size = (le32_to_cpu(desc->dbdma_cmd) -
1114 le32_to_cpu(desc->result_status)) & 0xffff;
/* The last two bytes of the buffer hold a chip-written frame status
   word (read unaligned since data_size has arbitrary parity). */
1115 frame_status = le16_to_cpu(get_unaligned((s16*)&(buf_addr[data_size - 2])));
1116 if (yellowfin_debug > 4)
1117 printk(KERN_DEBUG " yellowfin_rx() status was %4.4x.\n",
1118 frame_status);
1119 if (--boguscnt < 0)
1120 break;
/* No EOP bit: the frame spilled past this buffer — count it as a
   length error rather than delivering a partial frame. */
1121 if ( ! (desc_status & RX_EOP)) {
1122 if (data_size != 0)
1123 printk(KERN_WARNING "%s: Oversized Ethernet frame spanned multiple buffers,"
1124 " status %4.4x, data_size %d!\n", dev->name, desc_status, data_size);
1125 yp->stats.rx_length_errors++;
1126 } else if ((yp->drv_flags & IsGigabit) && (frame_status & 0x0038)) {
1127 /* There was a error. */
1128 if (yellowfin_debug > 3)
1129 printk(KERN_DEBUG " yellowfin_rx() Rx error was %4.4x.\n",
1130 frame_status);
1131 yp->stats.rx_errors++;
1132 if (frame_status & 0x0060) yp->stats.rx_length_errors++;
1133 if (frame_status & 0x0008) yp->stats.rx_frame_errors++;
1134 if (frame_status & 0x0010) yp->stats.rx_crc_errors++;
1135 if (frame_status < 0) yp->stats.rx_dropped++;
/* Non-gigabit chips report status in the last two data bytes instead. */
1136 } else if ( !(yp->drv_flags & IsGigabit) &&
1137 ((buf_addr[data_size-1] & 0x85) || buf_addr[data_size-2] & 0xC0)) {
1138 u8 status1 = buf_addr[data_size-2];
1139 u8 status2 = buf_addr[data_size-1];
1140 yp->stats.rx_errors++;
1141 if (status1 & 0xC0) yp->stats.rx_length_errors++;
1142 if (status2 & 0x03) yp->stats.rx_frame_errors++;
1143 if (status2 & 0x04) yp->stats.rx_crc_errors++;
1144 if (status2 & 0x80) yp->stats.rx_dropped++;
1145 #ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */
1146 } else if ((yp->flags & HasMACAddrBug) &&
1147 memcmp(le32_to_cpu(yp->rx_ring_dma +
1148 entry*sizeof(struct yellowfin_desc)),
1149 dev->dev_addr, 6) != 0 &&
1150 memcmp(le32_to_cpu(yp->rx_ring_dma +
1151 entry*sizeof(struct yellowfin_desc)),
1152 "\377\377\377\377\377\377", 6) != 0) {
1153 if (bogus_rx++ == 0)
1154 printk(KERN_WARNING "%s: Bad frame to %2.2x:%2.2x:%2.2x:%2.2x:"
1155 "%2.2x:%2.2x.\n",
1156 dev->name, buf_addr[0], buf_addr[1], buf_addr[2],
1157 buf_addr[3], buf_addr[4], buf_addr[5]);
1158 #endif
1159 } else {
1160 struct sk_buff *skb;
/* Strip the chip-appended trailer; its size depends on the chip
   revision — NOTE(review): confirm the 7 vs. 8+pad formula vs. docs. */
1161 int pkt_len = data_size -
1162 (yp->chip_id ? 7 : 8 + buf_addr[data_size - 8]);
1163 /* To verify: Yellowfin Length should omit the CRC! */
1165 #ifndef final_version
1166 if (yellowfin_debug > 4)
1167 printk(KERN_DEBUG " yellowfin_rx() normal Rx pkt length %d"
1168 " of %d, bogus_cnt %d.\n",
1169 pkt_len, data_size, boguscnt);
1170 #endif
1171 /* Check if the packet is long enough to just pass up the skbuff
1172 without copying to a properly sized skbuff. */
1173 if (pkt_len > rx_copybreak) {
/* Large packet: hand the ring skb to the stack and leave the slot
   empty for the refill loop below. */
1174 skb_put(skb = rx_skb, pkt_len);
1175 pci_unmap_single(yp->pci_dev,
1176 yp->rx_ring[entry].addr,
1177 yp->rx_buf_sz,
1178 PCI_DMA_FROMDEVICE);
1179 yp->rx_skbuff[entry] = NULL;
1180 } else {
/* Small packet: copy into a fresh skb and recycle the ring buffer. */
1181 skb = dev_alloc_skb(pkt_len + 2);
1182 if (skb == NULL)
1183 break;
1184 skb->dev = dev;
1185 skb_reserve(skb, 2); /* 16 byte align the IP header */
1186 eth_copy_and_sum(skb, rx_skb->data, pkt_len, 0);
1187 skb_put(skb, pkt_len);
1188 pci_dma_sync_single_for_device(yp->pci_dev, desc->addr,
1189 yp->rx_buf_sz,
1190 PCI_DMA_FROMDEVICE);
1192 skb->protocol = eth_type_trans(skb, dev);
1193 netif_rx(skb);
1194 dev->last_rx = jiffies;
1195 yp->stats.rx_packets++;
1196 yp->stats.rx_bytes += pkt_len;
1198 entry = (++yp->cur_rx) % RX_RING_SIZE;
1201 /* Refill the Rx ring buffers. */
1202 for (; yp->cur_rx - yp->dirty_rx > 0; yp->dirty_rx++) {
1203 entry = yp->dirty_rx % RX_RING_SIZE;
1204 if (yp->rx_skbuff[entry] == NULL) {
1205 struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz);
1206 if (skb == NULL)
1207 break; /* Better luck next round. */
1208 yp->rx_skbuff[entry] = skb;
1209 skb->dev = dev; /* Mark as being used by this device. */
1210 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1211 yp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
1212 skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
1214 yp->rx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_STOP);
1215 yp->rx_ring[entry].result_status = 0; /* Clear complete bit. */
/* Re-arm the previous descriptor so the DBDMA chain keeps flowing;
   entry 0 re-arms the last descriptor (ring wrap with branch). */
1216 if (entry != 0)
1217 yp->rx_ring[entry - 1].dbdma_cmd =
1218 cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
1219 else
1220 yp->rx_ring[RX_RING_SIZE - 1].dbdma_cmd =
1221 cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | BRANCH_ALWAYS
1222 | yp->rx_buf_sz);
1225 return 0;
1228 static void yellowfin_error(struct net_device *dev, int intr_status)
1230 struct yellowfin_private *yp = netdev_priv(dev);
1232 printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
1233 dev->name, intr_status);
1234 /* Hmmmmm, it's not clear what to do here. */
1235 if (intr_status & (IntrTxPCIErr | IntrTxPCIFault))
1236 yp->stats.tx_errors++;
1237 if (intr_status & (IntrRxPCIErr | IntrRxPCIFault))
1238 yp->stats.rx_errors++;
1241 static int yellowfin_close(struct net_device *dev)
/* Bring the interface down: stop the queue, mask interrupts, halt the
   DMA engines, cancel the timer, release the IRQ, and free all ring
   skbuffs.  Returns 0 unconditionally. */
1243 struct yellowfin_private *yp = netdev_priv(dev);
1244 void __iomem *ioaddr = yp->base;
1245 int i;
1247 netif_stop_queue (dev);
1249 if (yellowfin_debug > 1) {
1250 printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %4.4x "
1251 "Rx %4.4x Int %2.2x.\n",
1252 dev->name, ioread16(ioaddr + TxStatus),
1253 ioread16(ioaddr + RxStatus),
1254 ioread16(ioaddr + IntrStatus));
1255 printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
1256 dev->name, yp->cur_tx, yp->dirty_tx, yp->cur_rx, yp->dirty_rx);
1259 /* Disable interrupts by clearing the interrupt mask. */
1260 iowrite16(0x0000, ioaddr + IntrEnb);
1262 /* Stop the chip's Tx and Rx processes. */
1263 iowrite32(0x80000000, ioaddr + RxCtrl);
1264 iowrite32(0x80000000, ioaddr + TxCtrl);
1266 del_timer(&yp->timer);
/* x86-only debug dump of the descriptor rings and status blocks. */
1268 #if defined(__i386__)
1269 if (yellowfin_debug > 2) {
1270 printk("\n"KERN_DEBUG" Tx ring at %8.8llx:\n",
1271 (unsigned long long)yp->tx_ring_dma);
1272 for (i = 0; i < TX_RING_SIZE*2; i++)
1273 printk(" %c #%d desc. %8.8x %8.8x %8.8x %8.8x.\n",
1274 ioread32(ioaddr + TxPtr) == (long)&yp->tx_ring[i] ? '>' : ' ',
1275 i, yp->tx_ring[i].dbdma_cmd, yp->tx_ring[i].addr,
1276 yp->tx_ring[i].branch_addr, yp->tx_ring[i].result_status);
1277 printk(KERN_DEBUG " Tx status %p:\n", yp->tx_status);
1278 for (i = 0; i < TX_RING_SIZE; i++)
1279 printk(" #%d status %4.4x %4.4x %4.4x %4.4x.\n",
1280 i, yp->tx_status[i].tx_cnt, yp->tx_status[i].tx_errs,
1281 yp->tx_status[i].total_tx_cnt, yp->tx_status[i].paused);
1283 printk("\n"KERN_DEBUG " Rx ring %8.8llx:\n",
1284 (unsigned long long)yp->rx_ring_dma);
1285 for (i = 0; i < RX_RING_SIZE; i++) {
1286 printk(KERN_DEBUG " %c #%d desc. %8.8x %8.8x %8.8x\n",
1287 ioread32(ioaddr + RxPtr) == (long)&yp->rx_ring[i] ? '>' : ' ',
1288 i, yp->rx_ring[i].dbdma_cmd, yp->rx_ring[i].addr,
1289 yp->rx_ring[i].result_status);
1290 if (yellowfin_debug > 6) {
1291 if (get_unaligned((u8*)yp->rx_ring[i].addr) != 0x69) {
1292 int j;
1293 for (j = 0; j < 0x50; j++)
1294 printk(" %4.4x",
1295 get_unaligned(((u16*)yp->rx_ring[i].addr) + j));
1296 printk("\n");
1301 #endif /* __i386__ debugging only */
1303 free_irq(dev->irq, dev);
1305 /* Free all the skbuffs in the Rx queue. */
1306 for (i = 0; i < RX_RING_SIZE; i++) {
1307 yp->rx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
1308 yp->rx_ring[i].addr = 0xBADF00D0; /* An invalid address. */
1309 if (yp->rx_skbuff[i]) {
1310 dev_kfree_skb(yp->rx_skbuff[i]);
1312 yp->rx_skbuff[i] = NULL;
/* Release any Tx skbs still pending when the device went down. */
1314 for (i = 0; i < TX_RING_SIZE; i++) {
1315 if (yp->tx_skbuff[i])
1316 dev_kfree_skb(yp->tx_skbuff[i]);
1317 yp->tx_skbuff[i] = NULL;
1320 #ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */
1321 if (yellowfin_debug > 0) {
1322 printk(KERN_DEBUG "%s: Received %d frames that we should not have.\n",
1323 dev->name, bogus_rx);
1325 #endif
1327 return 0;
1330 static struct net_device_stats *yellowfin_get_stats(struct net_device *dev)
1332 struct yellowfin_private *yp = netdev_priv(dev);
1333 return &yp->stats;
1336 /* Set or clear the multicast filter for this adaptor. */
1338 static void set_rx_mode(struct net_device *dev)
/* Program the chip's address filter from dev->flags and the multicast
   list.  The Rx process is paused (Cnfg bit 0x1000) while the AddrMode
   register and hash table are rewritten, then restarted. */
1340 struct yellowfin_private *yp = netdev_priv(dev);
1341 void __iomem *ioaddr = yp->base;
1342 u16 cfg_value = ioread16(ioaddr + Cnfg);
1344 /* Stop the Rx process to change any value. */
1345 iowrite16(cfg_value & ~0x1000, ioaddr + Cnfg);
1346 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1347 /* Unconditionally log net taps. */
1348 printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
1349 iowrite16(0x000F, ioaddr + AddrMode);
1350 } else if ((dev->mc_count > 64) || (dev->flags & IFF_ALLMULTI)) {
1351 /* Too many to filter well, or accept all multicasts. */
1352 iowrite16(0x000B, ioaddr + AddrMode);
1353 } else if (dev->mc_count > 0) { /* Must use the multicast hash table. */
1354 struct dev_mc_list *mclist;
1355 u16 hash_table[4];
1356 int i;
1357 memset(hash_table, 0, sizeof(hash_table));
1358 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1359 i++, mclist = mclist->next) {
1360 unsigned int bit;
1362 /* Due to a bug in the early chip versions, multiple filter
1363 slots must be set for each address. */
1364 if (yp->drv_flags & HasMulticastBug) {
/* Hash CRCs over 3/4/5-byte address prefixes as well, so buggy
   chips that mis-compute the filter still match the address. */
1365 bit = (ether_crc_le(3, mclist->dmi_addr) >> 3) & 0x3f;
1366 hash_table[bit >> 4] |= (1 << bit);
1367 bit = (ether_crc_le(4, mclist->dmi_addr) >> 3) & 0x3f;
1368 hash_table[bit >> 4] |= (1 << bit);
1369 bit = (ether_crc_le(5, mclist->dmi_addr) >> 3) & 0x3f;
1370 hash_table[bit >> 4] |= (1 << bit);
1372 bit = (ether_crc_le(6, mclist->dmi_addr) >> 3) & 0x3f;
1373 hash_table[bit >> 4] |= (1 << bit);
1375 /* Copy the hash table to the chip. */
1376 for (i = 0; i < 4; i++)
1377 iowrite16(hash_table[i], ioaddr + HashTbl + i*2);
1378 iowrite16(0x0003, ioaddr + AddrMode);
1379 } else { /* Normal, unicast/broadcast-only mode. */
1380 iowrite16(0x0001, ioaddr + AddrMode);
1382 /* Restart the Rx process. */
1383 iowrite16(cfg_value | 0x1000, ioaddr + Cnfg);
1386 static void yellowfin_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1388 struct yellowfin_private *np = netdev_priv(dev);
1389 strcpy(info->driver, DRV_NAME);
1390 strcpy(info->version, DRV_VERSION);
1391 strcpy(info->bus_info, pci_name(np->pci_dev));
/* ethtool support: only the drvinfo query is implemented. */
1394 static struct ethtool_ops ethtool_ops = {
1395 .get_drvinfo = yellowfin_get_drvinfo
1398 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
/* MII ioctl handler: read/write PHY registers via mdio_read/mdio_write.
   Writes to our own PHY's control registers also update the driver's
   cached duplex/autonegotiation state. */
1400 struct yellowfin_private *np = netdev_priv(dev);
1401 void __iomem *ioaddr = np->base;
1402 struct mii_ioctl_data *data = if_mii(rq);
1404 switch(cmd) {
1405 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
1406 data->phy_id = np->phys[0] & 0x1f;
1407 /* Fall Through */
1409 case SIOCGMIIREG: /* Read MII PHY register. */
1410 data->val_out = mdio_read(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f);
1411 return 0;
1413 case SIOCSMIIREG: /* Write MII PHY register. */
1414 if (!capable(CAP_NET_ADMIN))
1415 return -EPERM;
/* When writing to our own PHY, mirror the settings in the driver
   so the timer/duplex logic stays consistent with the hardware. */
1416 if (data->phy_id == np->phys[0]) {
1417 u16 value = data->val_in;
1418 switch (data->reg_num) {
1419 case 0:
1420 /* Check for autonegotiation on or reset. */
1421 np->medialock = (value & 0x9000) ? 0 : 1;
1422 if (np->medialock)
1423 np->full_duplex = (value & 0x0100) ? 1 : 0;
1424 break;
1425 case 4: np->advertising = value; break;
1427 /* Perhaps check_duplex(dev), depending on chip semantics. */
1429 mdio_write(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
1430 return 0;
1431 default:
1432 return -EOPNOTSUPP;
1437 static void __devexit yellowfin_remove_one (struct pci_dev *pdev)
1439 struct net_device *dev = pci_get_drvdata(pdev);
1440 struct yellowfin_private *np;
1442 BUG_ON(!dev);
1443 np = netdev_priv(dev);
1445 pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status,
1446 np->tx_status_dma);
1447 pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
1448 pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
1449 unregister_netdev (dev);
1451 pci_iounmap(pdev, np->base);
1453 pci_release_regions (pdev);
1455 free_netdev (dev);
1456 pci_set_drvdata(pdev, NULL);
/* PCI driver glue: ties the device table to the probe/remove callbacks. */
1460 static struct pci_driver yellowfin_driver = {
1461 .name = DRV_NAME,
1462 .id_table = yellowfin_pci_tbl,
1463 .probe = yellowfin_init_one,
1464 .remove = __devexit_p(yellowfin_remove_one),
/* Module entry point: announce the driver (module builds only) and
   register with the PCI subsystem. */
1468 static int __init yellowfin_init (void)
1470 /* when a module, this is printed whether or not devices are found in probe */
1471 #ifdef MODULE
1472 printk(version);
1473 #endif
1474 return pci_module_init (&yellowfin_driver);
/* Module exit point: deregister the PCI driver, which triggers
   yellowfin_remove_one() for each bound device. */
1478 static void __exit yellowfin_cleanup (void)
1480 pci_unregister_driver (&yellowfin_driver);
1484 module_init(yellowfin_init);
1485 module_exit(yellowfin_cleanup);
1488 * Local variables:
1489 * compile-command: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c yellowfin.c"
1490 * compile-command-alphaLX: "gcc -DMODULE -Wall -Wstrict-prototypes -O2 -c yellowfin.c -fomit-frame-pointer -fno-strength-reduce -mno-fp-regs -Wa,-m21164a -DBWX_USABLE -DBWIO_ENABLED"
1491 * simple-compile-command: "gcc -DMODULE -O6 -c yellowfin.c"
1492 * c-indent-level: 4
1493 * c-basic-offset: 4
1494 * tab-width: 4
1495 * End: