/* starfire.c: Linux device driver for the Adaptec Starfire network adapter. */
/*
    Written 1998-2000 by Donald Becker.

    This software may be used and distributed according to the terms of
    the GNU General Public License (GPL), incorporated herein by reference.
    Drivers based on or derived from this code fall under the GPL and must
    retain the authorship, copyright and license notice.  This file is not
    a complete program and may only be used when the entire operating
    system is licensed under the GPL.

    The author may be reached as becker@scyld.com, or C/O
    Scyld Computing Corporation
    410 Severn Ave., Suite 210
    Annapolis MD 21403

    Support and updates available at
    http://www.scyld.com/network/starfire.html

    -----------------------------------------------------------

    Linux kernel-specific changes:

    LK1.1.1 (jgarzik):
    - Use PCI driver interface
    - Fix MOD_xxx races
    - softnet fixups

    LK1.1.2 (jgarzik):
    - Merge Becker version 0.15

    LK1.1.3 (Andrew Morton)
    - Timer cleanups

    LK1.1.4 (jgarzik):
    - Merge Becker version 1.03
*/
/* These identify the driver base version and may not be removed. */
static const char version1[] =
"starfire.c:v1.03 7/26/2000  Written by Donald Becker <becker@scyld.com>\n";
static const char version2[] =
" Updates and info at http://www.scyld.com/network/starfire.html\n";
static const char version3[] =
" (unofficial 2.4.x kernel port, version 1.1.4, August 10, 2000)\n";
/* The user-configurable values.
   These may be modified when a driver module is loaded. */

/* Used for tuning interrupt latency vs. overhead. */
static int interrupt_mitigation = 0x0;

static int debug = 1;           /* 1 normal messages, 0 quiet .. 7 verbose. */
static int max_interrupt_work = 20;
static int mtu = 0;
/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   The Starfire has a 512 element hash table based on the Ethernet CRC. */
static int multicast_filter_limit = 32;
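/* For illustration (a sketch of how the limit interacts with the hash):
   when the hash filter is used, set_rx_mode() below indexes the 512-bit
   table with the top 9 bits of the little-endian CRC of each address,
   roughly:

        bin = ether_crc_le(ETH_ALEN, addr) >> 23;      (a value in 0..511)
 */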
/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
static int rx_copybreak = 0;
/* Used to pass the media type, etc.
   Both 'options[]' and 'full_duplex[]' exist for driver interoperability.
   The media type is usually passed in 'options[]'. */
#define MAX_UNITS 8             /* More are supported, limit only on options */
static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
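/* A hedged usage sketch (the values are hypothetical, not recommendations):
   the per-card arrays above accept comma-separated module arguments, e.g.

        insmod starfire.o debug=2 options=0x200,0 full_duplex=1,0

   which would configure the first two cards independently and leave the
   remaining entries at their -1 (autoselect) defaults. */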
/* Operational parameters that are set at compile time. */

/* The "native" ring sizes are either 256 or 2048.
   However in some modes a descriptor may be marked to wrap the ring earlier.
   The driver allocates a single page for each descriptor ring, constraining
   the maximum size in an architecture-dependent way. */
#define RX_RING_SIZE    256
#define TX_RING_SIZE    32
/* The completion queues are fixed at 1024 entries, i.e. 4KB or 8KB. */
#define DONE_Q_SIZE     1024
/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT      (2*HZ)

#define PKT_BUF_SZ      1536    /* Size of each temporary Rx buffer. */

#if !defined(__OPTIMIZE__)
#warning  You must compile this file with the correct options!
#warning  See the last lines of the source file.
#error You must compile this driver with "-O".
#endif
/* Include files, designed to support most kernel versions 2.0.0 and later. */
#include <linux/version.h>
#include <linux/module.h>
#if LINUX_VERSION_CODE < 0x20300  &&  defined(MODVERSIONS)
#include <linux/modversions.h>
#endif

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/malloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <asm/processor.h>      /* Processor type for cache alignment. */
#include <asm/bitops.h>
#include <asm/io.h>
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Adaptec Starfire Ethernet driver");
MODULE_PARM(max_interrupt_work, "i");
MODULE_PARM(mtu, "i");
MODULE_PARM(debug, "i");
MODULE_PARM(rx_copybreak, "i");
MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
/*
                                Theory of Operation

I. Board Compatibility

This driver is for the Adaptec 6915 "Starfire" 64 bit PCI Ethernet adapter.

II. Board-specific settings

III. Driver operation

IIIa. Ring buffers

The Starfire hardware uses multiple fixed-size descriptor queues/rings.  The
ring sizes are set fixed by the hardware, but may optionally be wrapped
earlier by the END bit in the descriptor.
This driver uses that hardware queue size for the Rx ring, where a large
number of entries has no ill effect beyond increasing the potential backlog.
The Tx ring is wrapped with the END bit, since a large hardware Tx queue
disables the queue layer priority ordering and we have no mechanism to
utilize the hardware two-level priority queue.  When modifying the
RX/TX_RING_SIZE pay close attention to page sizes and the ring-empty warning
levels.
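
A minimal sketch of the early-wrap mechanism (illustrative only; start_tx()
below is the authoritative version): the last in-use Tx descriptor is tagged
so the hardware wraps back to the ring base instead of running through the
full native ring:

        if (entry == TX_RING_SIZE - 1)
                np->tx_ring[entry].status |= cpu_to_le32(TxRingWrap);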

IIIb/c. Transmit/Receive Structure

See the Adaptec manual for the many possible structures, and options for
each structure.  There are far too many to document here.

For transmit this driver uses type 1 transmit descriptors, and relies on
automatic minimum-length padding.  It does not use the completion queue
consumer index, but instead checks for non-zero status entries.

For receive this driver uses type 0 receive descriptors.  The driver
allocates full frame size skbuffs for the Rx ring buffers, so all frames
should fit in a single descriptor.  The driver does not use the completion
queue consumer index, but instead checks for non-zero status entries.

When an incoming frame is less than RX_COPYBREAK bytes long, a fresh skbuff
is allocated and the frame is copied to the new skbuff.  When the incoming
frame is larger, the skbuff is passed directly up the protocol stack.
Buffers consumed this way are replaced by newly allocated skbuffs in a later
phase of receive.
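
A sketch of the copy-break decision, assuming the shape of the receive loop
in netdev_rx() below (small frames pay a cheap copy so the large ring buffer
stays in place; large frames hand the ring skbuff up and a replacement is
allocated in the refill pass):

        if (pkt_len < rx_copybreak
            && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
                ... copy the frame into the fresh skbuff ...
        } else {
                ... pass the original full-size skbuff up the stack ...
        }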

A notable aspect of operation is that unaligned buffers are not permitted by
the Starfire hardware.  The IP header at offset 14 in an ethernet frame thus
isn't longword aligned, which may cause problems on some machines,
e.g. Alphas.  Copied frames are put into the skbuff at an offset of "+2",
16-byte aligning the IP header.
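
For the copied path the fix is two lines (matching netdev_rx() below):

        skb = dev_alloc_skb(pkt_len + 2);
        skb_reserve(skb, 2);

reserving 2 bytes puts the 14-byte Ethernet header at offset 2, so the IP
header that follows lands on a 16-byte boundary.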

IIId. Synchronization

The driver runs as two independent, single-threaded flows of control.  One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag.  The other thread is the interrupt handler, which is single
threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring and 'dev->tbusy'
flag.  It sets the tbusy flag whenever it's queuing a Tx packet.  If the next
queue slot is empty, it clears the tbusy flag when finished, otherwise it sets
the 'lp->tx_full' flag.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark.  Iff the 'lp->tx_full' flag is set, it
clears both the tx_full and tbusy flags.
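
In the softnet port below the tbusy role is played by netif_stop_queue() and
netif_wake_queue().  A sketch of the handshake (not the literal driver code):

        start_tx:
                queue the skb at slot cur_tx % TX_RING_SIZE, then cur_tx++;
                if (cur_tx - dirty_tx >= TX_RING_SIZE - 1)
                        set tx_full and call netif_stop_queue();

        intr_handler:
                reap completed entries, advancing dirty_tx;
                if (tx_full && cur_tx - dirty_tx < TX_RING_SIZE - 4)
                        clear tx_full and call netif_wake_queue();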

IV. Notes

IVb. References

The Adaptec Starfire manuals, available only from Adaptec.
http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html

IVc. Errata

*/
enum chip_capability_flags {CanHaveMII=1, };
#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR0)
#define MEM_ADDR_SZ 0x80000     /* And maps in 0.5MB(!). */

#if 0
#define ADDR_64BITS 1           /* This chip uses 64 bit addresses. */
#endif

#define HAS_IP_COPYSUM 1
enum chipset {
    CH_6915 = 0,
};

static struct pci_device_id starfire_pci_tbl[] __devinitdata = {
    { 0x9004, 0x6915, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_6915 },
    { 0, }
};
MODULE_DEVICE_TABLE(pci, starfire_pci_tbl);
/* A chip capabilities table, matching the CH_xxx entries in xxx_pci_tbl[] above. */
static struct chip_info {
    const char *name;
    int io_size;
    int drv_flags;
} netdrv_tbl[] __devinitdata = {
    { "Adaptec Starfire 6915", MEM_ADDR_SZ, CanHaveMII },
};
/* Offsets to the device registers.
   Unlike software-only systems, device drivers interact with complex hardware.
   It's not useful to define symbolic names for every register bit in the
   device.  Such names would only partially document the semantics and make
   the driver longer and more difficult to read.
   In general, only the important configuration values or bits changed
   multiple times should be defined symbolically. */
enum register_offsets {
    PCIDeviceConfig=0x50040, GenCtrl=0x50070, IntrTimerCtrl=0x50074,
    IntrClear=0x50080, IntrStatus=0x50084, IntrEnable=0x50088,
    MIICtrl=0x52000, StationAddr=0x50120, EEPROMCtrl=0x51000,
    TxDescCtrl=0x50090,
    TxRingPtr=0x50098, HiPriTxRingPtr=0x50094,  /* Low and High priority. */
    TxRingHiAddr=0x5009C,                       /* 64 bit address extension. */
    TxProducerIdx=0x500A0, TxConsumerIdx=0x500A4,
    TxThreshold=0x500B0,
    CompletionHiAddr=0x500B4, TxCompletionAddr=0x500B8,
    RxCompletionAddr=0x500BC, RxCompletionQ2Addr=0x500C0,
    CompletionQConsumerIdx=0x500C4,
    RxDescQCtrl=0x500D4, RxDescQHiAddr=0x500DC, RxDescQAddr=0x500E0,
    RxDescQIdx=0x500E8, RxDMAStatus=0x500F0, RxFilterMode=0x500F4,
    TxMode=0x55000,
};

/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
    IntrNormalSummary=0x8000, IntrAbnormalSummary=0x02000000,
    IntrRxDone=0x0300, IntrRxEmpty=0x10040, IntrRxPCIErr=0x80000,
    IntrTxDone=0x4000, IntrTxEmpty=0x1000, IntrTxPCIErr=0x80000,
    StatsMax=0x08000000, LinkChange=0xf0000000,
    IntrTxDataLow=0x00040000,
};

/* Bits in the RxFilterMode register. */
enum rx_mode_bits {
    AcceptBroadcast=0x04, AcceptAllMulticast=0x02, AcceptAll=0x01,
    AcceptMulticast=0x10, AcceptMyPhys=0xE040,
};
/* The Rx and Tx buffer descriptors. */
struct starfire_rx_desc {
    u32 rxaddr;                 /* Optionally 64 bits. */
};
enum rx_desc_bits {
    RxDescValid=1, RxDescEndRing=2,
};

/* Completion queue entry.
   You must update the page allocation, init_ring and the shift count in rx()
   if using a larger format. */
struct rx_done_desc {
    u32 status;                 /* Low 16 bits is length. */
#ifdef full_rx_status
    u32 status2;
    u16 vlanid;
    u16 csum;                   /* partial checksum */
    u32 timestamp;
#endif
};
enum rx_done_bits {
    RxOK=0x20000000, RxFIFOErr=0x10000000, RxBufQ2=0x08000000,
};
/* Type 1 Tx descriptor. */
struct starfire_tx_desc {
    u32 status;                 /* Upper bits are status, lower 16 length. */
    u32 addr;
};
enum tx_desc_bits {
    TxDescID=0xB1010000,        /* Also marks single fragment, add CRC. */
    TxDescIntr=0x08000000, TxRingWrap=0x04000000,
};
struct tx_done_report {
    u32 status;                 /* timestamp, index. */
#if 0
    u32 intrstatus;             /* interrupt status */
#endif
};

#define PRIV_ALIGN  15          /* Required alignment mask */
struct ring_info {
    struct sk_buff *skb;
    dma_addr_t mapping;
};
struct netdev_private {
    /* Descriptor rings first for alignment. */
    struct starfire_rx_desc *rx_ring;
    struct starfire_tx_desc *tx_ring;
    dma_addr_t rx_ring_dma;
    dma_addr_t tx_ring_dma;
    /* The addresses of rx/tx-in-place skbuffs. */
    struct ring_info rx_info[RX_RING_SIZE];
    struct ring_info tx_info[TX_RING_SIZE];
    /* Pointers to completion queues (full pages).  I should cache line pad.. */
    u8 pad0[100];
    struct rx_done_desc *rx_done_q;
    dma_addr_t rx_done_q_dma;
    unsigned int rx_done;
    struct tx_done_report *tx_done_q;
    unsigned int tx_done;
    dma_addr_t tx_done_q_dma;
    struct net_device_stats stats;
    struct timer_list timer;    /* Media monitoring timer. */
    struct pci_dev *pci_dev;
    /* Frequently used values: keep some adjacent for cache effect. */
    unsigned int cur_rx, dirty_rx;  /* Producer/consumer ring indices */
    unsigned int cur_tx, dirty_tx;
    unsigned int rx_buf_sz;     /* Based on MTU+slack. */
    unsigned int tx_full:1;     /* The Tx queue is full. */
    /* These values keep track of the transceiver/media in use. */
    unsigned int full_duplex:1, /* Full-duplex operation requested. */
        medialock:1,            /* Xcvr set to fixed speed/duplex. */
        rx_flowctrl:1,
        tx_flowctrl:1;          /* Use 802.3x flow control. */
    unsigned int default_port:4;    /* Last dev->if_port value. */
    u32 tx_mode;
    u8 tx_threshold;
    /* MII transceiver section. */
    int mii_cnt;                /* MII device addresses. */
    u16 advertising;            /* NWay media advertisement */
    unsigned char phys[2];      /* MII device addresses. */
};
static int  mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int  netdev_open(struct net_device *dev);
static void check_duplex(struct net_device *dev, int startup);
static void netdev_timer(unsigned long data);
static void tx_timeout(struct net_device *dev);
static void init_ring(struct net_device *dev);
static int  start_tx(struct sk_buff *skb, struct net_device *dev);
static void intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
static void netdev_error(struct net_device *dev, int intr_status);
static int  netdev_rx(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int  mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int  netdev_close(struct net_device *dev);
static int __devinit starfire_init_one (struct pci_dev *pdev,
                                        const struct pci_device_id *ent)
{
    struct netdev_private *np;
    int i, irq, option, chip_idx = ent->driver_data;
    struct net_device *dev;
    static int card_idx = -1;
    static int printed_version = 0;
    long ioaddr;
    int drv_flags, io_size = netdrv_tbl[chip_idx].io_size;

    card_idx++;
    option = card_idx < MAX_UNITS ? options[card_idx] : 0;

    if (!printed_version++)
        printk(KERN_INFO "%s" KERN_INFO "%s" KERN_INFO "%s",
               version1, version2, version3);

    ioaddr = pci_resource_start (pdev, 0);
    if (!ioaddr || ((pci_resource_flags (pdev, 0) & IORESOURCE_MEM) == 0)) {
        printk (KERN_ERR "starfire %d: no PCI MEM resources, aborting\n", card_idx);
        return -ENODEV;
    }

    dev = init_etherdev(NULL, sizeof(*np));
    if (!dev) {
        printk (KERN_ERR "starfire %d: cannot alloc etherdev, aborting\n", card_idx);
        return -ENOMEM;
    }

    irq = pdev->irq;

    if (request_mem_region (ioaddr, io_size, dev->name) == NULL) {
        printk (KERN_ERR "starfire %d: resource 0x%x @ 0x%lx busy, aborting\n",
                card_idx, io_size, ioaddr);
        goto err_out_free_netdev;
    }

    if (pci_enable_device (pdev))
        goto err_out_free_res;

    ioaddr = (long) ioremap (ioaddr, io_size);
    if (!ioaddr) {
        printk (KERN_ERR "starfire %d: cannot remap 0x%x @ 0x%lx, aborting\n",
                card_idx, io_size, ioaddr);
        goto err_out_free_res;
    }

    pci_set_master (pdev);

    printk(KERN_INFO "%s: %s at 0x%lx, ",
           dev->name, netdrv_tbl[chip_idx].name, ioaddr);

    /* Serial EEPROM reads are hidden by the hardware. */
    for (i = 0; i < 6; i++)
        dev->dev_addr[i] = readb(ioaddr + EEPROMCtrl + 20-i);
    for (i = 0; i < 5; i++)
        printk("%2.2x:", dev->dev_addr[i]);
    printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);

#if ! defined(final_version) /* Dump the EEPROM contents during development. */
    if (debug > 4)
        for (i = 0; i < 0x20; i++)
            printk("%2.2x%s", (unsigned int)readb(ioaddr + EEPROMCtrl + i),
                   i % 16 != 15 ? " " : "\n");
#endif

    /* Reset the chip to erase previous misconfiguration. */
    writel(1, ioaddr + PCIDeviceConfig);

    dev->base_addr = ioaddr;
    dev->irq = irq;

    np = dev->priv;
    pdev->driver_data = dev;

    np->pci_dev = pdev;
    drv_flags = netdrv_tbl[chip_idx].drv_flags;

    if (dev->mem_start)
        option = dev->mem_start;

    /* The lower four bits are the media type. */
    if (option > 0) {
        if (option & 0x200)
            np->full_duplex = 1;
        np->default_port = option & 15;
        if (np->default_port)
            np->medialock = 1;
    }
    if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
        np->full_duplex = 1;

    if (np->full_duplex)
        np->medialock = 1;

    /* The chip-specific entries in the device structure. */
    dev->open = &netdev_open;
    dev->hard_start_xmit = &start_tx;
    dev->tx_timeout = &tx_timeout;
    dev->watchdog_timeo = TX_TIMEOUT;
    dev->stop = &netdev_close;
    dev->get_stats = &get_stats;
    dev->set_multicast_list = &set_rx_mode;
    dev->do_ioctl = &mii_ioctl;

    if (mtu)
        dev->mtu = mtu;

    if (drv_flags & CanHaveMII) {
        int phy, phy_idx = 0;
        for (phy = 0; phy < 32 && phy_idx < 4; phy++) {
            int mii_status = mdio_read(dev, phy, 1);
            if (mii_status != 0xffff && mii_status != 0x0000) {
                np->phys[phy_idx++] = phy;
                np->advertising = mdio_read(dev, phy, 4);
                printk(KERN_INFO "%s: MII PHY found at address %d, status "
                       "0x%4.4x advertising %4.4x.\n",
                       dev->name, phy, mii_status, np->advertising);
            }
        }
        np->mii_cnt = phy_idx;
    }

    return 0;

err_out_free_res:
    /* 'ioaddr' may have been overwritten by ioremap() above, so release
       the physical region originally reported by the PCI core. */
    release_mem_region (pci_resource_start (pdev, 0), io_size);
err_out_free_netdev:
    unregister_netdev (dev);
    kfree (dev);
    return -ENODEV;
}
/* Read the MII Management Data I/O (MDIO) interfaces. */

static int mdio_read(struct net_device *dev, int phy_id, int location)
{
    long mdio_addr = dev->base_addr + MIICtrl + (phy_id<<7) + (location<<2);
    int result, boguscnt = 1000;
    /* ??? Should we add a busy-wait here? */
    do
        result = readl(mdio_addr);
    while ((result & 0xC0000000) != 0x80000000 && --boguscnt >= 0);
    return result & 0xffff;
}

static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
    long mdio_addr = dev->base_addr + MIICtrl + (phy_id<<7) + (location<<2);
    writel(value, mdio_addr);
    /* The busy-wait will occur before a read. */
    return;
}
static int netdev_open(struct net_device *dev)
{
    struct netdev_private *np = (struct netdev_private *)dev->priv;
    long ioaddr = dev->base_addr;
    int i, retval;

    /* Do we ever need to reset the chip??? */

    MOD_INC_USE_COUNT;

    retval = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
    if (retval) {
        MOD_DEC_USE_COUNT;
        return retval;
    }

    /* Disable the Rx and Tx, and reset the chip. */
    writel(0, ioaddr + GenCtrl);
    writel(1, ioaddr + PCIDeviceConfig);
    if (debug > 1)
        printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
               dev->name, dev->irq);
    /* Allocate the various queues, failing gracefully. */
    if (np->tx_done_q == 0)
        np->tx_done_q = pci_alloc_consistent(np->pci_dev, PAGE_SIZE, &np->tx_done_q_dma);
    if (np->rx_done_q == 0)
        np->rx_done_q = pci_alloc_consistent(np->pci_dev, PAGE_SIZE, &np->rx_done_q_dma);
    if (np->tx_ring == 0)
        np->tx_ring = pci_alloc_consistent(np->pci_dev, PAGE_SIZE, &np->tx_ring_dma);
    if (np->rx_ring == 0)
        np->rx_ring = pci_alloc_consistent(np->pci_dev, PAGE_SIZE, &np->rx_ring_dma);
    if (np->tx_done_q == 0 || np->rx_done_q == 0
        || np->rx_ring == 0 || np->tx_ring == 0) {
        if (np->tx_done_q)
            pci_free_consistent(np->pci_dev, PAGE_SIZE,
                                np->tx_done_q, np->tx_done_q_dma);
        if (np->rx_done_q)
            pci_free_consistent(np->pci_dev, PAGE_SIZE,
                                np->rx_done_q, np->rx_done_q_dma);
        if (np->tx_ring)
            pci_free_consistent(np->pci_dev, PAGE_SIZE,
                                np->tx_ring, np->tx_ring_dma);
        if (np->rx_ring)
            pci_free_consistent(np->pci_dev, PAGE_SIZE,
                                np->rx_ring, np->rx_ring_dma);
        MOD_DEC_USE_COUNT;
        return -ENOMEM;
    }

    init_ring(dev);
    /* Set the size of the Rx buffers. */
    writel((np->rx_buf_sz << 16) | 0xA000, ioaddr + RxDescQCtrl);

    /* Set Tx descriptor to type 1 and padding to 0 bytes. */
    writel(0x02000401, ioaddr + TxDescCtrl);

#if defined(ADDR_64BITS) && defined(__alpha__)
    /* XXX We really need a 64-bit PCI dma interfaces too... -DaveM */
    writel(np->rx_ring_dma >> 32, ioaddr + RxDescQHiAddr);
    writel(np->tx_ring_dma >> 32, ioaddr + TxRingHiAddr);
#else
    writel(0, ioaddr + RxDescQHiAddr);
    writel(0, ioaddr + TxRingHiAddr);
    writel(0, ioaddr + CompletionHiAddr);
#endif
    writel(np->rx_ring_dma, ioaddr + RxDescQAddr);
    writel(np->tx_ring_dma, ioaddr + TxRingPtr);

    writel(np->tx_done_q_dma, ioaddr + TxCompletionAddr);
    writel(np->rx_done_q_dma, ioaddr + RxCompletionAddr);

    if (debug > 1)
        printk(KERN_DEBUG "%s: Filling in the station address.\n", dev->name);

    /* Fill both the unused Tx SA register and the Rx perfect filter. */
    for (i = 0; i < 6; i++)
        writeb(dev->dev_addr[i], ioaddr + StationAddr + 5-i);
    for (i = 0; i < 16; i++) {
        u16 *eaddrs = (u16 *)dev->dev_addr;
        long setup_frm = ioaddr + 0x56000 + i*16;
        writew(cpu_to_be16(eaddrs[2]), setup_frm); setup_frm += 4;
        writew(cpu_to_be16(eaddrs[1]), setup_frm); setup_frm += 4;
        writew(cpu_to_be16(eaddrs[0]), setup_frm); setup_frm += 8;
    }

    /* Initialize other registers. */
    /* Configure the PCI bus bursts and FIFO thresholds. */
    np->tx_mode = 0;            /* Initialized when TxMode set. */
    np->tx_threshold = 4;
    writel(np->tx_threshold, ioaddr + TxThreshold);
    writel(interrupt_mitigation, ioaddr + IntrTimerCtrl);

    if (dev->if_port == 0)
        dev->if_port = np->default_port;

    netif_start_queue(dev);

    if (debug > 1)
        printk(KERN_DEBUG "%s: Setting the Rx and Tx modes.\n", dev->name);
    set_rx_mode(dev);

    np->advertising = mdio_read(dev, np->phys[0], 4);
    check_duplex(dev, 1);

    /* Set the interrupt mask and enable PCI interrupts. */
    writel(IntrRxDone | IntrRxEmpty | IntrRxPCIErr |
           IntrTxDone | IntrTxEmpty | IntrTxPCIErr |
           StatsMax | LinkChange | IntrNormalSummary | IntrAbnormalSummary
           | 0x0010, ioaddr + IntrEnable);
    writel(0x00800000 | readl(ioaddr + PCIDeviceConfig),
           ioaddr + PCIDeviceConfig);

    /* Enable the Rx and Tx units. */
    writel(0x000F, ioaddr + GenCtrl);

    if (debug > 2)
        printk(KERN_DEBUG "%s: Done netdev_open().\n",
               dev->name);

    /* Set the timer to check for link beat. */
    init_timer(&np->timer);
    np->timer.expires = jiffies + 3*HZ;
    np->timer.data = (unsigned long)dev;
    np->timer.function = &netdev_timer;     /* timer handler */
    add_timer(&np->timer);

    return 0;
}
static void check_duplex(struct net_device *dev, int startup)
{
    struct netdev_private *np = (struct netdev_private *)dev->priv;
    long ioaddr = dev->base_addr;
    int new_tx_mode;

    new_tx_mode = 0x0C04 | (np->tx_flowctrl ? 0x0800 : 0)
        | (np->rx_flowctrl ? 0x0400 : 0);
    if (np->medialock) {
        if (np->full_duplex)
            new_tx_mode |= 2;
    } else {
        int mii_reg5 = mdio_read(dev, np->phys[0], 5);
        int negotiated = mii_reg5 & np->advertising;
        int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
        if (duplex)
            new_tx_mode |= 2;
        if (np->full_duplex != duplex) {
            np->full_duplex = duplex;
            if (debug > 1)
                printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d"
                       " negotiated capability %4.4x.\n", dev->name,
                       duplex ? "full" : "half", np->phys[0], negotiated);
        }
    }
    if (new_tx_mode != np->tx_mode) {
        np->tx_mode = new_tx_mode;
        writel(np->tx_mode | 0x8000, ioaddr + TxMode);
        writel(np->tx_mode, ioaddr + TxMode);
    }
}
static void netdev_timer(unsigned long data)
{
    struct net_device *dev = (struct net_device *)data;
    struct netdev_private *np = (struct netdev_private *)dev->priv;
    long ioaddr = dev->base_addr;
    int next_tick = 60*HZ;      /* Check before driver release. */

    if (debug > 3) {
        printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x.\n",
               dev->name, (int)readl(ioaddr + IntrStatus));
    }
    check_duplex(dev, 0);
#if ! defined(final_version)
    /* This is often falsely triggered. */
    if (readl(ioaddr + IntrStatus) & 1) {
        int new_status = readl(ioaddr + IntrStatus);
        /* Bogus hardware IRQ: Fake an interrupt handler call. */
        if (new_status & 1) {
            printk(KERN_ERR "%s: Interrupt blocked, status %8.8x/%8.8x.\n",
                   dev->name, new_status, (int)readl(ioaddr + IntrStatus));
            intr_handler(dev->irq, dev, 0);
        }
    }
#endif

    np->timer.expires = jiffies + next_tick;
    add_timer(&np->timer);
}
static void tx_timeout(struct net_device *dev)
{
    struct netdev_private *np = (struct netdev_private *)dev->priv;
    long ioaddr = dev->base_addr;

    printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
           " resetting...\n", dev->name, (int)readl(ioaddr + IntrStatus));

#ifndef __alpha__
    {
        int i;
        printk(KERN_DEBUG "  Rx ring %p: ", np->rx_ring);
        for (i = 0; i < RX_RING_SIZE; i++)
            printk(" %8.8x", (unsigned int)le32_to_cpu(np->rx_ring[i].rxaddr));
        printk("\n"KERN_DEBUG"  Tx ring %p: ", np->tx_ring);
        for (i = 0; i < TX_RING_SIZE; i++)
            printk(" %4.4x", le32_to_cpu(np->tx_ring[i].status));
        printk("\n");
    }
#endif

    /* Perhaps we should reinitialize the hardware here. */
    dev->if_port = 0;
    /* Stop and restart the chip's Tx processes . */

    /* Trigger an immediate transmit demand. */

    dev->trans_start = jiffies;
    np->stats.tx_errors++;
    return;
}
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
    struct netdev_private *np = (struct netdev_private *)dev->priv;
    int i;

    np->tx_full = 0;
    np->cur_rx = np->cur_tx = 0;
    np->dirty_rx = np->rx_done = np->dirty_tx = np->tx_done = 0;

    np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

    /* Fill in the Rx buffers.  Handle allocation failure gracefully. */
    for (i = 0; i < RX_RING_SIZE; i++) {
        struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
        np->rx_info[i].skb = skb;
        if (skb == NULL)
            break;
        np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->tail, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
        skb->dev = dev;         /* Mark as being used by this device. */
        /* Grrr, we cannot offset to correctly align the IP header. */
        np->rx_ring[i].rxaddr = cpu_to_le32(np->rx_info[i].mapping | RxDescValid);
    }
    writew(i - 1, dev->base_addr + RxDescQIdx);
    np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

    /* Clear the remainder of the Rx buffer ring. */
    for ( ; i < RX_RING_SIZE; i++) {
        np->rx_ring[i].rxaddr = 0;
        np->rx_info[i].skb = NULL;
        np->rx_info[i].mapping = 0;
    }
    /* Mark the last entry as wrapping the ring. */
    np->rx_ring[i-1].rxaddr |= cpu_to_le32(RxDescEndRing);

    /* Clear the completion rings. */
    for (i = 0; i < DONE_Q_SIZE; i++) {
        np->rx_done_q[i].status = 0;
        np->tx_done_q[i].status = 0;
    }

    for (i = 0; i < TX_RING_SIZE; i++) {
        np->tx_info[i].skb = NULL;
        np->tx_info[i].mapping = 0;
        np->tx_ring[i].status = 0;
    }
    return;
}
static int start_tx(struct sk_buff *skb, struct net_device *dev)
{
    struct netdev_private *np = (struct netdev_private *)dev->priv;
    unsigned entry;

    /* Caution: the write order is important here, set the field
       with the "ownership" bits last. */

    /* Calculate the next Tx descriptor entry. */
    entry = np->cur_tx % TX_RING_SIZE;

    np->tx_info[entry].skb = skb;
    np->tx_info[entry].mapping =
        pci_map_single(np->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);

    np->tx_ring[entry].addr = cpu_to_le32(np->tx_info[entry].mapping);
    /* Add "| TxDescIntr" to generate Tx-done interrupts. */
    np->tx_ring[entry].status = cpu_to_le32(skb->len | TxDescID);
    if (debug > 5) {
        printk(KERN_DEBUG "%s: Tx #%d slot %d  %8.8x %8.8x.\n",
               dev->name, np->cur_tx, entry,
               le32_to_cpu(np->tx_ring[entry].status),
               le32_to_cpu(np->tx_ring[entry].addr));
    }
    np->cur_tx++;
#if 1
    if (entry >= TX_RING_SIZE-1) {      /* Wrap ring */
        np->tx_ring[entry].status |= cpu_to_le32(TxRingWrap | TxDescIntr);
        entry = -1;
    }
#endif

    /* Non-x86: explicitly flush descriptor cache lines here. */

    /* Update the producer index. */
    writel(++entry, dev->base_addr + TxProducerIdx);

    if (np->cur_tx - np->dirty_tx >= TX_RING_SIZE - 1) {
        np->tx_full = 1;
        netif_stop_queue(dev);
    }
    dev->trans_start = jiffies;

    if (debug > 4) {
        printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
               dev->name, np->cur_tx, entry);
    }
    return 0;
}
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
{
    struct net_device *dev = (struct net_device *)dev_instance;
    struct netdev_private *np;
    long ioaddr;
    int boguscnt = max_interrupt_work;

#ifndef final_version           /* Can never occur. */
    if (dev == NULL) {
        printk (KERN_ERR "Netdev interrupt handler(): IRQ %d for unknown "
                "device.\n", irq);
        return;
    }
#endif

    ioaddr = dev->base_addr;
    np = (struct netdev_private *)dev->priv;

    do {
        u32 intr_status = readl(ioaddr + IntrClear);

        if (debug > 4)
            printk(KERN_DEBUG "%s: Interrupt status %4.4x.\n",
                   dev->name, intr_status);

        if (intr_status == 0)
            break;

        if (intr_status & IntrRxDone)
            netdev_rx(dev);

        /* Scavenge the skbuff list based on the Tx-done queue.
           There are redundant checks here that may be cleaned up
           after the driver has proven to be reliable. */
        {
            int consumer = readl(ioaddr + TxConsumerIdx);
            int tx_status;
            if (debug > 4)
                printk(KERN_DEBUG "%s: Tx Consumer index is %d.\n",
                       dev->name, consumer);
#if 0
            if (np->tx_done >= 250 || np->tx_done == 0)
                printk(KERN_DEBUG "%s: Tx completion entry %d is %8.8x, "
                       "%d is %8.8x.\n", dev->name,
                       np->tx_done, le32_to_cpu(np->tx_done_q[np->tx_done].status),
                       (np->tx_done+1) & (DONE_Q_SIZE-1),
                       le32_to_cpu(np->tx_done_q[(np->tx_done+1)&(DONE_Q_SIZE-1)].status));
#endif
            while ((tx_status = le32_to_cpu(np->tx_done_q[np->tx_done].status))
                   != 0) {
                if (debug > 4)
                    printk(KERN_DEBUG "%s: Tx completion entry %d is %8.8x.\n",
                           dev->name, np->tx_done, tx_status);
                if ((tx_status & 0xe0000000) == 0xa0000000) {
                    np->stats.tx_packets++;
                } else if ((tx_status & 0xe0000000) == 0x80000000) {
                    struct sk_buff *skb;
                    u16 entry = tx_status;      /* Implicit truncate */
                    entry >>= 3;

                    skb = np->tx_info[entry].skb;
                    pci_unmap_single(np->pci_dev,
                                     np->tx_info[entry].mapping,
                                     skb->len, PCI_DMA_TODEVICE);

                    /* Scavenge the descriptor. */
                    dev_kfree_skb_irq(skb);
                    np->tx_info[entry].skb = NULL;
                    np->tx_info[entry].mapping = 0;
                    np->dirty_tx++;
                }
                np->tx_done_q[np->tx_done].status = 0;
                np->tx_done = (np->tx_done+1) & (DONE_Q_SIZE-1);
            }
            writew(np->tx_done, ioaddr + CompletionQConsumerIdx + 2);
        }
        if (np->tx_full && np->cur_tx - np->dirty_tx < TX_RING_SIZE - 4) {
            /* The ring is no longer full, wake the queue. */
            np->tx_full = 0;
            netif_wake_queue(dev);
        }

        /* Abnormal error summary/uncommon events handlers. */
        if (intr_status & IntrAbnormalSummary)
            netdev_error(dev, intr_status);

        if (--boguscnt < 0) {
            printk(KERN_WARNING "%s: Too much work at interrupt, "
                   "status=0x%4.4x.\n",
                   dev->name, intr_status);
            break;
        }
    } while (1);

    if (debug > 4)
        printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
               dev->name, (int)readl(ioaddr + IntrStatus));

#ifndef final_version
    /* Code that should never be run!  Remove after testing.. */
    {
        static int stopit = 10;
        if (!netif_running(dev) && --stopit < 0) {
            printk(KERN_ERR "%s: Emergency stop, looping startup interrupt.\n",
                   dev->name);
            free_irq(irq, dev);
        }
    }
#endif
}
/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation. */
static int netdev_rx(struct net_device *dev)
{
    struct netdev_private *np = (struct netdev_private *)dev->priv;
    int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
    u32 desc_status;

    if (np->rx_done_q == 0) {
        printk(KERN_ERR "%s:  rx_done_q is NULL!  rx_done is %d. %p.\n",
               dev->name, np->rx_done, np->tx_done_q);
        return 0;
    }

    /* If EOP is set on the next entry, it's a new packet. Send it up. */
    while ((desc_status = le32_to_cpu(np->rx_done_q[np->rx_done].status)) != 0) {
        if (debug > 4)
            printk(KERN_DEBUG "  netdev_rx() status of %d was %8.8x.\n",
                   np->rx_done, desc_status);
        if (--boguscnt < 0)
            break;
        if ( ! (desc_status & RxOK)) {
            /* There was an error. */
            if (debug > 2)
                printk(KERN_DEBUG "  netdev_rx() Rx error was %8.8x.\n",
                       desc_status);
            np->stats.rx_errors++;
            if (desc_status & RxFIFOErr)
                np->stats.rx_fifo_errors++;
        } else {
            struct sk_buff *skb;
            u16 pkt_len = desc_status;      /* Implicitly Truncate */
            int entry = (desc_status >> 16) & 0x7ff;

#ifndef final_version
            if (debug > 4)
                printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
                       ", bogus_cnt %d.\n",
                       pkt_len, boguscnt);
#endif
            /* Check if the packet is long enough to accept without copying
               to a minimally-sized skbuff. */
            if (pkt_len < rx_copybreak
                && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
                skb->dev = dev;
                skb_reserve(skb, 2);    /* 16 byte align the IP header */
                pci_dma_sync_single(np->pci_dev,
                                    np->rx_info[entry].mapping,
                                    pkt_len, PCI_DMA_FROMDEVICE);
#if HAS_IP_COPYSUM              /* Call copy + cksum if available. */
                eth_copy_and_sum(skb, np->rx_info[entry].skb->tail, pkt_len, 0);
                skb_put(skb, pkt_len);
#else
                memcpy(skb_put(skb, pkt_len), np->rx_info[entry].skb->tail,
                       pkt_len);
#endif
            } else {
                char *temp;

                pci_unmap_single(np->pci_dev, np->rx_info[entry].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
                skb = np->rx_info[entry].skb;
                temp = skb_put(skb, pkt_len);
                np->rx_info[entry].skb = NULL;
                np->rx_info[entry].mapping = 0;
#ifndef final_version           /* Remove after testing. */
                if (le32_to_cpu(np->rx_ring[entry].rxaddr & ~3) != ((unsigned long) temp))
                    printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
                           "do not match in netdev_rx: %d vs. %p / %p.\n",
                           dev->name,
                           le32_to_cpu(np->rx_ring[entry].rxaddr),
                           skb->head, temp);
#endif
            }
#ifndef final_version           /* Remove after testing. */
            /* You will want this info for the initial debug. */
            if (debug > 5)
                printk(KERN_DEBUG "  Rx data %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:"
                       "%2.2x %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x %2.2x%2.2x "
                       "%d.%d.%d.%d.\n",
                       skb->data[0], skb->data[1], skb->data[2], skb->data[3],
                       skb->data[4], skb->data[5], skb->data[6], skb->data[7],
                       skb->data[8], skb->data[9], skb->data[10],
                       skb->data[11], skb->data[12], skb->data[13],
                       skb->data[14], skb->data[15], skb->data[16],
                       skb->data[17]);
#endif
            skb->protocol = eth_type_trans(skb, dev);
#ifdef full_rx_status
            if (le32_to_cpu(np->rx_done_q[np->rx_done].status2) & 0x01000000)
                skb->ip_summed = CHECKSUM_UNNECESSARY;
#endif
            netif_rx(skb);
            dev->last_rx = jiffies;
            np->stats.rx_packets++;
        }
        np->cur_rx++;
        np->rx_done_q[np->rx_done].status = 0;
        np->rx_done = (np->rx_done + 1) & (DONE_Q_SIZE-1);
    }
    writew(np->rx_done, dev->base_addr + CompletionQConsumerIdx);

    /* Refill the Rx ring buffers. */
    for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
        struct sk_buff *skb;
        int entry = np->dirty_rx % RX_RING_SIZE;
        if (np->rx_info[entry].skb == NULL) {
            skb = dev_alloc_skb(np->rx_buf_sz);
            np->rx_info[entry].skb = skb;
            if (skb == NULL)
                break;          /* Better luck next round. */
            np->rx_info[entry].mapping =
                pci_map_single(np->pci_dev, skb->tail, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
            skb->dev = dev;     /* Mark as being used by this device. */
            np->rx_ring[entry].rxaddr =
                cpu_to_le32(np->rx_info[entry].mapping | RxDescValid);
        }
        if (entry == RX_RING_SIZE - 1)
            np->rx_ring[entry].rxaddr |= cpu_to_le32(RxDescEndRing);
        /* We could defer this until later... */
        writew(entry, dev->base_addr + RxDescQIdx);
    }

    if (debug > 5
        || memcmp(np->pad0, np->pad0 + 1, sizeof(np->pad0) - 1))
        printk(KERN_DEBUG "  exiting netdev_rx() status of %d was %8.8x %d.\n",
               np->rx_done, desc_status,
               memcmp(np->pad0, np->pad0 + 1, sizeof(np->pad0) - 1));

    /* Restart Rx engine if stopped. */
    return 0;
}
static void netdev_error(struct net_device *dev, int intr_status)
{
    struct netdev_private *np = (struct netdev_private *)dev->priv;

    if (intr_status & LinkChange) {
        printk(KERN_NOTICE "%s: Link changed: Autonegotiation advertising"
               " %4.4x  partner %4.4x.\n", dev->name,
               mdio_read(dev, np->phys[0], 4),
               mdio_read(dev, np->phys[0], 5));
        check_duplex(dev, 0);
    }
    if (intr_status & StatsMax) {
        get_stats(dev);
    }
    /* Came close to underrunning the Tx FIFO, increase threshold. */
    if (intr_status & IntrTxDataLow)
        writel(++np->tx_threshold, dev->base_addr + TxThreshold);
    if ((intr_status &
         ~(IntrAbnormalSummary|LinkChange|StatsMax|IntrTxDataLow|1)) && debug)
        printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
               dev->name, intr_status);
    /* Hmmmmm, it's not clear how to recover from PCI faults. */
    if (intr_status & IntrTxPCIErr)
        np->stats.tx_fifo_errors++;
    if (intr_status & IntrRxPCIErr)
        np->stats.rx_fifo_errors++;
}
static struct net_device_stats *get_stats(struct net_device *dev)
{
    long ioaddr = dev->base_addr;
    struct netdev_private *np = (struct netdev_private *)dev->priv;

    /* This adapter architecture needs no SMP locks. */
    np->stats.tx_bytes = readl(ioaddr + 0x57010);
    np->stats.rx_bytes = readl(ioaddr + 0x57044);
    np->stats.tx_packets = readl(ioaddr + 0x57000);
    np->stats.tx_aborted_errors =
        readl(ioaddr + 0x57024) + readl(ioaddr + 0x57028);
    np->stats.tx_window_errors = readl(ioaddr + 0x57018);
    np->stats.collisions = readl(ioaddr + 0x57004) + readl(ioaddr + 0x57008);

    /* The chip only needs to report frames it silently dropped. */
    np->stats.rx_dropped += readw(ioaddr + RxDMAStatus);
    writew(0, ioaddr + RxDMAStatus);
    np->stats.rx_crc_errors = readl(ioaddr + 0x5703C);
    np->stats.rx_frame_errors = readl(ioaddr + 0x57040);
    np->stats.rx_length_errors = readl(ioaddr + 0x57058);
    np->stats.rx_missed_errors = readl(ioaddr + 0x5707C);

    return &np->stats;
}
/* The little-endian AUTODIN II ethernet CRC calculations.
   A big-endian version is also available.
   This is slow but compact code.  Do not use this routine for bulk data,
   use a table-based routine instead.
   This is common code and should be moved to net/core/crc.c.
   Chips may use the upper or lower CRC bits, and may reverse and/or invert
   them.  Select the endian-ness that results in minimal calculations.
*/
static unsigned const ethernet_polynomial_le = 0xedb88320U;
static inline unsigned ether_crc_le(int length, unsigned char *data)
{
    unsigned int crc = 0xffffffff;  /* Initial value. */
    while (--length >= 0) {
        unsigned char current_octet = *data++;
        int bit;
        for (bit = 8; --bit >= 0; current_octet >>= 1) {
            if ((crc ^ current_octet) & 1) {
                crc >>= 1;
                crc ^= ethernet_polynomial_le;
            } else
                crc >>= 1;
        }
    }
    return crc;
}
static void set_rx_mode(struct net_device *dev)
{
    long ioaddr = dev->base_addr;
    u32 rx_mode;
    struct dev_mc_list *mclist;
    int i;

    if (dev->flags & IFF_PROMISC) {     /* Set promiscuous. */
        /* Unconditionally log net taps. */
        printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
        rx_mode = AcceptBroadcast|AcceptAllMulticast|AcceptAll|AcceptMyPhys;
    } else if ((dev->mc_count > multicast_filter_limit)
               || (dev->flags & IFF_ALLMULTI)) {
        /* Too many to match, or accept all multicasts. */
        rx_mode = AcceptBroadcast|AcceptAllMulticast|AcceptMyPhys;
    } else if (dev->mc_count <= 15) {
        /* Use the 16 element perfect filter. */
        long filter_addr = ioaddr + 0x56000 + 1*16;
        for (i = 1, mclist = dev->mc_list; mclist && i <= dev->mc_count;
             i++, mclist = mclist->next) {
            u16 *eaddrs = (u16 *)mclist->dmi_addr;
            writew(cpu_to_be16(eaddrs[2]), filter_addr); filter_addr += 4;
            writew(cpu_to_be16(eaddrs[1]), filter_addr); filter_addr += 4;
            writew(cpu_to_be16(eaddrs[0]), filter_addr); filter_addr += 8;
        }
        while (i++ < 16) {
            writew(0xffff, filter_addr); filter_addr += 4;
            writew(0xffff, filter_addr); filter_addr += 4;
            writew(0xffff, filter_addr); filter_addr += 8;
        }
        rx_mode = AcceptBroadcast | AcceptMyPhys;
    } else {
        /* Must use a multicast hash table. */
        long filter_addr;
        u16 mc_filter[32] __attribute__ ((aligned(sizeof(long))));  /* Multicast hash filter */

        memset(mc_filter, 0, sizeof(mc_filter));
        for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
             i++, mclist = mclist->next) {
            set_bit(ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 23, mc_filter);
        }
        /* Clear the perfect filter list. */
        filter_addr = ioaddr + 0x56000 + 1*16;
        for (i = 1; i < 16; i++) {
            writew(0xffff, filter_addr); filter_addr += 4;
            writew(0xffff, filter_addr); filter_addr += 4;
            writew(0xffff, filter_addr); filter_addr += 8;
        }
        for (filter_addr = ioaddr + 0x56100, i = 0; i < 32; filter_addr += 16, i++)
            writew(mc_filter[i], filter_addr);
        rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
    }
    writel(rx_mode, ioaddr + RxFilterMode);
}
static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
    struct netdev_private *np = (struct netdev_private *)dev->priv;
    u16 *data = (u16 *)&rq->ifr_data;

    switch(cmd) {
    case SIOCDEVPRIVATE:        /* Get the address of the PHY in use. */
        data[0] = np->phys[0] & 0x1f;
        /* Fall Through */
    case SIOCDEVPRIVATE+1:      /* Read the specified MII register. */
        data[3] = mdio_read(dev, data[0] & 0x1f, data[1] & 0x1f);
        return 0;
    case SIOCDEVPRIVATE+2:      /* Write the specified MII register */
        if (!capable(CAP_NET_ADMIN))
            return -EPERM;
        if (data[0] == np->phys[0]) {
            u16 value = data[2];
            switch (data[1]) {
            case 0:
                if (value & 0x9000)     /* Autonegotiation. */
                    np->medialock = 0;
                else {
                    np->full_duplex = (value & 0x0100) ? 1 : 0;
                    np->medialock = 1;
                }
                break;
            case 4: np->advertising = value; break;
            }
            check_duplex(dev, 0);
        }
        mdio_write(dev, data[0] & 0x1f, data[1] & 0x1f, data[2]);
        return 0;
    default:
        return -EOPNOTSUPP;
    }
}
static int netdev_close(struct net_device *dev)
{
    long ioaddr = dev->base_addr;
    struct netdev_private *np = (struct netdev_private *)dev->priv;
    int i;

    netif_stop_queue(dev);

    del_timer_sync(&np->timer);

    if (debug > 1) {
        printk(KERN_DEBUG "%s: Shutting down ethercard, Intr status %4.4x.\n",
               dev->name, (int)readl(ioaddr + IntrStatus));
        printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
               dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
    }

    /* Disable interrupts by clearing the interrupt mask. */
    writel(0, ioaddr + IntrEnable);

    /* Stop the chip's Tx and Rx processes. */

#ifdef __i386__
    if (debug > 2) {
        printk("\n"KERN_DEBUG"  Tx ring at %8.8x:\n",
               np->tx_ring_dma);
        for (i = 0; i < 8 /* TX_RING_SIZE is huge! */; i++)
            printk(KERN_DEBUG " #%d desc. %8.8x %8.8x -> %8.8x.\n",
                   i, le32_to_cpu(np->tx_ring[i].status),
                   le32_to_cpu(np->tx_ring[i].addr),
                   le32_to_cpu(np->tx_done_q[i].status));
        printk(KERN_DEBUG "  Rx ring at %8.8x -> %p:\n",
               np->rx_ring_dma, np->rx_done_q);
        if (np->rx_done_q)
            for (i = 0; i < 8 /* RX_RING_SIZE */; i++) {
                printk(KERN_DEBUG " #%d desc. %8.8x -> %8.8x\n",
                       i, le32_to_cpu(np->rx_ring[i].rxaddr), le32_to_cpu(np->rx_done_q[i].status));
            }
    }
#endif /* __i386__ debugging only */

    free_irq(dev->irq, dev);

    /* Free all the skbuffs in the Rx queue. */
    for (i = 0; i < RX_RING_SIZE; i++) {
        np->rx_ring[i].rxaddr = cpu_to_le32(0xBADF00D0);    /* An invalid address. */
        if (np->rx_info[i].skb != NULL) {
            pci_unmap_single(np->pci_dev, np->rx_info[i].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
            dev_kfree_skb(np->rx_info[i].skb);
        }
        np->rx_info[i].skb = NULL;
        np->rx_info[i].mapping = 0;
    }
    for (i = 0; i < TX_RING_SIZE; i++) {
        struct sk_buff *skb = np->tx_info[i].skb;
        if (skb != NULL) {
            pci_unmap_single(np->pci_dev,
                             np->tx_info[i].mapping,
                             skb->len, PCI_DMA_TODEVICE);
            dev_kfree_skb(skb);
        }
        np->tx_info[i].skb = NULL;
        np->tx_info[i].mapping = 0;
    }

    MOD_DEC_USE_COUNT;

    return 0;
}
static void __devexit starfire_remove_one (struct pci_dev *pdev)
{
    struct net_device *dev = pdev->driver_data;
    struct netdev_private *np;

    if (!dev)
        BUG();

    np = dev->priv;

    unregister_netdev(dev);
    iounmap((char *)dev->base_addr);

    if (np->tx_done_q)
        pci_free_consistent(np->pci_dev, PAGE_SIZE,
                            np->tx_done_q, np->tx_done_q_dma);
    if (np->rx_done_q)
        pci_free_consistent(np->pci_dev, PAGE_SIZE,
                            np->rx_done_q, np->rx_done_q_dma);
    if (np->tx_ring)
        pci_free_consistent(np->pci_dev, PAGE_SIZE,
                            np->tx_ring, np->tx_ring_dma);
    if (np->rx_ring)
        pci_free_consistent(np->pci_dev, PAGE_SIZE,
                            np->rx_ring, np->rx_ring_dma);

    kfree(dev);
}
static struct pci_driver starfire_driver = {
    name:       "starfire",
    probe:      starfire_init_one,
    remove:     starfire_remove_one,
    id_table:   starfire_pci_tbl,
};

static int __init starfire_init (void)
{
    return pci_module_init (&starfire_driver);
}

static void __exit starfire_cleanup (void)
{
    pci_unregister_driver (&starfire_driver);
}

module_init(starfire_init);
module_exit(starfire_cleanup);
/*
 * Local variables:
 *  compile-command: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c starfire.c"
 *  simple-compile-command: "gcc -DMODULE -O6 -c starfire.c"
 *  c-indent-level: 4
 *  c-basic-offset: 4
 *  tab-width: 4
 * End:
 */