/* starfire.c: Linux device driver for the Adaptec Starfire network adapter. */
/*
    Written 1998-1999 by Donald Becker.

    This software may be used and distributed according to the terms
    of the GNU Public License (GPL), incorporated herein by reference.

    The author may be reached as becker@usra.edu, or
    Donald Becker
    312 Severn Ave. #W302
    Annapolis MD 21403

    Support and updates available at
    http://cesdis.gsfc.nasa.gov/linux/drivers/starfire.html
*/

static const char *versionA =
"starfire.c:v0.12 5/28/99  Written by Donald Becker\n",
*versionB = " Updates and info at http://www.beowulf.org/linux/drivers.html\n";

/* A few user-configurable values.  These may be modified when a driver
   module is loaded. */

/* Used for tuning interrupt latency vs. overhead. */
static int interrupt_mitigation = 0x0;

static int debug = 1;           /* 1 normal messages, 0 quiet .. 7 verbose. */
static int max_interrupt_work = 20;
static int min_pci_latency = 64;
static int mtu = 0;
/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   The Starfire has a 512 element hash table based on the Ethernet CRC. */
static int multicast_filter_limit = 32;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
static int rx_copybreak = 0;

/* Used to pass the media type, etc.
   Both 'options[]' and 'full_duplex[]' exist for driver interoperability.
   The media type is usually passed in 'options[]'. */
#define MAX_UNITS 8             /* More are supported, limit only on options */
static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};

/* Operational parameters that are set at compile time. */

/* The "native" ring sizes are either 256 or 2048.
   However in some modes a descriptor may be marked to wrap the ring earlier.
   The driver allocates a single page for each descriptor ring, constraining
   the maximum size in an architecture-dependent way. */
#define RX_RING_SIZE    256
#define TX_RING_SIZE    32
/* The completion queues are fixed at 1024 entries i.e. 4K or 8KB. */
#define DONE_Q_SIZE     1024

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (2*HZ)

#define PKT_BUF_SZ      1536    /* Size of each temporary Rx buffer. */

#if !defined(__OPTIMIZE__)  ||  !defined(__KERNEL__)
#warning  You must compile this file with the correct options!
#warning  See the last lines of the source file.
#error You must compile this driver with "-O".
#endif

/* Include files, designed to support most kernel versions 2.0.0 and later. */
#include <linux/config.h>
#ifdef MODULE
#ifdef MODVERSIONS
#include <linux/modversions.h>
#endif
#include <linux/module.h>
#else
#define MOD_INC_USE_COUNT
#define MOD_DEC_USE_COUNT
#endif

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/malloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <asm/processor.h>      /* Processor type for cache alignment. */
#include <asm/bitops.h>
#include <asm/io.h>

/* Kernel compatibility defines, some common to David Hind's PCMCIA package.
   This is only in the support-all-kernels source code. */

#define RUN_AT(x) (jiffies + (x))

#ifdef MODULE
MODULE_AUTHOR("Donald Becker <becker@cesdis.gsfc.nasa.gov>");
MODULE_DESCRIPTION("Adaptec Starfire Ethernet driver");
MODULE_PARM(max_interrupt_work, "i");
MODULE_PARM(min_pci_latency, "i");
MODULE_PARM(mtu, "i");
MODULE_PARM(debug, "i");
MODULE_PARM(rx_copybreak, "i");
MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
#endif

/*
                Theory of Operation

I. Board Compatibility

State the chips and boards this driver is known to work with.
Note any similar chips or boards that will not work.

This driver skeleton demonstrates the driver for an idealized
descriptor-based bus-master PCI chip.

II. Board-specific settings

No jumpers exist on most PCI boards, so this section is usually empty.

III. Driver operation

IIIa. Ring buffers

The Starfire hardware uses multiple fixed-size descriptor queues/rings.  The
ring sizes are set fixed by the hardware, but may optionally be wrapped
earlier by the END bit in the descriptor.
This driver uses that hardware queue size for the Rx ring, where a large
number of entries has no ill effect beyond increasing the potential backlog.
The Tx ring is wrapped with the END bit, since a large hardware Tx queue
disables the queue layer priority ordering and we have no mechanism to
utilize the hardware two-level priority queue.  When modifying the
RX/TX_RING_SIZE pay close attention to page sizes and the ring-empty warning
levels.
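
As an illustration, this wrap is what start_tx() below does on the last
usable slot; a minimal sketch with the names this driver uses:

    entry = np->cur_tx % TX_RING_SIZE;
    np->tx_ring[entry].status = cpu_to_le32(skb->len | TxDescID);
    if (entry >= TX_RING_SIZE-1)        (last slot: set END, force an IRQ)
        np->tx_ring[entry].status |= cpu_to_le32(TxRingWrap | TxDescIntr);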

IIIb/c. Transmit/Receive Structure

See the Adaptec manual for the many possible structures, and options for
each structure.  There are far too many to document here.

For transmit this driver uses type 1 transmit descriptors, and relies on
automatic minimum-length padding.  It does not use the completion queue
consumer index, but instead checks for non-zero status entries.

For receive this driver uses type 0 receive descriptors.  The driver
allocates full frame size skbuffs for the Rx ring buffers, so all frames
should fit in a single descriptor.  The driver does not use the completion
queue consumer index, but instead checks for non-zero status entries.

When an incoming frame is less than RX_COPYBREAK bytes long, a fresh skbuff
is allocated and the frame is copied to the new skbuff.  When the incoming
frame is larger, the skbuff is passed directly up the protocol stack.
Buffers consumed this way are replaced by newly allocated skbuffs in a later
phase of receive.
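
A condensed sketch of that test as it appears in netdev_rx() below
(rx_copybreak is the module parameter defined near the top of this file):

    if (pkt_len < rx_copybreak
        && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
        ... copy the small frame into the fresh skbuff ...
    } else {
        ... pass np->rx_skbuff[entry] up the stack, refill the slot later ...
    }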

A notable aspect of operation is that unaligned buffers are not permitted by
the Starfire hardware.  The IP header at offset 14 in an ethernet frame thus
isn't longword aligned, which may cause problems on some machines,
e.g. Alphas.  Copied frames are put into the skbuff at an offset of "+2",
16-byte aligning the IP header.
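
For example, the copy path in netdev_rx() below reserves two bytes before
copying, so the 14-byte Ethernet header ends (and the IP header starts) on
a 16-byte boundary:

    skb = dev_alloc_skb(pkt_len + 2);
    skb_reserve(skb, 2);                (16 byte align the IP header)
    eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);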

IIId. Synchronization

The driver runs as two independent, single-threaded flows of control.  One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag.  The other thread is the interrupt handler, which is single
threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring and 'dev->tbusy'
flag.  It sets the tbusy flag whenever it's queuing a Tx packet.  If the next
queue slot is empty, it clears the tbusy flag when finished otherwise it sets
the 'lp->tx_full' flag.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark.  Iff the 'lp->tx_full' flag is set, it
clears both the tx_full and tbusy flags.
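
Condensed from start_tx() and intr_handler() below, the handshake between
the two threads looks roughly like this:

    sender:   test_and_set_bit(0, &dev->tbusy);
              queue the packet; if the ring is nearly full, np->tx_full = 1;
              if (!np->tx_full) clear_bit(0, &dev->tbusy);
    receiver: reap Tx completions, np->dirty_tx++;
              if (np->tx_full && the ring has drained)
                  np->tx_full = 0; clear_bit(0, &dev->tbusy);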

IV. Notes

IVb. References

The Adaptec Starfire manuals.
http://cesdis.gsfc.nasa.gov/linux/misc/100mbps.html
http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html

IVc. Errata

*/

/* This table drives the PCI probe routines.  It's mostly boilerplate in all
   PCI drivers, and will likely be provided by some future kernel. */
enum pci_flags_bit {
    PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
    PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3,
};
struct pci_id_info {
    const char *name;
    u16 vendor_id, device_id, device_id_mask, flags;
    int io_size;
    struct net_device *(*probe1)(int pci_bus, int pci_devfn, struct net_device *dev,
                                 long ioaddr, int irq, int chip_idx, int fnd_cnt);
};

static struct net_device *starfire_probe1(int pci_bus, int pci_devfn,
                                          struct net_device *dev, long ioaddr,
                                          int irq, int chp_idx, int fnd_cnt);

#if 0
#define ADDR_64BITS 1           /* This chip uses 64 bit addresses. */
#endif
#define MEM_ADDR_SZ 0x80000     /* And maps in 0.5MB(!). */

static struct pci_id_info pci_tbl[] = {
    { "Adaptec Starfire 6915",
      0x9004, 0x6915, 0xffff, PCI_USES_MASTER, 128, starfire_probe1},
    {0,},                       /* 0 terminated list. */
};

/* A chip capabilities table, matching the entries in pci_tbl[] above. */
enum chip_capability_flags {CanHaveMII=1, };
struct chip_info {
    char *chip_name;
    int io_size;
    int flags;
    void (*media_timer)(unsigned long data);
} static skel_netdrv_tbl[] = {
    {"Adaptec Starfire 6915", 128, CanHaveMII, 0, },
};

/* Offsets to the device registers.
   Unlike software-only systems, device drivers interact with complex hardware.
   It's not useful to define symbolic names for every register bit in the
   device.  The name can only partially document the semantics and make
   the driver longer and more difficult to read.
   In general, only the important configuration values or bits changed
   multiple times should be defined symbolically. */
enum register_offsets {
    PCIDeviceConfig=0x50040, GenCtrl=0x50070, IntrTimerCtrl=0x50074,
    IntrClear=0x50080, IntrStatus=0x50084, IntrEnable=0x50088,
    MIICtrl=0x52000, StationAddr=0x50120, EEPROMCtrl=0x51000,
    TxDescCtrl=0x50090,
    TxRingPtr=0x50098, HiPriTxRingPtr=0x50094,  /* Low and High priority. */
    TxRingHiAddr=0x5009C,       /* 64 bit address extension. */
    TxProducerIdx=0x500A0, TxConsumerIdx=0x500A4,
    TxThreshold=0x500B0,
    CompletionHiAddr=0x500B4, TxCompletionAddr=0x500B8,
    RxCompletionAddr=0x500BC, RxCompletionQ2Addr=0x500C0,
    CompletionQConsumerIdx=0x500C4,
    RxDescQCtrl=0x500D4, RxDescQHiAddr=0x500DC, RxDescQAddr=0x500E0,
    RxDescQIdx=0x500E8, RxDMAStatus=0x500F0, RxFilterMode=0x500F4,
    TxMode=0x55000,
};

/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
    IntrNormalSummary=0x8000, IntrAbnormalSummary=0x02000000,
    IntrRxDone=0x0300, IntrRxEmpty=0x10040, IntrRxPCIErr=0x80000,
    IntrTxDone=0x4000, IntrTxEmpty=0x1000, IntrTxPCIErr=0x80000,
    StatsMax=0x08000000, LinkChange=0xf0000000,
    IntrTxDataLow=0x00040000,
};

/* Bits in the RxFilterMode register. */
enum rx_mode_bits {
    AcceptBroadcast=0x04, AcceptAllMulticast=0x02, AcceptAll=0x01,
    AcceptMulticast=0x10, AcceptMyPhys=0xE040,
};

/* The Rx and Tx buffer descriptors. */
struct starfire_rx_desc {
    u32 rxaddr;                 /* Optionally 64 bits. */
};
enum rx_desc_bits {
    RxDescValid=1, RxDescEndRing=2,
};

/* Completion queue entry.
   You must update the page allocation, init_ring and the shift count in rx()
   if using a larger format. */
struct rx_done_desc {
    u32 status;                 /* Low 16 bits is length. */
#ifdef full_rx_status
    u32 status2;
    u16 vlanid;
    u16 csum;                   /* partial checksum */
    u32 timestamp;
#endif
};
enum rx_done_bits {
    RxOK=0x20000000, RxFIFOErr=0x10000000, RxBufQ2=0x08000000,
};
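
/* A note on the basic status word as decoded by netdev_rx() below (inferred
   from that code, not from the Adaptec manual): the low 16 bits carry the
   frame length and bits 16-26 carry the Rx ring entry index:

    u16 pkt_len = desc_status;              (implicit truncate to 16 bits)
    int entry = (desc_status >> 16) & 0x7ff;
*/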

/* Type 1 Tx descriptor. */
struct starfire_tx_desc {
    u32 status;                 /* Upper bits are status, lower 16 length. */
    u32 addr;
};
enum tx_desc_bits {
    TxDescID=0xB1010000,        /* Also marks single fragment, add CRC. */
    TxDescIntr=0x08000000, TxRingWrap=0x04000000,
};
struct tx_done_report {
    u32 status;                 /* timestamp, index. */
#if 0
    u32 intrstatus;             /* interrupt status */
#endif
};

struct netdev_private {
    /* Descriptor rings first for alignment. */
    struct starfire_rx_desc *rx_ring;
    struct starfire_tx_desc *tx_ring;
    struct net_device *next_module;     /* Link for devices of this type. */
    const char *product_name;
    /* The addresses of rx/tx-in-place skbuffs. */
    struct sk_buff* rx_skbuff[RX_RING_SIZE];
    struct sk_buff* tx_skbuff[TX_RING_SIZE];
    /* Pointers to completion queues (full pages).  I should cache line pad.. */
    u8 pad0[100];
    struct rx_done_desc *rx_done_q;
    unsigned int rx_done;
    struct tx_done_report *tx_done_q;
    unsigned int tx_done;
    struct net_device_stats stats;
    struct timer_list timer;    /* Media monitoring timer. */
    /* Frequently used values: keep some adjacent for cache effect. */
    int chip_id;
    unsigned char pci_bus, pci_devfn;
    unsigned int cur_rx, dirty_rx;      /* Producer/consumer ring indices */
    unsigned int cur_tx, dirty_tx;
    unsigned int rx_buf_sz;             /* Based on MTU+slack. */
    unsigned int tx_full:1;             /* The Tx queue is full. */
    /* These values keep track of the transceiver/media in use. */
    unsigned int duplex_lock:1;
    unsigned int full_duplex:1,         /* Full-duplex operation requested. */
        rx_flowctrl:1,
        tx_flowctrl:1;                  /* Use 802.3x flow control. */
    unsigned int medialock:1;           /* Do not sense media. */
    unsigned int default_port:4;        /* Last dev->if_port value. */
    u32 tx_mode;
    u8 tx_threshold;
    /* MII transceiver section. */
    int mii_cnt;                        /* MII device addresses. */
    u16 advertising;                    /* NWay media advertisement */
    unsigned char phys[2];              /* MII device addresses. */
    u32 pad[4];                         /* Used for 32-byte alignment */
};

static int  mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int  netdev_open(struct net_device *dev);
static void check_duplex(struct net_device *dev, int startup);
static void netdev_timer(unsigned long data);
static void tx_timeout(struct net_device *dev);
static void init_ring(struct net_device *dev);
static int  start_tx(struct sk_buff *skb, struct net_device *dev);
static void intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
static void netdev_error(struct net_device *dev, int intr_status);
static int  netdev_rx(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int netdev_close(struct net_device *dev);

/* A list of our installed devices, for removing the driver module. */
static struct net_device *root_net_dev = NULL;

/* Ideally we would detect all network cards in slot order.  That would
   be best done with a central PCI probe dispatch, which wouldn't work
   well when dynamically adding drivers.  So instead we detect just the
   cards we know about in slot order. */

static int pci_etherdev_probe(struct net_device *dev, struct pci_id_info pci_tbl[])
{
    int cards_found = 0;
    int pci_index = 0;
    unsigned char pci_bus, pci_device_fn;

    if ( ! pcibios_present())
        return -ENODEV;

    for (;pci_index < 0xff; pci_index++) {
        u16 vendor, device, pci_command, new_command;
        int chip_idx, irq;
        long pciaddr;
        long ioaddr;

        if (pcibios_find_class (PCI_CLASS_NETWORK_ETHERNET << 8, pci_index,
                                &pci_bus, &pci_device_fn)
            != PCIBIOS_SUCCESSFUL)
            break;
        pcibios_read_config_word(pci_bus, pci_device_fn,
                                 PCI_VENDOR_ID, &vendor);
        pcibios_read_config_word(pci_bus, pci_device_fn,
                                 PCI_DEVICE_ID, &device);

        for (chip_idx = 0; pci_tbl[chip_idx].vendor_id; chip_idx++)
            if (vendor == pci_tbl[chip_idx].vendor_id
                && (device & pci_tbl[chip_idx].device_id_mask) ==
                pci_tbl[chip_idx].device_id)
                break;
        if (pci_tbl[chip_idx].vendor_id == 0)       /* Compiled out! */
            continue;

        {
            struct pci_dev *pdev = pci_find_slot(pci_bus, pci_device_fn);

            pciaddr = pdev->resource[0].start;
#if defined(ADDR_64BITS) && defined(__alpha__)
            pciaddr |= ((long)pdev->base_address[1]) << 32;
#endif
            irq = pdev->irq;
        }

        if (debug > 2)
            printk(KERN_INFO "Found %s at PCI address %#lx, IRQ %d.\n",
                   pci_tbl[chip_idx].name, pciaddr, irq);

        if ((pci_tbl[chip_idx].flags & PCI_USES_IO)) {
            if (check_region(pciaddr, pci_tbl[chip_idx].io_size))
                continue;
            ioaddr = pciaddr;
        } else if ((ioaddr = (long)ioremap(pciaddr&~0xf, MEM_ADDR_SZ)) == 0) {
            printk(KERN_INFO "Failed to map PCI address %#lx.\n",
                   pciaddr);
            continue;
        }

        pcibios_read_config_word(pci_bus, pci_device_fn,
                                 PCI_COMMAND, &pci_command);
        new_command = pci_command | (pci_tbl[chip_idx].flags & 7);
        if (pci_command != new_command) {
            printk(KERN_INFO "  The PCI BIOS has not enabled the"
                   " device at %d/%d!  Updating PCI command %4.4x->%4.4x.\n",
                   pci_bus, pci_device_fn, pci_command, new_command);
            pcibios_write_config_word(pci_bus, pci_device_fn,
                                      PCI_COMMAND, new_command);
        }

        dev = pci_tbl[chip_idx].probe1(pci_bus, pci_device_fn, dev, ioaddr,
                                       irq, chip_idx, cards_found);

        if (dev  && (pci_tbl[chip_idx].flags & PCI_COMMAND_MASTER)) {
            u8 pci_latency;
            pcibios_read_config_byte(pci_bus, pci_device_fn,
                                     PCI_LATENCY_TIMER, &pci_latency);
            if (pci_latency < min_pci_latency) {
                printk(KERN_INFO "  PCI latency timer (CFLT) is "
                       "unreasonably low at %d.  Setting to %d clocks.\n",
                       pci_latency, min_pci_latency);
                pcibios_write_config_byte(pci_bus, pci_device_fn,
                                          PCI_LATENCY_TIMER, min_pci_latency);
            }
        }
        dev = 0;
        cards_found++;
    }

    return cards_found ? 0 : -ENODEV;
}

int starfire_probe(struct net_device *dev)
{
    if (pci_etherdev_probe(dev, pci_tbl) < 0)
        return -ENODEV;
    printk(KERN_INFO "%s" KERN_INFO "%s", versionA, versionB);
    return 0;
}

static struct net_device *
starfire_probe1(int pci_bus, int pci_devfn, struct net_device *dev,
                long ioaddr, int irq, int chip_id, int card_idx)
{
    struct netdev_private *np;
    int i, option = card_idx < MAX_UNITS ? options[card_idx] : 0;

    dev = init_etherdev(dev, sizeof(struct netdev_private));

    printk(KERN_INFO "%s: %s at 0x%lx, ",
           dev->name, skel_netdrv_tbl[chip_id].chip_name, ioaddr);

    /* Serial EEPROM reads are hidden by the hardware. */
    for (i = 0; i < 6; i++)
        dev->dev_addr[i] = readb(ioaddr + EEPROMCtrl + 20-i);
    for (i = 0; i < 5; i++)
        printk("%2.2x:", dev->dev_addr[i]);
    printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);

#if ! defined(final_version) /* Dump the EEPROM contents during development. */
    if (debug > 4)
        for (i = 0; i < 0x20; i++)
            printk("%2.2x%s", readb(ioaddr + EEPROMCtrl + i),
                   i % 16 != 15 ? " " : "\n");
#endif

    /* Reset the chip to erase previous misconfiguration. */
    writel(1, ioaddr + PCIDeviceConfig);

    dev->base_addr = ioaddr;
    dev->irq = irq;

    /* Make certain the descriptor lists are aligned. */
    np = (void *)(((long)kmalloc(sizeof(*np), GFP_KERNEL) + 15) & ~15);
    memset(np, 0, sizeof(*np));
    dev->priv = np;

    np->next_module = root_net_dev;
    root_net_dev = dev;

    np->pci_bus = pci_bus;
    np->pci_devfn = pci_devfn;
    np->chip_id = chip_id;

    if (dev->mem_start)
        option = dev->mem_start;

    /* The lower four bits are the media type. */
    if (option > 0) {
        if (option & 0x200)
            np->full_duplex = 1;
        np->default_port = option & 15;
        if (np->default_port)
            np->medialock = 1;
    }
    if (card_idx < MAX_UNITS  &&  full_duplex[card_idx] > 0)
        np->full_duplex = 1;

    if (np->full_duplex)
        np->duplex_lock = 1;

    /* The chip-specific entries in the device structure. */
    dev->open = &netdev_open;
    dev->hard_start_xmit = &start_tx;
    dev->stop = &netdev_close;
    dev->get_stats = &get_stats;
    dev->set_multicast_list = &set_rx_mode;
    dev->do_ioctl = &mii_ioctl;

    if (mtu)
        dev->mtu = mtu;

    if (skel_netdrv_tbl[np->chip_id].flags & CanHaveMII) {
        int phy, phy_idx = 0;
        for (phy = 0; phy < 32 && phy_idx < 4; phy++) {
            int mii_status = mdio_read(dev, phy, 1);
            if (mii_status != 0xffff  &&  mii_status != 0x0000) {
                np->phys[phy_idx++] = phy;
                np->advertising = mdio_read(dev, phy, 4);
                printk(KERN_INFO "%s: MII PHY found at address %d, status "
                       "0x%4.4x advertising %4.4x.\n",
                       dev->name, phy, mii_status, np->advertising);
            }
        }
        np->mii_cnt = phy_idx;
    }

    return dev;
}

/* Read the MII Management Data I/O (MDIO) interfaces. */

static int mdio_read(struct net_device *dev, int phy_id, int location)
{
    long mdio_addr = dev->base_addr + MIICtrl + (phy_id<<7) + (location<<2);
    int result, boguscnt = 1000;
    /* ??? Must add a busy-wait here. */
    do
        result = readl(mdio_addr);
    while ((result & 0xC0000000) != 0x80000000  &&  --boguscnt >= 0);
    return result & 0xffff;
}

static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
    long mdio_addr = dev->base_addr + MIICtrl + (phy_id<<7) + (location<<2);
    writel(value, mdio_addr);
    /* The busy-wait will occur before a read. */
    return;
}
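
/* Usage note (standard MII register numbering, not specific to this chip):
   mdio_read(dev, np->phys[0], 1) polls the PHY status register, and
   mdio_read(dev, np->phys[0], 5) fetches the link partner ability word,
   which is how check_duplex() below decides between full and half duplex. */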

static int netdev_open(struct net_device *dev)
{
    struct netdev_private *np = (struct netdev_private *)dev->priv;
    long ioaddr = dev->base_addr;
    int i;

    /* Do we need to reset the chip??? */

    if (request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev))
        return -EAGAIN;

    /* Disable the Rx and Tx, and reset the chip. */
    writel(0, ioaddr + GenCtrl);
    writel(1, ioaddr + PCIDeviceConfig);
    if (debug > 1)
        printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
               dev->name, dev->irq);
    /* Allocate the various queues, failing gracefully. */
    if (np->tx_done_q == 0)
        np->tx_done_q = (struct tx_done_report *)get_free_page(GFP_KERNEL);
    if (np->rx_done_q == 0)
        np->rx_done_q = (struct rx_done_desc *)get_free_page(GFP_KERNEL);
    if (np->tx_ring == 0)
        np->tx_ring = (struct starfire_tx_desc *)get_free_page(GFP_KERNEL);
    if (np->rx_ring == 0)
        np->rx_ring = (struct starfire_rx_desc *)get_free_page(GFP_KERNEL);
    if (np->tx_done_q == 0  ||  np->rx_done_q == 0
        ||  np->rx_ring == 0  ||  np->tx_ring == 0)
        return -ENOMEM;

    MOD_INC_USE_COUNT;

    init_ring(dev);
    /* Set the size of the Rx buffers. */
    writel((np->rx_buf_sz<<16) | 0xA000, ioaddr + RxDescQCtrl);

    /* Set Tx descriptor to type 1 and padding to 0 bytes. */
    writel(0x02000401, ioaddr + TxDescCtrl);

#if defined(ADDR_64BITS) && defined(__alpha__)
    writel(virt_to_bus(np->rx_ring) >> 32, ioaddr + RxDescQHiAddr);
    writel(virt_to_bus(np->tx_ring) >> 32, ioaddr + TxRingHiAddr);
#else
    writel(0, ioaddr + RxDescQHiAddr);
    writel(0, ioaddr + TxRingHiAddr);
    writel(0, ioaddr + CompletionHiAddr);
#endif
    writel(virt_to_bus(np->rx_ring), ioaddr + RxDescQAddr);
    writel(virt_to_bus(np->tx_ring), ioaddr + TxRingPtr);

    writel(virt_to_bus(np->tx_done_q), ioaddr + TxCompletionAddr);
    writel(virt_to_bus(np->rx_done_q), ioaddr + RxCompletionAddr);

    if (debug > 1)
        printk(KERN_DEBUG "%s: Filling in the station address.\n", dev->name);

    /* Fill both the unused Tx SA register and the Rx perfect filter. */
    for (i = 0; i < 6; i++)
        writeb(dev->dev_addr[i], ioaddr + StationAddr + 6-i);
    for (i = 0; i < 16; i++) {
        u16 *eaddrs = (u16 *)dev->dev_addr;
        long setup_frm = ioaddr + 0x56000 + i*16;
        writew(eaddrs[0], setup_frm); setup_frm += 4;
        writew(eaddrs[1], setup_frm); setup_frm += 4;
        writew(eaddrs[2], setup_frm); setup_frm += 4;
    }

    /* Initialize other registers. */
    /* Configure the PCI bus bursts and FIFO thresholds. */
    np->tx_threshold = 4;
    writel(np->tx_threshold, ioaddr + TxThreshold);
    writel(interrupt_mitigation, ioaddr + IntrTimerCtrl);

    if (dev->if_port == 0)
        dev->if_port = np->default_port;

    dev->tbusy = 0;
    dev->interrupt = 0;

    if (debug > 1)
        printk(KERN_DEBUG "%s: Setting the Rx and Tx modes.\n", dev->name);
    set_rx_mode(dev);

    check_duplex(dev, 1);

    dev->start = 1;

    /* Set the interrupt mask and enable PCI interrupts. */
    writel(IntrRxDone | IntrRxEmpty | IntrRxPCIErr |
           IntrTxDone | IntrTxEmpty | IntrTxPCIErr |
           StatsMax | LinkChange | IntrNormalSummary | IntrAbnormalSummary
           | 0x0010, ioaddr + IntrEnable);
    writel(0x00800000 | readl(ioaddr + PCIDeviceConfig),
           ioaddr + PCIDeviceConfig);

    /* Enable the Rx and Tx units. */
    writel(0x000F, ioaddr + GenCtrl);

    if (debug > 2)
        printk(KERN_DEBUG "%s: Done netdev_open().\n",
               dev->name);

    /* Set the timer to check for link beat. */
    init_timer(&np->timer);
    np->timer.expires = RUN_AT(3*HZ);
    np->timer.data = (unsigned long)dev;
    np->timer.function = &netdev_timer;     /* timer handler */
    add_timer(&np->timer);

    return 0;
}

static void check_duplex(struct net_device *dev, int startup)
{
    struct netdev_private *np = (struct netdev_private *)dev->priv;
    long ioaddr = dev->base_addr;
    int mii_reg5 = mdio_read(dev, np->phys[0], 5);
    int duplex, new_tx_mode;

    new_tx_mode = 0x0C04 | (np->tx_flowctrl ? 0x0800:0) | (np->rx_flowctrl ? 0x0400:0);
    if (np->duplex_lock)
        duplex = 1;
    else
        duplex = (mii_reg5 & 0x0100) || (mii_reg5 & 0x01C0) == 0x0040;
    if (duplex)
        new_tx_mode |= 2;
    if (np->full_duplex != duplex) {
        np->full_duplex = duplex;
        if (debug)
            printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link"
                   " partner capability of %4.4x.\n", dev->name,
                   duplex ? "full" : "half", np->phys[0], mii_reg5);
    }
    if (new_tx_mode != np->tx_mode) {
        np->tx_mode = new_tx_mode;
        writel(np->tx_mode | 0x8000, ioaddr + TxMode);
        writel(np->tx_mode, ioaddr + TxMode);
    }
}

static void netdev_timer(unsigned long data)
{
    struct net_device *dev = (struct net_device *)data;
    struct netdev_private *np = (struct netdev_private *)dev->priv;
    long ioaddr = dev->base_addr;
    int next_tick = 60*HZ;      /* Check before driver release. */

    if (debug > 3) {
        printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x.\n",
               dev->name, readl(ioaddr + IntrStatus));
    }
    check_duplex(dev, 0);
#if ! defined(final_version)
    /* This is often falsely triggered. */
    if (readl(ioaddr + IntrStatus) & 1) {
        int new_status = readl(ioaddr + IntrStatus);
        /* Bogus hardware IRQ: Fake an interrupt handler call. */
        if (new_status & 1) {
            printk(KERN_ERR "%s: Interrupt blocked, status %8.8x/%8.8x.\n",
                   dev->name, new_status, readl(ioaddr + IntrStatus));
            intr_handler(dev->irq, dev, 0);
        }
    }
#endif

    np->timer.expires = RUN_AT(next_tick);
    add_timer(&np->timer);
}

static void tx_timeout(struct net_device *dev)
{
    struct netdev_private *np = (struct netdev_private *)dev->priv;
    long ioaddr = dev->base_addr;

    printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
           " resetting...\n", dev->name, readl(ioaddr + IntrStatus));

#ifndef __alpha__
    {
        int i;
        printk(KERN_DEBUG "  Rx ring %p: ", np->rx_ring);
        for (i = 0; i < RX_RING_SIZE; i++)
            printk(" %8.8x", (unsigned int)le32_to_cpu(np->rx_ring[i].rxaddr));
        printk("\n"KERN_DEBUG"  Tx ring %p: ", np->tx_ring);
        for (i = 0; i < TX_RING_SIZE; i++)
            printk(" %4.4x", le32_to_cpu(np->tx_ring[i].status));
        printk("\n");
    }
#endif

    /* Perhaps we should reinitialize the hardware here. */
    dev->if_port = 0;
    /* Stop and restart the chip's Tx processes. */

    /* Trigger an immediate transmit demand. */

    dev->trans_start = jiffies;
    np->stats.tx_errors++;
    return;
}

/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
    struct netdev_private *np = (struct netdev_private *)dev->priv;
    int i;

    np->tx_full = 0;
    np->cur_rx = np->cur_tx = 0;
    np->dirty_rx = np->rx_done = np->dirty_tx = np->tx_done = 0;

    np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

    /* Fill in the Rx buffers.  Handle allocation failure gracefully. */
    for (i = 0; i < RX_RING_SIZE; i++) {
        struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
        np->rx_skbuff[i] = skb;
        if (skb == NULL)
            break;
        skb->dev = dev;         /* Mark as being used by this device. */
        /* Grrr, we cannot offset to correctly align the IP header. */
        np->rx_ring[i].rxaddr = cpu_to_le32(virt_to_bus(skb->tail) | RxDescValid);
    }
    writew(i-1, dev->base_addr + RxDescQIdx);
    np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

    /* Clear the remainder of the Rx buffer ring. */
    for ( ; i < RX_RING_SIZE; i++) {
        np->rx_ring[i].rxaddr = 0;
        np->rx_skbuff[i] = 0;
    }
    /* Mark the last entry as wrapping the ring. */
    np->rx_ring[i-1].rxaddr |= cpu_to_le32(RxDescEndRing);

    /* Clear the completion rings. */
    for (i = 0; i < DONE_Q_SIZE; i++) {
        np->rx_done_q[i].status = 0;
        np->tx_done_q[i].status = 0;
    }

    for (i = 0; i < TX_RING_SIZE; i++) {
        np->tx_skbuff[i] = 0;
        np->tx_ring[i].status = 0;
    }
    return;
}

static int start_tx(struct sk_buff *skb, struct net_device *dev)
{
    struct netdev_private *np = (struct netdev_private *)dev->priv;
    unsigned entry;

    /* Block a timer-based transmit from overlapping.  This could better be
       done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
    if (test_and_set_bit(0, (void*)&dev->tbusy) != 0) {
        if (jiffies - dev->trans_start < TX_TIMEOUT)
            return 1;
        tx_timeout(dev);
        return 1;
    }

    /* Caution: the write order is important here, set the field
       with the "ownership" bits last. */

    /* Calculate the next Tx descriptor entry. */
    entry = np->cur_tx % TX_RING_SIZE;

    np->tx_skbuff[entry] = skb;

    np->tx_ring[entry].addr = cpu_to_le32(virt_to_bus(skb->data));
    /* Add |TxDescIntr to generate Tx-done interrupts. */
    np->tx_ring[entry].status = cpu_to_le32(skb->len | TxDescID);
    if (debug > 5) {
        printk(KERN_DEBUG "%s: Tx #%d slot %d  %8.8x %8.8x.\n",
               dev->name, np->cur_tx, entry,
               le32_to_cpu(np->tx_ring[entry].status),
               le32_to_cpu(np->tx_ring[entry].addr));
    }
    np->cur_tx++;
#if 1
    if (entry >= TX_RING_SIZE-1) {       /* Wrap ring */
        np->tx_ring[entry].status |= cpu_to_le32(TxRingWrap | TxDescIntr);
        entry = -1;
    }
#endif

    /* Non-x86: explicitly flush descriptor cache lines here. */

    /* Update the producer index. */
    writel(++entry, dev->base_addr + TxProducerIdx);

    if (np->cur_tx - np->dirty_tx >= TX_RING_SIZE - 1)
        np->tx_full = 1;
    if (! np->tx_full)
        clear_bit(0, (void*)&dev->tbusy);
    dev->trans_start = jiffies;

    if (debug > 4) {
        printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
               dev->name, np->cur_tx, entry);
    }
    return 0;
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
{
    struct net_device *dev = (struct net_device *)dev_instance;
    struct netdev_private *np;
    long ioaddr, boguscnt = max_interrupt_work;

#ifndef final_version           /* Can never occur. */
    if (dev == NULL) {
        printk (KERN_ERR "Netdev interrupt handler(): IRQ %d for unknown "
                "device.\n", irq);
        return;
    }
#endif

    ioaddr = dev->base_addr;
    np = (struct netdev_private *)dev->priv;
#if defined(__i386__)
    /* A lock to prevent simultaneous entry bug on Intel SMP machines. */
    if (test_and_set_bit(0, (void*)&dev->interrupt)) {
        printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n",
               dev->name);
        dev->interrupt = 0;     /* Avoid halting machine. */
        return;
    }
#else
    if (dev->interrupt) {
        printk(KERN_ERR "%s: Re-entering the interrupt handler.\n", dev->name);
        return;
    }
    dev->interrupt = 1;
#endif

    do {
        u32 intr_status = readl(ioaddr + IntrClear);

        if (debug > 4)
            printk(KERN_DEBUG "%s: Interrupt status %4.4x.\n",
                   dev->name, intr_status);

        if (intr_status == 0)
            break;

        if (intr_status & IntrRxDone)
            netdev_rx(dev);

        /* Scavenge the skbuff list based on the Tx-done queue.
           There are redundant checks here that may be cleaned up
           after the driver has proven reliable. */
        {
            int consumer = readl(ioaddr + TxConsumerIdx);
            int tx_status;
            if (debug > 4)
                printk(KERN_DEBUG "%s: Tx Consumer index is %d.\n",
                       dev->name, consumer);
#if 0
            if (np->tx_done >= 250  ||  np->tx_done == 0)
                printk(KERN_DEBUG "%s: Tx completion entry %d is %8.8x, "
                       "%d is %8.8x.\n", dev->name,
                       np->tx_done, le32_to_cpu(np->tx_done_q[np->tx_done].status),
                       (np->tx_done+1) & (DONE_Q_SIZE-1),
                       le32_to_cpu(np->tx_done_q[(np->tx_done+1)&(DONE_Q_SIZE-1)].status));
#endif
            while ((tx_status = le32_to_cpu(np->tx_done_q[np->tx_done].status)) != 0) {
                if (debug > 4)
                    printk(KERN_DEBUG "%s: Tx completion entry %d is %8.8x.\n",
                           dev->name, np->tx_done, tx_status);
                if ((tx_status & 0xe0000000) == 0xa0000000) {
                    np->stats.tx_packets++;
                } else if ((tx_status & 0xe0000000) == 0x80000000) {
                    u16 entry = tx_status;      /* Implicit truncate */
                    entry >>= 3;
                    /* Scavenge the descriptor. */
                    dev_kfree_skb(np->tx_skbuff[entry]);
                    np->tx_skbuff[entry] = 0;
                    np->dirty_tx++;
                }
                np->tx_done_q[np->tx_done].status = 0;
                np->tx_done = (np->tx_done+1) & (DONE_Q_SIZE-1);
            }
            writew(np->tx_done, ioaddr + CompletionQConsumerIdx + 2);
        }
        if (np->tx_full && np->cur_tx - np->dirty_tx < TX_RING_SIZE - 4) {
            /* The ring is no longer full, clear tbusy. */
            np->tx_full = 0;
            clear_bit(0, (void*)&dev->tbusy);
            mark_bh(NET_BH);
        }

        /* Abnormal error summary/uncommon events handlers. */
        if (intr_status & IntrAbnormalSummary)
            netdev_error(dev, intr_status);

        if (--boguscnt < 0) {
            printk(KERN_WARNING "%s: Too much work at interrupt, "
                   "status=0x%4.4x.\n",
                   dev->name, intr_status);
            break;
        }
    } while (1);

    if (debug > 4)
        printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
               dev->name, readl(ioaddr + IntrStatus));

#ifndef final_version
    /* Code that should never be run!  Remove after testing.. */
    {
        static int stopit = 10;
        if (dev->start == 0  &&  --stopit < 0) {
            printk(KERN_ERR "%s: Emergency stop, looping startup interrupt.\n",
                   dev->name);
            free_irq(irq, dev);
        }
    }
#endif

#if defined(__i386__)
    clear_bit(0, (void*)&dev->interrupt);
#else
    dev->interrupt = 0;
#endif
    return;
}

/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation. */
static int netdev_rx(struct net_device *dev)
{
    struct netdev_private *np = (struct netdev_private *)dev->priv;
    int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
    u32 desc_status;

    if (np->rx_done_q == 0) {
        printk(KERN_ERR "%s: rx_done_q is NULL!  rx_done is %d. %p.\n",
               dev->name, np->rx_done, np->tx_done_q);
        return 0;
    }

    /* If EOP is set on the next entry, it's a new packet. Send it up. */
    while ((desc_status = le32_to_cpu(np->rx_done_q[np->rx_done].status)) != 0) {
        if (debug > 4)
            printk(KERN_DEBUG "  netdev_rx() status of %d was %8.8x.\n",
                   np->rx_done, desc_status);
        if (--boguscnt < 0)
            break;
        if ( ! (desc_status & RxOK)) {
            /* There was an error. */
            if (debug > 2)
                printk(KERN_DEBUG "  netdev_rx() Rx error was %8.8x.\n",
                       desc_status);
            np->stats.rx_errors++;
            if (desc_status & RxFIFOErr)
                np->stats.rx_fifo_errors++;
        } else {
            struct sk_buff *skb;
            u16 pkt_len = desc_status;          /* Implicitly Truncate */
            int entry = (desc_status >> 16) & 0x7ff;

#ifndef final_version
            if (debug > 4)
                printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
                       ", bogus_cnt %d.\n",
                       pkt_len, boguscnt);
#endif
            /* Check if the packet is long enough to accept without copying
               to a minimally-sized skbuff. */
            if (pkt_len < rx_copybreak
                && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
                skb->dev = dev;
                skb_reserve(skb, 2);    /* 16 byte align the IP header */
#if HAS_IP_COPYSUM          /* Call copy + cksum if available. */
                eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
                skb_put(skb, pkt_len);
#else
                memcpy(skb_put(skb, pkt_len), np->rx_skbuff[entry]->tail,
                       pkt_len);
#endif
            } else {
                char *temp = skb_put(skb = np->rx_skbuff[entry], pkt_len);
                np->rx_skbuff[entry] = NULL;
#ifndef final_version           /* Remove after testing. */
                if (bus_to_virt(le32_to_cpu(np->rx_ring[entry].rxaddr) & ~3) != temp)
                    printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
                           "do not match in netdev_rx: %p vs. %p / %p.\n",
                           dev->name, bus_to_virt(le32_to_cpu(np->rx_ring[entry].rxaddr)),
                           skb->head, temp);
#endif
            }
#ifndef final_version           /* Remove after testing. */
            /* You will want this info for the initial debug. */
            if (debug > 5)
                printk(KERN_DEBUG "  Rx data %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:"
                       "%2.2x %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x %2.2x%2.2x "
                       "%d.%d.%d.%d.\n",
                       skb->data[0], skb->data[1], skb->data[2], skb->data[3],
                       skb->data[4], skb->data[5], skb->data[6], skb->data[7],
                       skb->data[8], skb->data[9], skb->data[10],
                       skb->data[11], skb->data[12], skb->data[13],
                       skb->data[14], skb->data[15], skb->data[16],
                       skb->data[17]);
#endif
            skb->protocol = eth_type_trans(skb, dev);
#ifdef full_rx_status
            if (le32_to_cpu(np->rx_done_q[np->rx_done].status2) & 0x01000000)
                skb->ip_summed = CHECKSUM_UNNECESSARY;
#endif
            netif_rx(skb);
            dev->last_rx = jiffies;
            np->stats.rx_packets++;
        }
        np->cur_rx++;
        np->rx_done_q[np->rx_done].status = 0;
        np->rx_done = (np->rx_done + 1) & (DONE_Q_SIZE-1);
    }
    writew(np->rx_done, dev->base_addr + CompletionQConsumerIdx);

    /* Refill the Rx ring buffers. */
    for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
        struct sk_buff *skb;
        int entry = np->dirty_rx % RX_RING_SIZE;
        if (np->rx_skbuff[entry] == NULL) {
            skb = dev_alloc_skb(np->rx_buf_sz);
            np->rx_skbuff[entry] = skb;
            if (skb == NULL)
                break;          /* Better luck next round. */
            skb->dev = dev;     /* Mark as being used by this device. */
            np->rx_ring[entry].rxaddr = cpu_to_le32(virt_to_bus(skb->tail) | RxDescValid);
        }
        if (entry == RX_RING_SIZE - 1)
            np->rx_ring[entry].rxaddr |= cpu_to_le32(RxDescEndRing);
        /* We could defer this until later... */
        writew(entry, dev->base_addr + RxDescQIdx);
    }

    if (debug > 5
        || memcmp(np->pad0, np->pad0 + 1, sizeof(np->pad0) -1))
        printk(KERN_DEBUG "  exiting netdev_rx() status of %d was %8.8x %d.\n",
               np->rx_done, desc_status,
               memcmp(np->pad0, np->pad0 + 1, sizeof(np->pad0) -1));

    /* Restart Rx engine if stopped. */
    return 0;
}

static void netdev_error(struct net_device *dev, int intr_status)
{
    struct netdev_private *np = (struct netdev_private *)dev->priv;

    if (intr_status & LinkChange) {
        printk(KERN_ERR "%s: Link changed: Autonegotiation advertising"
               " %4.4x  partner %4.4x.\n", dev->name,
               mdio_read(dev, np->phys[0], 4),
               mdio_read(dev, np->phys[0], 5));
        check_duplex(dev, 0);
    }
    if (intr_status & StatsMax) {
        get_stats(dev);
    }
    /* Came close to underrunning the Tx FIFO, increase threshold. */
    if (intr_status & IntrTxDataLow)
        writel(++np->tx_threshold, dev->base_addr + TxThreshold);
    if ((intr_status &
         ~(IntrAbnormalSummary|LinkChange|StatsMax|IntrTxDataLow|1)) && debug)
        printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
               dev->name, intr_status);
    /* Hmmmmm, it's not clear how to recover from PCI faults. */
    if (intr_status & IntrTxPCIErr)
        np->stats.tx_fifo_errors++;
    if (intr_status & IntrRxPCIErr)
        np->stats.rx_fifo_errors++;
}

static struct net_device_stats *get_stats(struct net_device *dev)
{
    long ioaddr = dev->base_addr;
    struct netdev_private *np = (struct netdev_private *)dev->priv;

    /* We should lock this segment of code for SMP eventually, although
       the vulnerability window is very small and statistics are
       non-critical. */
#if LINUX_VERSION_CODE > 0x20119
    np->stats.tx_bytes = readl(ioaddr + 0x57010);
    np->stats.rx_bytes = readl(ioaddr + 0x57044);
#endif
    np->stats.tx_packets = readl(ioaddr + 0x57000);
    np->stats.tx_aborted_errors =
        readl(ioaddr + 0x57024) + readl(ioaddr + 0x57028);
    np->stats.tx_window_errors = readl(ioaddr + 0x57018);
    np->stats.collisions = readl(ioaddr + 0x57004) + readl(ioaddr + 0x57008);

    /* The chip only need report frames silently dropped. */
    np->stats.rx_dropped += readw(ioaddr + RxDMAStatus);
    writew(0, ioaddr + RxDMAStatus);
    np->stats.rx_crc_errors = readl(ioaddr + 0x5703C);
    np->stats.rx_frame_errors = readl(ioaddr + 0x57040);
    np->stats.rx_length_errors = readl(ioaddr + 0x57058);
    np->stats.rx_missed_errors = readl(ioaddr + 0x5707C);

    return &np->stats;
}

/* The little-endian AUTODIN II ethernet CRC calculations.
   A big-endian version is also available.
   This is slow but compact code.  Do not use this routine for bulk data,
   use a table-based routine instead.
   This is common code and should be moved to net/core/crc.c.
   Chips may use the upper or lower CRC bits, and may reverse and/or invert
   them.  Select the endian-ness that results in minimal calculations. */
static unsigned const ethernet_polynomial_le = 0xedb88320U;
static inline unsigned ether_crc_le(int length, unsigned char *data)
{
    unsigned int crc = 0xffffffff;  /* Initial value. */
    while(--length >= 0) {
        unsigned char current_octet = *data++;
        int bit;
        for (bit = 8; --bit >= 0; current_octet >>= 1) {
            if ((crc ^ current_octet) & 1) {
                crc >>= 1;
                crc ^= ethernet_polynomial_le;
            } else
                crc >>= 1;
        }
    }
    return crc;
}
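
/* For illustration: set_rx_mode() below picks the multicast filter bin with
   the upper 9 bits of this CRC, since the Starfire hash table has 512
   entries (32 - 23 = 9):

    set_bit(ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 23, mc_filter);
*/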

static void set_rx_mode(struct net_device *dev)
{
    long ioaddr = dev->base_addr;
    u32 rx_mode;
    struct dev_mc_list *mclist;
    int i;

    if (dev->flags & IFF_PROMISC) {         /* Set promiscuous. */
        /* Unconditionally log net taps. */
        printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
        rx_mode = AcceptBroadcast|AcceptAllMulticast|AcceptAll|AcceptMyPhys;
    } else if ((dev->mc_count > multicast_filter_limit)
               ||  (dev->flags & IFF_ALLMULTI)) {
        /* Too many to match, or accept all multicasts. */
        rx_mode = AcceptBroadcast|AcceptAllMulticast|AcceptMyPhys;
    } else if (dev->mc_count <= 15) {
        /* Use the 16 element perfect filter. */
        long filter_addr = ioaddr + 0x56000 + 1*16;
        for (i = 1, mclist = dev->mc_list; mclist  &&  i <= dev->mc_count;
             i++, mclist = mclist->next) {
            u16 *eaddrs = (u16 *)mclist->dmi_addr;
            writew(*eaddrs++, filter_addr); filter_addr += 4;
            writew(*eaddrs++, filter_addr); filter_addr += 4;
            writew(*eaddrs++, filter_addr); filter_addr += 8;
        }
        while (i++ < 16) {
            writew(0xffff, filter_addr); filter_addr += 4;
            writew(0xffff, filter_addr); filter_addr += 4;
            writew(0xffff, filter_addr); filter_addr += 8;
        }
        rx_mode = AcceptBroadcast | AcceptMyPhys;
    } else {
        /* Must use a multicast hash table. */
        long filter_addr;
        u16 mc_filter[32] __attribute__ ((aligned(sizeof(long))));  /* Multicast hash filter */

        memset(mc_filter, 0, sizeof(mc_filter));
        for (i = 0, mclist = dev->mc_list; mclist  &&  i < dev->mc_count;
             i++, mclist = mclist->next) {
            set_bit(ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 23, mc_filter);
        }
        /* Clear the perfect filter list. */
        filter_addr = ioaddr + 0x56000 + 1*16;
        for (i = 1; i < 16; i++) {
            writew(0xffff, filter_addr); filter_addr += 4;
            writew(0xffff, filter_addr); filter_addr += 4;
            writew(0xffff, filter_addr); filter_addr += 8;
        }
        for (filter_addr = ioaddr + 0x56100, i = 0; i < 32; filter_addr += 16, i++)
            writew(mc_filter[i], filter_addr);
        rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
    }
    writel(rx_mode|AcceptAll, ioaddr + RxFilterMode);
}

static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
    u16 *data = (u16 *)&rq->ifr_data;

    switch(cmd) {
    case SIOCDEVPRIVATE:        /* Get the address of the PHY in use. */
        data[0] = ((struct netdev_private *)dev->priv)->phys[0] & 0x1f;
        /* Fall Through */
    case SIOCDEVPRIVATE+1:      /* Read the specified MII register. */
        data[3] = mdio_read(dev, data[0] & 0x1f, data[1] & 0x1f);
        return 0;
    case SIOCDEVPRIVATE+2:      /* Write the specified MII register */
        if (!capable(CAP_NET_ADMIN))
            return -EPERM;
        mdio_write(dev, data[0] & 0x1f, data[1] & 0x1f, data[2]);
        return 0;
    default:
        return -EOPNOTSUPP;
    }
}

static int netdev_close(struct net_device *dev)
{
    long ioaddr = dev->base_addr;
    struct netdev_private *np = (struct netdev_private *)dev->priv;
    int i;

    dev->start = 0;
    dev->tbusy = 1;

    if (debug > 1) {
        printk(KERN_DEBUG "%s: Shutting down ethercard, status was Int %4.4x.\n",
               dev->name, readl(ioaddr + IntrStatus));
        printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
               dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
    }

    /* Disable interrupts by clearing the interrupt mask. */
    writel(0, ioaddr + IntrEnable);

    /* Stop the chip's Tx and Rx processes. */

    del_timer(&np->timer);

#ifdef __i386__
    if (debug > 2) {
        printk("\n"KERN_DEBUG"  Tx ring at %8.8x:\n",
               (int)virt_to_bus(np->tx_ring));
        for (i = 0; i < 8 /* TX_RING_SIZE */; i++)
            printk(KERN_DEBUG " #%d desc. %8.8x %8.8x -> %8.8x.\n",
                   i, le32_to_cpu(np->tx_ring[i].status),
                   le32_to_cpu(np->tx_ring[i].addr),
                   le32_to_cpu(np->tx_done_q[i].status));
        printk(KERN_DEBUG "  Rx ring at %8.8x -> %p:\n",
               (int)virt_to_bus(np->rx_ring), np->rx_done_q);
        if (np->rx_done_q)
            for (i = 0; i < 8 /* RX_RING_SIZE */; i++) {
                printk(KERN_DEBUG " #%d desc. %8.8x -> %8.8x\n",
                       i, le32_to_cpu(np->rx_ring[i].rxaddr), le32_to_cpu(np->rx_done_q[i].status));
            }
    }
#endif /* __i386__ debugging only */

    free_irq(dev->irq, dev);

    /* Free all the skbuffs in the Rx queue. */
    for (i = 0; i < RX_RING_SIZE; i++) {
        np->rx_ring[i].rxaddr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
        if (np->rx_skbuff[i]) {
#if LINUX_VERSION_CODE < 0x20100
            np->rx_skbuff[i]->free = 1;
#endif
            dev_kfree_skb(np->rx_skbuff[i]);
        }
        np->rx_skbuff[i] = 0;
    }
    for (i = 0; i < TX_RING_SIZE; i++) {
        if (np->tx_skbuff[i])
            dev_kfree_skb(np->tx_skbuff[i]);
        np->tx_skbuff[i] = 0;
    }

    MOD_DEC_USE_COUNT;

    return 0;
}

#ifdef MODULE
int init_module(void)
{
    if (debug)                  /* Emit version even if no cards detected. */
        printk(KERN_INFO "%s" KERN_INFO "%s", versionA, versionB);
#ifdef CARDBUS
    register_driver(&etherdev_ops);
    return 0;
#else
    if (pci_etherdev_probe(NULL, pci_tbl)) {
        printk(KERN_INFO "  No Starfire adapters detected, driver not loaded.\n");
        return -ENODEV;
    }
    return 0;
#endif
}

void cleanup_module(void)
{
    struct net_device *next_dev;

#ifdef CARDBUS
    unregister_driver(&etherdev_ops);
#endif

    /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
    while (root_net_dev) {
        struct netdev_private *np =
            (struct netdev_private *)root_net_dev->priv;
        next_dev = np->next_module;
        unregister_netdev(root_net_dev);
        iounmap((char *)root_net_dev->base_addr);
        if (np->tx_done_q) free_page((long)np->tx_done_q);
        if (np->rx_done_q) free_page((long)np->rx_done_q);
        kfree(root_net_dev);
        root_net_dev = next_dev;
    }
}

#endif  /* MODULE */

/*
 * Local variables:
 *  compile-command: "gcc -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -c starfire.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
 *  SMP-compile-command: "gcc -D__SMP__ -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -c starfire.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
 *  simple-compile-command: "gcc -DMODULE -D__KERNEL__ -O6 -c starfire.c"
 *  c-indent-level: 4
 *  c-basic-offset: 4
 *  tab-width: 4
 * End:
 */