/* starfire.c: Linux device driver for the Adaptec Starfire network adapter. */
/*
	Written 1998-2000 by Donald Becker.

	Current maintainer is Ion Badulescu <ionut ta badula tod org>. Please
	send all bug reports to me, and not to Donald Becker, as this code
	has been heavily modified from Donald's original version.

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice. This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	The information below comes from Donald Becker's original driver:

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403

	Support and updates available at
	http://www.scyld.com/network/starfire.html
	[link no longer provides useful info -jgarzik]

*/

#define DRV_NAME	"starfire"
#define DRV_VERSION	"2.1"
#define DRV_RELDATE	"July 6, 2008"

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/mm.h>
#include <linux/firmware.h>
#include <asm/processor.h>		/* Processor type for cache alignment. */
#include <asm/uaccess.h>
#include <asm/io.h>

/*
 * The current frame processor firmware fails to checksum a fragment
 * of length 1. If and when this is fixed, the #define below can be removed.
 */
#define HAS_BROKEN_FIRMWARE

/*
 * If using the broken firmware, data must be padded to the next 32-bit boundary.
 */
#ifdef HAS_BROKEN_FIRMWARE
#define PADDING_MASK 3
#endif

/*
 * Define this if using the driver with the zero-copy patch
 */
#define ZEROCOPY

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define VLAN_SUPPORT
#endif

/* The user-configurable values.
   These may be modified when a driver module is loaded.*/

/* Used for tuning interrupt latency vs. overhead. */
static int intr_latency;
static int small_frames;

static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
static int max_interrupt_work = 20;
static int mtu;
/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   The Starfire has a 512 element hash table based on the Ethernet CRC. */
static const int multicast_filter_limit = 512;
/* Whether to do TCP/UDP checksums in hardware */
static int enable_hw_cksum = 1;

#define PKT_BUF_SZ	1536		/* Size of each temporary Rx buffer.*/
/*
 * Set the copy breakpoint for the copy-only-tiny-frames scheme.
 * Setting to > 1518 effectively disables this feature.
 *
 * NOTE:
 * The ia64 doesn't allow unaligned loads, even of integers misaligned
 * on a 2 byte boundary. Thus always force copying of
 * packets as the starfire doesn't allow for misaligned DMAs ;-(
 * 23/10/2000 - Jes
 *
 * The Alpha and the Sparc don't like unaligned loads, either. On Sparc64,
 * at least, having unaligned frames leads to a rather serious performance
 * penalty. -Ion
 */
#if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
static int rx_copybreak = PKT_BUF_SZ;
#else
static int rx_copybreak /* = 0 */;
#endif

/* PCI DMA burst size -- on sparc64 we want to force it to 64 bytes, on the others the default of 128 is fine. */
#ifdef __sparc__
#define DMA_BURST_SIZE 64
#else
#define DMA_BURST_SIZE 128
#endif

/* Used to pass the media type, etc.
   Both 'options[]' and 'full_duplex[]' exist for driver interoperability.
   The media type is usually passed in 'options[]'.
   These variables are deprecated, use ethtool instead. -Ion
*/
#define MAX_UNITS 8		/* More are supported, limit only on options */
static int options[MAX_UNITS] = {0, };
static int full_duplex[MAX_UNITS] = {0, };

/* Operational parameters that are set at compile time. */

/* The "native" ring sizes are either 256 or 2048.
   However in some modes a descriptor may be marked to wrap the ring earlier.
*/
#define RX_RING_SIZE	256
#define TX_RING_SIZE	32
/* The completion queues are fixed at 1024 entries i.e. 4K or 8KB. */
#define DONE_Q_SIZE	1024
/* All queues must be aligned on a 256-byte boundary */
#define QUEUE_ALIGN	256

#if RX_RING_SIZE > 256
#define RX_Q_ENTRIES Rx2048QEntries
#else
#define RX_Q_ENTRIES Rx256QEntries
#endif

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT	(2 * HZ)

/*
 * This SUCKS.
 * We need a much better method to determine if dma_addr_t is 64-bit.
 */
#if (defined(__i386__) && defined(CONFIG_HIGHMEM64G)) || defined(__x86_64__) || \
    defined(__ia64__) || defined(__alpha__) || defined(__mips64__) || (defined(__mips__) && \
    defined(CONFIG_HIGHMEM) && defined(CONFIG_64BIT_PHYS_ADDR))
/* 64-bit dma_addr_t */
#define ADDR_64BITS	/* This chip uses 64 bit addresses. */
#define netdrv_addr_t	__le64
#define cpu_to_dma(x)	cpu_to_le64(x)
#define dma_to_cpu(x)	le64_to_cpu(x)
#define RX_DESC_Q_ADDR_SIZE	RxDescQAddr64bit
#define TX_DESC_Q_ADDR_SIZE	TxDescQAddr64bit
#define RX_COMPL_Q_ADDR_SIZE	RxComplQAddr64bit
#define TX_COMPL_Q_ADDR_SIZE	TxComplQAddr64bit
#define RX_DESC_ADDR_SIZE	RxDescAddr64bit
#else  /* 32-bit dma_addr_t */
#define netdrv_addr_t	__le32
#define cpu_to_dma(x)	cpu_to_le32(x)
#define dma_to_cpu(x)	le32_to_cpu(x)
#define RX_DESC_Q_ADDR_SIZE	RxDescQAddr32bit
#define TX_DESC_Q_ADDR_SIZE	TxDescQAddr32bit
#define RX_COMPL_Q_ADDR_SIZE	RxComplQAddr32bit
#define TX_COMPL_Q_ADDR_SIZE	TxComplQAddr32bit
#define RX_DESC_ADDR_SIZE	RxDescAddr32bit
#endif

#define skb_first_frag_len(skb)	skb_headlen(skb)
#define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)

/* Firmware names */
#define FIRMWARE_RX	"adaptec/starfire_rx.bin"
#define FIRMWARE_TX	"adaptec/starfire_tx.bin"

/* These identify the driver base version and may not be removed. */
static const char version[] __devinitconst =
KERN_INFO "starfire.c:v1.03 7/26/2000 Written by Donald Becker <becker@scyld.com>\n"
" (unofficial 2.2/2.4 kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Adaptec Starfire Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_FIRMWARE(FIRMWARE_RX);
MODULE_FIRMWARE(FIRMWARE_TX);

module_param(max_interrupt_work, int, 0);
module_param(mtu, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(intr_latency, int, 0);
module_param(small_frames, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
module_param(enable_hw_cksum, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "Maximum events handled per interrupt");
MODULE_PARM_DESC(mtu, "MTU (all boards)");
MODULE_PARM_DESC(debug, "Debug level (0-6)");
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(intr_latency, "Maximum interrupt latency, in microseconds");
MODULE_PARM_DESC(small_frames, "Maximum size of receive frames that bypass interrupt latency (0,64,128,256,512)");
MODULE_PARM_DESC(options, "Deprecated: Bits 0-3: media type, bit 17: full duplex");
MODULE_PARM_DESC(full_duplex, "Deprecated: Forced full-duplex setting (0/1)");
MODULE_PARM_DESC(enable_hw_cksum, "Enable/disable hardware cksum support (0/1)");

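/*
 * Example module load (hypothetical values; any parameter may be omitted to
 * keep its default):
 *
 *	modprobe starfire intr_latency=100 small_frames=256 enable_hw_cksum=1
 *
 * intr_latency is given in microseconds; small_frames is mapped by the probe
 * code to the next supported bucket (64, 128, 256 or 512), with values above
 * 512 clamped down to 512.
 */
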
/*
			Theory of Operation

I. Board Compatibility

This driver is for the Adaptec 6915 "Starfire" 64 bit PCI Ethernet adapter.

II. Board-specific settings

III. Driver operation

IIIa. Ring buffers

The Starfire hardware uses multiple fixed-size descriptor queues/rings. The
ring sizes are set fixed by the hardware, but may optionally be wrapped
earlier by the END bit in the descriptor.
This driver uses that hardware queue size for the Rx ring, where a large
number of entries has no ill effect beyond increasing the potential backlog.
The Tx ring is wrapped with the END bit, since a large hardware Tx queue
disables the queue layer priority ordering and we have no mechanism to
utilize the hardware two-level priority queue. When modifying the
RX/TX_RING_SIZE pay close attention to page sizes and the ring-empty warning
levels.

IIIb/c. Transmit/Receive Structure

See the Adaptec manual for the many possible structures, and options for
each structure. There are far too many to document all of them here.

For transmit this driver uses type 0/1 transmit descriptors (depending
on the 32/64 bitness of the architecture), and relies on automatic
minimum-length padding. It does not use the completion queue
consumer index, but instead checks for non-zero status entries.

For receive this driver uses type 2/3 receive descriptors. The driver
allocates full frame size skbuffs for the Rx ring buffers, so all frames
should fit in a single descriptor. The driver does not use the completion
queue consumer index, but instead checks for non-zero status entries.

When an incoming frame is less than rx_copybreak bytes long, a fresh skbuff
is allocated and the frame is copied to the new skbuff. When the incoming
frame is larger, the skbuff is passed directly up the protocol stack.
Buffers consumed this way are replaced by newly allocated skbuffs in a later
phase of receive.

A notable aspect of operation is that unaligned buffers are not permitted by
the Starfire hardware. Thus the IP header at offset 14 in an ethernet frame
isn't longword aligned, which may cause problems on some machines,
e.g. Alphas and IA64. For these architectures, the driver is forced to copy
the frame into a new skbuff unconditionally. Copied frames are put into the
skbuff at an offset of "+2", thus 16-byte aligning the IP header.

IIId. Synchronization

The driver runs as two independent, single-threaded flows of control. One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag. The other thread is the interrupt handler, which is single
threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring and the netif_queue
status. If the number of free Tx slots in the ring falls below a certain number
(currently hardcoded to 4), it signals the upper layer to stop the queue.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring. After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark. Iff the netif_queue is stopped and the
number of free Tx slots is above the threshold, it signals the upper layer to
restart the queue.

IV. Notes

IVb. References

The Adaptec Starfire manuals, available only from Adaptec.
http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html

IVc. Errata

- StopOnPerr is broken, don't enable
- Hardware ethernet padding exposes random data, perform software padding
  instead (unverified -- works correctly for all the hardware I have)

*/

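/*
 * A minimal sketch (not part of the driver) of the free-slot accounting
 * described in IIId above: cur_tx and dirty_tx are free-running counters,
 * so their unsigned difference is the number of occupied Tx slots even
 * across integer wraparound.
 *
 *	unsigned int used = np->cur_tx - np->dirty_tx;
 *	unsigned int free = TX_RING_SIZE - used;
 *	if (free < 4)			// threshold hardcoded to 4
 *		netif_stop_queue(dev);
 */
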
enum chip_capability_flags {CanHaveMII=1, };

enum chipset {
	CH_6915 = 0,
};

static DEFINE_PCI_DEVICE_TABLE(starfire_pci_tbl) = {
	{ 0x9004, 0x6915, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_6915 },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, starfire_pci_tbl);

/* A chip capabilities table, matching the CH_xxx entries in xxx_pci_tbl[] above. */
static const struct chip_info {
	const char *name;
	int drv_flags;
} netdrv_tbl[] __devinitdata = {
	{ "Adaptec Starfire 6915", CanHaveMII },
};

/* Offsets to the device registers.
   Unlike software-only systems, device drivers interact with complex hardware.
   It's not useful to define symbolic names for every register bit in the
   device. The name can only partially document the semantics and make
   the driver longer and more difficult to read.
   In general, only the important configuration values or bits changed
   multiple times should be defined symbolically.
*/
enum register_offsets {
	PCIDeviceConfig=0x50040, GenCtrl=0x50070, IntrTimerCtrl=0x50074,
	IntrClear=0x50080, IntrStatus=0x50084, IntrEnable=0x50088,
	MIICtrl=0x52000, TxStationAddr=0x50120, EEPROMCtrl=0x51000,
	GPIOCtrl=0x5008C, TxDescCtrl=0x50090,
	TxRingPtr=0x50098, HiPriTxRingPtr=0x50094, /* Low and High priority. */
	TxRingHiAddr=0x5009C, /* 64 bit address extension. */
	TxProducerIdx=0x500A0, TxConsumerIdx=0x500A4,
	TxThreshold=0x500B0,
	CompletionHiAddr=0x500B4, TxCompletionAddr=0x500B8,
	RxCompletionAddr=0x500BC, RxCompletionQ2Addr=0x500C0,
	CompletionQConsumerIdx=0x500C4, RxDMACtrl=0x500D0,
	RxDescQCtrl=0x500D4, RxDescQHiAddr=0x500DC, RxDescQAddr=0x500E0,
	RxDescQIdx=0x500E8, RxDMAStatus=0x500F0, RxFilterMode=0x500F4,
	TxMode=0x55000, VlanType=0x55064,
	PerfFilterTable=0x56000, HashTable=0x56100,
	TxGfpMem=0x58000, RxGfpMem=0x5a000,
};

/*
 * Bits in the interrupt status/mask registers.
 * Warning: setting Intr[Ab]NormalSummary in the IntrEnable register
 * enables all the interrupt sources that are or'ed into those status bits.
 */
enum intr_status_bits {
	IntrLinkChange=0xf0000000, IntrStatsMax=0x08000000,
	IntrAbnormalSummary=0x02000000, IntrGeneralTimer=0x01000000,
	IntrSoftware=0x800000, IntrRxComplQ1Low=0x400000,
	IntrTxComplQLow=0x200000, IntrPCI=0x100000,
	IntrDMAErr=0x080000, IntrTxDataLow=0x040000,
	IntrRxComplQ2Low=0x020000, IntrRxDescQ1Low=0x010000,
	IntrNormalSummary=0x8000, IntrTxDone=0x4000,
	IntrTxDMADone=0x2000, IntrTxEmpty=0x1000,
	IntrEarlyRxQ2=0x0800, IntrEarlyRxQ1=0x0400,
	IntrRxQ2Done=0x0200, IntrRxQ1Done=0x0100,
	IntrRxGFPDead=0x80, IntrRxDescQ2Low=0x40,
	IntrNoTxCsum=0x20, IntrTxBadID=0x10,
	IntrHiPriTxBadID=0x08, IntrRxGfp=0x04,
	IntrTxGfp=0x02, IntrPCIPad=0x01,
	/* not quite bits */
	IntrRxDone=IntrRxQ2Done | IntrRxQ1Done,
	IntrRxEmpty=IntrRxDescQ1Low | IntrRxDescQ2Low,
	IntrNormalMask=0xff00, IntrAbnormalMask=0x3ff00fe,
};

/* Bits in the RxFilterMode register. */
enum rx_mode_bits {
	AcceptBroadcast=0x04, AcceptAllMulticast=0x02, AcceptAll=0x01,
	AcceptMulticast=0x10, PerfectFilter=0x40, HashFilter=0x30,
	PerfectFilterVlan=0x80, MinVLANPrio=0xE000, VlanMode=0x0200,
	WakeupOnGFP=0x0800,
};

/* Bits in the TxMode register */
enum tx_mode_bits {
	MiiSoftReset=0x8000, MIILoopback=0x4000,
	TxFlowEnable=0x0800, RxFlowEnable=0x0400,
	PadEnable=0x04, FullDuplex=0x02, HugeFrame=0x01,
};

/* Bits in the TxDescCtrl register. */
enum tx_ctrl_bits {
	TxDescSpaceUnlim=0x00, TxDescSpace32=0x10, TxDescSpace64=0x20,
	TxDescSpace128=0x30, TxDescSpace256=0x40,
	TxDescType0=0x00, TxDescType1=0x01, TxDescType2=0x02,
	TxDescType3=0x03, TxDescType4=0x04,
	TxNoDMACompletion=0x08,
	TxDescQAddr64bit=0x80, TxDescQAddr32bit=0,
	TxHiPriFIFOThreshShift=24, TxPadLenShift=16,
	TxDMABurstSizeShift=8,
};

/* Bits in the RxDescQCtrl register. */
enum rx_ctrl_bits {
	RxBufferLenShift=16, RxMinDescrThreshShift=0,
	RxPrefetchMode=0x8000, RxVariableQ=0x2000,
	Rx2048QEntries=0x4000, Rx256QEntries=0,
	RxDescAddr64bit=0x1000, RxDescAddr32bit=0,
	RxDescQAddr64bit=0x0100, RxDescQAddr32bit=0,
	RxDescSpace4=0x000, RxDescSpace8=0x100,
	RxDescSpace16=0x200, RxDescSpace32=0x300,
	RxDescSpace64=0x400, RxDescSpace128=0x500,
	RxConsumerWrEn=0x80,
};

/* Bits in the RxDMACtrl register. */
enum rx_dmactrl_bits {
	RxReportBadFrames=0x80000000, RxDMAShortFrames=0x40000000,
	RxDMABadFrames=0x20000000, RxDMACrcErrorFrames=0x10000000,
	RxDMAControlFrame=0x08000000, RxDMAPauseFrame=0x04000000,
	RxChecksumIgnore=0, RxChecksumRejectTCPUDP=0x02000000,
	RxChecksumRejectTCPOnly=0x01000000,
	RxCompletionQ2Enable=0x800000,
	RxDMAQ2Disable=0, RxDMAQ2FPOnly=0x100000,
	RxDMAQ2SmallPkt=0x200000, RxDMAQ2HighPrio=0x300000,
	RxDMAQ2NonIP=0x400000,
	RxUseBackupQueue=0x080000, RxDMACRC=0x040000,
	RxEarlyIntThreshShift=12, RxHighPrioThreshShift=8,
	RxBurstSizeShift=0,
};

/* Bits in the RxCompletionAddr register */
enum rx_compl_bits {
	RxComplQAddr64bit=0x80, RxComplQAddr32bit=0,
	RxComplProducerWrEn=0x40,
	RxComplType0=0x00, RxComplType1=0x10,
	RxComplType2=0x20, RxComplType3=0x30,
	RxComplThreshShift=0,
};

/* Bits in the TxCompletionAddr register */
enum tx_compl_bits {
	TxComplQAddr64bit=0x80, TxComplQAddr32bit=0,
	TxComplProducerWrEn=0x40,
	TxComplIntrStatus=0x20,
	CommonQueueMode=0x10,
	TxComplThreshShift=0,
};

/* Bits in the GenCtrl register */
enum gen_ctrl_bits {
	RxEnable=0x05, TxEnable=0x0a,
	RxGFPEnable=0x10, TxGFPEnable=0x20,
};

/* Bits in the IntrTimerCtrl register */
enum intr_ctrl_bits {
	Timer10X=0x800, EnableIntrMasking=0x60, SmallFrameBypass=0x100,
	SmallFrame64=0, SmallFrame128=0x200, SmallFrame256=0x400, SmallFrame512=0x600,
	IntrLatencyMask=0x1f,
};

/* The Rx and Tx buffer descriptors. */
struct starfire_rx_desc {
	netdrv_addr_t rxaddr;
};
enum rx_desc_bits {
	RxDescValid=1, RxDescEndRing=2,
};

/* Completion queue entry. */
struct short_rx_done_desc {
	__le32 status;			/* Low 16 bits is length. */
};
struct basic_rx_done_desc {
	__le32 status;			/* Low 16 bits is length. */
	__le16 vlanid;
	__le16 status2;
};
struct csum_rx_done_desc {
	__le32 status;			/* Low 16 bits is length. */
	__le16 csum;			/* Partial checksum */
	__le16 status2;
};
struct full_rx_done_desc {
	__le32 status;			/* Low 16 bits is length. */
	__le16 status3;
	__le16 status2;
	__le16 vlanid;
	__le16 csum;			/* partial checksum */
	__le32 timestamp;
};
#ifdef VLAN_SUPPORT
typedef struct full_rx_done_desc rx_done_desc;
#define RxComplType RxComplType3
#else  /* not VLAN_SUPPORT */
typedef struct csum_rx_done_desc rx_done_desc;
#define RxComplType RxComplType2
#endif /* not VLAN_SUPPORT */

enum rx_done_bits {
	RxOK=0x20000000, RxFIFOErr=0x10000000, RxBufQ2=0x08000000,
};

/* Type 1 Tx descriptor. */
struct starfire_tx_desc_1 {
	__le32 status;			/* Upper bits are status, lower 16 length. */
	__le32 addr;
};

/* Type 2 Tx descriptor. */
struct starfire_tx_desc_2 {
	__le32 status;			/* Upper bits are status, lower 16 length. */
	__le32 reserved;
	__le64 addr;
};

#ifdef ADDR_64BITS
typedef struct starfire_tx_desc_2 starfire_tx_desc;
#define TX_DESC_TYPE TxDescType2
#else  /* not ADDR_64BITS */
typedef struct starfire_tx_desc_1 starfire_tx_desc;
#define TX_DESC_TYPE TxDescType1
#endif /* not ADDR_64BITS */
#define TX_DESC_SPACING TxDescSpaceUnlim

enum tx_desc_bits {
	TxDescID=0xB0000000,
	TxCRCEn=0x01000000, TxDescIntr=0x08000000,
	TxRingWrap=0x04000000, TxCalTCP=0x02000000,
};
struct tx_done_desc {
	__le32 status;			/* timestamp, index. */
};

struct rx_ring_info {
	struct sk_buff *skb;
	dma_addr_t mapping;
};
struct tx_ring_info {
	struct sk_buff *skb;
	dma_addr_t mapping;
	unsigned int used_slots;
};

#define PHY_CNT		2
struct netdev_private {
	/* Descriptor rings first for alignment. */
	struct starfire_rx_desc *rx_ring;
	starfire_tx_desc *tx_ring;
	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;
	/* The addresses of rx/tx-in-place skbuffs. */
	struct rx_ring_info rx_info[RX_RING_SIZE];
	struct tx_ring_info tx_info[TX_RING_SIZE];
	/* Pointers to completion queues (full pages). */
	rx_done_desc *rx_done_q;
	dma_addr_t rx_done_q_dma;
	unsigned int rx_done;
	struct tx_done_desc *tx_done_q;
	dma_addr_t tx_done_q_dma;
	unsigned int tx_done;
	struct napi_struct napi;
	struct net_device *dev;
	struct pci_dev *pci_dev;
#ifdef VLAN_SUPPORT
	struct vlan_group *vlgrp;
#endif
	void *queue_mem;
	dma_addr_t queue_mem_dma;
	size_t queue_mem_size;

	/* Frequently used values: keep some adjacent for cache effect. */
	spinlock_t lock;
	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
	unsigned int cur_tx, dirty_tx, reap_tx;
	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
	/* These values keep track of the transceiver/media in use. */
	int speed100;			/* Set if speed == 100MBit. */
	u32 tx_mode;
	u32 intr_timer_ctrl;
	u8 tx_threshold;
	/* MII transceiver section. */
	struct mii_if_info mii_if;	/* MII lib hooks/info */
	int phy_cnt;			/* MII device addresses. */
	unsigned char phys[PHY_CNT];	/* MII device addresses. */
	void __iomem *base;
};

static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int netdev_open(struct net_device *dev);
static void check_duplex(struct net_device *dev);
static void tx_timeout(struct net_device *dev);
static void init_ring(struct net_device *dev);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
static void netdev_error(struct net_device *dev, int intr_status);
static int __netdev_rx(struct net_device *dev, int *quota);
static int netdev_poll(struct napi_struct *napi, int budget);
static void refill_rx_ring(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int netdev_close(struct net_device *dev);
static void netdev_media_change(struct net_device *dev);
static const struct ethtool_ops ethtool_ops;

#ifdef VLAN_SUPPORT
static void netdev_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct netdev_private *np = netdev_priv(dev);

	spin_lock(&np->lock);
	if (debug > 2)
		printk("%s: Setting vlgrp to %p\n", dev->name, grp);
	np->vlgrp = grp;
	set_rx_mode(dev);
	spin_unlock(&np->lock);
}

static void netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	struct netdev_private *np = netdev_priv(dev);

	spin_lock(&np->lock);
	if (debug > 1)
		printk("%s: Adding vlanid %d to vlan filter\n", dev->name, vid);
	set_rx_mode(dev);
	spin_unlock(&np->lock);
}

static void netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct netdev_private *np = netdev_priv(dev);

	spin_lock(&np->lock);
	if (debug > 1)
		printk("%s: removing vlanid %d from vlan filter\n", dev->name, vid);
	vlan_group_set_device(np->vlgrp, vid, NULL);
	set_rx_mode(dev);
	spin_unlock(&np->lock);
}
#endif /* VLAN_SUPPORT */

static const struct net_device_ops netdev_ops = {
	.ndo_open		= netdev_open,
	.ndo_stop		= netdev_close,
	.ndo_start_xmit		= start_tx,
	.ndo_tx_timeout		= tx_timeout,
	.ndo_get_stats		= get_stats,
	.ndo_set_multicast_list = &set_rx_mode,
	.ndo_do_ioctl		= netdev_ioctl,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef VLAN_SUPPORT
	.ndo_vlan_rx_register	= netdev_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= netdev_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= netdev_vlan_rx_kill_vid,
#endif
};

static int __devinit starfire_init_one(struct pci_dev *pdev,
				       const struct pci_device_id *ent)
{
	struct netdev_private *np;
	int i, irq, option, chip_idx = ent->driver_data;
	struct net_device *dev;
	static int card_idx = -1;
	long ioaddr;
	void __iomem *base;
	int drv_flags, io_size;
	int boguscnt;

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	card_idx++;

	if (pci_enable_device (pdev))
		return -EIO;

	ioaddr = pci_resource_start(pdev, 0);
	io_size = pci_resource_len(pdev, 0);
	if (!ioaddr || ((pci_resource_flags(pdev, 0) & IORESOURCE_MEM) == 0)) {
		printk(KERN_ERR DRV_NAME " %d: no PCI MEM resources, aborting\n", card_idx);
		return -ENODEV;
	}

	dev = alloc_etherdev(sizeof(*np));
	if (!dev) {
		printk(KERN_ERR DRV_NAME " %d: cannot alloc etherdev, aborting\n", card_idx);
		return -ENOMEM;
	}
	SET_NETDEV_DEV(dev, &pdev->dev);

	irq = pdev->irq;

	if (pci_request_regions (pdev, DRV_NAME)) {
		printk(KERN_ERR DRV_NAME " %d: cannot reserve PCI resources, aborting\n", card_idx);
		goto err_out_free_netdev;
	}

	base = ioremap(ioaddr, io_size);
	if (!base) {
		printk(KERN_ERR DRV_NAME " %d: cannot remap %#x @ %#lx, aborting\n",
			card_idx, io_size, ioaddr);
		goto err_out_free_res;
	}

	pci_set_master(pdev);

	/* enable MWI -- it vastly improves Rx performance on sparc64 */
	pci_try_set_mwi(pdev);

#ifdef ZEROCOPY
	/* Starfire can do TCP/UDP checksumming */
	if (enable_hw_cksum)
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
#endif /* ZEROCOPY */

#ifdef VLAN_SUPPORT
	dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
#endif /* VLAN_SUPPORT */
#ifdef ADDR_64BITS
	dev->features |= NETIF_F_HIGHDMA;
#endif /* ADDR_64BITS */

	/* Serial EEPROM reads are hidden by the hardware. */
	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = readb(base + EEPROMCtrl + 20 - i);

#if !defined(final_version) /* Dump the EEPROM contents during development. */
	if (debug > 4)
		for (i = 0; i < 0x20; i++)
			printk("%2.2x%s",
			       (unsigned int)readb(base + EEPROMCtrl + i),
			       i % 16 != 15 ? " " : "\n");
#endif

	/* Issue soft reset */
	writel(MiiSoftReset, base + TxMode);
	udelay(1000);
	writel(0, base + TxMode);

	/* Reset the chip to erase previous misconfiguration. */
	writel(1, base + PCIDeviceConfig);
	boguscnt = 1000;
	while (--boguscnt > 0) {
		udelay(10);
		if ((readl(base + PCIDeviceConfig) & 1) == 0)
			break;
	}
	if (boguscnt == 0)
		printk("%s: chipset reset never completed!\n", dev->name);
	/* wait a little longer */
	udelay(1000);

	dev->base_addr = (unsigned long)base;
	dev->irq = irq;

	np = netdev_priv(dev);
	np->dev = dev;
	np->base = base;
	spin_lock_init(&np->lock);
	pci_set_drvdata(pdev, dev);

	np->pci_dev = pdev;

	np->mii_if.dev = dev;
	np->mii_if.mdio_read = mdio_read;
	np->mii_if.mdio_write = mdio_write;
	np->mii_if.phy_id_mask = 0x1f;
	np->mii_if.reg_num_mask = 0x1f;

	drv_flags = netdrv_tbl[chip_idx].drv_flags;

	option = card_idx < MAX_UNITS ? options[card_idx] : 0;
	if (dev->mem_start)
		option = dev->mem_start;

	/* The lower four bits are the media type. */
	if (option & 0x200)
		np->mii_if.full_duplex = 1;

	if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
		np->mii_if.full_duplex = 1;

	if (np->mii_if.full_duplex)
		np->mii_if.force_media = 1;
	else
		np->mii_if.force_media = 0;
	np->speed100 = 1;

	/* timer resolution is 128 * 0.8us */
	np->intr_timer_ctrl = (((intr_latency * 10) / 1024) & IntrLatencyMask) |
		Timer10X | EnableIntrMasking;
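	/*
	 * One timer tick is 128 * 0.8us = 102.4us, so (intr_latency * 10) / 1024
	 * approximates intr_latency / 102.4: the requested latency in
	 * microseconds converted to whole timer ticks.
	 */
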
	if (small_frames > 0) {
		np->intr_timer_ctrl |= SmallFrameBypass;
		switch (small_frames) {
		case 1 ... 64:
			np->intr_timer_ctrl |= SmallFrame64;
			break;
		case 65 ... 128:
			np->intr_timer_ctrl |= SmallFrame128;
			break;
		case 129 ... 256:
			np->intr_timer_ctrl |= SmallFrame256;
			break;
		default:
			np->intr_timer_ctrl |= SmallFrame512;
			if (small_frames > 512)
				printk("Adjusting small_frames down to 512\n");
			break;
		}
	}

	dev->netdev_ops = &netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	SET_ETHTOOL_OPS(dev, &ethtool_ops);

	netif_napi_add(dev, &np->napi, netdev_poll, max_interrupt_work);

	if (mtu)
		dev->mtu = mtu;

	if (register_netdev(dev))
		goto err_out_cleardev;

	printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
	       dev->name, netdrv_tbl[chip_idx].name, base,
	       dev->dev_addr, irq);

	if (drv_flags & CanHaveMII) {
		int phy, phy_idx = 0;
		int mii_status;
		for (phy = 0; phy < 32 && phy_idx < PHY_CNT; phy++) {
			mdio_write(dev, phy, MII_BMCR, BMCR_RESET);
			mdelay(100);
			boguscnt = 1000;
			while (--boguscnt > 0)
				if ((mdio_read(dev, phy, MII_BMCR) & BMCR_RESET) == 0)
					break;
			if (boguscnt == 0) {
				printk("%s: PHY#%d reset never completed!\n", dev->name, phy);
				continue;
			}
			mii_status = mdio_read(dev, phy, MII_BMSR);
			if (mii_status != 0) {
				np->phys[phy_idx++] = phy;
				np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
				printk(KERN_INFO "%s: MII PHY found at address %d, status "
				       "%#4.4x advertising %#4.4x.\n",
				       dev->name, phy, mii_status, np->mii_if.advertising);
				/* there can be only one PHY on-board */
				break;
			}
		}
		np->phy_cnt = phy_idx;
		if (np->phy_cnt > 0)
			np->mii_if.phy_id = np->phys[0];
		else
			memset(&np->mii_if, 0, sizeof(np->mii_if));
	}

	printk(KERN_INFO "%s: scatter-gather and hardware TCP cksumming %s.\n",
	       dev->name, enable_hw_cksum ? "enabled" : "disabled");
	return 0;

err_out_cleardev:
	pci_set_drvdata(pdev, NULL);
	iounmap(base);
err_out_free_res:
	pci_release_regions (pdev);
err_out_free_netdev:
	free_netdev(dev);
	return -ENODEV;
}

/* Read the MII Management Data I/O (MDIO) interfaces. */
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base + MIICtrl + (phy_id<<7) + (location<<2);
	int result, boguscnt=1000;
	/* ??? Should we add a busy-wait here? */
	do {
		result = readl(mdio_addr);
	} while ((result & 0xC0000000) != 0x80000000 && --boguscnt > 0);
	if (boguscnt == 0)
		return 0;
	if ((result & 0xffff) == 0xffff)
		return 0;
	return result & 0xffff;
}


static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base + MIICtrl + (phy_id<<7) + (location<<2);
	writel(value, mdio_addr);
	/* The busy-wait will occur before a read. */
}

static int netdev_open(struct net_device *dev)
{
	const struct firmware *fw_rx, *fw_tx;
	const __be32 *fw_rx_data, *fw_tx_data;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int i, retval;
	size_t tx_size, rx_size;
	size_t tx_done_q_size, rx_done_q_size, tx_ring_size, rx_ring_size;

	/* Do we ever need to reset the chip??? */

	retval = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev);
	if (retval)
		return retval;

	/* Disable the Rx and Tx, and reset the chip. */
	writel(0, ioaddr + GenCtrl);
	writel(1, ioaddr + PCIDeviceConfig);
	if (debug > 1)
		printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
		       dev->name, dev->irq);

	/* Allocate the various queues. */
	if (!np->queue_mem) {
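		/*
		 * Each queue must start on a QUEUE_ALIGN (256-byte) boundary.
		 * Rounding every size up to a multiple of QUEUE_ALIGN keeps
		 * each subsequent queue aligned when they are all carved out
		 * of the single allocation below.
		 */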
		tx_done_q_size = ((sizeof(struct tx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
		rx_done_q_size = ((sizeof(rx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
		tx_ring_size = ((sizeof(starfire_tx_desc) * TX_RING_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
		rx_ring_size = sizeof(struct starfire_rx_desc) * RX_RING_SIZE;
		np->queue_mem_size = tx_done_q_size + rx_done_q_size + tx_ring_size + rx_ring_size;
		np->queue_mem = pci_alloc_consistent(np->pci_dev, np->queue_mem_size, &np->queue_mem_dma);
		if (np->queue_mem == NULL) {
			free_irq(dev->irq, dev);
			return -ENOMEM;
		}

		np->tx_done_q     = np->queue_mem;
		np->tx_done_q_dma = np->queue_mem_dma;
		np->rx_done_q     = (void *) np->tx_done_q + tx_done_q_size;
		np->rx_done_q_dma = np->tx_done_q_dma + tx_done_q_size;
		np->tx_ring       = (void *) np->rx_done_q + rx_done_q_size;
		np->tx_ring_dma   = np->rx_done_q_dma + rx_done_q_size;
		np->rx_ring       = (void *) np->tx_ring + tx_ring_size;
		np->rx_ring_dma   = np->tx_ring_dma + tx_ring_size;
	}

	/* Start with no carrier, it gets adjusted later */
	netif_carrier_off(dev);
	init_ring(dev);
	/* Set the size of the Rx buffers. */
	writel((np->rx_buf_sz << RxBufferLenShift) |
	       (0 << RxMinDescrThreshShift) |
	       RxPrefetchMode | RxVariableQ |
	       RX_Q_ENTRIES |
	       RX_DESC_Q_ADDR_SIZE | RX_DESC_ADDR_SIZE |
	       RxDescSpace4,
	       ioaddr + RxDescQCtrl);

	/* Set up the Rx DMA controller. */
	writel(RxChecksumIgnore |
	       (0 << RxEarlyIntThreshShift) |
	       (6 << RxHighPrioThreshShift) |
	       ((DMA_BURST_SIZE / 32) << RxBurstSizeShift),
	       ioaddr + RxDMACtrl);

	/* Set Tx descriptor */
	writel((2 << TxHiPriFIFOThreshShift) |
	       (0 << TxPadLenShift) |
	       ((DMA_BURST_SIZE / 32) << TxDMABurstSizeShift) |
	       TX_DESC_Q_ADDR_SIZE |
	       TX_DESC_SPACING | TX_DESC_TYPE,
	       ioaddr + TxDescCtrl);

	writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + RxDescQHiAddr);
	writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + TxRingHiAddr);
	writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + CompletionHiAddr);
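	/*
	 * The double 16-bit shift above writes the upper 32 bits of the queue
	 * base address; a single ">> 32" would be undefined behavior when
	 * dma_addr_t is only 32 bits wide.
	 */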
	writel(np->rx_ring_dma, ioaddr + RxDescQAddr);
	writel(np->tx_ring_dma, ioaddr + TxRingPtr);

	writel(np->tx_done_q_dma, ioaddr + TxCompletionAddr);
	writel(np->rx_done_q_dma |
	       RxComplType |
	       (0 << RxComplThreshShift),
	       ioaddr + RxCompletionAddr);

	if (debug > 1)
		printk(KERN_DEBUG "%s: Filling in the station address.\n", dev->name);

	/* Fill both the Tx SA register and the Rx perfect filter. */
	for (i = 0; i < 6; i++)
		writeb(dev->dev_addr[i], ioaddr + TxStationAddr + 5 - i);
	/* The first entry is special because it bypasses the VLAN filter.
	   Don't use it. */
	writew(0, ioaddr + PerfFilterTable);
	writew(0, ioaddr + PerfFilterTable + 4);
	writew(0, ioaddr + PerfFilterTable + 8);
	for (i = 1; i < 16; i++) {
		__be16 *eaddrs = (__be16 *)dev->dev_addr;
		void __iomem *setup_frm = ioaddr + PerfFilterTable + i * 16;
		writew(be16_to_cpu(eaddrs[2]), setup_frm); setup_frm += 4;
		writew(be16_to_cpu(eaddrs[1]), setup_frm); setup_frm += 4;
		writew(be16_to_cpu(eaddrs[0]), setup_frm); setup_frm += 8;
	}

	/* Initialize other registers. */
	/* Configure the PCI bus bursts and FIFO thresholds. */
	np->tx_mode = TxFlowEnable|RxFlowEnable|PadEnable;	/* modified when link is up. */
	writel(MiiSoftReset | np->tx_mode, ioaddr + TxMode);
	udelay(1000);
	writel(np->tx_mode, ioaddr + TxMode);
	np->tx_threshold = 4;
	writel(np->tx_threshold, ioaddr + TxThreshold);

	writel(np->intr_timer_ctrl, ioaddr + IntrTimerCtrl);

	napi_enable(&np->napi);

	netif_start_queue(dev);

	if (debug > 1)
		printk(KERN_DEBUG "%s: Setting the Rx and Tx modes.\n", dev->name);
	set_rx_mode(dev);

	np->mii_if.advertising = mdio_read(dev, np->phys[0], MII_ADVERTISE);
	check_duplex(dev);

	/* Enable GPIO interrupts on link change */
	writel(0x0f00ff00, ioaddr + GPIOCtrl);

	/* Set the interrupt mask */
	writel(IntrRxDone | IntrRxEmpty | IntrDMAErr |
	       IntrTxDMADone | IntrStatsMax | IntrLinkChange |
	       IntrRxGFPDead | IntrNoTxCsum | IntrTxBadID,
	       ioaddr + IntrEnable);
	/* Enable PCI interrupts. */
	writel(0x00800000 | readl(ioaddr + PCIDeviceConfig),
	       ioaddr + PCIDeviceConfig);

#ifdef VLAN_SUPPORT
	/* Set VLAN type to 802.1q */
	writel(ETH_P_8021Q, ioaddr + VlanType);
#endif /* VLAN_SUPPORT */

	retval = request_firmware(&fw_rx, FIRMWARE_RX, &np->pci_dev->dev);
	if (retval) {
		printk(KERN_ERR "starfire: Failed to load firmware \"%s\"\n",
		       FIRMWARE_RX);
		goto out_init;
	}
	if (fw_rx->size % 4) {
		printk(KERN_ERR "starfire: bogus length %zu in \"%s\"\n",
		       fw_rx->size, FIRMWARE_RX);
		retval = -EINVAL;
		goto out_rx;
	}
	retval = request_firmware(&fw_tx, FIRMWARE_TX, &np->pci_dev->dev);
	if (retval) {
		printk(KERN_ERR "starfire: Failed to load firmware \"%s\"\n",
		       FIRMWARE_TX);
		goto out_rx;
	}
	if (fw_tx->size % 4) {
		printk(KERN_ERR "starfire: bogus length %zu in \"%s\"\n",
		       fw_tx->size, FIRMWARE_TX);
		retval = -EINVAL;
		goto out_tx;
	}
	fw_rx_data = (const __be32 *)&fw_rx->data[0];
	fw_tx_data = (const __be32 *)&fw_tx->data[0];
	rx_size = fw_rx->size / 4;
	tx_size = fw_tx->size / 4;

	/* Load Rx/Tx firmware into the frame processors */
	for (i = 0; i < rx_size; i++)
		writel(be32_to_cpup(&fw_rx_data[i]), ioaddr + RxGfpMem + i * 4);
	for (i = 0; i < tx_size; i++)
		writel(be32_to_cpup(&fw_tx_data[i]), ioaddr + TxGfpMem + i * 4);
	if (enable_hw_cksum)
		/* Enable the Rx and Tx units, and the Rx/Tx frame processors. */
		writel(TxEnable|TxGFPEnable|RxEnable|RxGFPEnable, ioaddr + GenCtrl);
	else
		/* Enable the Rx and Tx units only. */
		writel(TxEnable|RxEnable, ioaddr + GenCtrl);

	if (debug > 1)
		printk(KERN_DEBUG "%s: Done netdev_open().\n",
		       dev->name);

out_tx:
	release_firmware(fw_tx);
out_rx:
	release_firmware(fw_rx);
out_init:
	if (retval)
		netdev_close(dev);
	return retval;
}

static void check_duplex(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	u16 reg0;
	int silly_count = 1000;

	mdio_write(dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising);
	mdio_write(dev, np->phys[0], MII_BMCR, BMCR_RESET);
	udelay(500);
	while (--silly_count && mdio_read(dev, np->phys[0], MII_BMCR) & BMCR_RESET)
		/* do nothing */;
	if (!silly_count) {
		printk("%s: MII reset failed!\n", dev->name);
		return;
	}

	reg0 = mdio_read(dev, np->phys[0], MII_BMCR);

	if (!np->mii_if.force_media) {
		reg0 |= BMCR_ANENABLE | BMCR_ANRESTART;
	} else {
		reg0 &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
		if (np->speed100)
			reg0 |= BMCR_SPEED100;
		if (np->mii_if.full_duplex)
			reg0 |= BMCR_FULLDPLX;
		printk(KERN_DEBUG "%s: Link forced to %sMbit %s-duplex\n",
		       dev->name,
		       np->speed100 ? "100" : "10",
		       np->mii_if.full_duplex ? "full" : "half");
	}
	mdio_write(dev, np->phys[0], MII_BMCR, reg0);
}

static void tx_timeout(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int old_debug;

	printk(KERN_WARNING "%s: Transmit timed out, status %#8.8x, "
	       "resetting...\n", dev->name, (int) readl(ioaddr + IntrStatus));

	/* Perhaps we should reinitialize the hardware here. */

	/*
	 * Stop and restart the interface.
	 * Cheat and increase the debug level temporarily.
	 */
	old_debug = debug;
	debug = 2;
	netdev_close(dev);
	netdev_open(dev);
	debug = old_debug;

	/* Trigger an immediate transmit demand. */

	dev->trans_start = jiffies; /* prevent tx timeout */
	dev->stats.tx_errors++;
	netif_wake_queue(dev);
}

/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	np->cur_rx = np->cur_tx = np->reap_tx = 0;
	np->dirty_rx = np->dirty_tx = np->rx_done = np->tx_done = 0;

	np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
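	/*
	 * PKT_BUF_SZ (1536) already covers a 1500-byte MTU plus link-layer
	 * overhead; for larger MTUs the extra 32 bytes are assumed to be
	 * slack for the Ethernet header and trailer.
	 */
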
	/* Fill in the Rx buffers. Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
		np->rx_info[i].skb = skb;
		if (skb == NULL)
			break;
		np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
		skb->dev = dev;			/* Mark as being used by this device. */
		/* Grrr, we cannot offset to correctly align the IP header. */
		np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
	}
	writew(i - 1, np->base + RxDescQIdx);
	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	/* Clear the remainder of the Rx buffer ring. */
	for (  ; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].rxaddr = 0;
		np->rx_info[i].skb = NULL;
		np->rx_info[i].mapping = 0;
	}
	/* Mark the last entry as wrapping the ring. */
	np->rx_ring[RX_RING_SIZE - 1].rxaddr |= cpu_to_dma(RxDescEndRing);

	/* Clear the completion rings. */
	for (i = 0; i < DONE_Q_SIZE; i++) {
		np->rx_done_q[i].status = 0;
		np->tx_done_q[i].status = 0;
	}

	for (i = 0; i < TX_RING_SIZE; i++)
		memset(&np->tx_info[i], 0, sizeof(np->tx_info[i]));
}

static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	unsigned int entry;
	u32 status;
	int i;

	/*
	 * be cautious here, wrapping the queue has weird semantics
	 * and we may not have enough slots even when it seems we do.
	 */
	if ((np->cur_tx - np->dirty_tx) + skb_num_frags(skb) * 2 > TX_RING_SIZE) {
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

#if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb_padto(skb, (skb->len + PADDING_MASK) & ~PADDING_MASK))
			return NETDEV_TX_OK;
	}
#endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */

	entry = np->cur_tx % TX_RING_SIZE;
	for (i = 0; i < skb_num_frags(skb); i++) {
		int wrap_ring = 0;
		status = TxDescID;

		if (i == 0) {
			np->tx_info[entry].skb = skb;
			status |= TxCRCEn;
			if (entry >= TX_RING_SIZE - skb_num_frags(skb)) {
				status |= TxRingWrap;
				wrap_ring = 1;
			}
			if (np->reap_tx) {
				status |= TxDescIntr;
				np->reap_tx = 0;
			}
			if (skb->ip_summed == CHECKSUM_PARTIAL) {
				status |= TxCalTCP;
				dev->stats.tx_compressed++;
			}
			status |= skb_first_frag_len(skb) | (skb_num_frags(skb) << 16);

			np->tx_info[entry].mapping =
				pci_map_single(np->pci_dev, skb->data, skb_first_frag_len(skb), PCI_DMA_TODEVICE);
		} else {
			skb_frag_t *this_frag = &skb_shinfo(skb)->frags[i - 1];
			status |= this_frag->size;
			np->tx_info[entry].mapping =
				pci_map_single(np->pci_dev, page_address(this_frag->page) + this_frag->page_offset, this_frag->size, PCI_DMA_TODEVICE);
		}

		np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping);
		np->tx_ring[entry].status = cpu_to_le32(status);
		if (debug > 3)
			printk(KERN_DEBUG "%s: Tx #%d/#%d slot %d status %#8.8x.\n",
			       dev->name, np->cur_tx, np->dirty_tx,
			       entry, status);
		if (wrap_ring) {
			np->tx_info[entry].used_slots = TX_RING_SIZE - entry;
			np->cur_tx += np->tx_info[entry].used_slots;
			entry = 0;
		} else {
			np->tx_info[entry].used_slots = 1;
			np->cur_tx += np->tx_info[entry].used_slots;
			entry++;
		}
		/* scavenge the tx descriptors twice per TX_RING_SIZE */
		if (np->cur_tx % (TX_RING_SIZE / 2) == 0)
			np->reap_tx = 1;
	}

	/* Non-x86: explicitly flush descriptor cache lines here. */
	/* Ensure all descriptors are written back before the transmit is
	   initiated. - Jes */
	wmb();

	/* Update the producer index. */
	writel(entry * (sizeof(starfire_tx_desc) / 8), np->base + TxProducerIdx);
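	/*
	 * The scaling above looks like a conversion of the ring index into
	 * 8-byte units (a type 1 descriptor is 8 bytes, a type 2 descriptor
	 * 16); this reading is an inference from the arithmetic, not a
	 * statement from the Adaptec manual.
	 */
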
	/* 4 is arbitrary, but should be ok */
	if ((np->cur_tx - np->dirty_tx) + 4 > TX_RING_SIZE)
		netif_stop_queue(dev);

	return NETDEV_TX_OK;
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t intr_handler(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int boguscnt = max_interrupt_work;
	int consumer;
	int tx_status;
	int handled = 0;

	do {
		u32 intr_status = readl(ioaddr + IntrClear);

		if (debug > 4)
			printk(KERN_DEBUG "%s: Interrupt status %#8.8x.\n",
			       dev->name, intr_status);

		if (intr_status == 0 || intr_status == (u32) -1)
			break;

		handled = 1;

		if (intr_status & (IntrRxDone | IntrRxEmpty)) {
			u32 enable;
			if (likely(napi_schedule_prep(&np->napi))) {
				__napi_schedule(&np->napi);
				enable = readl(ioaddr + IntrEnable);
				enable &= ~(IntrRxDone | IntrRxEmpty);
				writel(enable, ioaddr + IntrEnable);
				/* flush PCI posting buffers */
				readl(ioaddr + IntrEnable);
			} else {
				/* Paranoia check */
				enable = readl(ioaddr + IntrEnable);
				if (enable & (IntrRxDone | IntrRxEmpty)) {
					printk(KERN_INFO
					       "%s: interrupt while in poll!\n",
					       dev->name);
					enable &= ~(IntrRxDone | IntrRxEmpty);
					writel(enable, ioaddr + IntrEnable);
				}
			}
		}

		/* Scavenge the skbuff list based on the Tx-done queue.
		   There are redundant checks here that may be cleaned up
		   after the driver has proven to be reliable. */
		consumer = readl(ioaddr + TxConsumerIdx);
		if (debug > 3)
			printk(KERN_DEBUG "%s: Tx Consumer index is %d.\n",
			       dev->name, consumer);

		while ((tx_status = le32_to_cpu(np->tx_done_q[np->tx_done].status)) != 0) {
			if (debug > 3)
				printk(KERN_DEBUG "%s: Tx completion #%d entry %d is %#8.8x.\n",
				       dev->name, np->dirty_tx, np->tx_done, tx_status);
			if ((tx_status & 0xe0000000) == 0xa0000000) {
				dev->stats.tx_packets++;
			} else if ((tx_status & 0xe0000000) == 0x80000000) {
				u16 entry = (tx_status & 0x7fff) / sizeof(starfire_tx_desc);
				struct sk_buff *skb = np->tx_info[entry].skb;
				np->tx_info[entry].skb = NULL;
				pci_unmap_single(np->pci_dev,
						 np->tx_info[entry].mapping,
						 skb_first_frag_len(skb),
						 PCI_DMA_TODEVICE);
				np->tx_info[entry].mapping = 0;
				np->dirty_tx += np->tx_info[entry].used_slots;
				entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
				{
					int i;
					for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
						pci_unmap_single(np->pci_dev,
								 np->tx_info[entry].mapping,
								 skb_shinfo(skb)->frags[i].size,
								 PCI_DMA_TODEVICE);
						np->dirty_tx++;
						entry++;
					}
				}

				dev_kfree_skb_irq(skb);
			}
			np->tx_done_q[np->tx_done].status = 0;
			np->tx_done = (np->tx_done + 1) % DONE_Q_SIZE;
		}
		writew(np->tx_done, ioaddr + CompletionQConsumerIdx + 2);

		if (netif_queue_stopped(dev) &&
		    (np->cur_tx - np->dirty_tx + 4 < TX_RING_SIZE)) {
			/* The ring is no longer full, wake the queue. */
			netif_wake_queue(dev);
		}

		/* Stats overflow */
		if (intr_status & IntrStatsMax)
			get_stats(dev);

		/* Media change interrupt. */
		if (intr_status & IntrLinkChange)
			netdev_media_change(dev);

		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & IntrAbnormalSummary)
			netdev_error(dev, intr_status);

		if (--boguscnt < 0) {
			if (debug > 1)
				printk(KERN_WARNING "%s: Too much work at interrupt, "
				       "status=%#8.8x.\n",
				       dev->name, intr_status);
			break;
		}
	} while (1);

	if (debug > 4)
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#8.8x.\n",
		       dev->name, (int) readl(ioaddr + IntrStatus));
	return IRQ_RETVAL(handled);
}

/*
 * This routine is logically part of the interrupt/poll handler, but separated
 * for clarity and better register allocation.
 */
static int __netdev_rx(struct net_device *dev, int *quota)
{
	struct netdev_private *np = netdev_priv(dev);
	u32 desc_status;
	int retcode = 0;

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while ((desc_status = le32_to_cpu(np->rx_done_q[np->rx_done].status)) != 0) {
		struct sk_buff *skb;
		u16 pkt_len;
		int entry;
		rx_done_desc *desc = &np->rx_done_q[np->rx_done];

		if (debug > 4)
			printk(KERN_DEBUG "  netdev_rx() status of %d was %#8.8x.\n", np->rx_done, desc_status);
		if (!(desc_status & RxOK)) {
			/* There was an error. */
			if (debug > 2)
				printk(KERN_DEBUG "  netdev_rx() Rx error was %#8.8x.\n", desc_status);
			dev->stats.rx_errors++;
			if (desc_status & RxFIFOErr)
				dev->stats.rx_fifo_errors++;
			goto next_rx;
		}

		if (*quota <= 0) {	/* out of rx quota */
			retcode = 1;
			goto out;
		}
		(*quota)--;

		pkt_len = desc_status;	/* Implicitly Truncate */
		entry = (desc_status >> 16) & 0x7ff;

		if (debug > 4)
			printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d, quota %d.\n", pkt_len, *quota);
		/* Check if the packet is long enough to accept without copying
		   to a minimally-sized skbuff. */
		if (pkt_len < rx_copybreak &&
		    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
			skb_reserve(skb, 2);	/* 16 byte align the IP header */
			pci_dma_sync_single_for_cpu(np->pci_dev,
						    np->rx_info[entry].mapping,
						    pkt_len, PCI_DMA_FROMDEVICE);
			skb_copy_to_linear_data(skb, np->rx_info[entry].skb->data, pkt_len);
			pci_dma_sync_single_for_device(np->pci_dev,
						       np->rx_info[entry].mapping,
						       pkt_len, PCI_DMA_FROMDEVICE);
			skb_put(skb, pkt_len);
		} else {
			pci_unmap_single(np->pci_dev, np->rx_info[entry].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
			skb = np->rx_info[entry].skb;
			skb_put(skb, pkt_len);
			np->rx_info[entry].skb = NULL;
			np->rx_info[entry].mapping = 0;
		}
#ifndef final_version			/* Remove after testing. */
		/* You will want this info for the initial debug. */
		if (debug > 5) {
			printk(KERN_DEBUG "  Rx data %pM %pM %2.2x%2.2x.\n",
			       skb->data, skb->data + 6,
			       skb->data[12], skb->data[13]);
		}
#endif

		skb->protocol = eth_type_trans(skb, dev);
#ifdef VLAN_SUPPORT
		if (debug > 4)
			printk(KERN_DEBUG "  netdev_rx() status2 of %d was %#4.4x.\n", np->rx_done, le16_to_cpu(desc->status2));
#endif
		if (le16_to_cpu(desc->status2) & 0x0100) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			dev->stats.rx_compressed++;
		}
		/*
		 * This feature doesn't seem to be working, at least
		 * with the two firmware versions I have. If the GFP sees
		 * an IP fragment, it either ignores it completely, or reports
		 * "bad checksum" on it.
		 *
		 * Maybe I missed something -- corrections are welcome.
		 * Until then, the printk stays. :-) -Ion
		 */
		else if (le16_to_cpu(desc->status2) & 0x0040) {
			skb->ip_summed = CHECKSUM_COMPLETE;
			skb->csum = le16_to_cpu(desc->csum);
			printk(KERN_DEBUG "%s: checksum_hw, status2 = %#x\n", dev->name, le16_to_cpu(desc->status2));
		}
#ifdef VLAN_SUPPORT
		if (np->vlgrp && le16_to_cpu(desc->status2) & 0x0200) {
			u16 vlid = le16_to_cpu(desc->vlanid);

			if (debug > 4) {
				printk(KERN_DEBUG "  netdev_rx() vlanid = %d\n",
				       vlid);
			}
			/*
			 * vlan_hwaccel_rx expects a packet with the VLAN tag
			 * stripped out.
			 */
			vlan_hwaccel_rx(skb, np->vlgrp, vlid);
		} else
#endif /* VLAN_SUPPORT */
			netif_receive_skb(skb);
		dev->stats.rx_packets++;

	next_rx:
		np->cur_rx++;
		desc->status = 0;
		np->rx_done = (np->rx_done + 1) % DONE_Q_SIZE;
	}

	if (*quota == 0) {	/* out of rx quota */
		retcode = 1;
		goto out;
	}
	writew(np->rx_done, np->base + CompletionQConsumerIdx);

 out:
	refill_rx_ring(dev);
	if (debug > 5)
		printk(KERN_DEBUG "  exiting netdev_rx(): %d, status of %d was %#8.8x.\n",
		       retcode, np->rx_done, desc_status);
	return retcode;
}

static int netdev_poll(struct napi_struct *napi, int budget)
{
	struct netdev_private *np = container_of(napi, struct netdev_private, napi);
	struct net_device *dev = np->dev;
	u32 intr_status;
	void __iomem *ioaddr = np->base;
	int quota = budget;

	do {
		writel(IntrRxDone | IntrRxEmpty, ioaddr + IntrClear);

		if (__netdev_rx(dev, &quota))
			goto out;

		intr_status = readl(ioaddr + IntrStatus);
	} while (intr_status & (IntrRxDone | IntrRxEmpty));

	napi_complete(napi);
	intr_status = readl(ioaddr + IntrEnable);
	intr_status |= IntrRxDone | IntrRxEmpty;
	writel(intr_status, ioaddr + IntrEnable);

 out:
	if (debug > 5)
		printk(KERN_DEBUG "  exiting netdev_poll(): %d.\n",
		       budget - quota);

	/* Restart Rx engine if stopped. */
	return budget - quota;
}

static void refill_rx_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	struct sk_buff *skb;
	int entry = -1;

	/* Refill the Rx ring buffers. */
	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
		entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_info[entry].skb == NULL) {
			skb = dev_alloc_skb(np->rx_buf_sz);
			np->rx_info[entry].skb = skb;
			if (skb == NULL)
				break;	/* Better luck next round. */
			np->rx_info[entry].mapping =
				pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
			skb->dev = dev;	/* Mark as being used by this device. */
			np->rx_ring[entry].rxaddr =
				cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
		}
		if (entry == RX_RING_SIZE - 1)
			np->rx_ring[entry].rxaddr |= cpu_to_dma(RxDescEndRing);
	}
	if (entry >= 0)
		writew(entry, np->base + RxDescQIdx);
}

static void netdev_media_change(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u16 reg0, reg1, reg4, reg5;
	u32 new_tx_mode;
	u32 new_intr_timer_ctrl;

	/* reset status first */
	mdio_read(dev, np->phys[0], MII_BMCR);
	mdio_read(dev, np->phys[0], MII_BMSR);

	reg0 = mdio_read(dev, np->phys[0], MII_BMCR);
	reg1 = mdio_read(dev, np->phys[0], MII_BMSR);

	if (reg1 & BMSR_LSTATUS) {
		/* link is up */
		if (reg0 & BMCR_ANENABLE) {
			/* autonegotiation is enabled */
			reg4 = mdio_read(dev, np->phys[0], MII_ADVERTISE);
			reg5 = mdio_read(dev, np->phys[0], MII_LPA);
			if (reg4 & ADVERTISE_100FULL && reg5 & LPA_100FULL) {
				np->speed100 = 1;
				np->mii_if.full_duplex = 1;
			} else if (reg4 & ADVERTISE_100HALF && reg5 & LPA_100HALF) {
				np->speed100 = 1;
				np->mii_if.full_duplex = 0;
			} else if (reg4 & ADVERTISE_10FULL && reg5 & LPA_10FULL) {
				np->speed100 = 0;
				np->mii_if.full_duplex = 1;
			} else {
				np->speed100 = 0;
				np->mii_if.full_duplex = 0;
			}
		} else {
			/* autonegotiation is disabled */
			if (reg0 & BMCR_SPEED100)
				np->speed100 = 1;
			else
				np->speed100 = 0;
			if (reg0 & BMCR_FULLDPLX)
				np->mii_if.full_duplex = 1;
			else
				np->mii_if.full_duplex = 0;
		}
		netif_carrier_on(dev);
		printk(KERN_DEBUG "%s: Link is up, running at %sMbit %s-duplex\n",
		       dev->name,
		       np->speed100 ? "100" : "10",
		       np->mii_if.full_duplex ? "full" : "half");

		new_tx_mode = np->tx_mode & ~FullDuplex;	/* duplex setting */
		if (np->mii_if.full_duplex)
			new_tx_mode |= FullDuplex;
		if (np->tx_mode != new_tx_mode) {
			np->tx_mode = new_tx_mode;
			writel(np->tx_mode | MiiSoftReset, ioaddr + TxMode);
			udelay(1000);
			writel(np->tx_mode, ioaddr + TxMode);
		}

		new_intr_timer_ctrl = np->intr_timer_ctrl & ~Timer10X;
		if (np->speed100)
			new_intr_timer_ctrl |= Timer10X;
		if (np->intr_timer_ctrl != new_intr_timer_ctrl) {
			np->intr_timer_ctrl = new_intr_timer_ctrl;
			writel(new_intr_timer_ctrl, ioaddr + IntrTimerCtrl);
		}
	} else {
		netif_carrier_off(dev);
		printk(KERN_DEBUG "%s: Link is down\n", dev->name);
	}
}

static void netdev_error(struct net_device *dev, int intr_status)
{
	struct netdev_private *np = netdev_priv(dev);

	/* Came close to underrunning the Tx FIFO, increase threshold. */
	if (intr_status & IntrTxDataLow) {
		if (np->tx_threshold <= PKT_BUF_SZ / 16) {
			writel(++np->tx_threshold, np->base + TxThreshold);
			printk(KERN_NOTICE "%s: PCI bus congestion, increasing Tx FIFO threshold to %d bytes\n",
			       dev->name, np->tx_threshold * 16);
		} else
			printk(KERN_WARNING "%s: PCI Tx underflow -- adapter is probably malfunctioning\n", dev->name);
	}
	if (intr_status & IntrRxGFPDead) {
		dev->stats.rx_fifo_errors++;
		dev->stats.rx_errors++;
	}
	if (intr_status & (IntrNoTxCsum | IntrDMAErr)) {
		dev->stats.tx_fifo_errors++;
		dev->stats.tx_errors++;
	}
	if ((intr_status & ~(IntrNormalMask | IntrAbnormalSummary | IntrLinkChange | IntrStatsMax | IntrTxDataLow | IntrRxGFPDead | IntrNoTxCsum | IntrPCIPad)) && debug)
		printk(KERN_ERR "%s: Something Wicked happened! %#8.8x.\n",
		       dev->name, intr_status);
}

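/*
 * Note on the Tx threshold escalation above: the value written to
 * TxThreshold is in 16-byte units, hence the "* 16" in the message.
 * For example, a threshold of 32 makes the chip buffer 512 bytes of a
 * frame before starting transmission.  The cap of PKT_BUF_SZ / 16
 * (96, i.e. 1536 bytes) stops the escalation once a whole packet
 * buffer would have to be fetched before transmit anyway.
 */
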
static struct net_device_stats *get_stats(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;

	/* This adapter architecture needs no SMP locks. */
	dev->stats.tx_bytes = readl(ioaddr + 0x57010);
	dev->stats.rx_bytes = readl(ioaddr + 0x57044);
	dev->stats.tx_packets = readl(ioaddr + 0x57000);
	dev->stats.tx_aborted_errors =
		readl(ioaddr + 0x57024) + readl(ioaddr + 0x57028);
	dev->stats.tx_window_errors = readl(ioaddr + 0x57018);
	dev->stats.collisions =
		readl(ioaddr + 0x57004) + readl(ioaddr + 0x57008);

	/* The chip only reports the count of frames it silently dropped. */
	dev->stats.rx_dropped += readw(ioaddr + RxDMAStatus);
	writew(0, ioaddr + RxDMAStatus);
	dev->stats.rx_crc_errors = readl(ioaddr + 0x5703C);
	dev->stats.rx_frame_errors = readl(ioaddr + 0x57040);
	dev->stats.rx_length_errors = readl(ioaddr + 0x57058);
	dev->stats.rx_missed_errors = readl(ioaddr + 0x5707C);

	return &dev->stats;
}

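/*
 * Note on get_stats(): most counters above are plain cumulative reads
 * from the chip's statistics block at 0x57000+, but the dropped-frame
 * count is accumulated (+=) and then cleared with writew(0, ... +
 * RxDMAStatus), so that one register is treated as read-then-reset
 * rather than as a running total.
 */
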
static void set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u32 rx_mode = MinVLANPrio;
	struct netdev_hw_addr *ha;
	int i;
#ifdef VLAN_SUPPORT

	rx_mode |= VlanMode;
	if (np->vlgrp) {
		int vlan_count = 0;
		void __iomem *filter_addr = ioaddr + HashTable + 8;
		for (i = 0; i < VLAN_VID_MASK; i++) {
			if (vlan_group_get_device(np->vlgrp, i)) {
				if (vlan_count >= 32)
					break;
				writew(i, filter_addr);
				filter_addr += 16;
				vlan_count++;
			}
		}
		if (i == VLAN_VID_MASK) {
			rx_mode |= PerfectFilterVlan;
			while (vlan_count < 32) {
				writew(0, filter_addr);
				filter_addr += 16;
				vlan_count++;
			}
		}
	}
#endif /* VLAN_SUPPORT */

	if (dev->flags & IFF_PROMISC) {	/* Set promiscuous. */
		rx_mode |= AcceptAll;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		rx_mode |= AcceptBroadcast|AcceptAllMulticast|PerfectFilter;
	} else if (netdev_mc_count(dev) <= 14) {
		/* Use the 16 element perfect filter, skip first two entries. */
		void __iomem *filter_addr = ioaddr + PerfFilterTable + 2 * 16;
		__be16 *eaddrs;
		netdev_for_each_mc_addr(ha, dev) {
			eaddrs = (__be16 *) ha->addr;
			writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 4;
			writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
			writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 8;
		}
		eaddrs = (__be16 *)dev->dev_addr;
		i = netdev_mc_count(dev) + 2;
		while (i++ < 16) {
			writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4;
			writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
			writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 8;
		}
		rx_mode |= AcceptBroadcast|PerfectFilter;
	} else {
		/* Must use a multicast hash table. */
		void __iomem *filter_addr;
		__be16 *eaddrs;
		__le16 mc_filter[32] __attribute__ ((aligned(sizeof(long))));	/* Multicast hash filter */

		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			/* The chip uses the upper 9 CRC bits
			   as index into the hash table */
			int bit_nr = ether_crc_le(ETH_ALEN, ha->addr) >> 23;
			__le32 *fptr = (__le32 *) &mc_filter[(bit_nr >> 4) & ~1];

			*fptr |= cpu_to_le32(1 << (bit_nr & 31));
		}
		/* Clear the perfect filter list, skip first two entries. */
		filter_addr = ioaddr + PerfFilterTable + 2 * 16;
		eaddrs = (__be16 *)dev->dev_addr;
		for (i = 2; i < 16; i++) {
			writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4;
			writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
			writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 8;
		}
		for (filter_addr = ioaddr + HashTable, i = 0; i < 32; filter_addr += 16, i++)
			writew(mc_filter[i], filter_addr);
		rx_mode |= AcceptBroadcast|PerfectFilter|HashFilter;
	}
	writel(rx_mode, ioaddr + RxFilterMode);
}

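/*
 * Worked example for the hash path above (illustrative CRC value): if
 * ether_crc_le() returns 0x8C5A1B3C, then bit_nr = 0x8C5A1B3C >> 23 =
 * 280, i.e. hash bin 280 of 512 (32 words x 16 bits).  The dword trick
 * sets bit (280 & 31) = 24 of the little-endian dword spanning words 16
 * and 17, which is bit 8 of mc_filter[17] -- and 17 * 16 + 8 = 280, so
 * the bin lands in the right place when the 16-bit words are written
 * out to HashTable at 16-byte strides.
 */
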
static int check_if_running(struct net_device *dev)
{
	if (!netif_running(dev))
		return -EINVAL;
	return 0;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct netdev_private *np = netdev_priv(dev);
	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(np->pci_dev));
}

static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	spin_lock_irq(&np->lock);
	mii_ethtool_gset(&np->mii_if, ecmd);
	spin_unlock_irq(&np->lock);
	return 0;
}

static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int res;
	spin_lock_irq(&np->lock);
	res = mii_ethtool_sset(&np->mii_if, ecmd);
	spin_unlock_irq(&np->lock);
	check_duplex(dev);
	return res;
}

static int nway_reset(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_nway_restart(&np->mii_if);
}

static u32 get_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_link_ok(&np->mii_if);
}

static u32 get_msglevel(struct net_device *dev)
{
	return debug;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	debug = val;
}

static const struct ethtool_ops ethtool_ops = {
	.begin = check_if_running,
	.get_drvinfo = get_drvinfo,
	.get_settings = get_settings,
	.set_settings = set_settings,
	.nway_reset = nway_reset,
	.get_link = get_link,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
};

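/*
 * Note: because .begin is wired to check_if_running(), the ethtool core
 * rejects every operation in this table with -EINVAL while the
 * interface is down, so none of the individual handlers needs its own
 * netif_running() check.
 */
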
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(rq);
	int rc;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irq(&np->lock);
	rc = generic_mii_ioctl(&np->mii_if, data, cmd, NULL);
	spin_unlock_irq(&np->lock);

	if ((cmd == SIOCSMIIREG) && (data->phy_id == np->phys[0]))
		check_duplex(dev);

	return rc;
}

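/*
 * Note: a SIOCSMIIREG write aimed at the primary PHY may change the
 * negotiated speed or duplex behind the driver's back, which is why
 * check_duplex() is called afterwards to re-sync the MAC settings
 * (TxMode and friends) with whatever the PHY is now doing.
 */
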
static int netdev_close(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int i;

	netif_stop_queue(dev);

	napi_disable(&np->napi);

	if (debug > 1) {
		printk(KERN_DEBUG "%s: Shutting down ethercard, Intr status %#8.8x.\n",
		       dev->name, (int) readl(ioaddr + IntrStatus));
		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
		       dev->name, np->cur_tx, np->dirty_tx,
		       np->cur_rx, np->dirty_rx);
	}

	/* Disable interrupts by clearing the interrupt mask. */
	writel(0, ioaddr + IntrEnable);

	/* Stop the chip's Tx and Rx processes. */
	writel(0, ioaddr + GenCtrl);
	readl(ioaddr + GenCtrl);

	if (debug > 5) {
		printk(KERN_DEBUG " Tx ring at %#llx:\n",
		       (long long) np->tx_ring_dma);
		for (i = 0; i < 8 /* TX_RING_SIZE is huge! */; i++)
			printk(KERN_DEBUG " #%d desc. %#8.8x %#llx -> %#8.8x.\n",
			       i, le32_to_cpu(np->tx_ring[i].status),
			       (long long) dma_to_cpu(np->tx_ring[i].addr),
			       le32_to_cpu(np->tx_done_q[i].status));
		printk(KERN_DEBUG " Rx ring at %#llx -> %p:\n",
		       (long long) np->rx_ring_dma, np->rx_done_q);
		if (np->rx_done_q)
			for (i = 0; i < 8 /* RX_RING_SIZE */; i++) {
				printk(KERN_DEBUG " #%d desc. %#llx -> %#8.8x\n",
				       i, (long long) dma_to_cpu(np->rx_ring[i].rxaddr), le32_to_cpu(np->rx_done_q[i].status));
			}
	}

	free_irq(dev->irq, dev);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].rxaddr = cpu_to_dma(0xBADF00D0); /* An invalid address. */
		if (np->rx_info[i].skb != NULL) {
			pci_unmap_single(np->pci_dev, np->rx_info[i].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(np->rx_info[i].skb);
		}
		np->rx_info[i].skb = NULL;
		np->rx_info[i].mapping = 0;
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		struct sk_buff *skb = np->tx_info[i].skb;
		if (skb == NULL)
			continue;
		pci_unmap_single(np->pci_dev,
				 np->tx_info[i].mapping,
				 skb_first_frag_len(skb), PCI_DMA_TODEVICE);
		np->tx_info[i].mapping = 0;
		dev_kfree_skb(skb);
		np->tx_info[i].skb = NULL;
	}

	return 0;
}

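/*
 * Note on the 0xBADF00D0 poison above: once the Rx skbs are unmapped,
 * every descriptor is pointed at a recognizably bogus DMA address, so
 * any stray chip access after close shows up as an obvious fault
 * instead of silently scribbling over freed memory.
 */
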
#ifdef CONFIG_PM
static int starfire_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (netif_running(dev)) {
		netif_device_detach(dev);
		netdev_close(dev);
	}

	pci_save_state(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

static int starfire_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	if (netif_running(dev)) {
		netdev_open(dev);
		netif_device_attach(dev);
	}

	return 0;
}
#endif /* CONFIG_PM */

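/*
 * Note: suspend simply closes a running interface and resume reopens
 * it, so no separate quiesce/restore path is needed.  The detach keeps
 * the network stack from queueing packets while the device is down,
 * and attach happens only after netdev_open() has reprogrammed the
 * chip from scratch.
 */
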
static void __devexit starfire_remove_one (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct netdev_private *np;

	BUG_ON(!dev);
	np = netdev_priv(dev);

	unregister_netdev(dev);

	if (np->queue_mem)
		pci_free_consistent(pdev, np->queue_mem_size, np->queue_mem, np->queue_mem_dma);

	pci_set_power_state(pdev, PCI_D3hot);	/* go to sleep in D3 mode */
	pci_disable_device(pdev);

	iounmap(np->base);
	pci_release_regions(pdev);

	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);			/* Will also free np!! */
}

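/*
 * Teardown order above matters: unregister_netdev() first so the stack
 * can issue no new I/O, then the DMA queue memory, then power-down and
 * BAR release, and free_netdev() last of all -- np is embedded in dev,
 * so it must not be touched once the netdev has been freed.
 */
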
static struct pci_driver starfire_driver = {
	.name		= DRV_NAME,
	.probe		= starfire_init_one,
	.remove		= __devexit_p(starfire_remove_one),
#ifdef CONFIG_PM
	.suspend	= starfire_suspend,
	.resume		= starfire_resume,
#endif /* CONFIG_PM */
	.id_table	= starfire_pci_tbl,
};

static int __init starfire_init (void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk(version);

	printk(KERN_INFO DRV_NAME ": polling (NAPI) enabled\n");
#endif

	/* we can do this test only at run-time... sigh */
	if (sizeof(dma_addr_t) != sizeof(netdrv_addr_t)) {
		printk(KERN_ERR DRV_NAME ": this driver has dma_addr_t issues, please send email to maintainer\n");
		return -ENODEV;
	}

	return pci_register_driver(&starfire_driver);
}

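/*
 * Note on the size check above: netdrv_addr_t is the descriptor address
 * type the driver selects at compile time (see its definition earlier
 * in this file), and the hardware descriptor layout depends on it.  If
 * it ever disagrees in width with the kernel's dma_addr_t, the
 * descriptors would be mis-sized, so refusing to load is the only safe
 * response.
 */
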
static void __exit starfire_cleanup (void)
{
	pci_unregister_driver (&starfire_driver);
}


module_init(starfire_init);
module_exit(starfire_cleanup);

/*
 * Local variables:
 *  c-basic-offset: 8
 *  tab-width: 8
 * End:
 */