/* xircom_tulip_cb.c: A Xircom CBE-100 ethernet driver for Linux. */
/*
	Written/copyright 1994-1999 by Donald Becker.

	This software may be used and distributed according to the terms
	of the GNU General Public License, incorporated herein by reference.

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403

	-----------------------------------------------------------

	Linux kernel-specific changes:

	LK1.0 (Ion Badulescu)
	- Major cleanup
	- Use 2.4 PCI API
	- Support ethtool
	- Rewrite perfect filter/hash code
	- Use interrupts for media changes

	LK1.1 (Ion Badulescu)
	- Disallow negotiation of unsupported full-duplex modes
*/

#define DRV_NAME	"xircom_tulip_cb"
#define DRV_VERSION	"0.91+LK1.1"
#define DRV_RELDATE	"October 11, 2001"
#define CARDBUS 1

/* A few user-configurable values. */

/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static int max_interrupt_work = 25;

#define MAX_UNITS 4
/* Used to pass the full-duplex flag, etc. */
static int full_duplex[MAX_UNITS];
static int options[MAX_UNITS];
static int mtu[MAX_UNITS];	/* Jumbo MTU for interfaces. */
/* Keep the ring sizes a power of two for efficiency.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE	16
#define RX_RING_SIZE	32

/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
#ifdef __alpha__
static int rx_copybreak = 1518;
#else
static int rx_copybreak = 100;
#endif
/*
  Set the bus performance register.
	Typical: Set 16 longword cache alignment, no burst limit.
	Cache alignment bits 15:14	     Burst length 13:8
		0000	No alignment  0x00000000 unlimited		0800 8 longwords
		4000	8  longwords		0100 1 longword		1000 16 longwords
		8000	16 longwords		0200 2 longwords	2000 32 longwords
		C000	32  longwords		0400 4 longwords
	Warning: many older 486 systems are broken and require setting 0x00A04800
	   8 longword cache alignment, 8 longword burst.
	ToDo: Non-Intel setting could be better.
*/

#if defined(__alpha__) || defined(__ia64__) || defined(__x86_64__)
static int csr0 = 0x01A00000 | 0xE000;
#elif defined(__powerpc__)
static int csr0 = 0x01B00000 | 0x8000;
#elif defined(__sparc__)
static int csr0 = 0x01B00080 | 0x8000;
#elif defined(__i386__)
static int csr0 = 0x01A00000 | 0x8000;
#else
#warning Processor architecture undefined!
static int csr0 = 0x00A00000 | 0x4800;
#endif
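
/* As a worked example of the table above (a reading of the comment, not
   chip documentation): the i386 default 0x01A00000 | 0x8000 is
   EnableMWI | EnableMRL | EnableMRM (see enum csr0_control_bits below)
   plus 16-longword cache alignment with an unlimited burst length; the
   alpha/ia64/x86_64 default swaps in 0xE000, i.e. 32-longword alignment
   with a 32-longword burst limit. */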

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT		(4 * HZ)
#define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer. */
#define PKT_SETUP_SZ	192		/* Size of the setup frame */

/* PCI registers */
#define PCI_POWERMGMT	0x40
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>

#include <asm/io.h>
#include <asm/processor.h>	/* Processor type for cache alignment. */
#include <asm/uaccess.h>

/* These identify the driver base version and may not be removed. */
static char version[] __devinitdata =
KERN_INFO DRV_NAME ".c derived from tulip.c:v0.91 4/14/99 becker@scyld.com\n"
KERN_INFO " unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE "\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Xircom CBE-100 ethernet driver");
MODULE_LICENSE("GPL v2");

MODULE_PARM(debug, "i");
MODULE_PARM(max_interrupt_work, "i");
MODULE_PARM(rx_copybreak, "i");
MODULE_PARM(csr0, "i");
MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");

#define RUN_AT(x) (jiffies + (x))

#define xircom_debug debug
#ifdef XIRCOM_DEBUG
static int xircom_debug = XIRCOM_DEBUG;
#else
static int xircom_debug = 1;
#endif

/*
				Theory of Operation

I. Board Compatibility

This device driver was forked from the driver for the DECchip "Tulip",
Digital's single-chip ethernet controllers for PCI.  It supports Xircom's
almost-Tulip-compatible CBE-100 CardBus adapters.

II. Board-specific settings

PCI bus devices are configured by the system at boot time, so no jumpers
need to be set on the board.  The system BIOS preferably should assign the
PCI INTA signal to an otherwise unused system IRQ line.

III. Driver operation

IIIa. Ring buffers

The Xircom can use either ring buffers or lists of Tx and Rx descriptors.
This driver uses statically allocated rings of Rx and Tx descriptors, set at
compile time by RX/TX_RING_SIZE.  This version of the driver allocates skbuffs
for the Rx ring buffers at open() time and passes the skb->data field to the
Xircom as receive data buffers.  When an incoming frame is less than
RX_COPYBREAK bytes long, a fresh skbuff is allocated and the frame is
copied to the new skbuff.  When the incoming frame is larger, the skbuff is
passed directly up the protocol stack and replaced by a newly allocated
skbuff.

The RX_COPYBREAK value is chosen to trade off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames.  For small frames the copying cost is negligible (esp. considering
that we are pre-loading the cache with immediately useful header
information).  For large frames the copying cost is non-trivial, and the
larger copy might flush the cache of useful data.  A subtle aspect of this
choice is that the Xircom only receives into longword aligned buffers, thus
the IP header at offset 14 isn't longword aligned for further processing.
Copied frames are put into the new skbuff at an offset of "+2", thus copying
has the beneficial effect of aligning the IP header and preloading the
cache.
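
As an illustrative sketch (mirroring the logic in xircom_rx() below), the
receive path therefore looks like:

	if (pkt_len < rx_copybreak
	    && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
		skb_reserve(skb, 2);	<- copy lands at +2, IP header aligned
		... copy the frame into the fresh skbuff ...
	} else {
		... pass the ring skbuff up and replace it ...
	}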

IIIC. Synchronization
The driver runs as two independent, single-threaded flows of control.  One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag.  The other thread is the interrupt handler, which is single
threaded by the hardware and other software.

The send packet thread has partial control over the Tx ring and the
'dev->tbusy' flag.  It sets the tbusy flag whenever it's queuing a Tx packet.
If the next queue slot is empty, it clears the tbusy flag when finished;
otherwise it sets the 'tp->tx_full' flag.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring.  (The Tx-done interrupt can't be selectively turned off, so
we can't avoid the interrupt overhead by having the Tx routine reap the Tx
stats.)  After reaping the stats, it marks the queue entry as empty by setting
the 'base' to zero.  Iff the 'tp->tx_full' flag is set, it clears both the
tx_full and tbusy flags.

IV. Notes

IVb. References

http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
http://www.digital.com (search for current 21*4* datasheets and "21X4 SROM")
http://www.national.com/pf/DP/DP83840A.html

IVc. Errata

*/

/* A full-duplex map for media types. */
enum MediaIs {
	MediaIsFD = 1, MediaAlwaysFD=2, MediaIsMII=4, MediaIsFx=8,
	MediaIs100=16};
static const char media_cap[] =
{0,0,0,16,  3,19,16,24,  27,4,7,5,  0,20,23,20 };
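/* Each media_cap[] entry ORs the MediaIs* flags for one dev->if_port
   value: e.g. entry 3 (16) is MediaIs100, and the MII media types start
   at index 9 (entry 9 is 4 = MediaIsMII), which is why
   find_mii_transceivers() below indexes its media2advert[] table with
   default_port - 9. */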

/* Offsets to the Command and Status Registers, "CSRs".  All accesses
   must be longword instructions and quadword aligned. */
enum xircom_offsets {
	CSR0=0,     CSR1=0x08,  CSR2=0x10,  CSR3=0x18,  CSR4=0x20,  CSR5=0x28,
	CSR6=0x30,  CSR7=0x38,  CSR8=0x40,  CSR9=0x48,  CSR10=0x50, CSR11=0x58,
	CSR12=0x60, CSR13=0x68, CSR14=0x70, CSR15=0x78, CSR16=0x04, };

/* The bits in the CSR5 status registers, mostly interrupt sources. */
enum status_bits {
	LinkChange=0x08000000,
	NormalIntr=0x10000, NormalIntrMask=0x00014045,
	AbnormalIntr=0x8000, AbnormalIntrMask=0x0a00a5a2,
	ReservedIntrMask=0xe0001a18,
	EarlyRxIntr=0x4000, BusErrorIntr=0x2000,
	EarlyTxIntr=0x400, RxDied=0x100, RxNoBuf=0x80, RxIntr=0x40,
	TxFIFOUnderflow=0x20, TxNoBuf=0x04, TxDied=0x02, TxIntr=0x01,
};

enum csr0_control_bits {
	EnableMWI=0x01000000, EnableMRL=0x00800000,
	EnableMRM=0x00200000, EqualBusPrio=0x02,
	SoftwareReset=0x01,
};

enum csr6_control_bits {
	ReceiveAllBit=0x40000000, AllMultiBit=0x80, PromiscBit=0x40,
	HashFilterBit=0x01, FullDuplexBit=0x0200,
	TxThresh10=0x400000, TxStoreForw=0x200000,
	TxThreshMask=0xc000, TxThreshShift=14,
	EnableTx=0x2000, EnableRx=0x02,
	ReservedZeroMask=0x8d930134, ReservedOneMask=0x320c0000,
	EnableTxRx=(EnableTx | EnableRx),
};

enum tbl_flag {
	HAS_MII=1, HAS_ACPI=2,
};
static struct xircom_chip_table {
	char *chip_name;
	int valid_intrs;	/* CSR7 interrupt enable settings */
	int flags;
} xircom_tbl[] = {
  { "Xircom Cardbus Adapter",
	LinkChange | NormalIntr | AbnormalIntr | BusErrorIntr |
	RxDied | RxNoBuf | RxIntr | TxFIFOUnderflow | TxNoBuf | TxDied | TxIntr,
	HAS_MII | HAS_ACPI, },
  { NULL, },
};
/* This matches the table above. */
enum chips {
	X3201_3,
};

/* The Xircom Rx and Tx buffer descriptors. */
struct xircom_rx_desc {
	s32 status;
	s32 length;
	u32 buffer1, buffer2;
};

struct xircom_tx_desc {
	s32 status;
	s32 length;
	u32 buffer1, buffer2;	/* We use only buffer 1. */
};

enum tx_desc0_status_bits {
	Tx0DescOwned=0x80000000, Tx0DescError=0x8000, Tx0NoCarrier=0x0800,
	Tx0LateColl=0x0200, Tx0ManyColl=0x0100, Tx0Underflow=0x02,
};
enum tx_desc1_status_bits {
	Tx1ComplIntr=0x80000000, Tx1LastSeg=0x40000000, Tx1FirstSeg=0x20000000,
	Tx1SetupPkt=0x08000000, Tx1DisableCRC=0x04000000, Tx1RingWrap=0x02000000,
	Tx1ChainDesc=0x01000000, Tx1NoPad=0x800000, Tx1HashSetup=0x400000,
	Tx1WholePkt=(Tx1FirstSeg | Tx1LastSeg),
};
enum rx_desc0_status_bits {
	Rx0DescOwned=0x80000000, Rx0DescError=0x8000, Rx0NoSpace=0x4000,
	Rx0Runt=0x0800, Rx0McastPkt=0x0400, Rx0FirstSeg=0x0200, Rx0LastSeg=0x0100,
	Rx0HugeFrame=0x80, Rx0CRCError=0x02,
	Rx0WholePkt=(Rx0FirstSeg | Rx0LastSeg),
};
enum rx_desc1_status_bits {
	Rx1RingWrap=0x02000000, Rx1ChainDesc=0x01000000,
};

struct xircom_private {
	struct xircom_rx_desc rx_ring[RX_RING_SIZE];
	struct xircom_tx_desc tx_ring[TX_RING_SIZE];
	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
#ifdef CARDBUS
	/* The X3201-3 requires 4-byte aligned tx bufs */
	struct sk_buff* tx_aligned_skbuff[TX_RING_SIZE];
#endif
	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff* rx_skbuff[RX_RING_SIZE];
	u16 setup_frame[PKT_SETUP_SZ / sizeof(u16)];	/* Pseudo-Tx frame to init address table. */
	int chip_id;
	struct net_device_stats stats;
	unsigned int cur_rx, cur_tx;		/* The next free ring entry */
	unsigned int dirty_rx, dirty_tx;	/* The ring entries to be free()ed. */
	unsigned int tx_full:1;			/* The Tx queue is full. */
	unsigned int speed100:1;
	unsigned int full_duplex:1;		/* Full-duplex operation requested. */
	unsigned int autoneg:1;
	unsigned int default_port:4;		/* Last dev->if_port value. */
	unsigned int open:1;
	unsigned int csr0;			/* CSR0 setting. */
	unsigned int csr6;			/* Current CSR6 control settings. */
	u16 to_advertise;			/* NWay capabilities advertised. */
	u16 advertising[4];
	signed char phys[4], mii_cnt;		/* MII device addresses. */
	int saved_if_port;
	struct pci_dev *pdev;
	spinlock_t lock;
#ifdef CONFIG_PM
	u32 pci_state[16];
#endif
};

static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static void xircom_up(struct net_device *dev);
static void xircom_down(struct net_device *dev);
static int xircom_open(struct net_device *dev);
static void xircom_tx_timeout(struct net_device *dev);
static void xircom_init_ring(struct net_device *dev);
static int xircom_start_xmit(struct sk_buff *skb, struct net_device *dev);
static int xircom_rx(struct net_device *dev);
static void xircom_media_change(struct net_device *dev);
static irqreturn_t xircom_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
static int xircom_close(struct net_device *dev);
static struct net_device_stats *xircom_get_stats(struct net_device *dev);
static int xircom_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static void set_rx_mode(struct net_device *dev);
static void check_duplex(struct net_device *dev);

/* The Xircom cards are picky about when certain bits in CSR6 can be
   manipulated.  Keith Owens <kaos@ocs.com.au>. */
static void outl_CSR6(u32 newcsr6, long ioaddr)
{
	const int strict_bits =
		TxThresh10 | TxStoreForw | TxThreshMask | EnableTxRx | FullDuplexBit;
	int csr5, csr5_22_20, csr5_19_17, currcsr6, attempts = 200;
	unsigned long flags;
	save_flags(flags);
	cli();
	/* mask out the reserved bits that always read 0 on the Xircom cards */
	newcsr6 &= ~ReservedZeroMask;
	/* or in the reserved bits that always read 1 */
	newcsr6 |= ReservedOneMask;
	currcsr6 = inl(ioaddr + CSR6);
	if (((newcsr6 & strict_bits) == (currcsr6 & strict_bits)) ||
	    ((currcsr6 & ~EnableTxRx) == 0)) {
		outl(newcsr6, ioaddr + CSR6);	/* safe */
		restore_flags(flags);
		return;
	}
	/* make sure the transmitter and receiver are stopped first */
	currcsr6 &= ~EnableTxRx;
	while (1) {
		csr5 = inl(ioaddr + CSR5);
		if (csr5 == 0xffffffff)
			break;	/* cannot read csr5, card removed? */
		csr5_22_20 = csr5 & 0x700000;
		csr5_19_17 = csr5 & 0x0e0000;
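		/* (CSR5 bits 22:20 report the Tx process state and bits 19:17
		   the Rx process state; the values accepted below are the
		   stopped/suspended states of the 21143-style CSR layout this
		   driver inherits from tulip.c.) */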
		if ((csr5_22_20 == 0 || csr5_22_20 == 0x600000) &&
		    (csr5_19_17 == 0 || csr5_19_17 == 0x80000 || csr5_19_17 == 0xc0000))
			break;	/* both are stopped or suspended */
		if (!--attempts) {
			printk(KERN_INFO DRV_NAME ": outl_CSR6 too many attempts, "
			       "csr5=0x%08x\n", csr5);
			outl(newcsr6, ioaddr + CSR6);	/* unsafe but do it anyway */
			restore_flags(flags);
			return;
		}
		outl(currcsr6, ioaddr + CSR6);
		udelay(1);
	}
	/* now it is safe to change csr6 */
	outl(newcsr6, ioaddr + CSR6);
	restore_flags(flags);
}

static void __devinit read_mac_address(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	int i, j;
	unsigned char tuple, link, data_id, data_count;

	/* Xircom has its address stored in the CIS;
	 * we access it through the boot rom interface for now
	 * this might not work, as the CIS is not parsed but I
	 * (danilo) use the offset I found on my card's CIS !!!
	 *
	 * Doug Ledford: I changed this routine around so that it
	 * walks the CIS memory space, parsing the config items, and
	 * finds the proper lan_node_id tuple and uses the data
	 * stored there.
	 */
	outl(1 << 12, ioaddr + CSR9);	/* enable boot rom access */
	for (i = 0x100; i < 0x1f7; i += link + 2) {
		outl(i, ioaddr + CSR10);
		tuple = inl(ioaddr + CSR9) & 0xff;
		outl(i + 1, ioaddr + CSR10);
		link = inl(ioaddr + CSR9) & 0xff;
		outl(i + 2, ioaddr + CSR10);
		data_id = inl(ioaddr + CSR9) & 0xff;
		outl(i + 3, ioaddr + CSR10);
		data_count = inl(ioaddr + CSR9) & 0xff;
		if ((tuple == 0x22) &&
		    (data_id == 0x04) && (data_count == 0x06)) {
			/*
			 * This is it.  We have the data we want.
			 */
			for (j = 0; j < 6; j++) {
				outl(i + j + 4, ioaddr + CSR10);
				dev->dev_addr[j] = inl(ioaddr + CSR9) & 0xff;
			}
			break;
		} else if (link == 0) {
			break;
		}
	}
}

/*
 * locate the MII interfaces and initialize them.
 * we disable full-duplex modes here,
 * because we don't know how to handle them.
 */
static void find_mii_transceivers(struct net_device *dev)
{
	struct xircom_private *tp = dev->priv;
	int phy, phy_idx;

	if (media_cap[tp->default_port] & MediaIsMII) {
		u16 media2advert[] = { 0x20, 0x40, 0x03e0, 0x60, 0x80, 0x100, 0x200 };
		tp->to_advertise = media2advert[tp->default_port - 9];
	} else
		tp->to_advertise =
			/*ADVERTISE_100BASE4 | ADVERTISE_100FULL |*/ ADVERTISE_100HALF |
			/*ADVERTISE_10FULL |*/ ADVERTISE_10HALF | ADVERTISE_CSMA;

	/* Find the connected MII xcvrs.
	   Doing this in open() would allow detecting external xcvrs later,
	   but takes much time. */
	for (phy = 0, phy_idx = 0; phy < 32 && phy_idx < sizeof(tp->phys); phy++) {
		int mii_status = mdio_read(dev, phy, MII_BMSR);
		if ((mii_status & (BMSR_100BASE4 | BMSR_100HALF | BMSR_10HALF)) == BMSR_100BASE4 ||
		    ((mii_status & BMSR_100BASE4) == 0 &&
		     (mii_status & (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL | BMSR_10HALF)) != 0)) {
			int mii_reg0 = mdio_read(dev, phy, MII_BMCR);
			int mii_advert = mdio_read(dev, phy, MII_ADVERTISE);
			int reg4 = ((mii_status >> 6) & tp->to_advertise) | ADVERTISE_CSMA;
			tp->phys[phy_idx] = phy;
			tp->advertising[phy_idx++] = reg4;
			printk(KERN_INFO "%s: MII transceiver #%d "
			       "config %4.4x status %4.4x advertising %4.4x.\n",
			       dev->name, phy, mii_reg0, mii_status, mii_advert);
		}
	}
	tp->mii_cnt = phy_idx;
	if (phy_idx == 0) {
		printk(KERN_INFO "%s: ***WARNING***: No MII transceiver found!\n",
		       dev->name);
		tp->phys[0] = 0;
	}
}

/*
 * To quote Arjan van de Ven:
 * transceiver_voodoo() enables the external UTP plug thingy.
 * it's called voodoo as I stole this code and cannot cross-reference
 * it with the specification.
 * Actually it seems to go like this:
 * - GPIO2 enables the MII itself so we can talk to it. The MII gets reset
 *   so any prior MII settings are lost.
 * - GPIO0 enables the TP port so the MII can talk to the network.
 * - a software reset will reset both GPIO pins.
 * I also moved the software reset here, because doing it in xircom_up()
 * required enabling the GPIO pins each time, which reset the MII each time.
 * Thus we couldn't control the MII -- which sucks because we don't know
 * how to handle full-duplex modes so we *must* disable them.
 */
static void transceiver_voodoo(struct net_device *dev)
{
	struct xircom_private *tp = dev->priv;
	long ioaddr = dev->base_addr;

	/* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
	outl(SoftwareReset, ioaddr + CSR0);
	udelay(2);

	/* Deassert reset. */
	outl(tp->csr0, ioaddr + CSR0);

	/* Reset the xcvr interface and turn on heartbeat. */
	outl(0x0008, ioaddr + CSR15);
	udelay(5);	/* The delays are Xircom-recommended to give the
			 * chipset time to reset the actual hardware
			 * on the PCMCIA card
			 */
	outl(0xa8050000, ioaddr + CSR15);
	udelay(5);
	outl(0xa00f0000, ioaddr + CSR15);
	udelay(5);

	outl_CSR6(0, ioaddr);
	//outl_CSR6(FullDuplexBit, ioaddr);
}

static int __devinit xircom_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct net_device *dev;
	struct xircom_private *tp;
	static int board_idx = -1;
	int chip_idx = id->driver_data;
	long ioaddr;
	int i;
	u8 chip_rev;

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	//printk(KERN_INFO "xircom_init_one(%s)\n", pci_name(pdev));

	board_idx++;

	if (pci_enable_device(pdev))
		return -ENODEV;

	pci_set_master(pdev);

	ioaddr = pci_resource_start(pdev, 0);
	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		printk(KERN_ERR DRV_NAME "%d: cannot alloc etherdev, aborting\n", board_idx);
		return -ENOMEM;
	}
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	dev->base_addr = ioaddr;
	dev->irq = pdev->irq;

	if (pci_request_regions(pdev, dev->name)) {
		printk(KERN_ERR DRV_NAME " %d: cannot reserve PCI resources, aborting\n", board_idx);
		goto err_out_free_netdev;
	}

	/* Bring the chip out of sleep mode.
	   Caution: Snooze mode does not work with some boards! */
	if (xircom_tbl[chip_idx].flags & HAS_ACPI)
		pci_write_config_dword(pdev, PCI_POWERMGMT, 0);

	/* Stop the chip's Tx and Rx processes. */
	outl_CSR6(inl(ioaddr + CSR6) & ~EnableTxRx, ioaddr);
	/* Clear the missed-packet counter. */
	(volatile int)inl(ioaddr + CSR8);

	tp = dev->priv;

	tp->lock = SPIN_LOCK_UNLOCKED;
	tp->pdev = pdev;
	tp->chip_id = chip_idx;
	/* BugFixes: The 21143-TD hangs with PCI Write-and-Invalidate cycles. */
	/* XXX: is this necessary for Xircom? */
	tp->csr0 = csr0 & ~EnableMWI;

	pci_set_drvdata(pdev, dev);

	/* The lower four bits are the media type. */
	if (board_idx >= 0 && board_idx < MAX_UNITS) {
		tp->default_port = options[board_idx] & 15;
		if ((options[board_idx] & 0x90) || full_duplex[board_idx] > 0)
			tp->full_duplex = 1;
		if (mtu[board_idx] > 0)
			dev->mtu = mtu[board_idx];
	}
	if (dev->mem_start)
		tp->default_port = dev->mem_start;
	if (tp->default_port) {
		if (media_cap[tp->default_port] & MediaAlwaysFD)
			tp->full_duplex = 1;
	}
	if (tp->full_duplex)
		tp->autoneg = 0;
	else
		tp->autoneg = 1;
	tp->speed100 = 1;

	/* The Xircom-specific entries in the device structure. */
	dev->open = &xircom_open;
	dev->hard_start_xmit = &xircom_start_xmit;
	dev->stop = &xircom_close;
	dev->get_stats = &xircom_get_stats;
	dev->do_ioctl = &xircom_ioctl;
#ifdef HAVE_MULTICAST
	dev->set_multicast_list = &set_rx_mode;
#endif
	dev->tx_timeout = xircom_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;

	transceiver_voodoo(dev);

	read_mac_address(dev);

	if (register_netdev(dev))
		goto err_out_cleardev;

	pci_read_config_byte(pdev, PCI_REVISION_ID, &chip_rev);
	printk(KERN_INFO "%s: %s rev %d at %#3lx,",
	       dev->name, xircom_tbl[chip_idx].chip_name, chip_rev, ioaddr);
	for (i = 0; i < 6; i++)
		printk("%c%2.2X", i ? ':' : ' ', dev->dev_addr[i]);
	printk(", IRQ %d.\n", dev->irq);

	if (xircom_tbl[chip_idx].flags & HAS_MII) {
		find_mii_transceivers(dev);
		check_duplex(dev);
	}

	return 0;

err_out_cleardev:
	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
err_out_free_netdev:
	free_netdev(dev);
	return -ENODEV;
}

/* MII transceiver control section.
   Read and write the MII registers using software-generated serial
   MDIO protocol.  See the MII specifications or DP83840A data sheet
   for details. */

/* The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
   met by back-to-back PCI I/O cycles, but we insert a delay to avoid
   "overclocking" issues or future 66 MHz PCI. */
#define mdio_delay() inl(mdio_addr)

/* Read and write the MII registers using software-generated serial
   MDIO protocol.  It is just different enough from the EEPROM protocol
   to not share code.  The maximum data clock rate is 2.5 MHz. */
#define MDIO_SHIFT_CLK		0x10000
#define MDIO_DATA_WRITE0	0x00000
#define MDIO_DATA_WRITE1	0x20000
#define MDIO_ENB		0x00000		/* Ignore the 0x02000 databook setting. */
#define MDIO_ENB_IN		0x40000
#define MDIO_DATA_READ		0x80000

static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	int i;
	int read_cmd = (0xf6 << 10) | (phy_id << 5) | location;
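	/* (The low 16 bits of read_cmd, shifted out MSB first below, are two
	   trailing preamble 1s, the 0110 start/read-opcode pattern, the
	   5-bit PHY address and the 5-bit register address.) */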
	int retval = 0;
	long ioaddr = dev->base_addr;
	long mdio_addr = ioaddr + CSR9;

	/* Establish sync by sending at least 32 logic ones. */
	for (i = 32; i >= 0; i--) {
		outl(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr);
		mdio_delay();
		outl(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
		mdio_delay();
	}
	/* Shift the read command bits out. */
	for (i = 15; i >= 0; i--) {
		int dataval = (read_cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0;

		outl(MDIO_ENB | dataval, mdio_addr);
		mdio_delay();
		outl(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr);
		mdio_delay();
	}
	/* Read the two transition, 16 data, and wire-idle bits. */
	for (i = 19; i > 0; i--) {
		outl(MDIO_ENB_IN, mdio_addr);
		mdio_delay();
		retval = (retval << 1) | ((inl(mdio_addr) & MDIO_DATA_READ) ? 1 : 0);
		outl(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
		mdio_delay();
	}
	return (retval >> 1) & 0xffff;
}

static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
	int i;
	int cmd = (0x5002 << 16) | (phy_id << 23) | (location << 18) | value;
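	/* (Read top to bottom, the 32 bits shifted out below are the 0101
	   start/write-opcode pattern, the 5-bit PHY and register addresses,
	   the 10 turnaround pattern, and the 16 data bits.) */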
	long ioaddr = dev->base_addr;
	long mdio_addr = ioaddr + CSR9;

	/* Establish sync by sending 32 logic ones. */
	for (i = 32; i >= 0; i--) {
		outl(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr);
		mdio_delay();
		outl(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
		mdio_delay();
	}
	/* Shift the command bits out. */
	for (i = 31; i >= 0; i--) {
		int dataval = (cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0;
		outl(MDIO_ENB | dataval, mdio_addr);
		mdio_delay();
		outl(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr);
		mdio_delay();
	}
	/* Clear out extra bits. */
	for (i = 2; i > 0; i--) {
		outl(MDIO_ENB_IN, mdio_addr);
		mdio_delay();
		outl(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
		mdio_delay();
	}
	return;
}

static void
xircom_up(struct net_device *dev)
{
	struct xircom_private *tp = dev->priv;
	long ioaddr = dev->base_addr;
	int i;

	xircom_init_ring(dev);
	/* Clear the tx ring */
	for (i = 0; i < TX_RING_SIZE; i++) {
		tp->tx_skbuff[i] = NULL;
		tp->tx_ring[i].status = 0;
	}

	if (xircom_debug > 1)
		printk(KERN_DEBUG "%s: xircom_up() irq %d.\n", dev->name, dev->irq);

	outl(virt_to_bus(tp->rx_ring), ioaddr + CSR3);
	outl(virt_to_bus(tp->tx_ring), ioaddr + CSR4);

	tp->saved_if_port = dev->if_port;
	if (dev->if_port == 0)
		dev->if_port = tp->default_port;

	tp->csr6 = TxThresh10 /*| FullDuplexBit*/;	/* XXX: why 10 and not 100? */

	set_rx_mode(dev);

	/* Start the chip's Tx to process setup frame. */
	outl_CSR6(tp->csr6, ioaddr);
	outl_CSR6(tp->csr6 | EnableTx, ioaddr);

	/* Acknowledge all outstanding interrupt sources */
	outl(xircom_tbl[tp->chip_id].valid_intrs, ioaddr + CSR5);
	/* Enable interrupts by setting the interrupt mask. */
	outl(xircom_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
	/* Enable Rx */
	outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
	/* Rx poll demand */
	outl(0, ioaddr + CSR2);

	/* Tell the net layer we're ready */
	netif_start_queue(dev);

	/* Check current media state */
	xircom_media_change(dev);

	if (xircom_debug > 2) {
		printk(KERN_DEBUG "%s: Done xircom_up(), CSR0 %8.8x, CSR5 %8.8x CSR6 %8.8x.\n",
		       dev->name, inl(ioaddr + CSR0), inl(ioaddr + CSR5),
		       inl(ioaddr + CSR6));
	}
}

static int
xircom_open(struct net_device *dev)
{
	struct xircom_private *tp = dev->priv;

	if (request_irq(dev->irq, &xircom_interrupt, SA_SHIRQ, dev->name, dev))
		return -EAGAIN;

	xircom_up(dev);
	tp->open = 1;

	return 0;
}

static void xircom_tx_timeout(struct net_device *dev)
{
	struct xircom_private *tp = dev->priv;
	long ioaddr = dev->base_addr;

	if (media_cap[dev->if_port] & MediaIsMII) {
		/* Do nothing -- the media monitor should handle this. */
		if (xircom_debug > 1)
			printk(KERN_WARNING "%s: Transmit timeout using MII device.\n",
			       dev->name);
	}

#if defined(way_too_many_messages)
	if (xircom_debug > 3) {
		int i;
		for (i = 0; i < RX_RING_SIZE; i++) {
			u8 *buf = (u8 *)(tp->rx_ring[i].buffer1);
			int j;
			printk(KERN_DEBUG "%2d: %8.8x %8.8x %8.8x %8.8x "
			       "%2.2x %2.2x %2.2x.\n",
			       i, (unsigned int)tp->rx_ring[i].status,
			       (unsigned int)tp->rx_ring[i].length,
			       (unsigned int)tp->rx_ring[i].buffer1,
			       (unsigned int)tp->rx_ring[i].buffer2,
			       buf[0], buf[1], buf[2]);
			for (j = 0; buf[j] != 0xee && j < 1600; j++)
				if (j < 100) printk(" %2.2x", buf[j]);
			printk(" j=%d.\n", j);
		}
		printk(KERN_DEBUG "  Rx ring %8.8x: ", (int)tp->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++)
			printk(" %8.8x", (unsigned int)tp->rx_ring[i].status);
		printk("\n" KERN_DEBUG "  Tx ring %8.8x: ", (int)tp->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(" %8.8x", (unsigned int)tp->tx_ring[i].status);
		printk("\n");
	}
#endif

	/* Stop and restart the chip's Tx/Rx processes. */
	outl_CSR6(tp->csr6 | EnableRx, ioaddr);
	outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
	/* Trigger an immediate transmit demand. */
	outl(0, ioaddr + CSR1);

	dev->trans_start = jiffies;
	netif_wake_queue(dev);
	tp->stats.tx_errors++;
}

/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void xircom_init_ring(struct net_device *dev)
{
	struct xircom_private *tp = dev->priv;
	int i;

	tp->tx_full = 0;
	tp->cur_rx = tp->cur_tx = 0;
	tp->dirty_rx = tp->dirty_tx = 0;

	for (i = 0; i < RX_RING_SIZE; i++) {
		tp->rx_ring[i].status = 0;
		tp->rx_ring[i].length = PKT_BUF_SZ;
		tp->rx_ring[i].buffer2 = virt_to_bus(&tp->rx_ring[i+1]);
		tp->rx_skbuff[i] = NULL;
	}
	/* Mark the last entry as wrapping the ring. */
	tp->rx_ring[i-1].length = PKT_BUF_SZ | Rx1RingWrap;
	tp->rx_ring[i-1].buffer2 = virt_to_bus(&tp->rx_ring[0]);

	for (i = 0; i < RX_RING_SIZE; i++) {
		/* Note the receive buffer must be longword aligned.
		   dev_alloc_skb() provides 16 byte alignment.  But do *not*
		   use skb_reserve() to align the IP header! */
		struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ);
		tp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb->dev = dev;		/* Mark as being used by this device. */
		tp->rx_ring[i].status = Rx0DescOwned;	/* Owned by Xircom chip */
		tp->rx_ring[i].buffer1 = virt_to_bus(skb->tail);
	}
	tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
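	/* (If an allocation failed above, i < RX_RING_SIZE and the unsigned
	   wrap-around leaves cur_rx - dirty_rx positive, so the refill loop
	   in xircom_rx() will retry the missing buffers later.) */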

	/* The Tx buffer descriptor is filled in as needed, but we
	   do need to clear the ownership bit. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		tp->tx_skbuff[i] = NULL;
		tp->tx_ring[i].status = 0;
		tp->tx_ring[i].buffer2 = virt_to_bus(&tp->tx_ring[i+1]);
#ifdef CARDBUS
		if (tp->chip_id == X3201_3)
			tp->tx_aligned_skbuff[i] = dev_alloc_skb(PKT_BUF_SZ);
#endif /* CARDBUS */
	}
	tp->tx_ring[i-1].buffer2 = virt_to_bus(&tp->tx_ring[0]);
}

static int
xircom_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xircom_private *tp = dev->priv;
	int entry;
	u32 flag;

	/* Caution: the write order is important here, set the base address
	   with the "ownership" bits last. */

	/* Calculate the next Tx descriptor entry. */
	entry = tp->cur_tx % TX_RING_SIZE;

	tp->tx_skbuff[entry] = skb;
#ifdef CARDBUS
	if (tp->chip_id == X3201_3) {
		memcpy(tp->tx_aligned_skbuff[entry]->data, skb->data, skb->len);
		tp->tx_ring[entry].buffer1 = virt_to_bus(tp->tx_aligned_skbuff[entry]->data);
	} else
#endif
		tp->tx_ring[entry].buffer1 = virt_to_bus(skb->data);
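	/* (Interrupt mitigation: a Tx-done interrupt is requested only when
	   the ring reaches the half-full mark and again when it is nearly
	   full, so most packets complete without taking an interrupt.) */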
	if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {	/* Typical path */
		flag = Tx1WholePkt;	/* No interrupt */
	} else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) {
		flag = Tx1WholePkt | Tx1ComplIntr;	/* Tx-done intr. */
	} else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) {
		flag = Tx1WholePkt;	/* No Tx-done intr. */
	} else {
		/* Leave room for set_rx_mode() to fill entries. */
		flag = Tx1WholePkt | Tx1ComplIntr;	/* Tx-done intr. */
		tp->tx_full = 1;
	}
	if (entry == TX_RING_SIZE - 1)
		flag |= Tx1WholePkt | Tx1ComplIntr | Tx1RingWrap;

	tp->tx_ring[entry].length = skb->len | flag;
	tp->tx_ring[entry].status = Tx0DescOwned;	/* Pass ownership to the chip. */
	tp->cur_tx++;
	if (tp->tx_full)
		netif_stop_queue(dev);
	else
		netif_wake_queue(dev);

	/* Trigger an immediate transmit demand. */
	outl(0, dev->base_addr + CSR1);

	dev->trans_start = jiffies;

	return 0;
}

static void xircom_media_change(struct net_device *dev)
{
	struct xircom_private *tp = dev->priv;
	long ioaddr = dev->base_addr;
	u16 reg0, reg1, reg4, reg5;
	u32 csr6 = inl(ioaddr + CSR6), newcsr6;

	/* reset status first */
	mdio_read(dev, tp->phys[0], MII_BMCR);
	mdio_read(dev, tp->phys[0], MII_BMSR);

	reg0 = mdio_read(dev, tp->phys[0], MII_BMCR);
	reg1 = mdio_read(dev, tp->phys[0], MII_BMSR);

	if (reg1 & BMSR_LSTATUS) {
		/* link is up */
		if (reg0 & BMCR_ANENABLE) {
			/* autonegotiation is enabled */
			reg4 = mdio_read(dev, tp->phys[0], MII_ADVERTISE);
			reg5 = mdio_read(dev, tp->phys[0], MII_LPA);
			if (reg4 & ADVERTISE_100FULL && reg5 & LPA_100FULL) {
				tp->speed100 = 1;
				tp->full_duplex = 1;
			} else if (reg4 & ADVERTISE_100HALF && reg5 & LPA_100HALF) {
				tp->speed100 = 1;
				tp->full_duplex = 0;
			} else if (reg4 & ADVERTISE_10FULL && reg5 & LPA_10FULL) {
				tp->speed100 = 0;
				tp->full_duplex = 1;
			} else {
				tp->speed100 = 0;
				tp->full_duplex = 0;
			}
		} else {
			/* autonegotiation is disabled */
			if (reg0 & BMCR_SPEED100)
				tp->speed100 = 1;
			else
				tp->speed100 = 0;
			if (reg0 & BMCR_FULLDPLX)
				tp->full_duplex = 1;
			else
				tp->full_duplex = 0;
		}
		printk(KERN_DEBUG "%s: Link is up, running at %sMbit %s-duplex\n",
		       dev->name,
		       tp->speed100 ? "100" : "10",
		       tp->full_duplex ? "full" : "half");
		netif_carrier_on(dev);
		newcsr6 = csr6 & ~FullDuplexBit;
		if (tp->full_duplex)
			newcsr6 |= FullDuplexBit;
		if (newcsr6 != csr6)
			outl_CSR6(newcsr6, ioaddr);	/* outl_CSR6() adds the CSR6 offset itself */
	} else {
		printk(KERN_DEBUG "%s: Link is down\n", dev->name);
		netif_carrier_off(dev);
	}
}

static void check_duplex(struct net_device *dev)
{
	struct xircom_private *tp = dev->priv;
	u16 reg0;
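
	/* Reset the PHY and wait for BMCR_RESET to self-clear before
	   reprogramming the advertising register. */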
	mdio_write(dev, tp->phys[0], MII_BMCR, BMCR_RESET);
	udelay(500);
	while (mdio_read(dev, tp->phys[0], MII_BMCR) & BMCR_RESET);

	reg0 = mdio_read(dev, tp->phys[0], MII_BMCR);
	mdio_write(dev, tp->phys[0], MII_ADVERTISE, tp->advertising[0]);

	if (tp->autoneg) {
		reg0 &= ~(BMCR_SPEED100 | BMCR_FULLDPLX);
		reg0 |= BMCR_ANENABLE | BMCR_ANRESTART;
	} else {
		reg0 &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
		if (tp->speed100)
			reg0 |= BMCR_SPEED100;
		if (tp->full_duplex)
			reg0 |= BMCR_FULLDPLX;
		printk(KERN_DEBUG "%s: Link forced to %sMbit %s-duplex\n",
		       dev->name,
		       tp->speed100 ? "100" : "10",
		       tp->full_duplex ? "full" : "half");
	}
	mdio_write(dev, tp->phys[0], MII_BMCR, reg0);
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t xircom_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = dev_instance;
	struct xircom_private *tp = dev->priv;
	long ioaddr = dev->base_addr;
	int csr5, work_budget = max_interrupt_work;
	int handled = 0;

	spin_lock(&tp->lock);

	do {
		csr5 = inl(ioaddr + CSR5);
		/* Acknowledge all of the current interrupt sources ASAP. */
		outl(csr5 & 0x0001ffff, ioaddr + CSR5);

		if (xircom_debug > 4)
			printk(KERN_DEBUG "%s: interrupt  csr5=%#8.8x new csr5=%#8.8x.\n",
			       dev->name, csr5, inl(dev->base_addr + CSR5));

		if (csr5 == 0xffffffff)
			break;	/* all bits set, assume PCMCIA card removed */

		if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
			break;

		handled = 1;

		if (csr5 & (RxIntr | RxNoBuf))
			work_budget -= xircom_rx(dev);

		if (csr5 & (TxNoBuf | TxDied | TxIntr)) {
			unsigned int dirty_tx;

			for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
				 dirty_tx++) {
				int entry = dirty_tx % TX_RING_SIZE;
				int status = tp->tx_ring[entry].status;

				if (status < 0)
					break;	/* It still hasn't been Txed */
				/* Check for Rx filter setup frames. */
				if (tp->tx_skbuff[entry] == NULL)
					continue;

				if (status & Tx0DescError) {
					/* There was a major error, log it. */
#ifndef final_version
					if (xircom_debug > 1)
						printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
						       dev->name, status);
#endif
					tp->stats.tx_errors++;
					if (status & Tx0ManyColl) {
						tp->stats.tx_aborted_errors++;
					}
					if (status & Tx0NoCarrier) tp->stats.tx_carrier_errors++;
					if (status & Tx0LateColl) tp->stats.tx_window_errors++;
					if (status & Tx0Underflow) tp->stats.tx_fifo_errors++;
				} else {
					tp->stats.tx_bytes += tp->tx_ring[entry].length & 0x7ff;
					tp->stats.collisions += (status >> 3) & 15;
					tp->stats.tx_packets++;
				}

				/* Free the original skb. */
				dev_kfree_skb_irq(tp->tx_skbuff[entry]);
				tp->tx_skbuff[entry] = NULL;
			}

#ifndef final_version
			if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
				printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
				       dev->name, dirty_tx, tp->cur_tx, tp->tx_full);
				dirty_tx += TX_RING_SIZE;
			}
#endif

			if (tp->tx_full &&
			    tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
				/* The ring is no longer full */
				tp->tx_full = 0;

			if (tp->tx_full)
				netif_stop_queue(dev);
			else
				netif_wake_queue(dev);

			tp->dirty_tx = dirty_tx;
			if (csr5 & TxDied) {
				if (xircom_debug > 2)
					printk(KERN_WARNING "%s: The transmitter stopped."
					       "  CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
					       dev->name, csr5, inl(ioaddr + CSR6), tp->csr6);
				outl_CSR6(tp->csr6 | EnableRx, ioaddr);
				outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
			}
		}

		/* Log errors. */
		if (csr5 & AbnormalIntr) {	/* Abnormal error summary bit. */
			if (csr5 & LinkChange)
				xircom_media_change(dev);
			if (csr5 & TxFIFOUnderflow) {
				if ((tp->csr6 & TxThreshMask) != TxThreshMask)
					tp->csr6 += (1 << TxThreshShift);	/* Bump up the Tx threshold */
				else
					tp->csr6 |= TxStoreForw;	/* Store-n-forward. */
				/* Restart the transmit process. */
				outl_CSR6(tp->csr6 | EnableRx, ioaddr);
				outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
			}
			if (csr5 & RxDied) {	/* Missed a Rx frame. */
				tp->stats.rx_errors++;
				tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
				outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
			}
			/* Clear all error sources, including undocumented ones! */
			outl(0x0800f7ba, ioaddr + CSR5);
		}
		if (--work_budget < 0) {
			if (xircom_debug > 1)
				printk(KERN_WARNING "%s: Too much work during an interrupt, "
				       "csr5=0x%8.8x.\n", dev->name, csr5);
			/* Acknowledge all interrupt sources. */
			outl(0x8001ffff, ioaddr + CSR5);
			break;
		}
	} while (1);

	if (xircom_debug > 3)
		printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n",
		       dev->name, inl(ioaddr + CSR5));

	spin_unlock(&tp->lock);
	return IRQ_RETVAL(handled);
}

static int
xircom_rx(struct net_device *dev)
{
	struct xircom_private *tp = dev->priv;
	int entry = tp->cur_rx % RX_RING_SIZE;
	int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
	int work_done = 0;

	if (xircom_debug > 4)
		printk(KERN_DEBUG " In xircom_rx(), entry %d %8.8x.\n", entry,
		       tp->rx_ring[entry].status);
	/* If we own the next entry, it's a new packet. Send it up. */
	while (tp->rx_ring[entry].status >= 0) {
		s32 status = tp->rx_ring[entry].status;

		if (xircom_debug > 5)
			printk(KERN_DEBUG " In xircom_rx(), entry %d %8.8x.\n", entry,
			       tp->rx_ring[entry].status);
		if (--rx_work_limit < 0)
			break;
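		/* (The mask below keeps the error summary bit (Rx0DescError),
		   the first/last-segment bits and a few high status bits; a
		   good frame must show exactly Rx0WholePkt, i.e. a complete
		   packet in a single buffer with no errors flagged.) */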
		if ((status & 0x38008300) != 0x0300) {
			if ((status & 0x38000300) != 0x0300) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					if (xircom_debug > 1)
						printk(KERN_WARNING "%s: Oversized Ethernet frame "
						       "spanned multiple buffers, status %8.8x!\n",
						       dev->name, status);
					tp->stats.rx_length_errors++;
				}
			} else if (status & Rx0DescError) {
				/* There was a fatal error. */
				if (xircom_debug > 2)
					printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
					       dev->name, status);
				tp->stats.rx_errors++;	/* end of a packet. */
				if (status & (Rx0Runt | Rx0HugeFrame)) tp->stats.rx_length_errors++;
				if (status & Rx0CRCError) tp->stats.rx_crc_errors++;
			}
		} else {
			/* Omit the four octet CRC from the length. */
			short pkt_len = ((status >> 16) & 0x7ff) - 4;
			struct sk_buff *skb;

#ifndef final_version
			if (pkt_len > 1518) {
				printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
				       dev->name, pkt_len, pkt_len);
				pkt_len = 1518;
				tp->stats.rx_length_errors++;
			}
#endif
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak
			    && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
#if ! defined(__alpha__)
				eth_copy_and_sum(skb, bus_to_virt(tp->rx_ring[entry].buffer1),
						 pkt_len, 0);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb, pkt_len),
				       bus_to_virt(tp->rx_ring[entry].buffer1), pkt_len);
#endif
				work_done++;
			} else {	/* Pass up the skb already on the Rx ring. */
				skb_put(skb = tp->rx_skbuff[entry], pkt_len);
				tp->rx_skbuff[entry] = NULL;
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			dev->last_rx = jiffies;
			tp->stats.rx_packets++;
			tp->stats.rx_bytes += pkt_len;
		}
		entry = (++tp->cur_rx) % RX_RING_SIZE;
	}

	/* Refill the Rx ring buffers. */
	for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
		entry = tp->dirty_rx % RX_RING_SIZE;
		if (tp->rx_skbuff[entry] == NULL) {
			struct sk_buff *skb;
			skb = tp->rx_skbuff[entry] = dev_alloc_skb(PKT_BUF_SZ);
			if (skb == NULL)
				break;
			skb->dev = dev;		/* Mark as being used by this device. */
			tp->rx_ring[entry].buffer1 = virt_to_bus(skb->tail);
			work_done++;
		}
		tp->rx_ring[entry].status = Rx0DescOwned;
	}

	return work_done;
}

static void
xircom_down(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct xircom_private *tp = dev->priv;

	/* Disable interrupts by clearing the interrupt mask. */
	outl(0, ioaddr + CSR7);
	/* Stop the chip's Tx and Rx processes. */
	outl_CSR6(inl(ioaddr + CSR6) & ~EnableTxRx, ioaddr);

	if (inl(ioaddr + CSR6) != 0xffffffff)
		tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;

	dev->if_port = tp->saved_if_port;
}

static int
xircom_close(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct xircom_private *tp = dev->priv;
	int i;

	if (xircom_debug > 1)
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
		       dev->name, inl(ioaddr + CSR5));

	netif_stop_queue(dev);

	if (netif_device_present(dev))
		xircom_down(dev);

	free_irq(dev->irq, dev);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = tp->rx_skbuff[i];
		tp->rx_skbuff[i] = NULL;
		tp->rx_ring[i].status = 0;		/* Not owned by Xircom chip. */
		tp->rx_ring[i].length = 0;
		tp->rx_ring[i].buffer1 = 0xBADF00D0;	/* An invalid address. */
		if (skb) {
			dev_kfree_skb(skb);
		}
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		if (tp->tx_skbuff[i])
			dev_kfree_skb(tp->tx_skbuff[i]);
		tp->tx_skbuff[i] = NULL;
	}

	tp->open = 0;
	return 0;
}

static struct net_device_stats *xircom_get_stats(struct net_device *dev)
{
	struct xircom_private *tp = dev->priv;
	long ioaddr = dev->base_addr;

	if (netif_device_present(dev))
		tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;

	return &tp->stats;
}

static int xircom_ethtool_ioctl(struct net_device *dev, void __user *useraddr)
{
	struct ethtool_cmd ecmd;
	struct xircom_private *tp = dev->priv;

	if (copy_from_user(&ecmd, useraddr, sizeof(ecmd)))
		return -EFAULT;

	switch (ecmd.cmd) {
	case ETHTOOL_GSET:
		ecmd.supported =
			SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_Autoneg |
			SUPPORTED_MII;

		ecmd.advertising = ADVERTISED_MII;
		if (tp->advertising[0] & ADVERTISE_10HALF)
			ecmd.advertising |= ADVERTISED_10baseT_Half;
		if (tp->advertising[0] & ADVERTISE_10FULL)
			ecmd.advertising |= ADVERTISED_10baseT_Full;
		if (tp->advertising[0] & ADVERTISE_100HALF)
			ecmd.advertising |= ADVERTISED_100baseT_Half;
		if (tp->advertising[0] & ADVERTISE_100FULL)
			ecmd.advertising |= ADVERTISED_100baseT_Full;
		if (tp->autoneg) {
			ecmd.advertising |= ADVERTISED_Autoneg;
			ecmd.autoneg = AUTONEG_ENABLE;
		} else
			ecmd.autoneg = AUTONEG_DISABLE;

		ecmd.port = PORT_MII;
		ecmd.transceiver = XCVR_INTERNAL;
		ecmd.phy_address = tp->phys[0];
		ecmd.speed = tp->speed100 ? SPEED_100 : SPEED_10;
		ecmd.duplex = tp->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
		ecmd.maxtxpkt = TX_RING_SIZE / 2;
		ecmd.maxrxpkt = 0;

		if (copy_to_user(useraddr, &ecmd, sizeof(ecmd)))
			return -EFAULT;
		return 0;

	case ETHTOOL_SSET: {
		u16 autoneg, speed100, full_duplex;

		autoneg = (ecmd.autoneg == AUTONEG_ENABLE);
		speed100 = (ecmd.speed == SPEED_100);
		full_duplex = (ecmd.duplex == DUPLEX_FULL);

		tp->autoneg = autoneg;
		if (speed100 != tp->speed100 ||
		    full_duplex != tp->full_duplex) {
			tp->speed100 = speed100;
			tp->full_duplex = full_duplex;
			/* change advertising bits */
			tp->advertising[0] &= ~(ADVERTISE_10HALF |
					ADVERTISE_10FULL |
					ADVERTISE_100HALF |
					ADVERTISE_100FULL |
					ADVERTISE_100BASE4);
			if (speed100) {
				if (full_duplex)
					tp->advertising[0] |= ADVERTISE_100FULL;
				else
					tp->advertising[0] |= ADVERTISE_100HALF;
			} else {
				if (full_duplex)
					tp->advertising[0] |= ADVERTISE_10FULL;
				else
					tp->advertising[0] |= ADVERTISE_10HALF;
			}
		}
		check_duplex(dev);
		return 0;
	}

	case ETHTOOL_GDRVINFO: {
		struct ethtool_drvinfo info;
		memset(&info, 0, sizeof(info));
		info.cmd = ecmd.cmd;
		strcpy(info.driver, DRV_NAME);
		strcpy(info.version, DRV_VERSION);
		*info.fw_version = 0;
		strcpy(info.bus_info, pci_name(tp->pdev));
		if (copy_to_user(useraddr, &info, sizeof(info)))
			return -EFAULT;
		return 0;
	}

	default:
		return -EOPNOTSUPP;
	}
}

/* Provide ioctl() calls to examine the MII xcvr state. */
static int xircom_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct xircom_private *tp = dev->priv;
	u16 *data = (u16 *)&rq->ifr_ifru;
	int phy = tp->phys[0] & 0x1f;
	unsigned long flags;

	switch (cmd) {
	case SIOCETHTOOL:
		return xircom_ethtool_ioctl(dev, rq->ifr_data);

	/* Legacy mii-diag interface */
	case SIOCGMIIPHY:	/* Get address of MII PHY in use. */
		if (tp->mii_cnt)
			data[0] = phy;
		else
			return -ENODEV;
		return 0;
	case SIOCGMIIREG:	/* Read MII PHY register. */
		save_flags(flags);
		cli();
		data[3] = mdio_read(dev, data[0] & 0x1f, data[1] & 0x1f);
		restore_flags(flags);
		return 0;
	case SIOCSMIIREG:	/* Write MII PHY register. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		save_flags(flags);
		cli();
		if (data[0] == tp->phys[0]) {
			u16 value = data[2];
			switch (data[1]) {
			case 0:
				if (value & (BMCR_RESET | BMCR_ANENABLE))
					/* Autonegotiation. */
					tp->autoneg = 1;
				else {
					tp->full_duplex = (value & BMCR_FULLDPLX) ? 1 : 0;
					tp->autoneg = 0;
				}
				break;
			case 4:
				tp->advertising[0] = value;
				break;
			}
			check_duplex(dev);
		}
		mdio_write(dev, data[0] & 0x1f, data[1] & 0x1f, data[2]);
		restore_flags(flags);
		return 0;
	default:
		return -EOPNOTSUPP;
	}

	return -EOPNOTSUPP;
}

/* Set or clear the multicast filter for this adaptor.
   Note that we only use exclusion around actually queueing the
   new frame, not around filling tp->setup_frame.  This is non-deterministic
   when re-entered but still correct. */
static void set_rx_mode(struct net_device *dev)
{
	struct xircom_private *tp = dev->priv;
	struct dev_mc_list *mclist;
	long ioaddr = dev->base_addr;
	int csr6 = inl(ioaddr + CSR6);
	u16 *eaddrs, *setup_frm;
	u32 tx_flags;
	int i;

	tp->csr6 &= ~(AllMultiBit | PromiscBit | HashFilterBit);
	csr6 &= ~(AllMultiBit | PromiscBit | HashFilterBit);
	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
		tp->csr6 |= PromiscBit;
		csr6 |= PromiscBit;
		goto out;
	}

	if ((dev->mc_count > 1000) || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter well -- accept all multicasts. */
		tp->csr6 |= AllMultiBit;
		csr6 |= AllMultiBit;
		goto out;
	}

	tx_flags = Tx1WholePkt | Tx1SetupPkt | PKT_SETUP_SZ;

	/* Note that only the low-address shortword of setup_frame is valid! */
	setup_frm = tp->setup_frame;
	mclist = dev->mc_list;

	/* Fill the first entry with our physical address. */
	eaddrs = (u16 *)dev->dev_addr;
	*setup_frm = cpu_to_le16(eaddrs[0]); setup_frm += 2;
	*setup_frm = cpu_to_le16(eaddrs[1]); setup_frm += 2;
	*setup_frm = cpu_to_le16(eaddrs[2]); setup_frm += 2;
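	/* (Hence the += 2 on a u16 pointer: the chip reads the setup buffer
	   as 32-bit words but only honours the low 16 bits of each, so each
	   address halfword occupies one longword slot.) */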

	if (dev->mc_count > 14) {	/* Must use a multicast hash table. */
		u32 *hash_table = (u32 *)(tp->setup_frame + 4 * 12);
		u32 hash, hash2;

		tx_flags |= Tx1HashSetup;
		tp->csr6 |= HashFilterBit;
		csr6 |= HashFilterBit;

		/* Fill the unused 3 entries with the broadcast address.
		   At least one entry *must* contain the broadcast address!!! */
		for (i = 0; i < 3; i++) {
			*setup_frm = 0xffff; setup_frm += 2;
			*setup_frm = 0xffff; setup_frm += 2;
			*setup_frm = 0xffff; setup_frm += 2;
		}

		/* Truly brain-damaged hash filter layout */
		/* XXX: not sure if I should take the last or the first 9 bits */
		for (i = 0; i < dev->mc_count; i++, mclist = mclist->next) {
			u32 *hptr;
			hash = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x1ff;
			if (hash < 384) {
				hash2 = hash + ((hash >> 4) << 4) +
					((hash >> 5) << 5);
			} else {
				hash -= 384;
				hash2 = 64 + hash + (hash >> 4) * 80;
			}
			hptr = &hash_table[hash2 & ~0x1f];
			*hptr |= cpu_to_le32(1 << (hash2 & 0x1f));
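			/* (The arithmetic above apparently scatters the 9-bit
			   CRC hash across the setup buffer so that it lands
			   only in the halfwords the chip honours -- see the
			   "low-address shortword" note above.) */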
		}
	} else {
		/* We have <= 14 mcast addresses so we can use Xircom's
		   wonderful 16-address perfect filter. */
		for (i = 0; i < dev->mc_count; i++, mclist = mclist->next) {
			eaddrs = (u16 *)mclist->dmi_addr;
			*setup_frm = cpu_to_le16(eaddrs[0]); setup_frm += 2;
			*setup_frm = cpu_to_le16(eaddrs[1]); setup_frm += 2;
			*setup_frm = cpu_to_le16(eaddrs[2]); setup_frm += 2;
		}
		/* Fill the unused entries with the broadcast address.
		   At least one entry *must* contain the broadcast address!!! */
		for (; i < 15; i++) {
			*setup_frm = 0xffff; setup_frm += 2;
			*setup_frm = 0xffff; setup_frm += 2;
			*setup_frm = 0xffff; setup_frm += 2;
		}
	}

	/* Now add this frame to the Tx list. */
	if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE - 2) {
		/* Same setup recently queued, we need not add it. */
		/* XXX: Huh? All it means is that the Tx list is full... */
	} else {
		unsigned long flags;
		unsigned int entry;
		int dummy = -1;

		save_flags(flags); cli();
		entry = tp->cur_tx++ % TX_RING_SIZE;

		if (entry != 0) {
			/* Avoid a chip erratum by prefixing a dummy entry. */
			tp->tx_skbuff[entry] = NULL;
			tp->tx_ring[entry].length =
				(entry == TX_RING_SIZE - 1) ? Tx1RingWrap : 0;
			tp->tx_ring[entry].buffer1 = 0;
			/* race with chip, set Tx0DescOwned later */
			dummy = entry;
			entry = tp->cur_tx++ % TX_RING_SIZE;
		}

		tp->tx_skbuff[entry] = NULL;
		/* Put the setup frame on the Tx list. */
		if (entry == TX_RING_SIZE - 1)
			tx_flags |= Tx1RingWrap;	/* Wrap ring. */
		tp->tx_ring[entry].length = tx_flags;
		tp->tx_ring[entry].buffer1 = virt_to_bus(tp->setup_frame);
		tp->tx_ring[entry].status = Tx0DescOwned;
		if (tp->cur_tx - tp->dirty_tx >= TX_RING_SIZE - 2) {
			tp->tx_full = 1;
			netif_stop_queue(dev);
		}
		if (dummy >= 0)
			tp->tx_ring[dummy].status = Tx0DescOwned;
		restore_flags(flags);
		/* Trigger an immediate transmit demand. */
		outl(0, ioaddr + CSR1);
	}

out:
	outl_CSR6(csr6, ioaddr);
}

static struct pci_device_id xircom_pci_table[] = {
	{ 0x115D, 0x0003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, X3201_3 },
	{0},
};
MODULE_DEVICE_TABLE(pci, xircom_pci_table);

#ifdef CONFIG_PM
static int xircom_suspend(struct pci_dev *pdev, u32 state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct xircom_private *tp = dev->priv;
	printk(KERN_INFO "xircom_suspend(%s)\n", dev->name);
	if (tp->open)
		xircom_down(dev);

	pci_save_state(pdev, tp->pci_state);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, 3);

	return 0;
}


static int xircom_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct xircom_private *tp = dev->priv;
	printk(KERN_INFO "xircom_resume(%s)\n", dev->name);

	pci_set_power_state(pdev, 0);
	pci_enable_device(pdev);
	pci_restore_state(pdev, tp->pci_state);

	/* Bring the chip out of sleep mode.
	   Caution: Snooze mode does not work with some boards! */
	if (xircom_tbl[tp->chip_id].flags & HAS_ACPI)
		pci_write_config_dword(tp->pdev, PCI_POWERMGMT, 0);

	transceiver_voodoo(dev);
	if (xircom_tbl[tp->chip_id].flags & HAS_MII)
		check_duplex(dev);

	if (tp->open)
		xircom_up(dev);
	return 0;
}
#endif /* CONFIG_PM */

static void __devexit xircom_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	printk(KERN_INFO "xircom_remove_one(%s)\n", dev->name);
	unregister_netdev(dev);
	pci_release_regions(pdev);
	free_netdev(dev);
	pci_set_drvdata(pdev, NULL);
}


static struct pci_driver xircom_driver = {
	.name		= DRV_NAME,
	.id_table	= xircom_pci_table,
	.probe		= xircom_init_one,
	.remove		= __devexit_p(xircom_remove_one),
#ifdef CONFIG_PM
	.suspend	= xircom_suspend,
	.resume		= xircom_resume,
#endif /* CONFIG_PM */
};

static int __init xircom_init(void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk(version);
#endif
	return pci_module_init(&xircom_driver);
}


static void __exit xircom_exit(void)
{
	pci_unregister_driver(&xircom_driver);
}

module_init(xircom_init)
module_exit(xircom_exit)

/*
 * Local variables:
 *  c-indent-level: 4
 *  c-basic-offset: 4
 *  tab-width: 4
 * End:
 */