[netdrvr] tulip, de2104x: fix typo: s/__sparc_/__sparc__/
drivers/net/tulip/tulip_core.c
1 /* tulip_core.c: A DEC 21x4x-family ethernet driver for Linux. */
3 /*
4 Maintained by Jeff Garzik <jgarzik@pobox.com>
5 Copyright 2000,2001 The Linux Kernel Team
6 Written/copyright 1994-2001 by Donald Becker.
8 This software may be used and distributed according to the terms
9 of the GNU General Public License, incorporated herein by reference.
11 Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
12 for more information on this driver, or visit the project
13 Web page at http://sourceforge.net/projects/tulip/
14 */
17 #include <linux/config.h>
19 #define DRV_NAME "tulip"
20 #ifdef CONFIG_TULIP_NAPI
21 #define DRV_VERSION "1.1.13-NAPI" /* Keep at least for test */
22 #else
23 #define DRV_VERSION "1.1.13"
24 #endif
25 #define DRV_RELDATE "May 11, 2002"
28 #include <linux/module.h>
29 #include <linux/pci.h>
30 #include "tulip.h"
31 #include <linux/init.h>
32 #include <linux/etherdevice.h>
33 #include <linux/delay.h>
34 #include <linux/mii.h>
35 #include <linux/ethtool.h>
36 #include <linux/crc32.h>
37 #include <asm/unaligned.h>
38 #include <asm/uaccess.h>
40 #ifdef __sparc__
41 #include <asm/pbm.h>
42 #endif
44 static char version[] __devinitdata =
45 "Linux Tulip driver version " DRV_VERSION " (" DRV_RELDATE ")\n";
48 /* A few user-configurable values. */
50 /* Maximum events (Rx packets, etc.) to handle at each interrupt. */
51 static unsigned int max_interrupt_work = 25;
53 #define MAX_UNITS 8
54 /* Used to pass the full-duplex flag, etc. */
55 static int full_duplex[MAX_UNITS];
56 static int options[MAX_UNITS];
57 static int mtu[MAX_UNITS]; /* Jumbo MTU for interfaces. */
59 /* The possible media types that can be set in options[] are: */
60 const char * const medianame[32] = {
61 "10baseT", "10base2", "AUI", "100baseTx",
62 "10baseT-FDX", "100baseTx-FDX", "100baseT4", "100baseFx",
63 "100baseFx-FDX", "MII 10baseT", "MII 10baseT-FDX", "MII",
64 "10baseT(forced)", "MII 100baseTx", "MII 100baseTx-FDX", "MII 100baseT4",
65 "MII 100baseFx-HDX", "MII 100baseFx-FDX", "Home-PNA 1Mbps", "Invalid-19",
66 "","","","", "","","","", "","","","Transceiver reset",
67 };
69 /* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
70 #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) \
71 || defined(__sparc__) || defined(__ia64__) \
72 || defined(__sh__) || defined(__mips__)
73 static int rx_copybreak = 1518;
74 #else
75 static int rx_copybreak = 100;
76 #endif
79 /* Set the bus performance register.
80 Typical: Set 16 longword cache alignment, no burst limit.
81 Cache alignment bits 15:14 Burst length 13:8
82 0000 No alignment 0x00000000 unlimited 0800 8 longwords
83 4000 8 longwords 0100 1 longword 1000 16 longwords
84 8000 16 longwords 0200 2 longwords 2000 32 longwords
85 C000 32 longwords 0400 4 longwords
86 Warning: many older 486 systems are broken and require setting 0x00A04800
87 8 longword cache alignment, 8 longword burst.
88 ToDo: Non-Intel setting could be better. */
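/* Decoding the constants chosen below against the table above: 0x8000 is
 * 16-longword cache alignment with an unlimited burst, 0xE000 is 32-longword
 * alignment with a 32-longword burst, 0x9000 is 16-longword alignment with a
 * 16-longword (64-byte) burst to match the UltraSparc note below, 0x4800 is
 * the 8-longword/8-longword setting the warning above recommends for old
 * 486 systems, and 0x4000 is 8-longword alignment with no burst limit. */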
91 #if defined(__alpha__) || defined(__ia64__)
92 static int csr0 = 0x01A00000 | 0xE000;
93 #elif defined(__i386__) || defined(__powerpc__) || defined(__x86_64__)
94 static int csr0 = 0x01A00000 | 0x8000;
95 #elif defined(__sparc__) || defined(__hppa__)
96 /* The UltraSparc PCI controllers will disconnect at every 64-byte
97 * crossing anyways so it makes no sense to tell Tulip to burst
98 * any more than that. */
100 static int csr0 = 0x01A00000 | 0x9000;
101 #elif defined(__arm__) || defined(__sh__)
102 static int csr0 = 0x01A00000 | 0x4800;
103 #elif defined(__mips__)
104 static int csr0 = 0x00200000 | 0x4000;
105 #else
106 #warning Processor architecture undefined!
107 static int csr0 = 0x00A00000 | 0x4800;
108 #endif
110 /* Operational parameters that usually are not changed. */
111 /* Time in jiffies before concluding the transmitter is hung. */
112 #define TX_TIMEOUT (4*HZ)
115 MODULE_AUTHOR("The Linux Kernel Team");
116 MODULE_DESCRIPTION("Digital 21*4* Tulip ethernet driver");
117 MODULE_LICENSE("GPL");
118 MODULE_VERSION(DRV_VERSION);
119 module_param(tulip_debug, int, 0);
120 module_param(max_interrupt_work, int, 0);
121 module_param(rx_copybreak, int, 0);
122 module_param(csr0, int, 0);
123 module_param_array(options, int, NULL, 0);
124 module_param_array(full_duplex, int, NULL, 0);
126 #define PFX DRV_NAME ": "
128 #ifdef TULIP_DEBUG
129 int tulip_debug = TULIP_DEBUG;
130 #else
131 int tulip_debug = 1;
132 #endif
137 /* This table is used during operation for capabilities and the media timer.
139 It is indexed via the values in 'enum chips'. */
142 struct tulip_chip_table tulip_tbl[] = {
143 { }, /* placeholder for array, slot unused currently */
144 { }, /* placeholder for array, slot unused currently */
146 /* DC21140 */
147 { "Digital DS21140 Tulip", 128, 0x0001ebef,
148 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_PCI_MWI, tulip_timer },
150 /* DC21142, DC21143 */
151 { "Digital DS21143 Tulip", 128, 0x0801fbff,
152 HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI | HAS_NWAY
153 | HAS_INTR_MITIGATION | HAS_PCI_MWI, t21142_timer },
155 /* LC82C168 */
156 { "Lite-On 82c168 PNIC", 256, 0x0001fbef,
157 HAS_MII | HAS_PNICNWAY, pnic_timer },
159 /* MX98713 */
160 { "Macronix 98713 PMAC", 128, 0x0001ebef,
161 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer },
163 /* MX98715 */
164 { "Macronix 98715 PMAC", 256, 0x0001ebef,
165 HAS_MEDIA_TABLE, mxic_timer },
167 /* MX98725 */
168 { "Macronix 98725 PMAC", 256, 0x0001ebef,
169 HAS_MEDIA_TABLE, mxic_timer },
171 /* AX88140 */
172 { "ASIX AX88140", 128, 0x0001fbff,
173 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | MC_HASH_ONLY
174 | IS_ASIX, tulip_timer },
176 /* PNIC2 */
177 { "Lite-On PNIC-II", 256, 0x0801fbff,
178 HAS_MII | HAS_NWAY | HAS_8023X | HAS_PCI_MWI, pnic2_timer },
180 /* COMET */
181 { "ADMtek Comet", 256, 0x0001abef,
182 HAS_MII | MC_HASH_ONLY | COMET_MAC_ADDR, comet_timer },
184 /* COMPEX9881 */
185 { "Compex 9881 PMAC", 128, 0x0001ebef,
186 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer },
188 /* I21145 */
189 { "Intel DS21145 Tulip", 128, 0x0801fbff,
190 HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI
191 | HAS_NWAY | HAS_PCI_MWI, t21142_timer },
193 /* DM910X */
194 { "Davicom DM9102/DM9102A", 128, 0x0001ebef,
195 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI,
196 tulip_timer },
198 /* RS7112 */
199 { "Conexant LANfinity", 256, 0x0001ebef,
200 HAS_MII | HAS_ACPI, tulip_timer },
201 };
205 static struct pci_device_id tulip_pci_tbl[] = {
206 { 0x1011, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21140 },
207 { 0x1011, 0x0019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21143 },
208 { 0x11AD, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, LC82C168 },
209 { 0x10d9, 0x0512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98713 },
210 { 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
211 /* { 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98725 },*/
212 { 0x125B, 0x1400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AX88140 },
213 { 0x11AD, 0xc115, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PNIC2 },
214 { 0x1317, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
215 { 0x1317, 0x0985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
216 { 0x1317, 0x1985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
217 { 0x1317, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
218 { 0x13D1, 0xAB02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
219 { 0x13D1, 0xAB03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
220 { 0x13D1, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
221 { 0x104A, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
222 { 0x104A, 0x2774, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
223 { 0x1259, 0xa120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
224 { 0x11F6, 0x9881, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMPEX9881 },
225 { 0x8086, 0x0039, PCI_ANY_ID, PCI_ANY_ID, 0, 0, I21145 },
226 { 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
227 { 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
228 { 0x1113, 0x1216, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
229 { 0x1113, 0x1217, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
230 { 0x1113, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
231 { 0x1186, 0x1541, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
232 { 0x1186, 0x1561, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
233 { 0x1186, 0x1591, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
234 { 0x14f1, 0x1803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CONEXANT },
235 { 0x1626, 0x8410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
236 { 0x1737, 0xAB09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
237 { 0x1737, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
238 { 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
239 { 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */
240 { 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Planex FNW-3602-TX */
241 { 0x1414, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
242 { } /* terminate list */
243 };
244 MODULE_DEVICE_TABLE(pci, tulip_pci_tbl);
247 /* A full-duplex map for media types. */
248 const char tulip_media_cap[32] =
249 {0,0,0,16, 3,19,16,24, 27,4,7,5, 0,20,23,20, 28,31,0,0, };
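/* Each entry lines up with the same index in medianame[] above; the values
 * are capability bitmasks (MediaIsMII, MediaIsFD, MediaIs100, MediaAlwaysFD,
 * presumably defined in tulip.h) that the driver tests when choosing and
 * validating a media type. */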
251 static void tulip_tx_timeout(struct net_device *dev);
252 static void tulip_init_ring(struct net_device *dev);
253 static int tulip_start_xmit(struct sk_buff *skb, struct net_device *dev);
254 static int tulip_open(struct net_device *dev);
255 static int tulip_close(struct net_device *dev);
256 static void tulip_up(struct net_device *dev);
257 static void tulip_down(struct net_device *dev);
258 static struct net_device_stats *tulip_get_stats(struct net_device *dev);
259 static int private_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
260 static void set_rx_mode(struct net_device *dev);
261 #ifdef CONFIG_NET_POLL_CONTROLLER
262 static void poll_tulip(struct net_device *dev);
263 #endif
265 static void tulip_set_power_state (struct tulip_private *tp,
266 int sleep, int snooze)
268 if (tp->flags & HAS_ACPI) {
269 u32 tmp, newtmp;
270 pci_read_config_dword (tp->pdev, CFDD, &tmp);
271 newtmp = tmp & ~(CFDD_Sleep | CFDD_Snooze);
272 if (sleep)
273 newtmp |= CFDD_Sleep;
274 else if (snooze)
275 newtmp |= CFDD_Snooze;
276 if (tmp != newtmp)
277 pci_write_config_dword (tp->pdev, CFDD, newtmp);
283 static void tulip_up(struct net_device *dev)
285 struct tulip_private *tp = netdev_priv(dev);
286 void __iomem *ioaddr = tp->base_addr;
287 int next_tick = 3*HZ;
288 int i;
290 /* Wake the chip from sleep/snooze mode. */
291 tulip_set_power_state (tp, 0, 0);
293 /* On some chip revs we must set the MII/SYM port before the reset!? */
294 if (tp->mii_cnt || (tp->mtable && tp->mtable->has_mii))
295 iowrite32(0x00040000, ioaddr + CSR6);
297 /* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
298 iowrite32(0x00000001, ioaddr + CSR0);
299 udelay(100);
301 /* Deassert reset.
302 Wait the specified 50 PCI cycles after a reset by initializing
303 Tx and Rx queues and the address filter list. */
304 iowrite32(tp->csr0, ioaddr + CSR0);
305 udelay(100);
307 if (tulip_debug > 1)
308 printk(KERN_DEBUG "%s: tulip_up(), irq==%d.\n", dev->name, dev->irq);
310 iowrite32(tp->rx_ring_dma, ioaddr + CSR3);
311 iowrite32(tp->tx_ring_dma, ioaddr + CSR4);
312 tp->cur_rx = tp->cur_tx = 0;
313 tp->dirty_rx = tp->dirty_tx = 0;
315 if (tp->flags & MC_HASH_ONLY) {
316 u32 addr_low = le32_to_cpu(get_unaligned((u32 *)dev->dev_addr));
317 u32 addr_high = le16_to_cpu(get_unaligned((u16 *)(dev->dev_addr+4)));
318 if (tp->chip_id == AX88140) {
319 iowrite32(0, ioaddr + CSR13);
320 iowrite32(addr_low, ioaddr + CSR14);
321 iowrite32(1, ioaddr + CSR13);
322 iowrite32(addr_high, ioaddr + CSR14);
323 } else if (tp->flags & COMET_MAC_ADDR) {
324 iowrite32(addr_low, ioaddr + 0xA4);
325 iowrite32(addr_high, ioaddr + 0xA8);
326 iowrite32(0, ioaddr + 0xAC);
327 iowrite32(0, ioaddr + 0xB0);
329 } else {
330 /* This is set_rx_mode(), but without starting the transmitter. */
331 u16 *eaddrs = (u16 *)dev->dev_addr;
332 u16 *setup_frm = &tp->setup_frame[15*6];
333 dma_addr_t mapping;
335 /* 21140 bug: you must add the broadcast address. */
336 memset(tp->setup_frame, 0xff, sizeof(tp->setup_frame));
337 /* Fill the final entry of the table with our physical address. */
338 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
339 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
340 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
342 mapping = pci_map_single(tp->pdev, tp->setup_frame,
343 sizeof(tp->setup_frame),
344 PCI_DMA_TODEVICE);
345 tp->tx_buffers[tp->cur_tx].skb = NULL;
346 tp->tx_buffers[tp->cur_tx].mapping = mapping;
348 /* Put the setup frame on the Tx list. */
349 tp->tx_ring[tp->cur_tx].length = cpu_to_le32(0x08000000 | 192);
350 tp->tx_ring[tp->cur_tx].buffer1 = cpu_to_le32(mapping);
351 tp->tx_ring[tp->cur_tx].status = cpu_to_le32(DescOwned);
353 tp->cur_tx++;
356 tp->saved_if_port = dev->if_port;
357 if (dev->if_port == 0)
358 dev->if_port = tp->default_port;
360 /* Allow selecting a default media. */
361 i = 0;
362 if (tp->mtable == NULL)
363 goto media_picked;
364 if (dev->if_port) {
365 int looking_for = tulip_media_cap[dev->if_port] & MediaIsMII ? 11 :
366 (dev->if_port == 12 ? 0 : dev->if_port);
367 for (i = 0; i < tp->mtable->leafcount; i++)
368 if (tp->mtable->mleaf[i].media == looking_for) {
369 printk(KERN_INFO "%s: Using user-specified media %s.\n",
370 dev->name, medianame[dev->if_port]);
371 goto media_picked;
374 if ((tp->mtable->defaultmedia & 0x0800) == 0) {
375 int looking_for = tp->mtable->defaultmedia & MEDIA_MASK;
376 for (i = 0; i < tp->mtable->leafcount; i++)
377 if (tp->mtable->mleaf[i].media == looking_for) {
378 printk(KERN_INFO "%s: Using EEPROM-set media %s.\n",
379 dev->name, medianame[looking_for]);
380 goto media_picked;
383 /* Start sensing first non-full-duplex media. */
384 for (i = tp->mtable->leafcount - 1;
385 (tulip_media_cap[tp->mtable->mleaf[i].media] & MediaAlwaysFD) && i > 0; i--)
387 media_picked:
389 tp->csr6 = 0;
390 tp->cur_index = i;
391 tp->nwayset = 0;
393 if (dev->if_port) {
394 if (tp->chip_id == DC21143 &&
395 (tulip_media_cap[dev->if_port] & MediaIsMII)) {
396 /* We must reset the media CSRs when we force-select MII mode. */
397 iowrite32(0x0000, ioaddr + CSR13);
398 iowrite32(0x0000, ioaddr + CSR14);
399 iowrite32(0x0008, ioaddr + CSR15);
401 tulip_select_media(dev, 1);
402 } else if (tp->chip_id == DC21142) {
403 if (tp->mii_cnt) {
404 tulip_select_media(dev, 1);
405 if (tulip_debug > 1)
406 printk(KERN_INFO "%s: Using MII transceiver %d, status "
407 "%4.4x.\n",
408 dev->name, tp->phys[0], tulip_mdio_read(dev, tp->phys[0], 1));
409 iowrite32(csr6_mask_defstate, ioaddr + CSR6);
410 tp->csr6 = csr6_mask_hdcap;
411 dev->if_port = 11;
412 iowrite32(0x0000, ioaddr + CSR13);
413 iowrite32(0x0000, ioaddr + CSR14);
414 } else
415 t21142_start_nway(dev);
416 } else if (tp->chip_id == PNIC2) {
417 /* for initial startup advertise 10/100 Full and Half */
418 tp->sym_advertise = 0x01E0;
419 /* enable autonegotiate end interrupt */
420 iowrite32(ioread32(ioaddr+CSR5)| 0x00008010, ioaddr + CSR5);
421 iowrite32(ioread32(ioaddr+CSR7)| 0x00008010, ioaddr + CSR7);
422 pnic2_start_nway(dev);
423 } else if (tp->chip_id == LC82C168 && ! tp->medialock) {
424 if (tp->mii_cnt) {
425 dev->if_port = 11;
426 tp->csr6 = 0x814C0000 | (tp->full_duplex ? 0x0200 : 0);
427 iowrite32(0x0001, ioaddr + CSR15);
428 } else if (ioread32(ioaddr + CSR5) & TPLnkPass)
429 pnic_do_nway(dev);
430 else {
431 /* Start with 10mbps to do autonegotiation. */
432 iowrite32(0x32, ioaddr + CSR12);
433 tp->csr6 = 0x00420000;
434 iowrite32(0x0001B078, ioaddr + 0xB8);
435 iowrite32(0x0201B078, ioaddr + 0xB8);
436 next_tick = 1*HZ;
438 } else if ((tp->chip_id == MX98713 || tp->chip_id == COMPEX9881)
439 && ! tp->medialock) {
440 dev->if_port = 0;
441 tp->csr6 = 0x01880000 | (tp->full_duplex ? 0x0200 : 0);
442 iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80);
443 } else if (tp->chip_id == MX98715 || tp->chip_id == MX98725) {
444 /* Provided by BOLO, Macronix - 12/10/1998. */
445 dev->if_port = 0;
446 tp->csr6 = 0x01a80200;
447 iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80);
448 iowrite32(0x11000 | ioread16(ioaddr + 0xa0), ioaddr + 0xa0);
449 } else if (tp->chip_id == COMET || tp->chip_id == CONEXANT) {
450 /* Enable automatic Tx underrun recovery. */
451 iowrite32(ioread32(ioaddr + 0x88) | 1, ioaddr + 0x88);
452 dev->if_port = tp->mii_cnt ? 11 : 0;
453 tp->csr6 = 0x00040000;
454 } else if (tp->chip_id == AX88140) {
455 tp->csr6 = tp->mii_cnt ? 0x00040100 : 0x00000100;
456 } else
457 tulip_select_media(dev, 1);
459 /* Start the chip's Tx to process setup frame. */
460 tulip_stop_rxtx(tp);
461 barrier();
462 udelay(5);
463 iowrite32(tp->csr6 | TxOn, ioaddr + CSR6);
465 /* Enable interrupts by setting the interrupt mask. */
466 iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR5);
467 iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
468 tulip_start_rxtx(tp);
469 iowrite32(0, ioaddr + CSR2); /* Rx poll demand */
471 if (tulip_debug > 2) {
472 printk(KERN_DEBUG "%s: Done tulip_up(), CSR0 %8.8x, CSR5 %8.8x CSR6 %8.8x.\n",
473 dev->name, ioread32(ioaddr + CSR0), ioread32(ioaddr + CSR5),
474 ioread32(ioaddr + CSR6));
477 /* Set the timer to switch to check for link beat and perhaps switch
478 to an alternate media type. */
479 tp->timer.expires = RUN_AT(next_tick);
480 add_timer(&tp->timer);
481 #ifdef CONFIG_TULIP_NAPI
482 init_timer(&tp->oom_timer);
483 tp->oom_timer.data = (unsigned long)dev;
484 tp->oom_timer.function = oom_timer;
485 #endif
488 static int
489 tulip_open(struct net_device *dev)
491 int retval;
493 if ((retval = request_irq(dev->irq, &tulip_interrupt, SA_SHIRQ, dev->name, dev)))
494 return retval;
496 tulip_init_ring (dev);
498 tulip_up (dev);
500 netif_start_queue (dev);
502 return 0;
506 static void tulip_tx_timeout(struct net_device *dev)
508 struct tulip_private *tp = netdev_priv(dev);
509 void __iomem *ioaddr = tp->base_addr;
510 unsigned long flags;
512 spin_lock_irqsave (&tp->lock, flags);
514 if (tulip_media_cap[dev->if_port] & MediaIsMII) {
515 /* Do nothing -- the media monitor should handle this. */
516 if (tulip_debug > 1)
517 printk(KERN_WARNING "%s: Transmit timeout using MII device.\n",
518 dev->name);
519 } else if (tp->chip_id == DC21140 || tp->chip_id == DC21142
520 || tp->chip_id == MX98713 || tp->chip_id == COMPEX9881
521 || tp->chip_id == DM910X) {
522 printk(KERN_WARNING "%s: 21140 transmit timed out, status %8.8x, "
523 "SIA %8.8x %8.8x %8.8x %8.8x, resetting...\n",
524 dev->name, ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12),
525 ioread32(ioaddr + CSR13), ioread32(ioaddr + CSR14), ioread32(ioaddr + CSR15));
526 if ( ! tp->medialock && tp->mtable) {
527 do
528 --tp->cur_index;
529 while (tp->cur_index >= 0
530 && (tulip_media_cap[tp->mtable->mleaf[tp->cur_index].media]
531 & MediaIsFD));
532 if (--tp->cur_index < 0) {
533 /* We start again, but should instead look for default. */
534 tp->cur_index = tp->mtable->leafcount - 1;
536 tulip_select_media(dev, 0);
537 printk(KERN_WARNING "%s: transmit timed out, switching to %s "
538 "media.\n", dev->name, medianame[dev->if_port]);
540 } else if (tp->chip_id == PNIC2) {
541 printk(KERN_WARNING "%s: PNIC2 transmit timed out, status %8.8x, "
542 "CSR6/7 %8.8x / %8.8x CSR12 %8.8x, resetting...\n",
543 dev->name, (int)ioread32(ioaddr + CSR5), (int)ioread32(ioaddr + CSR6),
544 (int)ioread32(ioaddr + CSR7), (int)ioread32(ioaddr + CSR12));
545 } else {
546 printk(KERN_WARNING "%s: Transmit timed out, status %8.8x, CSR12 "
547 "%8.8x, resetting...\n",
548 dev->name, ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12));
549 dev->if_port = 0;
552 #if defined(way_too_many_messages)
553 if (tulip_debug > 3) {
554 int i;
555 for (i = 0; i < RX_RING_SIZE; i++) {
556 u8 *buf = (u8 *)(tp->rx_ring[i].buffer1);
557 int j;
558 printk(KERN_DEBUG "%2d: %8.8x %8.8x %8.8x %8.8x "
559 "%2.2x %2.2x %2.2x.\n",
560 i, (unsigned int)tp->rx_ring[i].status,
561 (unsigned int)tp->rx_ring[i].length,
562 (unsigned int)tp->rx_ring[i].buffer1,
563 (unsigned int)tp->rx_ring[i].buffer2,
564 buf[0], buf[1], buf[2]);
565 for (j = 0; buf[j] != 0xee && j < 1600; j++)
566 if (j < 100) printk(" %2.2x", buf[j]);
567 printk(" j=%d.\n", j);
569 printk(KERN_DEBUG " Rx ring %8.8x: ", (int)tp->rx_ring);
570 for (i = 0; i < RX_RING_SIZE; i++)
571 printk(" %8.8x", (unsigned int)tp->rx_ring[i].status);
572 printk("\n" KERN_DEBUG " Tx ring %8.8x: ", (int)tp->tx_ring);
573 for (i = 0; i < TX_RING_SIZE; i++)
574 printk(" %8.8x", (unsigned int)tp->tx_ring[i].status);
575 printk("\n");
577 #endif
579 /* Stop and restart the chip's Tx processes. */
581 tulip_restart_rxtx(tp);
582 /* Trigger an immediate transmit demand. */
583 iowrite32(0, ioaddr + CSR1);
585 tp->stats.tx_errors++;
587 spin_unlock_irqrestore (&tp->lock, flags);
588 dev->trans_start = jiffies;
589 netif_wake_queue (dev);
593 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
594 static void tulip_init_ring(struct net_device *dev)
596 struct tulip_private *tp = netdev_priv(dev);
597 int i;
599 tp->susp_rx = 0;
600 tp->ttimer = 0;
601 tp->nir = 0;
603 for (i = 0; i < RX_RING_SIZE; i++) {
604 tp->rx_ring[i].status = 0x00000000;
605 tp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ);
606 tp->rx_ring[i].buffer2 = cpu_to_le32(tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * (i + 1));
607 tp->rx_buffers[i].skb = NULL;
608 tp->rx_buffers[i].mapping = 0;
610 /* Mark the last entry as wrapping the ring. */
611 tp->rx_ring[i-1].length = cpu_to_le32(PKT_BUF_SZ | DESC_RING_WRAP);
612 tp->rx_ring[i-1].buffer2 = cpu_to_le32(tp->rx_ring_dma);
614 for (i = 0; i < RX_RING_SIZE; i++) {
615 dma_addr_t mapping;
617 /* Note the receive buffer must be longword aligned.
618 dev_alloc_skb() provides 16 byte alignment. But do *not*
619 use skb_reserve() to align the IP header! */
620 struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ);
621 tp->rx_buffers[i].skb = skb;
622 if (skb == NULL)
623 break;
624 mapping = pci_map_single(tp->pdev, skb->data,
625 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
626 tp->rx_buffers[i].mapping = mapping;
627 skb->dev = dev; /* Mark as being used by this device. */
628 tp->rx_ring[i].status = cpu_to_le32(DescOwned); /* Owned by Tulip chip */
629 tp->rx_ring[i].buffer1 = cpu_to_le32(mapping);
631 tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
633 /* The Tx buffer descriptor is filled in as needed, but we
634 do need to clear the ownership bit. */
635 for (i = 0; i < TX_RING_SIZE; i++) {
636 tp->tx_buffers[i].skb = NULL;
637 tp->tx_buffers[i].mapping = 0;
638 tp->tx_ring[i].status = 0x00000000;
639 tp->tx_ring[i].buffer2 = cpu_to_le32(tp->tx_ring_dma + sizeof(struct tulip_tx_desc) * (i + 1));
641 tp->tx_ring[i-1].buffer2 = cpu_to_le32(tp->tx_ring_dma);
644 static int
645 tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
647 struct tulip_private *tp = netdev_priv(dev);
648 int entry;
649 u32 flag;
650 dma_addr_t mapping;
652 spin_lock_irq(&tp->lock);
654 /* Calculate the next Tx descriptor entry. */
655 entry = tp->cur_tx % TX_RING_SIZE;
657 tp->tx_buffers[entry].skb = skb;
658 mapping = pci_map_single(tp->pdev, skb->data,
659 skb->len, PCI_DMA_TODEVICE);
660 tp->tx_buffers[entry].mapping = mapping;
661 tp->tx_ring[entry].buffer1 = cpu_to_le32(mapping);
663 if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {/* Typical path */
664 flag = 0x60000000; /* No interrupt */
665 } else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) {
666 flag = 0xe0000000; /* Tx-done intr. */
667 } else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) {
668 flag = 0x60000000; /* No Tx-done intr. */
669 } else { /* Leave room for set_rx_mode() to fill entries. */
670 flag = 0xe0000000; /* Tx-done intr. */
671 netif_stop_queue(dev);
673 if (entry == TX_RING_SIZE-1)
674 flag = 0xe0000000 | DESC_RING_WRAP;
676 tp->tx_ring[entry].length = cpu_to_le32(skb->len | flag);
677 /* if we were using Transmit Automatic Polling, we would need a
678 * wmb() here. */
679 tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
680 wmb();
682 tp->cur_tx++;
684 /* Trigger an immediate transmit demand. */
685 iowrite32(0, tp->base_addr + CSR1);
687 spin_unlock_irq(&tp->lock);
689 dev->trans_start = jiffies;
691 return 0;
694 static void tulip_clean_tx_ring(struct tulip_private *tp)
696 unsigned int dirty_tx;
698 for (dirty_tx = tp->dirty_tx ; tp->cur_tx - dirty_tx > 0;
699 dirty_tx++) {
700 int entry = dirty_tx % TX_RING_SIZE;
701 int status = le32_to_cpu(tp->tx_ring[entry].status);
703 if (status < 0) {
704 tp->stats.tx_errors++; /* It wasn't Txed */
705 tp->tx_ring[entry].status = 0;
708 /* Check for Tx filter setup frames. */
709 if (tp->tx_buffers[entry].skb == NULL) {
710 /* test because dummy frames not mapped */
711 if (tp->tx_buffers[entry].mapping)
712 pci_unmap_single(tp->pdev,
713 tp->tx_buffers[entry].mapping,
714 sizeof(tp->setup_frame),
715 PCI_DMA_TODEVICE);
716 continue;
719 pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
720 tp->tx_buffers[entry].skb->len,
721 PCI_DMA_TODEVICE);
723 /* Free the original skb. */
724 dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
725 tp->tx_buffers[entry].skb = NULL;
726 tp->tx_buffers[entry].mapping = 0;
730 static void tulip_down (struct net_device *dev)
732 struct tulip_private *tp = netdev_priv(dev);
733 void __iomem *ioaddr = tp->base_addr;
734 unsigned long flags;
736 del_timer_sync (&tp->timer);
737 #ifdef CONFIG_TULIP_NAPI
738 del_timer_sync (&tp->oom_timer);
739 #endif
740 spin_lock_irqsave (&tp->lock, flags);
742 /* Disable interrupts by clearing the interrupt mask. */
743 iowrite32 (0x00000000, ioaddr + CSR7);
745 /* Stop the Tx and Rx processes. */
746 tulip_stop_rxtx(tp);
748 /* prepare receive buffers */
749 tulip_refill_rx(dev);
751 /* release any unconsumed transmit buffers */
752 tulip_clean_tx_ring(tp);
754 if (ioread32 (ioaddr + CSR6) != 0xffffffff)
755 tp->stats.rx_missed_errors += ioread32 (ioaddr + CSR8) & 0xffff;
757 spin_unlock_irqrestore (&tp->lock, flags);
759 init_timer(&tp->timer);
760 tp->timer.data = (unsigned long)dev;
761 tp->timer.function = tulip_tbl[tp->chip_id].media_timer;
763 dev->if_port = tp->saved_if_port;
765 /* Leave the driver in snooze, not sleep, mode. */
766 tulip_set_power_state (tp, 0, 1);
770 static int tulip_close (struct net_device *dev)
772 struct tulip_private *tp = netdev_priv(dev);
773 void __iomem *ioaddr = tp->base_addr;
774 int i;
776 netif_stop_queue (dev);
778 tulip_down (dev);
780 if (tulip_debug > 1)
781 printk (KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
782 dev->name, ioread32 (ioaddr + CSR5));
784 free_irq (dev->irq, dev);
786 /* Free all the skbuffs in the Rx queue. */
787 for (i = 0; i < RX_RING_SIZE; i++) {
788 struct sk_buff *skb = tp->rx_buffers[i].skb;
789 dma_addr_t mapping = tp->rx_buffers[i].mapping;
791 tp->rx_buffers[i].skb = NULL;
792 tp->rx_buffers[i].mapping = 0;
794 tp->rx_ring[i].status = 0; /* Not owned by Tulip chip. */
795 tp->rx_ring[i].length = 0;
796 tp->rx_ring[i].buffer1 = 0xBADF00D0; /* An invalid address. */
797 if (skb) {
798 pci_unmap_single(tp->pdev, mapping, PKT_BUF_SZ,
799 PCI_DMA_FROMDEVICE);
800 dev_kfree_skb (skb);
803 for (i = 0; i < TX_RING_SIZE; i++) {
804 struct sk_buff *skb = tp->tx_buffers[i].skb;
806 if (skb != NULL) {
807 pci_unmap_single(tp->pdev, tp->tx_buffers[i].mapping,
808 skb->len, PCI_DMA_TODEVICE);
809 dev_kfree_skb (skb);
811 tp->tx_buffers[i].skb = NULL;
812 tp->tx_buffers[i].mapping = 0;
815 return 0;
818 static struct net_device_stats *tulip_get_stats(struct net_device *dev)
820 struct tulip_private *tp = netdev_priv(dev);
821 void __iomem *ioaddr = tp->base_addr;
823 if (netif_running(dev)) {
824 unsigned long flags;
826 spin_lock_irqsave (&tp->lock, flags);
828 tp->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
830 spin_unlock_irqrestore(&tp->lock, flags);
833 return &tp->stats;
837 static void tulip_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
839 struct tulip_private *np = netdev_priv(dev);
840 strcpy(info->driver, DRV_NAME);
841 strcpy(info->version, DRV_VERSION);
842 strcpy(info->bus_info, pci_name(np->pdev));
845 static struct ethtool_ops ops = {
846 .get_drvinfo = tulip_get_drvinfo
847 };
849 /* Provide ioctl() calls to examine the MII xcvr state. */
850 static int private_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
852 struct tulip_private *tp = netdev_priv(dev);
853 void __iomem *ioaddr = tp->base_addr;
854 struct mii_ioctl_data *data = if_mii(rq);
855 const unsigned int phy_idx = 0;
856 int phy = tp->phys[phy_idx] & 0x1f;
857 unsigned int regnum = data->reg_num;
859 switch (cmd) {
860 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
861 if (tp->mii_cnt)
862 data->phy_id = phy;
863 else if (tp->flags & HAS_NWAY)
864 data->phy_id = 32;
865 else if (tp->chip_id == COMET)
866 data->phy_id = 1;
867 else
868 return -ENODEV;
870 case SIOCGMIIREG: /* Read MII PHY register. */
871 if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
872 int csr12 = ioread32 (ioaddr + CSR12);
873 int csr14 = ioread32 (ioaddr + CSR14);
874 switch (regnum) {
875 case 0:
876 if (((csr14<<5) & 0x1000) ||
877 (dev->if_port == 5 && tp->nwayset))
878 data->val_out = 0x1000;
879 else
880 data->val_out = (tulip_media_cap[dev->if_port]&MediaIs100 ? 0x2000 : 0)
881 | (tulip_media_cap[dev->if_port]&MediaIsFD ? 0x0100 : 0);
882 break;
883 case 1:
884 data->val_out =
885 0x1848 +
886 ((csr12&0x7000) == 0x5000 ? 0x20 : 0) +
887 ((csr12&0x06) == 6 ? 0 : 4);
888 data->val_out |= 0x6048;
889 break;
890 case 4:
891 /* Advertised value, bogus 10baseTx-FD value from CSR6. */
892 data->val_out =
893 ((ioread32(ioaddr + CSR6) >> 3) & 0x0040) +
894 ((csr14 >> 1) & 0x20) + 1;
895 data->val_out |= ((csr14 >> 9) & 0x03C0);
896 break;
897 case 5: data->val_out = tp->lpar; break;
898 default: data->val_out = 0; break;
900 } else {
901 data->val_out = tulip_mdio_read (dev, data->phy_id & 0x1f, regnum);
903 return 0;
905 case SIOCSMIIREG: /* Write MII PHY register. */
906 if (!capable (CAP_NET_ADMIN))
907 return -EPERM;
908 if (regnum & ~0x1f)
909 return -EINVAL;
910 if (data->phy_id == phy) {
911 u16 value = data->val_in;
912 switch (regnum) {
913 case 0: /* Check for autonegotiation on or reset. */
914 tp->full_duplex_lock = (value & 0x9000) ? 0 : 1;
915 if (tp->full_duplex_lock)
916 tp->full_duplex = (value & 0x0100) ? 1 : 0;
917 break;
918 case 4:
919 tp->advertising[phy_idx] =
920 tp->mii_advertise = data->val_in;
921 break;
924 if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
925 u16 value = data->val_in;
926 if (regnum == 0) {
927 if ((value & 0x1200) == 0x1200) {
928 if (tp->chip_id == PNIC2) {
929 pnic2_start_nway (dev);
930 } else {
931 t21142_start_nway (dev);
934 } else if (regnum == 4)
935 tp->sym_advertise = value;
936 } else {
937 tulip_mdio_write (dev, data->phy_id & 0x1f, regnum, data->val_in);
939 return 0;
940 default:
941 return -EOPNOTSUPP;
944 return -EOPNOTSUPP;
948 /* Set or clear the multicast filter for this adaptor.
949 Note that we only use exclusion around actually queueing the
950 new frame, not around filling tp->setup_frame. This is non-deterministic
951 when re-entered but still correct. */
953 #undef set_bit_le
954 #define set_bit_le(i,p) do { ((char *)(p))[(i)/8] |= (1<<((i)%8)); } while(0)
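/* set_bit_le() sets bit 'i' of a byte array in little-endian bit order; it is
 * used below to build the 512-bit multicast hash loaded via the setup frame. */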
956 static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
958 struct tulip_private *tp = netdev_priv(dev);
959 u16 hash_table[32];
960 struct dev_mc_list *mclist;
961 int i;
962 u16 *eaddrs;
964 memset(hash_table, 0, sizeof(hash_table));
965 set_bit_le(255, hash_table); /* Broadcast entry */
966 /* This should work on big-endian machines as well. */
967 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
968 i++, mclist = mclist->next) {
969 int index = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff;
971 set_bit_le(index, hash_table);
974 for (i = 0; i < 32; i++) {
975 *setup_frm++ = hash_table[i];
976 *setup_frm++ = hash_table[i];
978 setup_frm = &tp->setup_frame[13*6];
980 /* Fill the final entry with our physical address. */
981 eaddrs = (u16 *)dev->dev_addr;
982 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
983 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
984 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
987 static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
989 struct tulip_private *tp = netdev_priv(dev);
990 struct dev_mc_list *mclist;
991 int i;
992 u16 *eaddrs;
994 /* We have <= 14 addresses so we can use the wonderful
995 16 address perfect filtering of the Tulip. */
996 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
997 i++, mclist = mclist->next) {
998 eaddrs = (u16 *)mclist->dmi_addr;
999 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1000 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1001 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1003 /* Fill the unused entries with the broadcast address. */
1004 memset(setup_frm, 0xff, (15-i)*12);
1005 setup_frm = &tp->setup_frame[15*6];
1007 /* Fill the final entry with our physical address. */
1008 eaddrs = (u16 *)dev->dev_addr;
1009 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
1010 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
1011 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
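/* Both setup-frame builders above store each 16-bit word twice because the
 * chip consumes only the low shortword of every 32-bit setup-frame longword
 * (see the note in set_rx_mode() below); 16 entries of 6 bytes, doubled,
 * account for the 192-byte length used when the frame is queued. */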
1015 static void set_rx_mode(struct net_device *dev)
1017 struct tulip_private *tp = netdev_priv(dev);
1018 void __iomem *ioaddr = tp->base_addr;
1019 int csr6;
1021 csr6 = ioread32(ioaddr + CSR6) & ~0x00D5;
1023 tp->csr6 &= ~0x00D5;
1024 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1025 tp->csr6 |= AcceptAllMulticast | AcceptAllPhys;
1026 csr6 |= AcceptAllMulticast | AcceptAllPhys;
1027 /* Unconditionally log net taps. */
1028 printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
1029 } else if ((dev->mc_count > 1000) || (dev->flags & IFF_ALLMULTI)) {
1030 /* Too many to filter well -- accept all multicasts. */
1031 tp->csr6 |= AcceptAllMulticast;
1032 csr6 |= AcceptAllMulticast;
1033 } else if (tp->flags & MC_HASH_ONLY) {
1034 /* Some work-alikes have only a 64-entry hash filter table. */
1035 /* Should verify correctness on big-endian/__powerpc__ */
1036 struct dev_mc_list *mclist;
1037 int i;
1038 if (dev->mc_count > 64) { /* Arbitrary non-effective limit. */
1039 tp->csr6 |= AcceptAllMulticast;
1040 csr6 |= AcceptAllMulticast;
1041 } else {
1042 u32 mc_filter[2] = {0, 0}; /* Multicast hash filter */
1043 int filterbit;
1044 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1045 i++, mclist = mclist->next) {
1046 if (tp->flags & COMET_MAC_ADDR)
1047 filterbit = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
1048 else
1049 filterbit = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
1050 filterbit &= 0x3f;
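/* Comet-style parts (COMET_MAC_ADDR) hash on the low 6 bits of the
 * little-endian CRC, while the other chips use the top 6 bits of the
 * big-endian CRC; either way the result indexes the 64-bit filter held
 * in the two 32-bit mc_filter words. */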
1051 mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
1052 if (tulip_debug > 2) {
1053 printk(KERN_INFO "%s: Added filter for %2.2x:%2.2x:%2.2x:"
1054 "%2.2x:%2.2x:%2.2x %8.8x bit %d.\n", dev->name,
1055 mclist->dmi_addr[0], mclist->dmi_addr[1],
1056 mclist->dmi_addr[2], mclist->dmi_addr[3],
1057 mclist->dmi_addr[4], mclist->dmi_addr[5],
1058 ether_crc(ETH_ALEN, mclist->dmi_addr), filterbit);
1061 if (mc_filter[0] == tp->mc_filter[0] &&
1062 mc_filter[1] == tp->mc_filter[1])
1063 ; /* No change. */
1064 else if (tp->flags & IS_ASIX) {
1065 iowrite32(2, ioaddr + CSR13);
1066 iowrite32(mc_filter[0], ioaddr + CSR14);
1067 iowrite32(3, ioaddr + CSR13);
1068 iowrite32(mc_filter[1], ioaddr + CSR14);
1069 } else if (tp->flags & COMET_MAC_ADDR) {
1070 iowrite32(mc_filter[0], ioaddr + 0xAC);
1071 iowrite32(mc_filter[1], ioaddr + 0xB0);
1073 tp->mc_filter[0] = mc_filter[0];
1074 tp->mc_filter[1] = mc_filter[1];
1076 } else {
1077 unsigned long flags;
1078 u32 tx_flags = 0x08000000 | 192;
1080 /* Note that only the low-address shortword of setup_frame is valid!
1081 The values are doubled for big-endian architectures. */
1082 if (dev->mc_count > 14) { /* Must use a multicast hash table. */
1083 build_setup_frame_hash(tp->setup_frame, dev);
1084 tx_flags = 0x08400000 | 192;
1085 } else {
1086 build_setup_frame_perfect(tp->setup_frame, dev);
1089 spin_lock_irqsave(&tp->lock, flags);
1091 if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE - 2) {
1092 /* Same setup recently queued, we need not add it. */
1093 } else {
1094 unsigned int entry;
1095 int dummy = -1;
1097 /* Now add this frame to the Tx list. */
1099 entry = tp->cur_tx++ % TX_RING_SIZE;
1101 if (entry != 0) {
1102 /* Avoid a chip erratum by prefixing a dummy entry. */
1103 tp->tx_buffers[entry].skb = NULL;
1104 tp->tx_buffers[entry].mapping = 0;
1105 tp->tx_ring[entry].length =
1106 (entry == TX_RING_SIZE-1) ? cpu_to_le32(DESC_RING_WRAP) : 0;
1107 tp->tx_ring[entry].buffer1 = 0;
1108 /* Must set DescOwned later to avoid race with chip */
1109 dummy = entry;
1110 entry = tp->cur_tx++ % TX_RING_SIZE;
1114 tp->tx_buffers[entry].skb = NULL;
1115 tp->tx_buffers[entry].mapping =
1116 pci_map_single(tp->pdev, tp->setup_frame,
1117 sizeof(tp->setup_frame),
1118 PCI_DMA_TODEVICE);
1119 /* Put the setup frame on the Tx list. */
1120 if (entry == TX_RING_SIZE-1)
1121 tx_flags |= DESC_RING_WRAP; /* Wrap ring. */
1122 tp->tx_ring[entry].length = cpu_to_le32(tx_flags);
1123 tp->tx_ring[entry].buffer1 =
1124 cpu_to_le32(tp->tx_buffers[entry].mapping);
1125 tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
1126 if (dummy >= 0)
1127 tp->tx_ring[dummy].status = cpu_to_le32(DescOwned);
1128 if (tp->cur_tx - tp->dirty_tx >= TX_RING_SIZE - 2)
1129 netif_stop_queue(dev);
1131 /* Trigger an immediate transmit demand. */
1132 iowrite32(0, ioaddr + CSR1);
1135 spin_unlock_irqrestore(&tp->lock, flags);
1138 iowrite32(csr6, ioaddr + CSR6);
1141 #ifdef CONFIG_TULIP_MWI
1142 static void __devinit tulip_mwi_config (struct pci_dev *pdev,
1143 struct net_device *dev)
1145 struct tulip_private *tp = netdev_priv(dev);
1146 u8 cache;
1147 u16 pci_command;
1148 u32 csr0;
1150 if (tulip_debug > 3)
1151 printk(KERN_DEBUG "%s: tulip_mwi_config()\n", pci_name(pdev));
1153 tp->csr0 = csr0 = 0;
1155 /* if we have any cache line size at all, we can do MRM */
1156 csr0 |= MRM;
1158 /* ...and barring hardware bugs, MWI */
1159 if (!(tp->chip_id == DC21143 && tp->revision == 65))
1160 csr0 |= MWI;
1162 /* set or disable MWI in the standard PCI command bit.
1163 * Check for the case where mwi is desired but not available
1165 if (csr0 & MWI) pci_set_mwi(pdev);
1166 else pci_clear_mwi(pdev);
1168 /* read result from hardware (in case bit refused to enable) */
1169 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
1170 if ((csr0 & MWI) && (!(pci_command & PCI_COMMAND_INVALIDATE)))
1171 csr0 &= ~MWI;
1173 /* if cache line size hardwired to zero, no MWI */
1174 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache);
1175 if ((csr0 & MWI) && (cache == 0)) {
1176 csr0 &= ~MWI;
1177 pci_clear_mwi(pdev);
1180 /* assign per-cacheline-size cache alignment and
1181 * burst length values
1183 switch (cache) {
1184 case 8:
1185 csr0 |= MRL | (1 << CALShift) | (16 << BurstLenShift);
1186 break;
1187 case 16:
1188 csr0 |= MRL | (2 << CALShift) | (16 << BurstLenShift);
1189 break;
1190 case 32:
1191 csr0 |= MRL | (3 << CALShift) | (32 << BurstLenShift);
1192 break;
1193 default:
1194 cache = 0;
1195 break;
1198 /* if we have a good cache line size, we by now have a good
1199 * csr0, so save it and exit
1201 if (cache)
1202 goto out;
1204 /* we don't have a good csr0 or cache line size, disable MWI */
1205 if (csr0 & MWI) {
1206 pci_clear_mwi(pdev);
1207 csr0 &= ~MWI;
1210 /* sane defaults for burst length and cache alignment
1211 * originally from de4x5 driver
1213 csr0 |= (8 << BurstLenShift) | (1 << CALShift);
1215 out:
1216 tp->csr0 = csr0;
1217 if (tulip_debug > 2)
1218 printk(KERN_DEBUG "%s: MWI config cacheline=%d, csr0=%08x\n",
1219 pci_name(pdev), cache, csr0);
1221 #endif
1224 /* Chips that have the MRM/reserved bit quirk and the burst quirk. That
1225 * is the DM910X and the on-chip ULi devices. */
1228 static int tulip_uli_dm_quirk(struct pci_dev *pdev)
1230 if (pdev->vendor == 0x1282 && pdev->device == 0x9102)
1231 return 1;
1232 return 0;
1235 static int __devinit tulip_init_one (struct pci_dev *pdev,
1236 const struct pci_device_id *ent)
1238 struct tulip_private *tp;
1239 /* See note below on the multiport cards. */
1240 static unsigned char last_phys_addr[6] = {0x00, 'L', 'i', 'n', 'u', 'x'};
1241 static struct pci_device_id early_486_chipsets[] = {
1242 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82424) },
1243 { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496) },
1244 { },
1245 };
1246 static int last_irq;
1247 static int multiport_cnt; /* For four-port boards w/one EEPROM */
1248 u8 chip_rev;
1249 int i, irq;
1250 unsigned short sum;
1251 unsigned char *ee_data;
1252 struct net_device *dev;
1253 void __iomem *ioaddr;
1254 static int board_idx = -1;
1255 int chip_idx = ent->driver_data;
1256 const char *chip_name = tulip_tbl[chip_idx].chip_name;
1257 unsigned int eeprom_missing = 0;
1258 unsigned int force_csr0 = 0;
1260 #ifndef MODULE
1261 static int did_version; /* Already printed version info. */
1262 if (tulip_debug > 0 && did_version++ == 0)
1263 printk (KERN_INFO "%s", version);
1264 #endif
1266 board_idx++;
1269 /* LAN Media boards wire a Tulip chip to a WAN interface and need a very
1270 * different driver (the lmc driver). */
1273 if (pdev->subsystem_vendor == PCI_VENDOR_ID_LMC) {
1274 printk (KERN_ERR PFX "skipping LMC card.\n");
1275 return -ENODEV;
1279 /* Early DM9100s need software CRC and the DMFE driver. */
1282 if (pdev->vendor == 0x1282 && pdev->device == 0x9100)
1284 u32 dev_rev;
1285 /* Read Chip revision */
1286 pci_read_config_dword(pdev, PCI_REVISION_ID, &dev_rev);
1287 if(dev_rev < 0x02000030)
1289 printk(KERN_ERR PFX "skipping early DM9100 with Crc bug (use dmfe)\n");
1290 return -ENODEV;
1295 /* Looks for early PCI chipsets where people report hangs
1296 * without the workarounds being on. */
1299 /* 1. Intel Saturn. Switch to 8 long words burst, 8 long word cache
1300 aligned. Aries might need this too. The Saturn errata are not
1301 pretty reading but thankfully it's an old 486 chipset.
1303 2. The dreaded SiS496 486 chipset. Same workaround as Intel
1304 Saturn. */
1307 if (pci_dev_present(early_486_chipsets)) {
1308 csr0 = MRL | MRM | (8 << BurstLenShift) | (1 << CALShift);
1309 force_csr0 = 1;
1312 /* bugfix: the ASIX must have a burst limit or horrible things happen. */
1313 if (chip_idx == AX88140) {
1314 if ((csr0 & 0x3f00) == 0)
1315 csr0 |= 0x2000;
1318 /* PNIC doesn't have MWI/MRL/MRM... */
1319 if (chip_idx == LC82C168)
1320 csr0 &= ~0xfff10000; /* zero reserved bits 31:20, 16 */
1322 /* DM9102A has troubles with MRM & clear reserved bits 24:22, 20, 16, 7:1 */
1323 if (tulip_uli_dm_quirk(pdev)) {
1324 csr0 &= ~0x01f100ff;
1325 #if defined(__sparc__)
1326 csr0 = (csr0 & ~0xff00) | 0xe000;
1327 #endif
1330 * And back to business
1333 i = pci_enable_device(pdev);
1334 if (i) {
1335 printk (KERN_ERR PFX
1336 "Cannot enable tulip board #%d, aborting\n",
1337 board_idx);
1338 return i;
1341 irq = pdev->irq;
1343 /* alloc_etherdev ensures aligned and zeroed private structures */
1344 dev = alloc_etherdev (sizeof (*tp));
1345 if (!dev) {
1346 printk (KERN_ERR PFX "ether device alloc failed, aborting\n");
1347 return -ENOMEM;
1350 SET_MODULE_OWNER(dev);
1351 SET_NETDEV_DEV(dev, &pdev->dev);
1352 if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
1353 printk (KERN_ERR PFX "%s: I/O region (0x%lx@0x%lx) too small, "
1354 "aborting\n", pci_name(pdev),
1355 pci_resource_len (pdev, 0),
1356 pci_resource_start (pdev, 0));
1357 goto err_out_free_netdev;
1360 /* grab all resources from both PIO and MMIO regions, as we
1361 * don't want anyone else messing around with our hardware */
1362 if (pci_request_regions (pdev, "tulip"))
1363 goto err_out_free_netdev;
1365 #ifndef USE_IO_OPS
1366 ioaddr = pci_iomap(pdev, 1, tulip_tbl[chip_idx].io_size);
1367 #else
1368 ioaddr = pci_iomap(pdev, 0, tulip_tbl[chip_idx].io_size);
1369 #endif
1370 if (!ioaddr)
1371 goto err_out_free_res;
1373 pci_read_config_byte (pdev, PCI_REVISION_ID, &chip_rev);
1376 * initialize private data structure 'tp'
1377 * it is zeroed and aligned in alloc_etherdev
1379 tp = netdev_priv(dev);
1381 tp->rx_ring = pci_alloc_consistent(pdev,
1382 sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
1383 sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
1384 &tp->rx_ring_dma);
1385 if (!tp->rx_ring)
1386 goto err_out_mtable;
1387 tp->tx_ring = (struct tulip_tx_desc *)(tp->rx_ring + RX_RING_SIZE);
1388 tp->tx_ring_dma = tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * RX_RING_SIZE;
1390 tp->chip_id = chip_idx;
1391 tp->flags = tulip_tbl[chip_idx].flags;
1392 tp->pdev = pdev;
1393 tp->base_addr = ioaddr;
1394 tp->revision = chip_rev;
1395 tp->csr0 = csr0;
1396 spin_lock_init(&tp->lock);
1397 spin_lock_init(&tp->mii_lock);
1398 init_timer(&tp->timer);
1399 tp->timer.data = (unsigned long)dev;
1400 tp->timer.function = tulip_tbl[tp->chip_id].media_timer;
1402 dev->base_addr = (unsigned long)ioaddr;
1404 #ifdef CONFIG_TULIP_MWI
1405 if (!force_csr0 && (tp->flags & HAS_PCI_MWI))
1406 tulip_mwi_config (pdev, dev);
1407 #else
1408 /* MWI is broken for DC21143 rev 65... */
1409 if (chip_idx == DC21143 && chip_rev == 65)
1410 tp->csr0 &= ~MWI;
1411 #endif
1413 /* Stop the chip's Tx and Rx processes. */
1414 tulip_stop_rxtx(tp);
1416 pci_set_master(pdev);
1418 #ifdef CONFIG_GSC
1419 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP) {
1420 switch (pdev->subsystem_device) {
1421 default:
1422 break;
1423 case 0x1061:
1424 case 0x1062:
1425 case 0x1063:
1426 case 0x1098:
1427 case 0x1099:
1428 case 0x10EE:
1429 tp->flags |= HAS_SWAPPED_SEEPROM | NEEDS_FAKE_MEDIA_TABLE;
1430 chip_name = "GSC DS21140 Tulip";
1433 #endif
1435 /* Clear the missed-packet counter. */
1436 ioread32(ioaddr + CSR8);
1438 /* The station address ROM is read byte serially. The register must
1439 be polled, waiting for the value to be read bit serially from the
1440 EEPROM.
1442 ee_data = tp->eeprom;
1443 sum = 0;
1444 if (chip_idx == LC82C168) {
1445 for (i = 0; i < 3; i++) {
1446 int value, boguscnt = 100000;
1447 iowrite32(0x600 | i, ioaddr + 0x98);
1448 do
1449 value = ioread32(ioaddr + CSR9);
1450 while (value < 0 && --boguscnt > 0);
1451 put_unaligned(le16_to_cpu(value), ((u16*)dev->dev_addr) + i);
1452 sum += value & 0xffff;
1454 } else if (chip_idx == COMET) {
1455 /* No need to read the EEPROM. */
1456 put_unaligned(cpu_to_le32(ioread32(ioaddr + 0xA4)), (u32 *)dev->dev_addr);
1457 put_unaligned(cpu_to_le16(ioread32(ioaddr + 0xA8)), (u16 *)(dev->dev_addr + 4));
1458 for (i = 0; i < 6; i ++)
1459 sum += dev->dev_addr[i];
1460 } else {
1461 /* A serial EEPROM interface, we read now and sort it out later. */
1462 int sa_offset = 0;
1463 int ee_addr_size = tulip_read_eeprom(dev, 0xff, 8) & 0x40000 ? 8 : 6;
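/* The probe read above appears to detect whether the serial EEPROM uses 6- or
 * 8-bit addressing before its whole contents are copied into tp->eeprom. */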
1465 for (i = 0; i < sizeof(tp->eeprom); i+=2) {
1466 u16 data = tulip_read_eeprom(dev, i/2, ee_addr_size);
1467 ee_data[i] = data & 0xff;
1468 ee_data[i + 1] = data >> 8;
1471 /* DEC now has a specification (see Notes) but early board makers
1472 just put the address in the first EEPROM locations. */
1473 /* This does memcmp(ee_data, ee_data+16, 8) */
1474 for (i = 0; i < 8; i ++)
1475 if (ee_data[i] != ee_data[16+i])
1476 sa_offset = 20;
1477 if (chip_idx == CONEXANT) {
1478 /* Check that the tuple type and length is correct. */
1479 if (ee_data[0x198] == 0x04 && ee_data[0x199] == 6)
1480 sa_offset = 0x19A;
1481 } else if (ee_data[0] == 0xff && ee_data[1] == 0xff &&
1482 ee_data[2] == 0) {
1483 sa_offset = 2; /* Grrr, damn Matrox boards. */
1484 multiport_cnt = 4;
1486 #ifdef CONFIG_DDB5476
1487 if ((pdev->bus->number == 0) && (PCI_SLOT(pdev->devfn) == 6)) {
1488 /* DDB5476 MAC address in first EEPROM locations. */
1489 sa_offset = 0;
1490 /* No media table either */
1491 tp->flags &= ~HAS_MEDIA_TABLE;
1493 #endif
1494 #ifdef CONFIG_DDB5477
1495 if ((pdev->bus->number == 0) && (PCI_SLOT(pdev->devfn) == 4)) {
1496 /* DDB5477 MAC address in first EEPROM locations. */
1497 sa_offset = 0;
1498 /* No media table either */
1499 tp->flags &= ~HAS_MEDIA_TABLE;
1501 #endif
1502 #ifdef CONFIG_MIPS_COBALT
1503 if ((pdev->bus->number == 0) &&
1504 ((PCI_SLOT(pdev->devfn) == 7) ||
1505 (PCI_SLOT(pdev->devfn) == 12))) {
1506 /* Cobalt MAC address in first EEPROM locations. */
1507 sa_offset = 0;
1508 /* Ensure our media table fixup gets applied */
1509 memcpy(ee_data + 16, ee_data, 8);
1511 #endif
1512 #ifdef CONFIG_GSC
1513 /* Check to see if we have a broken srom */
1514 if (ee_data[0] == 0x61 && ee_data[1] == 0x10) {
1515 /* pci_vendor_id and subsystem_id are swapped */
1516 ee_data[0] = ee_data[2];
1517 ee_data[1] = ee_data[3];
1518 ee_data[2] = 0x61;
1519 ee_data[3] = 0x10;
1521 /* HSC-PCI boards need to be byte-swapped and shifted
1522 * up 1 word. This shift needs to happen at the end
1523 * of the MAC first because of the 2 byte overlap.
1524 */
1525 for (i = 4; i >= 0; i -= 2) {
1526 ee_data[17 + i + 3] = ee_data[17 + i];
1527 ee_data[16 + i + 5] = ee_data[16 + i];
1530 #endif
1532 for (i = 0; i < 6; i ++) {
1533 dev->dev_addr[i] = ee_data[i + sa_offset];
1534 sum += ee_data[i + sa_offset];
1537 /* Lite-On boards have the address byte-swapped. */
1538 if ((dev->dev_addr[0] == 0xA0 || dev->dev_addr[0] == 0xC0 || dev->dev_addr[0] == 0x02)
1539 && dev->dev_addr[1] == 0x00)
1540 for (i = 0; i < 6; i+=2) {
1541 char tmp = dev->dev_addr[i];
1542 dev->dev_addr[i] = dev->dev_addr[i+1];
1543 dev->dev_addr[i+1] = tmp;
1545 /* On the Zynx 315 Etherarray and other multiport boards only the
1546 first Tulip has an EEPROM.
1547 On Sparc systems the mac address is held in the OBP property
1548 "local-mac-address".
1549 The addresses of the subsequent ports are derived from the first.
1550 Many PCI BIOSes also incorrectly report the IRQ line, so we correct
1551 that here as well. */
1552 if (sum == 0 || sum == 6*0xff) {
1553 #if defined(__sparc__)
1554 struct pcidev_cookie *pcp = pdev->sysdata;
1555 #endif
1556 eeprom_missing = 1;
1557 for (i = 0; i < 5; i++)
1558 dev->dev_addr[i] = last_phys_addr[i];
1559 dev->dev_addr[i] = last_phys_addr[i] + 1;
1560 #if defined(__sparc__)
1561 if ((pcp != NULL) && prom_getproplen(pcp->prom_node,
1562 "local-mac-address") == 6) {
1563 prom_getproperty(pcp->prom_node, "local-mac-address",
1564 dev->dev_addr, 6);
1566 #endif
1567 #if defined(__i386__) || defined(__x86_64__) /* Patch up x86 BIOS bug. */
1568 if (last_irq)
1569 irq = last_irq;
1570 #endif
1573 for (i = 0; i < 6; i++)
1574 last_phys_addr[i] = dev->dev_addr[i];
1575 last_irq = irq;
1576 dev->irq = irq;
1578 /* The lower four bits are the media type. */
1579 if (board_idx >= 0 && board_idx < MAX_UNITS) {
1580 if (options[board_idx] & MEDIA_MASK)
1581 tp->default_port = options[board_idx] & MEDIA_MASK;
1582 if ((options[board_idx] & FullDuplex) || full_duplex[board_idx] > 0)
1583 tp->full_duplex = 1;
1584 if (mtu[board_idx] > 0)
1585 dev->mtu = mtu[board_idx];
1587 if (dev->mem_start & MEDIA_MASK)
1588 tp->default_port = dev->mem_start & MEDIA_MASK;
1589 if (tp->default_port) {
1590 printk(KERN_INFO "tulip%d: Transceiver selection forced to %s.\n",
1591 board_idx, medianame[tp->default_port & MEDIA_MASK]);
1592 tp->medialock = 1;
1593 if (tulip_media_cap[tp->default_port] & MediaAlwaysFD)
1594 tp->full_duplex = 1;
1596 if (tp->full_duplex)
1597 tp->full_duplex_lock = 1;
1599 if (tulip_media_cap[tp->default_port] & MediaIsMII) {
1600 u16 media2advert[] = { 0x20, 0x40, 0x03e0, 0x60, 0x80, 0x100, 0x200 };
1601 tp->mii_advertise = media2advert[tp->default_port - 9];
1602 tp->mii_advertise |= (tp->flags & HAS_8023X); /* Matching bits! */
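/* media2advert[] maps the forced MII media types (medianame[] indices 9-15,
 * hence the "- 9") to what are presumably the standard MII advertisement
 * register bits: 0x20/0x40 for 10baseT half/full duplex, 0x80/0x100 for
 * 100baseTx half/full, 0x200 for 100baseT4, and 0x03e0 for all of them. */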
1605 if (tp->flags & HAS_MEDIA_TABLE) {
1606 sprintf(dev->name, "tulip%d", board_idx); /* hack */
1607 tulip_parse_eeprom(dev);
1608 strcpy(dev->name, "eth%d"); /* un-hack */
1611 if ((tp->flags & ALWAYS_CHECK_MII) ||
1612 (tp->mtable && tp->mtable->has_mii) ||
1613 ( ! tp->mtable && (tp->flags & HAS_MII))) {
1614 if (tp->mtable && tp->mtable->has_mii) {
1615 for (i = 0; i < tp->mtable->leafcount; i++)
1616 if (tp->mtable->mleaf[i].media == 11) {
1617 tp->cur_index = i;
1618 tp->saved_if_port = dev->if_port;
1619 tulip_select_media(dev, 2);
1620 dev->if_port = tp->saved_if_port;
1621 break;
1625 /* Find the connected MII xcvrs.
1626 Doing this in open() would allow detecting external xcvrs
1627 later, but takes much time. */
1628 tulip_find_mii (dev, board_idx);
1631 /* The Tulip-specific entries in the device structure. */
1632 dev->open = tulip_open;
1633 dev->hard_start_xmit = tulip_start_xmit;
1634 dev->tx_timeout = tulip_tx_timeout;
1635 dev->watchdog_timeo = TX_TIMEOUT;
1636 #ifdef CONFIG_TULIP_NAPI
1637 dev->poll = tulip_poll;
1638 dev->weight = 16;
1639 #endif
1640 dev->stop = tulip_close;
1641 dev->get_stats = tulip_get_stats;
1642 dev->do_ioctl = private_ioctl;
1643 dev->set_multicast_list = set_rx_mode;
1644 #ifdef CONFIG_NET_POLL_CONTROLLER
1645 dev->poll_controller = &poll_tulip;
1646 #endif
1647 SET_ETHTOOL_OPS(dev, &ops);
1649 if (register_netdev(dev))
1650 goto err_out_free_ring;
1652 printk(KERN_INFO "%s: %s rev %d at %p,",
1653 dev->name, chip_name, chip_rev, ioaddr);
1654 pci_set_drvdata(pdev, dev);
1656 if (eeprom_missing)
1657 printk(" EEPROM not present,");
1658 for (i = 0; i < 6; i++)
1659 printk("%c%2.2X", i ? ':' : ' ', dev->dev_addr[i]);
1660 printk(", IRQ %d.\n", irq);
1662 if (tp->chip_id == PNIC2)
1663 tp->link_change = pnic2_lnk_change;
1664 else if (tp->flags & HAS_NWAY)
1665 tp->link_change = t21142_lnk_change;
1666 else if (tp->flags & HAS_PNICNWAY)
1667 tp->link_change = pnic_lnk_change;
1669 /* Reset the xcvr interface and turn on heartbeat. */
1670 switch (chip_idx) {
1671 case DC21140:
1672 case DM910X:
1673 default:
1674 if (tp->mtable)
1675 iowrite32(tp->mtable->csr12dir | 0x100, ioaddr + CSR12);
1676 break;
1677 case DC21142:
1678 if (tp->mii_cnt || tulip_media_cap[dev->if_port] & MediaIsMII) {
1679 iowrite32(csr6_mask_defstate, ioaddr + CSR6);
1680 iowrite32(0x0000, ioaddr + CSR13);
1681 iowrite32(0x0000, ioaddr + CSR14);
1682 iowrite32(csr6_mask_hdcap, ioaddr + CSR6);
1683 } else
1684 t21142_start_nway(dev);
1685 break;
1686 case PNIC2:
1687 /* just do a reset for sanity sake */
1688 iowrite32(0x0000, ioaddr + CSR13);
1689 iowrite32(0x0000, ioaddr + CSR14);
1690 break;
1691 case LC82C168:
1692 if ( ! tp->mii_cnt) {
1693 tp->nway = 1;
1694 tp->nwayset = 0;
1695 iowrite32(csr6_ttm | csr6_ca, ioaddr + CSR6);
1696 iowrite32(0x30, ioaddr + CSR12);
1697 iowrite32(0x0001F078, ioaddr + CSR6);
1698 iowrite32(0x0201F078, ioaddr + CSR6); /* Turn on autonegotiation. */
1700 break;
1701 case MX98713:
1702 case COMPEX9881:
1703 iowrite32(0x00000000, ioaddr + CSR6);
1704 iowrite32(0x000711C0, ioaddr + CSR14); /* Turn on NWay. */
1705 iowrite32(0x00000001, ioaddr + CSR13);
1706 break;
1707 case MX98715:
1708 case MX98725:
1709 iowrite32(0x01a80000, ioaddr + CSR6);
1710 iowrite32(0xFFFFFFFF, ioaddr + CSR14);
1711 iowrite32(0x00001000, ioaddr + CSR12);
1712 break;
1713 case COMET:
1714 /* No initialization necessary. */
1715 break;
1718 /* put the chip in snooze mode until opened */
1719 tulip_set_power_state (tp, 0, 1);
1721 return 0;
1723 err_out_free_ring:
1724 pci_free_consistent (pdev,
1725 sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
1726 sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
1727 tp->rx_ring, tp->rx_ring_dma);
1729 err_out_mtable:
1730 kfree (tp->mtable);
1731 pci_iounmap(pdev, ioaddr);
1733 err_out_free_res:
1734 pci_release_regions (pdev);
1736 err_out_free_netdev:
1737 free_netdev (dev);
1738 return -ENODEV;
1742 #ifdef CONFIG_PM
1744 static int tulip_suspend (struct pci_dev *pdev, pm_message_t state)
1746 struct net_device *dev = pci_get_drvdata(pdev);
1748 if (!dev)
1749 return -EINVAL;
1751 if (netif_running(dev))
1752 tulip_down(dev);
1754 netif_device_detach(dev);
1755 free_irq(dev->irq, dev);
1757 pci_save_state(pdev);
1758 pci_disable_device(pdev);
1759 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1761 return 0;
1765 static int tulip_resume(struct pci_dev *pdev)
1767 struct net_device *dev = pci_get_drvdata(pdev);
1768 int retval;
1770 if (!dev)
1771 return -EINVAL;
1773 pci_set_power_state(pdev, PCI_D0);
1774 pci_restore_state(pdev);
1776 pci_enable_device(pdev);
1778 if ((retval = request_irq(dev->irq, &tulip_interrupt, SA_SHIRQ, dev->name, dev))) {
1779 printk (KERN_ERR "tulip: request_irq failed in resume\n");
1780 return retval;
1783 netif_device_attach(dev);
1785 if (netif_running(dev))
1786 tulip_up(dev);
1788 return 0;
1791 #endif /* CONFIG_PM */
1794 static void __devexit tulip_remove_one (struct pci_dev *pdev)
1796 struct net_device *dev = pci_get_drvdata (pdev);
1797 struct tulip_private *tp;
1799 if (!dev)
1800 return;
1802 tp = netdev_priv(dev);
1803 unregister_netdev(dev);
1804 pci_free_consistent (pdev,
1805 sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
1806 sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
1807 tp->rx_ring, tp->rx_ring_dma);
1808 kfree (tp->mtable);
1809 pci_iounmap(pdev, tp->base_addr);
1810 free_netdev (dev);
1811 pci_release_regions (pdev);
1812 pci_set_drvdata (pdev, NULL);
1814 /* pci_power_off (pdev, -1); */
1817 #ifdef CONFIG_NET_POLL_CONTROLLER
1819 /* Polling 'interrupt' - used by things like netconsole to send skbs
1820 * without having to re-enable interrupts. It's not called while
1821 * the interrupt routine is executing. */
1824 static void poll_tulip (struct net_device *dev)
1826 /* disable_irq here is not very nice, but with the lockless
1827 interrupt handler we have no other choice. */
1828 disable_irq(dev->irq);
1829 tulip_interrupt (dev->irq, dev, NULL);
1830 enable_irq(dev->irq);
1832 #endif
1834 static struct pci_driver tulip_driver = {
1835 .name = DRV_NAME,
1836 .id_table = tulip_pci_tbl,
1837 .probe = tulip_init_one,
1838 .remove = __devexit_p(tulip_remove_one),
1839 #ifdef CONFIG_PM
1840 .suspend = tulip_suspend,
1841 .resume = tulip_resume,
1842 #endif /* CONFIG_PM */
1843 };
1846 static int __init tulip_init (void)
1848 #ifdef MODULE
1849 printk (KERN_INFO "%s", version);
1850 #endif
1852 /* copy module parms into globals */
1853 tulip_rx_copybreak = rx_copybreak;
1854 tulip_max_interrupt_work = max_interrupt_work;
1856 /* probe for and init boards */
1857 return pci_module_init (&tulip_driver);
1861 static void __exit tulip_cleanup (void)
1863 pci_unregister_driver (&tulip_driver);
1867 module_init(tulip_init);
1868 module_exit(tulip_cleanup);