GUI: Fix Tomato RAF theme for all builds. Compilation typo.
[tomato.git] / release / src-rt-6.x.4708 / linux / linux-2.6.36 / drivers / net / tulip / de2104x.c
blob20145ac2962a4d86c4e0ef305d62b920e44de1df
1 /* de2104x.c: A Linux PCI Ethernet driver for Intel/Digital 21040/1 chips. */
2 /*
3 Copyright 2001,2003 Jeff Garzik <jgarzik@pobox.com>
5 Copyright 1994, 1995 Digital Equipment Corporation. [de4x5.c]
6 Written/copyright 1994-2001 by Donald Becker. [tulip.c]
8 This software may be used and distributed according to the terms of
9 the GNU General Public License (GPL), incorporated herein by reference.
10 Drivers based on or derived from this code fall under the GPL and must
11 retain the authorship, copyright and license notice. This file is not
12 a complete program and may only be used when the entire operating
13 system is licensed under the GPL.
15 See the file COPYING in this distribution for more information.
17 TODO, in rough priority order:
18 * Support forcing media type with a module parameter,
19 like dl2k.c/sundance.c
20 * Constants (module parms?) for Rx work limit
21 * Complete reset on PciErr
22 * Jumbo frames / dev->change_mtu
23 * Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
24 * Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
25 * Implement Tx software interrupt mitigation via
26 Tx descriptor bit
30 #define DRV_NAME "de2104x"
31 #define DRV_VERSION "0.7"
32 #define DRV_RELDATE "Mar 17, 2004"
34 #include <linux/module.h>
35 #include <linux/kernel.h>
36 #include <linux/netdevice.h>
37 #include <linux/etherdevice.h>
38 #include <linux/init.h>
39 #include <linux/pci.h>
40 #include <linux/delay.h>
41 #include <linux/ethtool.h>
42 #include <linux/compiler.h>
43 #include <linux/rtnetlink.h>
44 #include <linux/crc32.h>
45 #include <linux/slab.h>
47 #include <asm/io.h>
48 #include <asm/irq.h>
49 #include <asm/uaccess.h>
50 #include <asm/unaligned.h>
52 /* These identify the driver base version and may not be removed. */
53 static char version[] =
54 KERN_INFO DRV_NAME " PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";
56 MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
57 MODULE_DESCRIPTION("Intel/Digital 21040/1 series PCI Ethernet driver");
58 MODULE_LICENSE("GPL");
59 MODULE_VERSION(DRV_VERSION);
61 static int debug = -1;
62 module_param (debug, int, 0);
63 MODULE_PARM_DESC (debug, "de2104x bitmapped message enable number");
65 /* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
66 #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
67 defined(CONFIG_SPARC) || defined(__ia64__) || defined(__sh__) || defined(__mips__)
68 static int rx_copybreak = 1518;
69 #else
70 static int rx_copybreak = 100;
71 #endif
72 module_param (rx_copybreak, int, 0);
73 MODULE_PARM_DESC (rx_copybreak, "de2104x Breakpoint at which Rx packets are copied");
75 #define PFX DRV_NAME ": "
77 #define DE_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
78 NETIF_MSG_PROBE | \
79 NETIF_MSG_LINK | \
80 NETIF_MSG_IFDOWN | \
81 NETIF_MSG_IFUP | \
82 NETIF_MSG_RX_ERR | \
83 NETIF_MSG_TX_ERR)
85 /* Descriptor skip length in 32 bit longwords. */
86 #ifndef CONFIG_DE2104X_DSL
87 #define DSL 0
88 #else
89 #define DSL CONFIG_DE2104X_DSL
90 #endif
92 #define DE_RX_RING_SIZE 64
93 #define DE_TX_RING_SIZE 64
94 #define DE_RING_BYTES \
95 ((sizeof(struct de_desc) * DE_RX_RING_SIZE) + \
96 (sizeof(struct de_desc) * DE_TX_RING_SIZE))
97 #define NEXT_TX(N) (((N) + 1) & (DE_TX_RING_SIZE - 1))
98 #define NEXT_RX(N) (((N) + 1) & (DE_RX_RING_SIZE - 1))
99 #define TX_BUFFS_AVAIL(CP) \
100 (((CP)->tx_tail <= (CP)->tx_head) ? \
101 (CP)->tx_tail + (DE_TX_RING_SIZE - 1) - (CP)->tx_head : \
102 (CP)->tx_tail - (CP)->tx_head - 1)
104 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
105 #define RX_OFFSET 2
107 #define DE_SETUP_SKB ((struct sk_buff *) 1)
108 #define DE_DUMMY_SKB ((struct sk_buff *) 2)
109 #define DE_SETUP_FRAME_WORDS 96
110 #define DE_EEPROM_WORDS 256
111 #define DE_EEPROM_SIZE (DE_EEPROM_WORDS * sizeof(u16))
112 #define DE_MAX_MEDIA 5
114 #define DE_MEDIA_TP_AUTO 0
115 #define DE_MEDIA_BNC 1
116 #define DE_MEDIA_AUI 2
117 #define DE_MEDIA_TP 3
118 #define DE_MEDIA_TP_FD 4
119 #define DE_MEDIA_INVALID DE_MAX_MEDIA
120 #define DE_MEDIA_FIRST 0
121 #define DE_MEDIA_LAST (DE_MAX_MEDIA - 1)
122 #define DE_AUI_BNC (SUPPORTED_AUI | SUPPORTED_BNC)
124 #define DE_TIMER_LINK (60 * HZ)
125 #define DE_TIMER_NO_LINK (5 * HZ)
127 #define DE_NUM_REGS 16
128 #define DE_REGS_SIZE (DE_NUM_REGS * sizeof(u32))
129 #define DE_REGS_VER 1
131 /* Time in jiffies before concluding the transmitter is hung. */
132 #define TX_TIMEOUT (6*HZ)
134 /* This is a mysterious value that can be written to CSR11 in the 21040 (only)
135 to support a pre-NWay full-duplex signaling mechanism using short frames.
136 No one knows what it should be, but if left at its default value some
137 10base2(!) packets trigger a full-duplex-request interrupt. */
138 #define FULL_DUPLEX_MAGIC 0x6969
140 enum {
141 /* NIC registers */
142 BusMode = 0x00,
143 TxPoll = 0x08,
144 RxPoll = 0x10,
145 RxRingAddr = 0x18,
146 TxRingAddr = 0x20,
147 MacStatus = 0x28,
148 MacMode = 0x30,
149 IntrMask = 0x38,
150 RxMissed = 0x40,
151 ROMCmd = 0x48,
152 CSR11 = 0x58,
153 SIAStatus = 0x60,
154 CSR13 = 0x68,
155 CSR14 = 0x70,
156 CSR15 = 0x78,
157 PCIPM = 0x40,
159 /* BusMode bits */
160 CmdReset = (1 << 0),
161 CacheAlign16 = 0x00008000,
162 BurstLen4 = 0x00000400,
163 DescSkipLen = (DSL << 2),
165 /* Rx/TxPoll bits */
166 NormalTxPoll = (1 << 0),
167 NormalRxPoll = (1 << 0),
169 /* Tx/Rx descriptor status bits */
170 DescOwn = (1 << 31),
171 RxError = (1 << 15),
172 RxErrLong = (1 << 7),
173 RxErrCRC = (1 << 1),
174 RxErrFIFO = (1 << 0),
175 RxErrRunt = (1 << 11),
176 RxErrFrame = (1 << 14),
177 RingEnd = (1 << 25),
178 FirstFrag = (1 << 29),
179 LastFrag = (1 << 30),
180 TxError = (1 << 15),
181 TxFIFOUnder = (1 << 1),
182 TxLinkFail = (1 << 2) | (1 << 10) | (1 << 11),
183 TxMaxCol = (1 << 8),
184 TxOWC = (1 << 9),
185 TxJabber = (1 << 14),
186 SetupFrame = (1 << 27),
187 TxSwInt = (1 << 31),
189 /* MacStatus bits */
190 IntrOK = (1 << 16),
191 IntrErr = (1 << 15),
192 RxIntr = (1 << 6),
193 RxEmpty = (1 << 7),
194 TxIntr = (1 << 0),
195 TxEmpty = (1 << 2),
196 PciErr = (1 << 13),
197 TxState = (1 << 22) | (1 << 21) | (1 << 20),
198 RxState = (1 << 19) | (1 << 18) | (1 << 17),
199 LinkFail = (1 << 12),
200 LinkPass = (1 << 4),
201 RxStopped = (1 << 8),
202 TxStopped = (1 << 1),
204 /* MacMode bits */
205 TxEnable = (1 << 13),
206 RxEnable = (1 << 1),
207 RxTx = TxEnable | RxEnable,
208 FullDuplex = (1 << 9),
209 AcceptAllMulticast = (1 << 7),
210 AcceptAllPhys = (1 << 6),
211 BOCnt = (1 << 5),
212 MacModeClear = (1<<12) | (1<<11) | (1<<10) | (1<<8) | (1<<3) |
213 RxTx | BOCnt | AcceptAllPhys | AcceptAllMulticast,
215 /* ROMCmd bits */
216 EE_SHIFT_CLK = 0x02, /* EEPROM shift clock. */
217 EE_CS = 0x01, /* EEPROM chip select. */
218 EE_DATA_WRITE = 0x04, /* Data from the Tulip to EEPROM. */
219 EE_WRITE_0 = 0x01,
220 EE_WRITE_1 = 0x05,
221 EE_DATA_READ = 0x08, /* Data from the EEPROM chip. */
222 EE_ENB = (0x4800 | EE_CS),
224 /* The EEPROM commands include the alway-set leading bit. */
225 EE_READ_CMD = 6,
227 /* RxMissed bits */
228 RxMissedOver = (1 << 16),
229 RxMissedMask = 0xffff,
231 /* SROM-related bits */
232 SROMC0InfoLeaf = 27,
233 MediaBlockMask = 0x3f,
234 MediaCustomCSRs = (1 << 6),
236 /* PCIPM bits */
237 PM_Sleep = (1 << 31),
238 PM_Snooze = (1 << 30),
239 PM_Mask = PM_Sleep | PM_Snooze,
241 /* SIAStatus bits */
242 NWayState = (1 << 14) | (1 << 13) | (1 << 12),
243 NWayRestart = (1 << 12),
244 NonselPortActive = (1 << 9),
245 SelPortActive = (1 << 8),
246 LinkFailStatus = (1 << 2),
247 NetCxnErr = (1 << 1),
250 static const u32 de_intr_mask =
251 IntrOK | IntrErr | RxIntr | RxEmpty | TxIntr | TxEmpty |
252 LinkPass | LinkFail | PciErr;
255 * Set the programmable burst length to 4 longwords for all:
256 * DMA errors result without these values. Cache align 16 long.
258 static const u32 de_bus_mode = CacheAlign16 | BurstLen4 | DescSkipLen;
260 struct de_srom_media_block {
261 u8 opts;
262 u16 csr13;
263 u16 csr14;
264 u16 csr15;
265 } __packed;
267 struct de_srom_info_leaf {
268 u16 default_media;
269 u8 n_blocks;
270 u8 unused;
271 } __packed;
273 struct de_desc {
274 __le32 opts1;
275 __le32 opts2;
276 __le32 addr1;
277 __le32 addr2;
278 #if DSL
279 __le32 skip[DSL];
280 #endif
283 struct media_info {
284 u16 type; /* DE_MEDIA_xxx */
285 u16 csr13;
286 u16 csr14;
287 u16 csr15;
290 struct ring_info {
291 struct sk_buff *skb;
292 dma_addr_t mapping;
295 struct de_private {
296 unsigned tx_head;
297 unsigned tx_tail;
298 unsigned rx_tail;
300 void __iomem *regs;
301 struct net_device *dev;
302 spinlock_t lock;
304 struct de_desc *rx_ring;
305 struct de_desc *tx_ring;
306 struct ring_info tx_skb[DE_TX_RING_SIZE];
307 struct ring_info rx_skb[DE_RX_RING_SIZE];
308 unsigned rx_buf_sz;
309 dma_addr_t ring_dma;
311 u32 msg_enable;
313 struct net_device_stats net_stats;
315 struct pci_dev *pdev;
317 u16 setup_frame[DE_SETUP_FRAME_WORDS];
319 u32 media_type;
320 u32 media_supported;
321 u32 media_advertise;
322 struct media_info media[DE_MAX_MEDIA];
323 struct timer_list media_timer;
325 u8 *ee_data;
326 unsigned board_idx;
327 unsigned de21040 : 1;
328 unsigned media_lock : 1;
332 static void de_set_rx_mode (struct net_device *dev);
333 static void de_tx (struct de_private *de);
334 static void de_clean_rings (struct de_private *de);
335 static void de_media_interrupt (struct de_private *de, u32 status);
336 static void de21040_media_timer (unsigned long data);
337 static void de21041_media_timer (unsigned long data);
338 static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media);
341 static DEFINE_PCI_DEVICE_TABLE(de_pci_tbl) = {
342 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
343 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
344 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
345 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
346 { },
348 MODULE_DEVICE_TABLE(pci, de_pci_tbl);
350 static const char * const media_name[DE_MAX_MEDIA] = {
351 "10baseT auto",
352 "BNC",
353 "AUI",
354 "10baseT-HD",
355 "10baseT-FD"
358 /* 21040 transceiver register settings:
359 * TP AUTO(unused), BNC(unused), AUI, TP, TP FD*/
360 static u16 t21040_csr13[] = { 0, 0, 0x8F09, 0x8F01, 0x8F01, };
361 static u16 t21040_csr14[] = { 0, 0, 0x0705, 0xFFFF, 0xFFFD, };
362 static u16 t21040_csr15[] = { 0, 0, 0x0006, 0x0000, 0x0000, };
364 /* 21041 transceiver register settings: TP AUTO, BNC, AUI, TP, TP FD*/
365 static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, };
366 static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
367 /* If on-chip autonegotiation is broken, use half-duplex (FF3F) instead */
368 static u16 t21041_csr14_brk[] = { 0xFF3F, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
369 static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
372 #define dr32(reg) ioread32(de->regs + (reg))
373 #define dw32(reg, val) iowrite32((val), de->regs + (reg))
/* de_rx_err_acct: account one failed Rx descriptor into net_stats.
 * Called from de_rx() when opts1 carries error bits; classifies the
 * failure as length / CRC / FIFO from the descriptor status word. */
376 static void de_rx_err_acct (struct de_private *de, unsigned rx_tail,
377 u32 status, u32 len)
379 if (netif_msg_rx_err (de))
380 printk (KERN_DEBUG
381 "%s: rx err, slot %d status 0x%x len %d\n",
382 de->dev->name, rx_tail, status, len);
/* A clean single-buffer frame has FirstFrag|LastFrag set and no
 * error flags; anything else is a frame spanning buffers or an error. */
384 if ((status & 0x38000300) != 0x0300) {
385 /* Ignore earlier buffers. */
386 if ((status & 0xffff) != 0x7fff) {
387 if (netif_msg_rx_err(de))
388 dev_warn(&de->dev->dev,
389 "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
390 status);
391 de->net_stats.rx_length_errors++;
393 } else if (status & RxError) {
394 /* There was a fatal error. */
395 de->net_stats.rx_errors++; /* end of a packet.*/
/* 0x0890 covers RxErrRunt, RxErrLong and bit 4 (presumably the
 * receive-watchdog bit — confirm against the 21041 datasheet). */
396 if (status & 0x0890) de->net_stats.rx_length_errors++;
397 if (status & RxErrCRC) de->net_stats.rx_crc_errors++;
398 if (status & RxErrFIFO) de->net_stats.rx_fifo_errors++;
/* de_rx: drain completed Rx descriptors, push packets up the stack and
 * re-arm each slot for the chip. Bounded by rx_work to keep interrupt
 * latency sane. Small frames (<= rx_copybreak) are copied into a fresh
 * skb so the large ring buffer can be reused in place. */
402 static void de_rx (struct de_private *de)
404 unsigned rx_tail = de->rx_tail;
405 unsigned rx_work = DE_RX_RING_SIZE;
406 unsigned drop = 0;
407 int rc;
409 while (--rx_work) {
410 u32 status, len;
411 dma_addr_t mapping;
412 struct sk_buff *skb, *copy_skb;
413 unsigned copying_skb, buflen;
415 skb = de->rx_skb[rx_tail].skb;
416 BUG_ON(!skb);
/* Read opts1 only after the barrier so we observe the chip's
 * descriptor writeback in order. */
417 rmb();
418 status = le32_to_cpu(de->rx_ring[rx_tail].opts1);
419 if (status & DescOwn)
420 break;
/* Frame length lives in bits 16..26; subtract the 4-byte CRC. */
422 len = ((status >> 16) & 0x7ff) - 4;
423 mapping = de->rx_skb[rx_tail].mapping;
425 if (unlikely(drop)) {
426 de->net_stats.rx_dropped++;
427 goto rx_next;
430 if (unlikely((status & 0x38008300) != 0x0300)) {
431 de_rx_err_acct(de, rx_tail, status, len);
432 goto rx_next;
435 copying_skb = (len <= rx_copybreak);
437 if (unlikely(netif_msg_rx_status(de)))
438 printk(KERN_DEBUG "%s: rx slot %d status 0x%x len %d copying? %d\n",
439 de->dev->name, rx_tail, status, len,
440 copying_skb);
442 buflen = copying_skb ? (len + RX_OFFSET) : de->rx_buf_sz;
443 copy_skb = dev_alloc_skb (buflen);
444 if (unlikely(!copy_skb)) {
445 de->net_stats.rx_dropped++;
446 drop = 1;
/* Allocation failed: shrink the remaining work budget and
 * drop everything else this pass. */
447 rx_work = 100;
448 goto rx_next;
/* Not copying: hand the full-size ring skb up and install the
 * freshly allocated one in its slot. */
451 if (!copying_skb) {
452 pci_unmap_single(de->pdev, mapping,
453 buflen, PCI_DMA_FROMDEVICE);
454 skb_put(skb, len);
456 mapping =
457 de->rx_skb[rx_tail].mapping =
458 pci_map_single(de->pdev, copy_skb->data,
459 buflen, PCI_DMA_FROMDEVICE);
460 de->rx_skb[rx_tail].skb = copy_skb;
461 } else {
462 pci_dma_sync_single_for_cpu(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
463 skb_reserve(copy_skb, RX_OFFSET);
464 skb_copy_from_linear_data(skb, skb_put(copy_skb, len),
465 len);
466 pci_dma_sync_single_for_device(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
468 /* We'll reuse the original ring buffer. */
469 skb = copy_skb;
472 skb->protocol = eth_type_trans (skb, de->dev);
474 de->net_stats.rx_packets++;
475 de->net_stats.rx_bytes += skb->len;
476 rc = netif_rx (skb);
477 if (rc == NET_RX_DROP)
478 drop = 1;
/* Re-arm the slot: rewrite opts2/addr1 first, barrier, then hand
 * ownership back to the chip via DescOwn. */
480 rx_next:
481 if (rx_tail == (DE_RX_RING_SIZE - 1))
482 de->rx_ring[rx_tail].opts2 =
483 cpu_to_le32(RingEnd | de->rx_buf_sz);
484 else
485 de->rx_ring[rx_tail].opts2 = cpu_to_le32(de->rx_buf_sz);
486 de->rx_ring[rx_tail].addr1 = cpu_to_le32(mapping);
487 wmb();
488 de->rx_ring[rx_tail].opts1 = cpu_to_le32(DescOwn);
489 rx_tail = NEXT_RX(rx_tail);
492 if (!rx_work)
493 dev_warn(&de->dev->dev, "rx work limit reached\n");
495 de->rx_tail = rx_tail;
/* de_interrupt: top-half interrupt handler. Acks all pending status
 * bits, then dispatches Rx, Tx, link-change and PCI-error work. Rx runs
 * outside de->lock; Tx reclaim and media handling take it. */
498 static irqreturn_t de_interrupt (int irq, void *dev_instance)
500 struct net_device *dev = dev_instance;
501 struct de_private *de = netdev_priv(dev);
502 u32 status;
504 status = dr32(MacStatus);
/* Not our interrupt, or card returned all-ones (shared-IRQ probe /
 * surprise removal): bail without touching hardware. */
505 if ((!(status & (IntrOK|IntrErr))) || (status == 0xFFFF))
506 return IRQ_NONE;
508 if (netif_msg_intr(de))
509 printk(KERN_DEBUG "%s: intr, status %08x mode %08x desc %u/%u/%u\n",
510 dev->name, status, dr32(MacMode),
511 de->rx_tail, de->tx_head, de->tx_tail);
/* Writing the status bits back acknowledges them. */
513 dw32(MacStatus, status);
515 if (status & (RxIntr | RxEmpty)) {
516 de_rx(de);
/* Ring ran dry: kick the Rx DMA engine to re-poll descriptors. */
517 if (status & RxEmpty)
518 dw32(RxPoll, NormalRxPoll);
521 spin_lock(&de->lock);
523 if (status & (TxIntr | TxEmpty))
524 de_tx(de);
526 if (status & (LinkPass | LinkFail))
527 de_media_interrupt(de, status);
529 spin_unlock(&de->lock);
/* Clear the PCI error by writing the status register back to itself. */
531 if (status & PciErr) {
532 u16 pci_status;
534 pci_read_config_word(de->pdev, PCI_STATUS, &pci_status);
535 pci_write_config_word(de->pdev, PCI_STATUS, pci_status);
536 dev_err(&de->dev->dev,
537 "PCI bus error, status=%08x, PCI status=%04x\n",
538 status, pci_status);
541 return IRQ_HANDLED;
/* de_tx: reclaim completed Tx descriptors between tx_tail and tx_head,
 * unmapping DMA buffers, accounting errors and freeing skbs. Sentinel
 * skb values (DE_DUMMY_SKB / DE_SETUP_SKB) mark non-packet entries.
 * Caller holds de->lock (called from de_interrupt). */
544 static void de_tx (struct de_private *de)
546 unsigned tx_head = de->tx_head;
547 unsigned tx_tail = de->tx_tail;
549 while (tx_tail != tx_head) {
550 struct sk_buff *skb;
551 u32 status;
/* Barrier before reading opts1 so the chip's ownership handoff
 * is seen before the rest of the descriptor. */
553 rmb();
554 status = le32_to_cpu(de->tx_ring[tx_tail].opts1);
555 if (status & DescOwn)
556 break;
558 skb = de->tx_skb[tx_tail].skb;
559 BUG_ON(!skb);
/* Errata dummy entry: nothing mapped, nothing to free. */
560 if (unlikely(skb == DE_DUMMY_SKB))
561 goto next;
/* Setup frame: unmap the filter buffer, but there is no skb. */
563 if (unlikely(skb == DE_SETUP_SKB)) {
564 pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
565 sizeof(de->setup_frame), PCI_DMA_TODEVICE);
566 goto next;
569 pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
570 skb->len, PCI_DMA_TODEVICE);
572 if (status & LastFrag) {
573 if (status & TxError) {
574 if (netif_msg_tx_err(de))
575 printk(KERN_DEBUG "%s: tx err, status 0x%x\n",
576 de->dev->name, status);
577 de->net_stats.tx_errors++;
578 if (status & TxOWC)
579 de->net_stats.tx_window_errors++;
580 if (status & TxMaxCol)
581 de->net_stats.tx_aborted_errors++;
582 if (status & TxLinkFail)
583 de->net_stats.tx_carrier_errors++;
584 if (status & TxFIFOUnder)
585 de->net_stats.tx_fifo_errors++;
586 } else {
587 de->net_stats.tx_packets++;
588 de->net_stats.tx_bytes += skb->len;
589 if (netif_msg_tx_done(de))
590 printk(KERN_DEBUG "%s: tx done, slot %d\n",
591 de->dev->name, tx_tail);
593 dev_kfree_skb_irq(skb);
596 next:
597 de->tx_skb[tx_tail].skb = NULL;
599 tx_tail = NEXT_TX(tx_tail);
602 de->tx_tail = tx_tail;
/* Restart the queue once a quarter of the ring is free again. */
604 if (netif_queue_stopped(de->dev) && (TX_BUFFS_AVAIL(de) > (DE_TX_RING_SIZE / 4)))
605 netif_wake_queue(de->dev);
/* de_start_xmit: ndo_start_xmit hook. Maps the skb for DMA, fills one
 * Tx descriptor (single-fragment: FirstFrag|LastFrag), hands ownership
 * to the chip and triggers a transmit poll. Returns NETDEV_TX_BUSY if
 * the ring is full. */
608 static netdev_tx_t de_start_xmit (struct sk_buff *skb,
609 struct net_device *dev)
611 struct de_private *de = netdev_priv(dev);
612 unsigned int entry, tx_free;
613 u32 mapping, len, flags = FirstFrag | LastFrag;
614 struct de_desc *txd;
616 spin_lock_irq(&de->lock);
618 tx_free = TX_BUFFS_AVAIL(de);
619 if (tx_free == 0) {
620 netif_stop_queue(dev);
621 spin_unlock_irq(&de->lock);
622 return NETDEV_TX_BUSY;
624 tx_free--;
626 entry = de->tx_head;
628 txd = &de->tx_ring[entry];
630 len = skb->len;
631 mapping = pci_map_single(de->pdev, skb->data, len, PCI_DMA_TODEVICE);
632 if (entry == (DE_TX_RING_SIZE - 1))
633 flags |= RingEnd;
/* Request a software Tx interrupt when the ring is (half) full, to
 * guarantee timely reclaim (interrupt mitigation). */
634 if (!tx_free || (tx_free == (DE_TX_RING_SIZE / 2)))
635 flags |= TxSwInt;
636 flags |= len;
637 txd->opts2 = cpu_to_le32(flags);
638 txd->addr1 = cpu_to_le32(mapping);
640 de->tx_skb[entry].skb = skb;
641 de->tx_skb[entry].mapping = mapping;
/* Descriptor fields must be visible before DescOwn is set. */
642 wmb();
644 txd->opts1 = cpu_to_le32(DescOwn);
645 wmb();
647 de->tx_head = NEXT_TX(entry);
648 if (netif_msg_tx_queued(de))
649 printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
650 dev->name, entry, skb->len);
652 if (tx_free == 0)
653 netif_stop_queue(dev);
655 spin_unlock_irq(&de->lock);
657 /* Trigger an immediate transmit demand. */
658 dw32(TxPoll, NormalTxPoll);
660 return NETDEV_TX_OK;
663 /* Set or clear the multicast filter for this adaptor.
664 Note that we only use exclusion around actually queueing the
665 new frame, not around filling de->setup_frame. This is non-deterministic
666 when re-entered but still correct. */
/* Byte-addressed little-endian bit set, used to build the 512-bit
 * multicast hash table regardless of host endianness. */
668 #undef set_bit_le
669 #define set_bit_le(i,p) do { ((char *)(p))[(i)/8] |= (1<<((i)%8)); } while(0)
/* build_setup_frame_hash: fill the Tulip setup frame in hash-filter
 * format: a 512-bit CRC hash table of multicast addresses plus our own
 * station address in the final perfect-filter slot. Each 16-bit value
 * is stored twice (low shortword valid on both endiannesses). */
671 static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
673 struct de_private *de = netdev_priv(dev);
674 u16 hash_table[32];
675 struct netdev_hw_addr *ha;
676 int i;
677 u16 *eaddrs;
679 memset(hash_table, 0, sizeof(hash_table));
680 set_bit_le(255, hash_table); /* Broadcast entry */
681 /* This should work on big-endian machines as well. */
682 netdev_for_each_mc_addr(ha, dev) {
683 int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;
685 set_bit_le(index, hash_table);
688 for (i = 0; i < 32; i++) {
689 *setup_frm++ = hash_table[i];
690 *setup_frm++ = hash_table[i];
692 setup_frm = &de->setup_frame[13*6];
694 /* Fill the final entry with our physical address. */
695 eaddrs = (u16 *)dev->dev_addr;
696 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
697 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
698 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
/* build_setup_frame_perfect: fill the setup frame in perfect-filter
 * format — up to 14 multicast addresses, unused slots padded with the
 * broadcast address, and our station address in the 16th slot. */
701 static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
703 struct de_private *de = netdev_priv(dev);
704 struct netdev_hw_addr *ha;
705 u16 *eaddrs;
707 /* We have <= 14 addresses so we can use the wonderful
708 16 address perfect filtering of the Tulip. */
709 netdev_for_each_mc_addr(ha, dev) {
710 eaddrs = (u16 *) ha->addr;
/* Each 16-bit word of the address is written twice, per the
 * setup-frame layout. */
711 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
712 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
713 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
715 /* Fill the unused entries with the broadcast address. */
716 memset(setup_frm, 0xff, (15 - netdev_mc_count(dev)) * 12);
717 setup_frm = &de->setup_frame[15*6];
719 /* Fill the final entry with our physical address. */
720 eaddrs = (u16 *)dev->dev_addr;
721 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
722 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
723 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
/* __de_set_rx_mode: program the chip's Rx filter. Promiscuous and
 * all-multi modes only toggle MacMode bits; otherwise a setup frame is
 * built and queued on the Tx ring (preceded by a dummy descriptor to
 * work around a chip erratum). Caller holds de->lock. */
727 static void __de_set_rx_mode (struct net_device *dev)
729 struct de_private *de = netdev_priv(dev);
730 u32 macmode;
731 unsigned int entry;
732 u32 mapping;
733 struct de_desc *txd;
734 struct de_desc *dummy_txd = NULL;
736 macmode = dr32(MacMode) & ~(AcceptAllMulticast | AcceptAllPhys);
738 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
739 macmode |= AcceptAllMulticast | AcceptAllPhys;
740 goto out;
743 if ((netdev_mc_count(dev) > 1000) || (dev->flags & IFF_ALLMULTI)) {
744 /* Too many to filter well -- accept all multicasts. */
745 macmode |= AcceptAllMulticast;
746 goto out;
749 /* Note that only the low-address shortword of setup_frame is valid!
750 The values are doubled for big-endian architectures. */
751 if (netdev_mc_count(dev) > 14) /* Must use a multicast hash table. */
752 build_setup_frame_hash (de->setup_frame, dev);
753 else
754 build_setup_frame_perfect (de->setup_frame, dev);
757 * Now add this frame to the Tx list.
760 entry = de->tx_head;
762 /* Avoid a chip errata by prefixing a dummy entry. */
763 if (entry != 0) {
764 de->tx_skb[entry].skb = DE_DUMMY_SKB;
766 dummy_txd = &de->tx_ring[entry];
767 dummy_txd->opts2 = (entry == (DE_TX_RING_SIZE - 1)) ?
768 cpu_to_le32(RingEnd) : 0;
769 dummy_txd->addr1 = 0;
771 /* Must set DescOwned later to avoid race with chip */
773 entry = NEXT_TX(entry);
776 de->tx_skb[entry].skb = DE_SETUP_SKB;
777 de->tx_skb[entry].mapping = mapping =
778 pci_map_single (de->pdev, de->setup_frame,
779 sizeof (de->setup_frame), PCI_DMA_TODEVICE);
781 /* Put the setup frame on the Tx list. */
782 txd = &de->tx_ring[entry];
783 if (entry == (DE_TX_RING_SIZE - 1))
784 txd->opts2 = cpu_to_le32(SetupFrame | RingEnd | sizeof (de->setup_frame));
785 else
786 txd->opts2 = cpu_to_le32(SetupFrame | sizeof (de->setup_frame));
787 txd->addr1 = cpu_to_le32(mapping);
/* Publish the setup descriptor before granting ownership, then the
 * dummy descriptor last so the chip reaches them in a safe order. */
788 wmb();
790 txd->opts1 = cpu_to_le32(DescOwn);
791 wmb();
793 if (dummy_txd) {
794 dummy_txd->opts1 = cpu_to_le32(DescOwn);
795 wmb();
798 de->tx_head = NEXT_TX(entry);
800 if (TX_BUFFS_AVAIL(de) == 0)
801 netif_stop_queue(dev);
803 /* Trigger an immediate transmit demand. */
804 dw32(TxPoll, NormalTxPoll);
806 out:
/* Avoid a register write if the filter mode is unchanged. */
807 if (macmode != dr32(MacMode))
808 dw32(MacMode, macmode);
811 static void de_set_rx_mode (struct net_device *dev)
813 unsigned long flags;
814 struct de_private *de = netdev_priv(dev);
816 spin_lock_irqsave (&de->lock, flags);
817 __de_set_rx_mode(dev);
818 spin_unlock_irqrestore (&de->lock, flags);
821 static inline void de_rx_missed(struct de_private *de, u32 rx_missed)
823 if (unlikely(rx_missed & RxMissedOver))
824 de->net_stats.rx_missed_errors += RxMissedMask;
825 else
826 de->net_stats.rx_missed_errors += (rx_missed & RxMissedMask);
829 static void __de_get_stats(struct de_private *de)
831 u32 tmp = dr32(RxMissed); /* self-clearing */
833 de_rx_missed(de, tmp);
/* de_get_stats: ndo_get_stats hook. Folds in the silently-dropped
 * frame count from hardware (when the device is up and present) and
 * returns the driver's cumulative statistics. */
836 static struct net_device_stats *de_get_stats(struct net_device *dev)
838 struct de_private *de = netdev_priv(dev);
840 /* The chip only need report frame silently dropped. */
841 spin_lock_irq(&de->lock);
842 if (netif_running(dev) && netif_device_present(dev))
843 __de_get_stats(de);
844 spin_unlock_irq(&de->lock);
846 return &de->net_stats;
849 static inline int de_is_running (struct de_private *de)
851 return (dr32(MacStatus) & (RxState | TxState)) ? 1 : 0;
/* de_stop_rxtx: disable both DMA engines and busy-wait (up to ~1.3 ms)
 * for any in-flight frame to complete. Warns if the chip never goes
 * idle. */
854 static void de_stop_rxtx (struct de_private *de)
856 u32 macmode;
/* 13 iterations of 100 us == 1.3 ms budget; see comment below. */
857 unsigned int i = 1300/100;
859 macmode = dr32(MacMode);
860 if (macmode & RxTx) {
861 dw32(MacMode, macmode & ~RxTx);
/* Read back to flush the posted write before polling status. */
862 dr32(MacMode);
865 /* wait until in-flight frame completes.
866 * Max time @ 10BT: 1500*8b/10Mbps == 1200us (+ 100us margin)
867 * Typically expect this loop to end in < 50 us on 100BT.
869 while (--i) {
870 if (!de_is_running(de))
871 return;
872 udelay(100);
875 dev_warn(&de->dev->dev, "timeout expired stopping DMA\n");
878 static inline void de_start_rxtx (struct de_private *de)
880 u32 macmode;
882 macmode = dr32(MacMode);
883 if ((macmode & RxTx) != RxTx) {
884 dw32(MacMode, macmode | RxTx);
885 dr32(MacMode);
/* de_stop_hw: quiesce the chip — mask all interrupts, stop the DMA
 * engines, acknowledge any stale status, and reset the software ring
 * indices. */
889 static void de_stop_hw (struct de_private *de)
892 udelay(5);
893 dw32(IntrMask, 0);
895 de_stop_rxtx(de);
/* Writing MacStatus back to itself acks all pending interrupt bits. */
897 dw32(MacStatus, dr32(MacStatus));
899 udelay(10);
901 de->rx_tail = 0;
902 de->tx_head = de->tx_tail = 0;
905 static void de_link_up(struct de_private *de)
907 if (!netif_carrier_ok(de->dev)) {
908 netif_carrier_on(de->dev);
909 if (netif_msg_link(de))
910 dev_info(&de->dev->dev, "link up, media %s\n",
911 media_name[de->media_type]);
915 static void de_link_down(struct de_private *de)
917 if (netif_carrier_ok(de->dev)) {
918 netif_carrier_off(de->dev);
919 if (netif_msg_link(de))
920 dev_info(&de->dev->dev, "link down\n");
/* de_set_media: program the SIA (CSR13-15) for de->media_type and set
 * the MacMode duplex bit. Should be called with the DMA engines
 * stopped; warns if they are still running. */
924 static void de_set_media (struct de_private *de)
926 unsigned media = de->media_type;
927 u32 macmode = dr32(MacMode);
929 if (de_is_running(de))
930 dev_warn(&de->dev->dev,
931 "chip is running while changing media!\n");
/* See FULL_DUPLEX_MAGIC above: 21040-only CSR11 workaround. */
933 if (de->de21040)
934 dw32(CSR11, FULL_DUPLEX_MAGIC);
935 dw32(CSR13, 0); /* Reset phy */
936 dw32(CSR14, de->media[media].csr14);
937 dw32(CSR15, de->media[media].csr15);
938 dw32(CSR13, de->media[media].csr13);
940 /* must delay 10ms before writing to other registers,
941 * especially CSR6
943 mdelay(10);
945 if (media == DE_MEDIA_TP_FD)
946 macmode |= FullDuplex;
947 else
948 macmode &= ~FullDuplex;
950 if (netif_msg_link(de)) {
951 dev_info(&de->dev->dev, "set link %s\n", media_name[media]);
952 dev_info(&de->dev->dev, "mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n",
953 dr32(MacMode), dr32(SIAStatus),
954 dr32(CSR13), dr32(CSR14), dr32(CSR15));
956 dev_info(&de->dev->dev,
957 "set mode 0x%x, set sia 0x%x,0x%x,0x%x\n",
958 macmode, de->media[media].csr13,
959 de->media[media].csr14, de->media[media].csr15);
/* Skip the MacMode write if nothing changed. */
961 if (macmode != dr32(MacMode))
962 dw32(MacMode, macmode);
965 static void de_next_media (struct de_private *de, u32 *media,
966 unsigned int n_media)
968 unsigned int i;
970 for (i = 0; i < n_media; i++) {
971 if (de_ok_to_advertise(de, media[i])) {
972 de->media_type = media[i];
973 return;
/* de21040_media_timer: periodic link watchdog for the 21040. With
 * carrier, re-arms a slow (60 s) timer; without it, alternates between
 * AUI and TP (unless media is locked), reprograms the SIA, and re-arms
 * a fast (5 s) timer. */
978 static void de21040_media_timer (unsigned long data)
980 struct de_private *de = (struct de_private *) data;
981 struct net_device *dev = de->dev;
982 u32 status = dr32(SIAStatus);
983 unsigned int carrier;
984 unsigned long flags;
986 carrier = (status & NetCxnErr) ? 0 : 1;
988 if (carrier) {
/* On TP, LinkFailStatus means the link isn't actually up yet. */
989 if (de->media_type != DE_MEDIA_AUI && (status & LinkFailStatus))
990 goto no_link_yet;
992 de->media_timer.expires = jiffies + DE_TIMER_LINK;
993 add_timer(&de->media_timer);
994 if (!netif_carrier_ok(dev))
995 de_link_up(de);
996 else
997 if (netif_msg_timer(de))
998 dev_info(&dev->dev, "%s link ok, status %x\n",
999 media_name[de->media_type], status);
1000 return;
1003 de_link_down(de);
/* User locked the media type: never auto-switch. */
1005 if (de->media_lock)
1006 return;
/* 21040 only has AUI and TP: toggle to the other port. */
1008 if (de->media_type == DE_MEDIA_AUI) {
1009 u32 next_state = DE_MEDIA_TP;
1010 de_next_media(de, &next_state, 1);
1011 } else {
1012 u32 next_state = DE_MEDIA_AUI;
1013 de_next_media(de, &next_state, 1);
1016 spin_lock_irqsave(&de->lock, flags);
1017 de_stop_rxtx(de);
1018 spin_unlock_irqrestore(&de->lock, flags);
1019 de_set_media(de);
1020 de_start_rxtx(de);
1022 no_link_yet:
1023 de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
1024 add_timer(&de->media_timer);
1026 if (netif_msg_timer(de))
1027 dev_info(&dev->dev, "no link, trying media %s, status %x\n",
1028 media_name[de->media_type], status);
/* de_ok_to_advertise: return 1 if @new_media is compatible with the
 * user-configured advertising mask (de->media_advertise), 0 otherwise.
 * Used by the media state machines before switching ports. */
1031 static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media)
1033 switch (new_media) {
1034 case DE_MEDIA_TP_AUTO:
/* Autoselect needs autoneg plus at least one TP mode advertised. */
1035 if (!(de->media_advertise & ADVERTISED_Autoneg))
1036 return 0;
1037 if (!(de->media_advertise & (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full)))
1038 return 0;
1039 break;
1040 case DE_MEDIA_BNC:
1041 if (!(de->media_advertise & ADVERTISED_BNC))
1042 return 0;
1043 break;
1044 case DE_MEDIA_AUI:
1045 if (!(de->media_advertise & ADVERTISED_AUI))
1046 return 0;
1047 break;
1048 case DE_MEDIA_TP:
1049 if (!(de->media_advertise & ADVERTISED_10baseT_Half))
1050 return 0;
1051 break;
1052 case DE_MEDIA_TP_FD:
1053 if (!(de->media_advertise & ADVERTISED_10baseT_Full))
1054 return 0;
1055 break;
1058 return 1;
/* de21041_media_timer: periodic link watchdog for the 21041. With
 * carrier, re-arms a slow timer. Without it, uses the SIA activity
 * bits as a hint for the correct port, otherwise cycles through the
 * advertised media (AUI -> BNC -> TP-auto ...), then reprograms the
 * SIA and re-arms a fast timer. */
1061 static void de21041_media_timer (unsigned long data)
1063 struct de_private *de = (struct de_private *) data;
1064 struct net_device *dev = de->dev;
1065 u32 status = dr32(SIAStatus);
1066 unsigned int carrier;
1067 unsigned long flags;
1069 /* clear port active bits */
1070 dw32(SIAStatus, NonselPortActive | SelPortActive);
1072 carrier = (status & NetCxnErr) ? 0 : 1;
1074 if (carrier) {
/* TP media additionally require LinkFailStatus to be clear. */
1075 if ((de->media_type == DE_MEDIA_TP_AUTO ||
1076 de->media_type == DE_MEDIA_TP ||
1077 de->media_type == DE_MEDIA_TP_FD) &&
1078 (status & LinkFailStatus))
1079 goto no_link_yet;
1081 de->media_timer.expires = jiffies + DE_TIMER_LINK;
1082 add_timer(&de->media_timer);
1083 if (!netif_carrier_ok(dev))
1084 de_link_up(de);
1085 else
1086 if (netif_msg_timer(de))
1087 dev_info(&dev->dev,
1088 "%s link ok, mode %x status %x\n",
1089 media_name[de->media_type],
1090 dr32(MacMode), status);
1091 return;
1094 de_link_down(de);
1096 /* if media type locked, don't switch media */
1097 if (de->media_lock)
1098 goto set_media;
1100 /* if activity detected, use that as hint for new media type */
1101 if (status & NonselPortActive) {
1102 unsigned int have_media = 1;
1104 /* if AUI/BNC selected, then activity is on TP port */
1105 if (de->media_type == DE_MEDIA_AUI ||
1106 de->media_type == DE_MEDIA_BNC) {
1107 if (de_ok_to_advertise(de, DE_MEDIA_TP_AUTO))
1108 de->media_type = DE_MEDIA_TP_AUTO;
1109 else
1110 have_media = 0;
1113 /* TP selected. If there is only TP and BNC, then it's BNC */
1114 else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_BNC) &&
1115 de_ok_to_advertise(de, DE_MEDIA_BNC))
1116 de->media_type = DE_MEDIA_BNC;
1118 /* TP selected. If there is only TP and AUI, then it's AUI */
1119 else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_AUI) &&
1120 de_ok_to_advertise(de, DE_MEDIA_AUI))
1121 de->media_type = DE_MEDIA_AUI;
1123 /* otherwise, ignore the hint */
1124 else
1125 have_media = 0;
1127 if (have_media)
1128 goto set_media;
1132 * Absent or ambiguous activity hint, move to next advertised
1133 * media state. If de->media_type is left unchanged, this
1134 * simply resets the PHY and reloads the current media settings.
1136 if (de->media_type == DE_MEDIA_AUI) {
1137 u32 next_states[] = { DE_MEDIA_BNC, DE_MEDIA_TP_AUTO };
1138 de_next_media(de, next_states, ARRAY_SIZE(next_states));
1139 } else if (de->media_type == DE_MEDIA_BNC) {
1140 u32 next_states[] = { DE_MEDIA_TP_AUTO, DE_MEDIA_AUI };
1141 de_next_media(de, next_states, ARRAY_SIZE(next_states));
1142 } else {
1143 u32 next_states[] = { DE_MEDIA_AUI, DE_MEDIA_BNC, DE_MEDIA_TP_AUTO };
1144 de_next_media(de, next_states, ARRAY_SIZE(next_states));
1147 set_media:
/* Reprogram the SIA with the DMA engines stopped. */
1148 spin_lock_irqsave(&de->lock, flags);
1149 de_stop_rxtx(de);
1150 spin_unlock_irqrestore(&de->lock, flags);
1151 de_set_media(de);
1152 de_start_rxtx(de);
1154 no_link_yet:
1155 de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
1156 add_timer(&de->media_timer);
1158 if (netif_msg_timer(de))
1159 dev_info(&dev->dev, "no link, trying media %s, status %x\n",
1160 media_name[de->media_type], status);
/* de_media_interrupt: handle LinkPass/LinkFail interrupt bits. On
 * LinkPass, optionally switch from AUI/BNC to TP-auto and report the
 * link up; on LinkFail, report the link down only for TP media (AUI/BNC
 * have no link signal). Caller holds de->lock. */
1163 static void de_media_interrupt (struct de_private *de, u32 status)
1165 if (status & LinkPass) {
1166 /* Ignore if current media is AUI or BNC and we can't use TP */
1167 if ((de->media_type == DE_MEDIA_AUI ||
1168 de->media_type == DE_MEDIA_BNC) &&
1169 (de->media_lock ||
1170 !de_ok_to_advertise(de, DE_MEDIA_TP_AUTO)))
1171 return;
1172 /* If current media is not TP, change it to TP */
1173 if ((de->media_type == DE_MEDIA_AUI ||
1174 de->media_type == DE_MEDIA_BNC)) {
1175 de->media_type = DE_MEDIA_TP_AUTO;
1176 de_stop_rxtx(de);
1177 de_set_media(de);
1178 de_start_rxtx(de);
1180 de_link_up(de);
1181 mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK);
1182 return;
/* Only LinkPass and LinkFail reach this function. */
1185 BUG_ON(!(status & LinkFail));
1186 /* Mark the link as down only if current media is TP */
1187 if (netif_carrier_ok(de->dev) && de->media_type != DE_MEDIA_AUI &&
1188 de->media_type != DE_MEDIA_BNC) {
1189 de_link_down(de);
1190 mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
/* Software-reset the MAC through CSR0 (BusMode).
 * Returns 0 on success, -EBUSY if the chip is missing (bus reads
 * all-ones up front) or rx/tx state machines are still running after
 * the reset, and -ENODEV if MacStatus reads back all-ones. */
1194 static int de_reset_mac (struct de_private *de)
1196 u32 status, tmp;
1199 * Reset MAC. de4x5.c and tulip.c examined for "advice"
1200 * in this area.
/* All-ones from BusMode means the device is not responding at all. */
1203 if (dr32(BusMode) == 0xffffffff)
1204 return -EBUSY;
1206 /* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
1207 dw32 (BusMode, CmdReset);
1208 mdelay (1);
/* Restore the operating bus mode after the reset pulse. */
1210 dw32 (BusMode, de_bus_mode);
1211 mdelay (1);
/* A few dummy reads with delays to let the chip settle. */
1213 for (tmp = 0; tmp < 5; tmp++) {
1214 dr32 (BusMode);
1215 mdelay (1);
1218 mdelay (1);
1220 status = dr32(MacStatus);
1221 if (status & (RxState | TxState))
1222 return -EBUSY;
1223 if (status == 0xffffffff)
1224 return -ENODEV;
1225 return 0;
/* Bring a 21041 out of its PCI power-management sleep/snooze state by
 * clearing the PM bits in the PCIPM config register.  The 21040 has no
 * power management, so it is a no-op there. */
1228 static void de_adapter_wake (struct de_private *de)
1230 u32 pmctl;
1232 if (de->de21040)
1233 return;
1235 pci_read_config_dword(de->pdev, PCIPM, &pmctl);
1236 if (pmctl & PM_Mask) {
1237 pmctl &= ~PM_Mask;
1238 pci_write_config_dword(de->pdev, PCIPM, pmctl);
1240 /* de4x5.c delays, so we do too */
1241 msleep(10);
/* Put a 21041 into its low-power sleep state: reset the PHY via CSR13,
 * then set PM_Sleep in the PCIPM config register.  No-op on 21040. */
1245 static void de_adapter_sleep (struct de_private *de)
1247 u32 pmctl;
1249 if (de->de21040)
1250 return;
1252 dw32(CSR13, 0); /* Reset phy */
1253 pci_read_config_dword(de->pdev, PCIPM, &pmctl);
1254 pmctl |= PM_Sleep;
1255 pci_write_config_dword(de->pdev, PCIPM, pmctl);
/* Full hardware bring-up: wake the adapter, reset the MAC, program the
 * media/SIA, load the rx/tx descriptor ring addresses, enable rx+tx,
 * unmask interrupts and apply the current rx filter.
 * Returns 0 or the negative error from de_reset_mac(). */
1258 static int de_init_hw (struct de_private *de)
1260 struct net_device *dev = de->dev;
1261 u32 macmode;
1262 int rc;
1264 de_adapter_wake(de);
/* Preserve non-clearable MacMode bits across the reset. */
1266 macmode = dr32(MacMode) & ~MacModeClear;
1268 rc = de_reset_mac(de);
1269 if (rc)
1270 return rc;
1272 de_set_media(de); /* reset phy */
/* Tx ring lives directly after the rx ring in the same DMA block. */
1274 dw32(RxRingAddr, de->ring_dma);
1275 dw32(TxRingAddr, de->ring_dma + (sizeof(struct de_desc) * DE_RX_RING_SIZE));
1277 dw32(MacMode, RxTx | macmode);
1279 dr32(RxMissed); /* self-clearing */
1281 dw32(IntrMask, de_intr_mask);
1283 de_set_rx_mode(dev);
1285 return 0;
/* Allocate and DMA-map an skb for every rx descriptor, handing all
 * descriptors to the chip (DescOwn).  The last descriptor gets RingEnd.
 * On allocation failure the rings are cleaned and -ENOMEM returned. */
1288 static int de_refill_rx (struct de_private *de)
1290 unsigned i;
1292 for (i = 0; i < DE_RX_RING_SIZE; i++) {
1293 struct sk_buff *skb;
1295 skb = dev_alloc_skb(de->rx_buf_sz);
1296 if (!skb)
1297 goto err_out;
1299 skb->dev = de->dev;
1301 de->rx_skb[i].mapping = pci_map_single(de->pdev,
1302 skb->data, de->rx_buf_sz, PCI_DMA_FROMDEVICE);
1303 de->rx_skb[i].skb = skb;
/* Give the descriptor to the hardware. */
1305 de->rx_ring[i].opts1 = cpu_to_le32(DescOwn);
1306 if (i == (DE_RX_RING_SIZE - 1))
1307 de->rx_ring[i].opts2 =
1308 cpu_to_le32(RingEnd | de->rx_buf_sz);
1309 else
1310 de->rx_ring[i].opts2 = cpu_to_le32(de->rx_buf_sz);
1311 de->rx_ring[i].addr1 = cpu_to_le32(de->rx_skb[i].mapping);
1312 de->rx_ring[i].addr2 = 0;
1315 return 0;
1317 err_out:
/* Undo partial allocation/mapping before reporting failure. */
1318 de_clean_rings(de);
1319 return -ENOMEM;
/* Zero the tx ring, mark its last descriptor with RingEnd, reset the
 * ring indices, and populate the rx ring via de_refill_rx(). */
1322 static int de_init_rings (struct de_private *de)
1324 memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
1325 de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
1327 de->rx_tail = 0;
1328 de->tx_head = de->tx_tail = 0;
1330 return de_refill_rx (de);
/* Allocate one coherent DMA block holding both rings (rx first, tx
 * immediately after), then initialize them.  Returns -ENOMEM or the
 * result of de_init_rings(). */
1333 static int de_alloc_rings (struct de_private *de)
1335 de->rx_ring = pci_alloc_consistent(de->pdev, DE_RING_BYTES, &de->ring_dma);
1336 if (!de->rx_ring)
1337 return -ENOMEM;
1338 de->tx_ring = &de->rx_ring[DE_RX_RING_SIZE];
1339 return de_init_rings(de);
/* Reclaim everything attached to the rings: zero the descriptors
 * (keeping RingEnd on the last of each), then unmap and free all rx
 * skbs and any pending tx skbs.  Dummy/setup tx entries are special:
 * DE_DUMMY_SKB is skipped entirely, DE_SETUP_SKB is unmapped but not
 * freed (it maps de->setup_frame, not a real skb). */
1342 static void de_clean_rings (struct de_private *de)
1344 unsigned i;
1346 memset(de->rx_ring, 0, sizeof(struct de_desc) * DE_RX_RING_SIZE);
1347 de->rx_ring[DE_RX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
/* Make descriptor writes visible before touching the buffers. */
1348 wmb();
1349 memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
1350 de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
1351 wmb();
1353 for (i = 0; i < DE_RX_RING_SIZE; i++) {
1354 if (de->rx_skb[i].skb) {
1355 pci_unmap_single(de->pdev, de->rx_skb[i].mapping,
1356 de->rx_buf_sz, PCI_DMA_FROMDEVICE);
1357 dev_kfree_skb(de->rx_skb[i].skb);
1361 for (i = 0; i < DE_TX_RING_SIZE; i++) {
1362 struct sk_buff *skb = de->tx_skb[i].skb;
1363 if ((skb) && (skb != DE_DUMMY_SKB)) {
1364 if (skb != DE_SETUP_SKB) {
/* A real packet that never made it out counts as dropped. */
1365 de->net_stats.tx_dropped++;
1366 pci_unmap_single(de->pdev,
1367 de->tx_skb[i].mapping,
1368 skb->len, PCI_DMA_TODEVICE);
1369 dev_kfree_skb(skb);
1370 } else {
1371 pci_unmap_single(de->pdev,
1372 de->tx_skb[i].mapping,
1373 sizeof(de->setup_frame),
1374 PCI_DMA_TODEVICE);
/* Forget all skb bookkeeping now that everything is released. */
1379 memset(&de->rx_skb, 0, sizeof(struct ring_info) * DE_RX_RING_SIZE);
1380 memset(&de->tx_skb, 0, sizeof(struct ring_info) * DE_TX_RING_SIZE);
/* Clean the rings, then free the coherent DMA block and clear the
 * ring pointers. */
1383 static void de_free_rings (struct de_private *de)
1385 de_clean_rings(de);
1386 pci_free_consistent(de->pdev, DE_RING_BYTES, de->rx_ring, de->ring_dma);
1387 de->rx_ring = NULL;
1388 de->tx_ring = NULL;
/* net_device_ops .ndo_open: size the rx buffers from the MTU, allocate
 * the descriptor rings, mask interrupts, grab the (shared) IRQ, bring
 * up the hardware, then start the tx queue and the media poll timer.
 * Unwinds IRQ and rings on failure. */
1391 static int de_open (struct net_device *dev)
1393 struct de_private *de = netdev_priv(dev);
1394 int rc;
1396 if (netif_msg_ifup(de))
1397 printk(KERN_DEBUG "%s: enabling interface\n", dev->name);
/* Standard frames use PKT_BUF_SZ; larger MTUs get mtu + 32 slack. */
1399 de->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1401 rc = de_alloc_rings(de);
1402 if (rc) {
1403 dev_err(&dev->dev, "ring allocation failure, err=%d\n", rc);
1404 return rc;
/* Keep interrupts masked until de_init_hw() enables them. */
1407 dw32(IntrMask, 0);
1409 rc = request_irq(dev->irq, de_interrupt, IRQF_SHARED, dev->name, dev);
1410 if (rc) {
1411 dev_err(&dev->dev, "IRQ %d request failure, err=%d\n",
1412 dev->irq, rc);
1413 goto err_out_free;
1416 rc = de_init_hw(de);
1417 if (rc) {
1418 dev_err(&dev->dev, "h/w init failure, err=%d\n", rc);
1419 goto err_out_free_irq;
1422 netif_start_queue(dev);
1423 mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
1425 return 0;
1427 err_out_free_irq:
1428 free_irq(dev->irq, dev);
1429 err_out_free:
1430 de_free_rings(de);
1431 return rc;
/* net_device_ops .ndo_stop: stop the media timer, halt the hardware and
 * queue under the lock, release the IRQ and rings, and put the adapter
 * to sleep.  Always returns 0. */
1434 static int de_close (struct net_device *dev)
1436 struct de_private *de = netdev_priv(dev);
1437 unsigned long flags;
1439 if (netif_msg_ifdown(de))
1440 printk(KERN_DEBUG "%s: disabling interface\n", dev->name);
/* Must not race with the timer callback; wait for it to finish. */
1442 del_timer_sync(&de->media_timer);
1444 spin_lock_irqsave(&de->lock, flags);
1445 de_stop_hw(de);
1446 netif_stop_queue(dev);
1447 netif_carrier_off(dev);
1448 spin_unlock_irqrestore(&de->lock, flags);
1450 free_irq(dev->irq, dev);
1452 de_free_rings(de);
1453 de_adapter_sleep(de);
1454 return 0;
/* net_device_ops .ndo_tx_timeout: dump chip state, fully quiesce the
 * device (timer, IRQ, hardware), harvest error counters, then rebuild
 * the rings, re-init the hardware and restart the tx queue. */
1457 static void de_tx_timeout (struct net_device *dev)
1459 struct de_private *de = netdev_priv(dev);
1461 printk(KERN_DEBUG "%s: NIC status %08x mode %08x sia %08x desc %u/%u/%u\n",
1462 dev->name, dr32(MacStatus), dr32(MacMode), dr32(SIAStatus),
1463 de->rx_tail, de->tx_head, de->tx_tail);
1465 del_timer_sync(&de->media_timer);
/* Block the interrupt handler while we tear the hardware down. */
1467 disable_irq(dev->irq);
1468 spin_lock_irq(&de->lock);
1470 de_stop_hw(de);
1471 netif_stop_queue(dev);
1472 netif_carrier_off(dev);
1474 spin_unlock_irq(&de->lock);
1475 enable_irq(dev->irq);
1477 /* Update the error counts. */
1478 __de_get_stats(de);
/* Ensure any in-flight handler has finished before touching rings. */
1480 synchronize_irq(dev->irq);
1481 de_clean_rings(de);
1483 de_init_rings(de);
1485 de_init_hw(de);
1487 netif_wake_queue(dev);
/* Snapshot all CSRs (each register is 8 bytes apart) into buf for
 * ethtool; caller holds de->lock.  Reading CSR8 clears the RxMissed
 * counter, so feed the just-read value back into the stats. */
1490 static void __de_get_regs(struct de_private *de, u8 *buf)
1492 int i;
1493 u32 *rbuf = (u32 *)buf;
1495 /* read all CSRs */
1496 for (i = 0; i < DE_NUM_REGS; i++)
1497 rbuf[i] = dr32(i * 8);
1499 /* handle self-clearing RxMissed counter, CSR8 */
1500 de_rx_missed(de, rbuf[8]);
/* Fill an ethtool_cmd from the current media state; caller holds
 * de->lock.  AUI and BNC are reported with the pseudo-speeds 5 and 2
 * used elsewhere in this driver; TP reports SPEED_10.  Always 0. */
1503 static int __de_get_settings(struct de_private *de, struct ethtool_cmd *ecmd)
1505 ecmd->supported = de->media_supported;
1506 ecmd->transceiver = XCVR_INTERNAL;
1507 ecmd->phy_address = 0;
1508 ecmd->advertising = de->media_advertise;
1510 switch (de->media_type) {
1511 case DE_MEDIA_AUI:
1512 ecmd->port = PORT_AUI;
1513 ecmd->speed = 5;
1514 break;
1515 case DE_MEDIA_BNC:
1516 ecmd->port = PORT_BNC;
1517 ecmd->speed = 2;
1518 break;
1519 default:
1520 ecmd->port = PORT_TP;
1521 ecmd->speed = SPEED_10;
1522 break;
/* Duplex comes straight from the MacMode register. */
1525 if (dr32(MacMode) & FullDuplex)
1526 ecmd->duplex = DUPLEX_FULL;
1527 else
1528 ecmd->duplex = DUPLEX_HALF;
/* media_lock set means the user forced a media type (no autoneg). */
1530 if (de->media_lock)
1531 ecmd->autoneg = AUTONEG_DISABLE;
1532 else
1533 ecmd->autoneg = AUTONEG_ENABLE;
1535 /* ignore maxtxpkt, maxrxpkt for now */
1537 return 0;
/* Apply an ethtool_cmd; caller holds de->lock.  Validates speed/
 * duplex/port/transceiver/autoneg/advertising combinations (BNC is
 * rejected on the 21040, which does not support it), maps the port
 * selection to a DE_MEDIA_* type, and if anything actually changed,
 * drops the link and reprograms the media.  Returns 0 or -EINVAL. */
1540 static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd)
1542 u32 new_media;
1543 unsigned int media_lock;
/* 5 and 2 are this driver's pseudo-speeds for AUI and BNC. */
1545 if (ecmd->speed != SPEED_10 && ecmd->speed != 5 && ecmd->speed != 2)
1546 return -EINVAL;
1547 if (de->de21040 && ecmd->speed == 2)
1548 return -EINVAL;
1549 if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
1550 return -EINVAL;
1551 if (ecmd->port != PORT_TP && ecmd->port != PORT_AUI && ecmd->port != PORT_BNC)
1552 return -EINVAL;
1553 if (de->de21040 && ecmd->port == PORT_BNC)
1554 return -EINVAL;
1555 if (ecmd->transceiver != XCVR_INTERNAL)
1556 return -EINVAL;
1557 if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE)
1558 return -EINVAL;
1559 if (ecmd->advertising & ~de->media_supported)
1560 return -EINVAL;
1561 if (ecmd->autoneg == AUTONEG_ENABLE &&
1562 (!(ecmd->advertising & ADVERTISED_Autoneg)))
1563 return -EINVAL;
/* Map the requested port to an internal media type, insisting that
 * the matching ADVERTISED_* bit is also set. */
1565 switch (ecmd->port) {
1566 case PORT_AUI:
1567 new_media = DE_MEDIA_AUI;
1568 if (!(ecmd->advertising & ADVERTISED_AUI))
1569 return -EINVAL;
1570 break;
1571 case PORT_BNC:
1572 new_media = DE_MEDIA_BNC;
1573 if (!(ecmd->advertising & ADVERTISED_BNC))
1574 return -EINVAL;
1575 break;
1576 default:
1577 if (ecmd->autoneg == AUTONEG_ENABLE)
1578 new_media = DE_MEDIA_TP_AUTO;
1579 else if (ecmd->duplex == DUPLEX_FULL)
1580 new_media = DE_MEDIA_TP_FD;
1581 else
1582 new_media = DE_MEDIA_TP;
1583 if (!(ecmd->advertising & ADVERTISED_TP))
1584 return -EINVAL;
1585 if (!(ecmd->advertising & (ADVERTISED_10baseT_Full | ADVERTISED_10baseT_Half)))
1586 return -EINVAL;
1587 break;
/* Forced (non-autoneg) settings lock the media type. */
1590 media_lock = (ecmd->autoneg == AUTONEG_ENABLE) ? 0 : 1;
1592 if ((new_media == de->media_type) &&
1593 (media_lock == de->media_lock) &&
1594 (ecmd->advertising == de->media_advertise))
1595 return 0; /* nothing to change */
1597 de_link_down(de);
1598 mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
1599 de_stop_rxtx(de);
1601 de->media_type = new_media;
1602 de->media_lock = media_lock;
1603 de->media_advertise = ecmd->advertising;
1604 de_set_media(de);
1605 if (netif_running(de->dev))
1606 de_start_rxtx(de);
1608 return 0;
/* ethtool .get_drvinfo: report driver name/version, PCI bus address,
 * and the EEPROM dump size. */
1611 static void de_get_drvinfo (struct net_device *dev,struct ethtool_drvinfo *info)
1613 struct de_private *de = netdev_priv(dev);
1615 strcpy (info->driver, DRV_NAME);
1616 strcpy (info->version, DRV_VERSION);
1617 strcpy (info->bus_info, pci_name(de->pdev));
1618 info->eedump_len = DE_EEPROM_SIZE;
/* ethtool .get_regs_len: size of the register dump buffer. */
1621 static int de_get_regs_len(struct net_device *dev)
1623 return DE_REGS_SIZE;
/* ethtool .get_settings: locked wrapper around __de_get_settings(). */
1626 static int de_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1628 struct de_private *de = netdev_priv(dev);
1629 int rc;
1631 spin_lock_irq(&de->lock);
1632 rc = __de_get_settings(de, ecmd);
1633 spin_unlock_irq(&de->lock);
1635 return rc;
/* ethtool .set_settings: locked wrapper around __de_set_settings(). */
1638 static int de_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1640 struct de_private *de = netdev_priv(dev);
1641 int rc;
1643 spin_lock_irq(&de->lock);
1644 rc = __de_set_settings(de, ecmd);
1645 spin_unlock_irq(&de->lock);
1647 return rc;
/* ethtool .get_msglevel: return the current debug message mask. */
1650 static u32 de_get_msglevel(struct net_device *dev)
1652 struct de_private *de = netdev_priv(dev);
1654 return de->msg_enable;
/* ethtool .set_msglevel: set the debug message mask. */
1657 static void de_set_msglevel(struct net_device *dev, u32 msglvl)
1659 struct de_private *de = netdev_priv(dev);
1661 de->msg_enable = msglvl;
/* ethtool .get_eeprom: copy the EEPROM image cached at probe time.
 * Only a full-size dump from offset 0 with magic 0 is supported;
 * -EOPNOTSUPP if no image was cached (e.g. 21040 boards). */
1664 static int de_get_eeprom(struct net_device *dev,
1665 struct ethtool_eeprom *eeprom, u8 *data)
1667 struct de_private *de = netdev_priv(dev);
1669 if (!de->ee_data)
1670 return -EOPNOTSUPP;
1671 if ((eeprom->offset != 0) || (eeprom->magic != 0) ||
1672 (eeprom->len != DE_EEPROM_SIZE))
1673 return -EINVAL;
1674 memcpy(data, de->ee_data, eeprom->len);
1676 return 0;
/* ethtool .nway_reset: restart TP autonegotiation by pulsing
 * NWayRestart in the SIA status register.  Only valid when the current
 * media is DE_MEDIA_TP_AUTO; drops the carrier first if it was up. */
1679 static int de_nway_reset(struct net_device *dev)
1681 struct de_private *de = netdev_priv(dev);
1682 u32 status;
1684 if (de->media_type != DE_MEDIA_TP_AUTO)
1685 return -EINVAL;
1686 if (netif_carrier_ok(de->dev))
1687 de_link_down(de);
1689 status = dr32(SIAStatus);
1690 dw32(SIAStatus, (status & ~NWayState) | NWayRestart);
1691 if (netif_msg_link(de))
1692 dev_info(&de->dev->dev, "link nway restart, status %x,%x\n",
1693 status, dr32(SIAStatus));
1694 return 0;
/* ethtool .get_regs: dump the CSRs under the lock.  The version field
 * encodes DE_REGS_VER plus a flag for 21040 vs 21041. */
1697 static void de_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1698 void *data)
1700 struct de_private *de = netdev_priv(dev);
1702 regs->version = (DE_REGS_VER << 2) | de->de21040;
1704 spin_lock_irq(&de->lock);
1705 __de_get_regs(de, data);
1706 spin_unlock_irq(&de->lock);
/* ethtool operations table wired into the net_device at probe time. */
1709 static const struct ethtool_ops de_ethtool_ops = {
1710 .get_link = ethtool_op_get_link,
1711 .get_drvinfo = de_get_drvinfo,
1712 .get_regs_len = de_get_regs_len,
1713 .get_settings = de_get_settings,
1714 .set_settings = de_set_settings,
1715 .get_msglevel = de_get_msglevel,
1716 .set_msglevel = de_set_msglevel,
1717 .get_eeprom = de_get_eeprom,
1718 .nway_reset = de_nway_reset,
1719 .get_regs = de_get_regs,
/* Read the 6-byte MAC address from the 21040's serial ROM port
 * (ROMCmd), one byte at a time.  The chip signals "data not ready" by
 * returning a negative value, hence the bounded busy-wait per byte. */
1722 static void __devinit de21040_get_mac_address (struct de_private *de)
1724 unsigned i;
1726 dw32 (ROMCmd, 0); /* Reset the pointer with a dummy write. */
1727 udelay(5);
1729 for (i = 0; i < 6; i++) {
1730 int value, boguscnt = 100000;
1731 do {
1732 value = dr32(ROMCmd);
1733 rmb();
1734 } while (value < 0 && --boguscnt > 0);
1735 de->dev->dev_addr[i] = value;
1736 udelay(1);
1737 if (boguscnt <= 0)
1738 pr_warning(PFX "timeout reading 21040 MAC address byte %u\n", i);
/* Fill in the fixed media capabilities of a 21040 (TP default, TP/
 * TP-FD/AUI supported) and load the per-media CSR13/14/15 values from
 * the t21040_* tables; all other media slots are marked invalid. */
1742 static void __devinit de21040_get_media_info(struct de_private *de)
1744 unsigned int i;
1746 de->media_type = DE_MEDIA_TP;
1747 de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full |
1748 SUPPORTED_10baseT_Half | SUPPORTED_AUI;
1749 de->media_advertise = de->media_supported;
1751 for (i = 0; i < DE_MAX_MEDIA; i++) {
1752 switch (i) {
1753 case DE_MEDIA_AUI:
1754 case DE_MEDIA_TP:
1755 case DE_MEDIA_TP_FD:
1756 de->media[i].type = i;
1757 de->media[i].csr13 = t21040_csr13[i];
1758 de->media[i].csr14 = t21040_csr14[i];
1759 de->media[i].csr15 = t21040_csr15[i];
1760 break;
1761 default:
1762 de->media[i].type = DE_MEDIA_INVALID;
1763 break;
1768 /* Note: this routine returns extra data bits for size detection. */
/* Bit-bang one 16-bit word out of the serial EEPROM behind the ROMCmd
 * register: clock out the read command + address, then clock in 16
 * data bits.  addr_len is the EEPROM address width (6 or 8 bits). */
1769 static unsigned __devinit tulip_read_eeprom(void __iomem *regs, int location, int addr_len)
1771 int i;
1772 unsigned retval = 0;
1773 void __iomem *ee_addr = regs + ROMCmd;
1774 int read_cmd = location | (EE_READ_CMD << addr_len);
/* Select the chip. */
1776 writel(EE_ENB & ~EE_CS, ee_addr);
1777 writel(EE_ENB, ee_addr);
1779 /* Shift the read command bits out. */
1780 for (i = 4 + addr_len; i >= 0; i--) {
1781 short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
1782 writel(EE_ENB | dataval, ee_addr);
/* readl() after each write flushes posted PCI writes (timing). */
1783 readl(ee_addr);
1784 writel(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
1785 readl(ee_addr);
1786 retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
1788 writel(EE_ENB, ee_addr);
1789 readl(ee_addr);
/* Clock in the 16 data bits, MSB first. */
1791 for (i = 16; i > 0; i--) {
1792 writel(EE_ENB | EE_SHIFT_CLK, ee_addr);
1793 readl(ee_addr);
1794 retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
1795 writel(EE_ENB, ee_addr);
1796 readl(ee_addr);
1799 /* Terminate the EEPROM access. */
1800 writel(EE_ENB & ~EE_CS, ee_addr);
1801 return retval;
/* Probe-time SROM parsing for the 21041: download the whole EEPROM,
 * extract the MAC address, locate the controller-0 info leaf, read the
 * default media, then walk the media blocks to build the supported-
 * media table (with optional custom CSR13/14/15 values per media).
 * Any structural inconsistency falls through to bad_srom, which
 * assumes a maximal media set; both paths finish by filling default
 * CSR values for media that supplied none. */
1804 static void __devinit de21041_get_srom_info (struct de_private *de)
1806 unsigned i, sa_offset = 0, ofs;
1807 u8 ee_data[DE_EEPROM_SIZE + 6] = {};
/* Probe the EEPROM address width: a read at 0xff with 8 address bits
 * sets bit 0x40000 on 8-bit parts. */
1808 unsigned ee_addr_size = tulip_read_eeprom(de->regs, 0xff, 8) & 0x40000 ? 8 : 6;
1809 struct de_srom_info_leaf *il;
1810 void *bufp;
1812 /* download entire eeprom */
1813 for (i = 0; i < DE_EEPROM_WORDS; i++)
1814 ((__le16 *)ee_data)[i] =
1815 cpu_to_le16(tulip_read_eeprom(de->regs, i, ee_addr_size));
1817 /* DEC now has a specification but early board makers
1818 just put the address in the first EEPROM locations. */
1819 /* This does memcmp(eedata, eedata+16, 8) */
1821 #ifndef CONFIG_MIPS_COBALT
1823 for (i = 0; i < 8; i ++)
1824 if (ee_data[i] != ee_data[16+i])
1825 sa_offset = 20;
1827 #endif
1829 /* store MAC address */
1830 for (i = 0; i < 6; i ++)
1831 de->dev->dev_addr[i] = ee_data[i + sa_offset];
1833 /* get offset of controller 0 info leaf. ignore 2nd byte. */
1834 ofs = ee_data[SROMC0InfoLeaf];
/* Bounds check: the leaf plus at least one media block must fit. */
1835 if (ofs >= (sizeof(ee_data) - sizeof(struct de_srom_info_leaf) - sizeof(struct de_srom_media_block)))
1836 goto bad_srom;
1838 /* get pointer to info leaf */
1839 il = (struct de_srom_info_leaf *) &ee_data[ofs];
1841 /* paranoia checks */
1842 if (il->n_blocks == 0)
1843 goto bad_srom;
1844 if ((sizeof(ee_data) - ofs) <
1845 (sizeof(struct de_srom_info_leaf) + (sizeof(struct de_srom_media_block) * il->n_blocks)))
1846 goto bad_srom;
1848 /* get default media type */
1849 switch (get_unaligned(&il->default_media)) {
1850 case 0x0001: de->media_type = DE_MEDIA_BNC; break;
1851 case 0x0002: de->media_type = DE_MEDIA_AUI; break;
1852 case 0x0204: de->media_type = DE_MEDIA_TP_FD; break;
1853 default: de->media_type = DE_MEDIA_TP_AUTO; break;
1856 if (netif_msg_probe(de))
1857 pr_info("de%d: SROM leaf offset %u, default media %s\n",
1858 de->board_idx, ofs, media_name[de->media_type]);
1860 /* init SIA register values to defaults */
/* 0xffff marks "no custom value"; replaced in fill_defaults below. */
1861 for (i = 0; i < DE_MAX_MEDIA; i++) {
1862 de->media[i].type = DE_MEDIA_INVALID;
1863 de->media[i].csr13 = 0xffff;
1864 de->media[i].csr14 = 0xffff;
1865 de->media[i].csr15 = 0xffff;
1868 /* parse media blocks to see what medias are supported,
1869 * and if any custom CSR values are provided
1871 bufp = ((void *)il) + sizeof(*il);
1872 for (i = 0; i < il->n_blocks; i++) {
1873 struct de_srom_media_block *ib = bufp;
1874 unsigned idx;
1876 /* index based on media type in media block */
1877 switch(ib->opts & MediaBlockMask) {
1878 case 0: /* 10baseT */
1879 de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Half
1880 | SUPPORTED_Autoneg;
1881 idx = DE_MEDIA_TP;
1882 de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;
1883 break;
1884 case 1: /* BNC */
1885 de->media_supported |= SUPPORTED_BNC;
1886 idx = DE_MEDIA_BNC;
1887 break;
1888 case 2: /* AUI */
1889 de->media_supported |= SUPPORTED_AUI;
1890 idx = DE_MEDIA_AUI;
1891 break;
1892 case 4: /* 10baseT-FD */
1893 de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full
1894 | SUPPORTED_Autoneg;
1895 idx = DE_MEDIA_TP_FD;
1896 de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;
1897 break;
1898 default:
1899 goto bad_srom;
1902 de->media[idx].type = idx;
1904 if (netif_msg_probe(de))
1905 pr_info("de%d: media block #%u: %s",
1906 de->board_idx, i,
1907 media_name[de->media[idx].type]);
/* Media blocks are variable length: advance past opts, plus the
 * three CSR words if the block carries custom CSR values. */
1909 bufp += sizeof (ib->opts);
1911 if (ib->opts & MediaCustomCSRs) {
1912 de->media[idx].csr13 = get_unaligned(&ib->csr13);
1913 de->media[idx].csr14 = get_unaligned(&ib->csr14);
1914 de->media[idx].csr15 = get_unaligned(&ib->csr15);
1915 bufp += sizeof(ib->csr13) + sizeof(ib->csr14) +
1916 sizeof(ib->csr15);
1918 if (netif_msg_probe(de))
1919 pr_cont(" (%x,%x,%x)\n",
1920 de->media[idx].csr13,
1921 de->media[idx].csr14,
1922 de->media[idx].csr15);
1924 } else if (netif_msg_probe(de))
1925 pr_cont("\n");
/* Stop before running off the end of the EEPROM image. */
1927 if (bufp > ((void *)&ee_data[DE_EEPROM_SIZE - 3]))
1928 break;
1931 de->media_advertise = de->media_supported;
1933 fill_defaults:
1934 /* fill in defaults, for cases where custom CSRs not used */
1935 for (i = 0; i < DE_MAX_MEDIA; i++) {
1936 if (de->media[i].csr13 == 0xffff)
1937 de->media[i].csr13 = t21041_csr13[i];
1938 if (de->media[i].csr14 == 0xffff) {
1939 /* autonegotiation is broken at least on some chip
1940 revisions - rev. 0x21 works, 0x11 does not */
1941 if (de->pdev->revision < 0x20)
1942 de->media[i].csr14 = t21041_csr14_brk[i];
1943 else
1944 de->media[i].csr14 = t21041_csr14[i];
1946 if (de->media[i].csr15 == 0xffff)
1947 de->media[i].csr15 = t21041_csr15[i];
/* Keep a copy of the EEPROM for the ethtool get_eeprom hook. */
1950 de->ee_data = kmemdup(&ee_data[0], DE_EEPROM_SIZE, GFP_KERNEL);
1952 return;
1954 bad_srom:
1955 /* for error cases, it's ok to assume we support all these */
1956 for (i = 0; i < DE_MAX_MEDIA; i++)
1957 de->media[i].type = i;
1958 de->media_supported =
1959 SUPPORTED_10baseT_Half |
1960 SUPPORTED_10baseT_Full |
1961 SUPPORTED_Autoneg |
1962 SUPPORTED_TP |
1963 SUPPORTED_AUI |
1964 SUPPORTED_BNC;
1965 goto fill_defaults;
/* Network device operations; generic eth_* helpers cover MTU, MAC
 * address and address validation. */
1968 static const struct net_device_ops de_netdev_ops = {
1969 .ndo_open = de_open,
1970 .ndo_stop = de_close,
1971 .ndo_set_multicast_list = de_set_rx_mode,
1972 .ndo_start_xmit = de_start_xmit,
1973 .ndo_get_stats = de_get_stats,
1974 .ndo_tx_timeout = de_tx_timeout,
1975 .ndo_change_mtu = eth_change_mtu,
1976 .ndo_set_mac_address = eth_mac_addr,
1977 .ndo_validate_addr = eth_validate_addr,
/* PCI probe: allocate the net_device, set up private state and the
 * media timer, enable and claim the PCI device, validate IRQ and
 * MMIO BAR 1, map the CSRs, reset the MAC, read the MAC address and
 * media capabilities (21040 via serial ROM, 21041 via SROM), register
 * the netdev, then leave the adapter asleep until de_open().
 * ent->driver_data distinguishes 21040 (0) from 21041. */
1980 static int __devinit de_init_one (struct pci_dev *pdev,
1981 const struct pci_device_id *ent)
1983 struct net_device *dev;
1984 struct de_private *de;
1985 int rc;
1986 void __iomem *regs;
1987 unsigned long pciaddr;
1988 static int board_idx = -1;
1990 board_idx++;
1992 #ifndef MODULE
1993 if (board_idx == 0)
1994 printk("%s", version);
1995 #endif
1997 /* allocate a new ethernet device structure, and fill in defaults */
1998 dev = alloc_etherdev(sizeof(struct de_private));
1999 if (!dev)
2000 return -ENOMEM;
2002 dev->netdev_ops = &de_netdev_ops;
2003 SET_NETDEV_DEV(dev, &pdev->dev);
2004 dev->ethtool_ops = &de_ethtool_ops;
2005 dev->watchdog_timeo = TX_TIMEOUT;
2007 de = netdev_priv(dev);
2008 de->de21040 = ent->driver_data == 0 ? 1 : 0;
2009 de->pdev = pdev;
2010 de->dev = dev;
2011 de->msg_enable = (debug < 0 ? DE_DEF_MSG_ENABLE : debug);
2012 de->board_idx = board_idx;
2013 spin_lock_init (&de->lock);
/* The media poll callback differs between the two chips. */
2014 init_timer(&de->media_timer);
2015 if (de->de21040)
2016 de->media_timer.function = de21040_media_timer;
2017 else
2018 de->media_timer.function = de21041_media_timer;
2019 de->media_timer.data = (unsigned long) de;
2021 netif_carrier_off(dev);
2022 netif_stop_queue(dev);
2024 /* wake up device, assign resources */
2025 rc = pci_enable_device(pdev);
2026 if (rc)
2027 goto err_out_free;
2029 /* reserve PCI resources to ensure driver atomicity */
2030 rc = pci_request_regions(pdev, DRV_NAME);
2031 if (rc)
2032 goto err_out_disable;
2034 /* check for invalid IRQ value */
2035 if (pdev->irq < 2) {
2036 rc = -EIO;
2037 pr_err(PFX "invalid irq (%d) for pci dev %s\n",
2038 pdev->irq, pci_name(pdev));
2039 goto err_out_res;
2042 dev->irq = pdev->irq;
2044 /* obtain and check validity of PCI I/O address */
2045 pciaddr = pci_resource_start(pdev, 1);
2046 if (!pciaddr) {
2047 rc = -EIO;
2048 pr_err(PFX "no MMIO resource for pci dev %s\n", pci_name(pdev));
2049 goto err_out_res;
2051 if (pci_resource_len(pdev, 1) < DE_REGS_SIZE) {
2052 rc = -EIO;
2053 pr_err(PFX "MMIO resource (%llx) too small on pci dev %s\n",
2054 (unsigned long long)pci_resource_len(pdev, 1),
2055 pci_name(pdev));
2056 goto err_out_res;
2059 /* remap CSR registers */
2060 regs = ioremap_nocache(pciaddr, DE_REGS_SIZE);
2061 if (!regs) {
2062 rc = -EIO;
2063 pr_err(PFX "Cannot map PCI MMIO (%llx@%lx) on pci dev %s\n",
2064 (unsigned long long)pci_resource_len(pdev, 1),
2065 pciaddr, pci_name(pdev));
2066 goto err_out_res;
2068 dev->base_addr = (unsigned long) regs;
2069 de->regs = regs;
2071 de_adapter_wake(de);
2073 /* make sure hardware is not running */
2074 rc = de_reset_mac(de);
2075 if (rc) {
2076 pr_err(PFX "Cannot reset MAC, pci dev %s\n", pci_name(pdev));
2077 goto err_out_iomap;
2080 /* get MAC address, initialize default media type and
2081 * get list of supported media
2083 if (de->de21040) {
2084 de21040_get_mac_address(de);
2085 de21040_get_media_info(de);
2086 } else {
2087 de21041_get_srom_info(de);
2090 /* register new network interface with kernel */
2091 rc = register_netdev(dev);
2092 if (rc)
2093 goto err_out_iomap;
2095 /* print info about board and interface just registered */
2096 dev_info(&dev->dev, "%s at 0x%lx, %pM, IRQ %d\n",
2097 de->de21040 ? "21040" : "21041",
2098 dev->base_addr,
2099 dev->dev_addr,
2100 dev->irq);
2102 pci_set_drvdata(pdev, dev);
2104 /* enable busmastering */
2105 pci_set_master(pdev);
2107 /* put adapter to sleep */
2108 de_adapter_sleep(de);
2110 return 0;
/* Error unwind: release resources in reverse order of acquisition. */
2112 err_out_iomap:
2113 kfree(de->ee_data);
2114 iounmap(regs);
2115 err_out_res:
2116 pci_release_regions(pdev);
2117 err_out_disable:
2118 pci_disable_device(pdev);
2119 err_out_free:
2120 free_netdev(dev);
2121 return rc;
/* PCI remove: unregister the netdev and release everything acquired
 * in de_init_one(), in reverse order. */
2124 static void __devexit de_remove_one (struct pci_dev *pdev)
2126 struct net_device *dev = pci_get_drvdata(pdev);
2127 struct de_private *de = netdev_priv(dev);
2129 BUG_ON(!dev);
2130 unregister_netdev(dev);
2131 kfree(de->ee_data);
2132 iounmap(de->regs);
2133 pci_release_regions(pdev);
2134 pci_disable_device(pdev);
2135 pci_set_drvdata(pdev, NULL);
2136 free_netdev(dev);
2139 #ifdef CONFIG_PM
/* PCI suspend: if the interface is up, quiesce it exactly like
 * de_tx_timeout (timer, IRQ, hardware, stats, rings) and put the
 * adapter to sleep; otherwise just detach the device.  Done under
 * rtnl_lock to serialize with open/close. */
2141 static int de_suspend (struct pci_dev *pdev, pm_message_t state)
2143 struct net_device *dev = pci_get_drvdata (pdev);
2144 struct de_private *de = netdev_priv(dev);
2146 rtnl_lock();
2147 if (netif_running (dev)) {
2148 del_timer_sync(&de->media_timer);
2150 disable_irq(dev->irq);
2151 spin_lock_irq(&de->lock);
2153 de_stop_hw(de);
2154 netif_stop_queue(dev);
2155 netif_device_detach(dev);
2156 netif_carrier_off(dev);
2158 spin_unlock_irq(&de->lock);
2159 enable_irq(dev->irq);
2161 /* Update the error counts. */
2162 __de_get_stats(de);
2164 synchronize_irq(dev->irq);
2165 de_clean_rings(de);
2167 de_adapter_sleep(de);
2168 pci_disable_device(pdev);
2169 } else {
2170 netif_device_detach(dev);
2172 rtnl_unlock();
2173 return 0;
/* PCI resume: re-enable the device and rebuild rings/hardware if the
 * interface was running, then reattach.  A device that was never
 * detached is left alone.  NOTE(review): always returns 0, even when
 * pci_enable_device() fails — the error is only logged. */
2176 static int de_resume (struct pci_dev *pdev)
2178 struct net_device *dev = pci_get_drvdata (pdev);
2179 struct de_private *de = netdev_priv(dev);
2180 int retval = 0;
2182 rtnl_lock();
2183 if (netif_device_present(dev))
2184 goto out;
2185 if (!netif_running(dev))
2186 goto out_attach;
2187 if ((retval = pci_enable_device(pdev))) {
2188 dev_err(&dev->dev, "pci_enable_device failed in resume\n");
2189 goto out;
2191 pci_set_master(pdev);
2192 de_init_rings(de);
2193 de_init_hw(de);
2194 out_attach:
2195 netif_device_attach(dev);
2196 out:
2197 rtnl_unlock();
2198 return 0;
2201 #endif /* CONFIG_PM */
/* PCI driver glue; suspend/resume only when power management is on. */
2203 static struct pci_driver de_driver = {
2204 .name = DRV_NAME,
2205 .id_table = de_pci_tbl,
2206 .probe = de_init_one,
2207 .remove = __devexit_p(de_remove_one),
2208 #ifdef CONFIG_PM
2209 .suspend = de_suspend,
2210 .resume = de_resume,
2211 #endif
/* Module init: print the version banner (module builds only) and
 * register the PCI driver. */
2214 static int __init de_init (void)
2216 #ifdef MODULE
2217 printk("%s", version);
2218 #endif
2219 return pci_register_driver(&de_driver);
/* Module exit: unregister the PCI driver. */
2222 static void __exit de_exit (void)
2224 pci_unregister_driver (&de_driver);
/* Module entry/exit registration. */
2227 module_init(de_init);
2228 module_exit(de_exit);