/*
   sis190.c: Silicon Integrated Systems SiS190 ethernet driver

   Copyright (c) 2003 K.M. Liu <kmliu@sis.com>
   Copyright (c) 2003, 2004 Jeff Garzik <jgarzik@pobox.com>
   Copyright (c) 2003, 2004, 2005 Francois Romieu <romieu@fr.zoreil.com>

   Based on r8169.c, tg3.c, 8139cp.c, skge.c, epic100.c and SiS 190/191
   genuine driver.

   This software may be used and distributed according to the terms of
   the GNU General Public License (GPL), incorporated herein by reference.
   Drivers based on or derived from this code fall under the GPL and must
   retain the authorship, copyright and license notice.  This file is not
   a complete program and may only be used when the entire operating
   system is licensed under the GPL.

   See the file COPYING in this distribution for more information.

*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/pci.h>
#include <linux/mii.h>
#include <linux/delay.h>
#include <linux/crc32.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <asm/irq.h>
#define PHY_MAX_ADDR	32
#define PHY_ID_ANY	0x1f
#define MII_REG_ANY	0x1f

#define DRV_VERSION	"1.4"
#define DRV_NAME	"sis190"
#define SIS190_DRIVER_NAME	DRV_NAME " Gigabit Ethernet driver " DRV_VERSION

#define sis190_rx_skb			netif_rx
#define sis190_rx_quota(count, quota)	count

#define MAC_ADDR_LEN	6

#define NUM_TX_DESC	64	/* [8..1024] */
#define NUM_RX_DESC	64	/* [8..8192] */
#define TX_RING_BYTES	(NUM_TX_DESC * sizeof(struct TxDesc))
#define RX_RING_BYTES	(NUM_RX_DESC * sizeof(struct RxDesc))
#define RX_BUF_SIZE	1536
#define RX_BUF_MASK	0xfff8

#define SIS190_REGS_SIZE	0x80
#define SIS190_TX_TIMEOUT	(6*HZ)
#define SIS190_PHY_TIMEOUT	(10*HZ)
#define SIS190_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
				 NETIF_MSG_LINK | NETIF_MSG_IFUP | \
				 NETIF_MSG_IFDOWN)

/* Enhanced PHY access register bit definitions */
#define EhnMIIread	0x0000
#define EhnMIIwrite	0x0020
#define EhnMIIdataShift	16
#define EhnMIIpmdShift	6	/* 7016 only */
#define EhnMIIregShift	11
#define EhnMIIreq	0x0010
#define EhnMIInotDone	0x0010

/* Write/read MMIO register */
#define SIS_W8(reg, val)	writeb ((val), ioaddr + (reg))
#define SIS_W16(reg, val)	writew ((val), ioaddr + (reg))
#define SIS_W32(reg, val)	writel ((val), ioaddr + (reg))
#define SIS_R8(reg)		readb (ioaddr + (reg))
#define SIS_R16(reg)		readw (ioaddr + (reg))
#define SIS_R32(reg)		readl (ioaddr + (reg))

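/* Dummy read of IntrControl; used below to flush posted MMIO writes. */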
#define SIS_PCI_COMMIT()	SIS_R32(IntrControl)

enum sis190_registers {
	TxControl = 0x00,
	TxDescStartAddr = 0x04,
	rsv0 = 0x08,		// reserved
	TxSts = 0x0c,		// unused (Control/Status)
	RxControl = 0x10,
	RxDescStartAddr = 0x14,
	rsv1 = 0x18,		// reserved
	RxSts = 0x1c,		// unused
	IntrStatus = 0x20,
	IntrMask = 0x24,
	IntrControl = 0x28,
	IntrTimer = 0x2c,	// unused (Interrupt Timer)
	PMControl = 0x30,	// unused (Power Mgmt Control/Status)
	rsv2 = 0x34,		// reserved
	ROMControl = 0x38,
	ROMInterface = 0x3c,
	StationControl = 0x40,
	GMIIControl = 0x44,
	GIoCR = 0x48,		// unused (GMAC IO Compensation)
	GIoCtrl = 0x4c,		// unused (GMAC IO Control)
	TxMacControl = 0x50,
	TxLimit = 0x54,		// unused (Tx MAC Timer/TryLimit)
	RGDelay = 0x58,		// unused (RGMII Tx Internal Delay)
	rsv3 = 0x5c,		// reserved
	RxMacControl = 0x60,
	RxMacAddr = 0x62,
	RxHashTable = 0x68,
	// Undocumented = 0x6c,
	RxWolCtrl = 0x70,
	RxWolData = 0x74,	// unused (Rx WOL Data Access)
	RxMPSControl = 0x78,	// unused (Rx MPS Control)
	rsv4 = 0x7c,		// reserved
};

enum sis190_register_content {
	/* IntrStatus */
	SoftInt = 0x40000000,		// unused
	Timeup = 0x20000000,		// unused
	PauseFrame = 0x00080000,	// unused
	MagicPacket = 0x00040000,	// unused
	WakeupFrame = 0x00020000,	// unused
	LinkChange = 0x00010000,
	RxQEmpty = 0x00000080,
	RxQInt = 0x00000040,
	TxQ1Empty = 0x00000020,		// unused
	TxQ1Int = 0x00000010,
	TxQ0Empty = 0x00000008,		// unused
	TxQ0Int = 0x00000004,
	RxHalt = 0x00000002,
	TxHalt = 0x00000001,

	/* {Rx/Tx}CmdBits */
	CmdReset = 0x10,
	CmdRxEnb = 0x08,		// unused
	CmdTxEnb = 0x01,
	RxBufEmpty = 0x01,		// unused

	/* Cfg9346Bits */
	Cfg9346_Lock = 0x00,		// unused
	Cfg9346_Unlock = 0xc0,		// unused

	/* RxMacControl */
	AcceptErr = 0x20,		// unused
	AcceptRunt = 0x10,		// unused
	AcceptBroadcast = 0x0800,
	AcceptMulticast = 0x0400,
	AcceptMyPhys = 0x0200,
	AcceptAllPhys = 0x0100,

	/* RxConfigBits */
	RxCfgFIFOShift = 13,
	RxCfgDMAShift = 8,		// 0x1a in RxControl ?

	/* TxConfigBits */
	TxInterFrameGapShift = 24,
	TxDMAShift = 8,			/* DMA burst value (0-7) is shift this many bits */

	LinkStatus = 0x02,		// unused
	FullDup = 0x01,			// unused

	/* TBICSRBit */
	TBILinkOK = 0x02000000,		// unused
};

struct TxDesc {
	__le32 PSize;
	__le32 status;
	__le32 addr;
	__le32 size;
};

struct RxDesc {
	__le32 PSize;
	__le32 status;
	__le32 addr;
	__le32 size;
};

enum _DescStatusBit {
	/* _Desc.status */
	OWNbit = 0x80000000,	// RXOWN/TXOWN
	INTbit = 0x40000000,	// RXINT/TXINT
	CRCbit = 0x00020000,	// CRCOFF/CRCEN
	PADbit = 0x00010000,	// PREADD/PADEN
	/* _Desc.size */
	RingEnd = 0x80000000,
	/* TxDesc.status */
	LSEN = 0x08000000,	// TSO ? -- FR
	IPCS = 0x04000000,
	TCPCS = 0x02000000,
	UDPCS = 0x01000000,
	BSTEN = 0x00800000,
	EXTEN = 0x00400000,
	DEFEN = 0x00200000,
	BKFEN = 0x00100000,
	CRSEN = 0x00080000,
	COLEN = 0x00040000,
	THOL3 = 0x30000000,
	THOL2 = 0x20000000,
	THOL1 = 0x10000000,
	THOL0 = 0x00000000,

	WND = 0x00080000,
	TABRT = 0x00040000,
	FIFO = 0x00020000,
	LINK = 0x00010000,
	ColCountMask = 0x0000ffff,
	/* RxDesc.status */
	IPON = 0x20000000,
	TCPON = 0x10000000,
	UDPON = 0x08000000,
	Wakup = 0x00400000,
	Magic = 0x00200000,
	Pause = 0x00100000,
	DEFbit = 0x00200000,
	BCAST = 0x000c0000,
	MCAST = 0x00080000,
	UCAST = 0x00040000,
	/* RxDesc.PSize */
	TAGON = 0x80000000,
	RxDescCountMask = 0x7f000000,	// multi-desc pkt when > 1 ? -- FR
	ABORT = 0x00800000,
	SHORT = 0x00400000,
	LIMIT = 0x00200000,
	MIIER = 0x00100000,
	OVRUN = 0x00080000,
	NIBON = 0x00040000,
	COLON = 0x00020000,
	CRCOK = 0x00010000,
	RxSizeMask = 0x0000ffff
	/*
	 * The asic could apparently do vlan, TSO, jumbo (sis191 only) and
	 * provide two (unused with Linux) Tx queues. No publicly
	 * available documentation alas.
	 */
};

enum sis190_eeprom_access_register_bits {
	EECS = 0x00000001,	// unused
	EECLK = 0x00000002,	// unused
	EEDO = 0x00000008,	// unused
	EEDI = 0x00000004,	// unused
	EEREQ = 0x00000080,
	EEROP = 0x00000200,
	EEWOP = 0x00000100	// unused
};

/* EEPROM Addresses */
enum sis190_eeprom_address {
	EEPROMSignature = 0x00,
	EEPROMCLK = 0x01,	// unused
	EEPROMInfo = 0x02,
	EEPROMMACAddr = 0x03
};

enum sis190_feature {
	F_HAS_RGMII = 1,
	F_PHY_88E1111 = 2,
	F_PHY_BCM5461 = 4
};

struct sis190_private {
	void __iomem *mmio_addr;
	struct pci_dev *pci_dev;
	struct net_device *dev;
	spinlock_t lock;
	u32 rx_buf_sz;
	u32 cur_rx;
	u32 cur_tx;
	u32 dirty_rx;
	u32 dirty_tx;
	dma_addr_t rx_dma;
	dma_addr_t tx_dma;
	struct RxDesc *RxDescRing;
	struct TxDesc *TxDescRing;
	struct sk_buff *Rx_skbuff[NUM_RX_DESC];
	struct sk_buff *Tx_skbuff[NUM_TX_DESC];
	struct work_struct phy_task;
	struct timer_list timer;
	u32 msg_enable;
	struct mii_if_info mii_if;
	struct list_head first_phy;
	u32 features;
	u32 negotiated_lpa;
	enum {
		LNK_OFF,
		LNK_ON,
		LNK_AUTONEG,
	} link_status;
};

struct sis190_phy {
	struct list_head list;
	int phy_id;
	u16 id[2];
	u16 status;
	u8 type;
};

enum sis190_phy_type {
	UNKNOWN = 0x00,
	HOME = 0x01,
	LAN = 0x02,
	MIX = 0x03
};

static struct mii_chip_info {
	const char *name;
	u16 id[2];
	unsigned int type;
	u32 feature;
} mii_chip_table[] = {
	{ "Atheros PHY",          { 0x004d, 0xd010 }, LAN, 0 },
	{ "Atheros PHY AR8012",   { 0x004d, 0xd020 }, LAN, 0 },
	{ "Broadcom PHY BCM5461", { 0x0020, 0x60c0 }, LAN, F_PHY_BCM5461 },
	{ "Broadcom PHY AC131",   { 0x0143, 0xbc70 }, LAN, 0 },
	{ "Agere PHY ET1101B",    { 0x0282, 0xf010 }, LAN, 0 },
	{ "Marvell PHY 88E1111",  { 0x0141, 0x0cc0 }, LAN, F_PHY_88E1111 },
	{ "Realtek PHY RTL8201",  { 0x0000, 0x8200 }, LAN, 0 },
	{ NULL, }
};

static const struct {
	const char *name;
} sis_chip_info[] = {
	{ "SiS 190 PCI Fast Ethernet adapter" },
	{ "SiS 191 PCI Gigabit Ethernet adapter" },
};

static DEFINE_PCI_DEVICE_TABLE(sis190_pci_tbl) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
	{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0191), 0, 0, 1 },
	{ 0, },
};

MODULE_DEVICE_TABLE(pci, sis190_pci_tbl);

static int rx_copybreak = 200;

static struct {
	u32 msg_enable;
} debug = { -1 };

MODULE_DESCRIPTION("SiS sis190/191 Gigabit Ethernet driver");
module_param(rx_copybreak, int, 0);
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
module_param_named(debug, debug.msg_enable, int, 0);
MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
MODULE_AUTHOR("K.M. Liu <kmliu@sis.com>, Ueimor <romieu@fr.zoreil.com>");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");

static const u32 sis190_intr_mask =
	RxQEmpty | RxQInt | TxQ1Int | TxQ0Int | RxHalt | TxHalt | LinkChange;

/*
 * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
 * The chips use a 64 element hash table based on the Ethernet CRC.
 */
static const int multicast_filter_limit = 32;

static void __mdio_cmd(void __iomem *ioaddr, u32 ctl)
{
	unsigned int i;

	SIS_W32(GMIIControl, ctl);

	msleep(1);

	for (i = 0; i < 100; i++) {
		if (!(SIS_R32(GMIIControl) & EhnMIInotDone))
			break;
		msleep(1);
	}

	if (i > 99)
		pr_err("PHY command failed !\n");
}

static void mdio_write(void __iomem *ioaddr, int phy_id, int reg, int val)
{
	__mdio_cmd(ioaddr, EhnMIIreq | EhnMIIwrite |
		(((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift) |
		(((u32) val) << EhnMIIdataShift));
}

static int mdio_read(void __iomem *ioaddr, int phy_id, int reg)
{
	__mdio_cmd(ioaddr, EhnMIIreq | EhnMIIread |
		(((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift));

	return (u16) (SIS_R32(GMIIControl) >> EhnMIIdataShift);
}

static void __mdio_write(struct net_device *dev, int phy_id, int reg, int val)
{
	struct sis190_private *tp = netdev_priv(dev);

	mdio_write(tp->mmio_addr, phy_id, reg, val);
}

static int __mdio_read(struct net_device *dev, int phy_id, int reg)
{
	struct sis190_private *tp = netdev_priv(dev);

	return mdio_read(tp->mmio_addr, phy_id, reg);
}

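/*
 * Some MII status bits (e.g. BMSR link status) are latched: read the
 * register twice so that the second read reports the current state.
 */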
static u16 mdio_read_latched(void __iomem *ioaddr, int phy_id, int reg)
{
	mdio_read(ioaddr, phy_id, reg);
	return mdio_read(ioaddr, phy_id, reg);
}

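/*
 * EEPROM access handshake: post the word address with EEREQ | EEROP set,
 * poll until the chip clears EEREQ, then fetch the data from the upper
 * 16 bits of ROMInterface.
 */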
static u16 __devinit sis190_read_eeprom(void __iomem *ioaddr, u32 reg)
{
	u16 data = 0xffff;
	unsigned int i;

	if (!(SIS_R32(ROMControl) & 0x0002))
		return 0;

	SIS_W32(ROMInterface, EEREQ | EEROP | (reg << 10));

	for (i = 0; i < 200; i++) {
		if (!(SIS_R32(ROMInterface) & EEREQ)) {
			data = (SIS_R32(ROMInterface) & 0xffff0000) >> 16;
			break;
		}
		msleep(1);
	}

	return data;
}

static void sis190_irq_mask_and_ack(void __iomem *ioaddr)
{
	SIS_W32(IntrMask, 0x00);
	SIS_W32(IntrStatus, 0xffffffff);
	SIS_PCI_COMMIT();
}

static void sis190_asic_down(void __iomem *ioaddr)
{
	/* Stop the chip's Tx and Rx DMA processes. */

	SIS_W32(TxControl, 0x1a00);
	SIS_W32(RxControl, 0x1a00);

	sis190_irq_mask_and_ack(ioaddr);
}

static void sis190_mark_as_last_descriptor(struct RxDesc *desc)
{
	desc->size |= cpu_to_le32(RingEnd);
}

static inline void sis190_give_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
{
	u32 eor = le32_to_cpu(desc->size) & RingEnd;

	desc->PSize = 0x0;
	desc->size = cpu_to_le32((rx_buf_sz & RX_BUF_MASK) | eor);
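	/* Make the buffer address/size visible before handing ownership to the NIC. */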
	wmb();
	desc->status = cpu_to_le32(OWNbit | INTbit);
}

static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
				      u32 rx_buf_sz)
{
	desc->addr = cpu_to_le32(mapping);
	sis190_give_to_asic(desc, rx_buf_sz);
}

static inline void sis190_make_unusable_by_asic(struct RxDesc *desc)
{
	desc->PSize = 0x0;
	desc->addr = cpu_to_le32(0xdeadbeef);
	desc->size &= cpu_to_le32(RingEnd);
	wmb();
	desc->status = 0x0;
}

static struct sk_buff *sis190_alloc_rx_skb(struct sis190_private *tp,
					   struct RxDesc *desc)
{
	u32 rx_buf_sz = tp->rx_buf_sz;
	struct sk_buff *skb;
	dma_addr_t mapping;

	skb = netdev_alloc_skb(tp->dev, rx_buf_sz);
	if (unlikely(!skb))
		goto skb_alloc_failed;
	mapping = pci_map_single(tp->pci_dev, skb->data, tp->rx_buf_sz,
				 PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(tp->pci_dev, mapping))
		goto out;
	sis190_map_to_asic(desc, mapping, rx_buf_sz);

	return skb;

out:
	dev_kfree_skb_any(skb);
skb_alloc_failed:
	sis190_make_unusable_by_asic(desc);
	return NULL;
}

static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
			  u32 start, u32 end)
{
	u32 cur;

	for (cur = start; cur < end; cur++) {
		unsigned int i = cur % NUM_RX_DESC;

		if (tp->Rx_skbuff[i])
			continue;

		tp->Rx_skbuff[i] = sis190_alloc_rx_skb(tp, tp->RxDescRing + i);

		if (!tp->Rx_skbuff[i])
			break;
	}
	return cur - start;
}

static bool sis190_try_rx_copy(struct sis190_private *tp,
			       struct sk_buff **sk_buff, int pkt_size,
			       dma_addr_t addr)
{
	struct sk_buff *skb;
	bool done = false;

	if (pkt_size >= rx_copybreak)
		goto out;

	skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
	if (!skb)
		goto out;

	pci_dma_sync_single_for_cpu(tp->pci_dev, addr, tp->rx_buf_sz,
				    PCI_DMA_FROMDEVICE);
	skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size);
	*sk_buff = skb;
	done = true;
out:
	return done;
}

static inline int sis190_rx_pkt_err(u32 status, struct net_device_stats *stats)
{
#define ErrMask	(OVRUN | SHORT | LIMIT | MIIER | NIBON | COLON | ABORT)

	if ((status & CRCOK) && !(status & ErrMask))
		return 0;

	if (!(status & CRCOK))
		stats->rx_crc_errors++;
	else if (status & OVRUN)
		stats->rx_over_errors++;
	else if (status & (SHORT | LIMIT))
		stats->rx_length_errors++;
	else if (status & (MIIER | NIBON | COLON))
		stats->rx_frame_errors++;

	stats->rx_errors++;
	return -1;
}

static int sis190_rx_interrupt(struct net_device *dev,
			       struct sis190_private *tp, void __iomem *ioaddr)
{
	struct net_device_stats *stats = &dev->stats;
	u32 rx_left, cur_rx = tp->cur_rx;
	u32 delta, count;

	rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
	rx_left = sis190_rx_quota(rx_left, (u32) dev->quota);

	for (; rx_left > 0; rx_left--, cur_rx++) {
		unsigned int entry = cur_rx % NUM_RX_DESC;
		struct RxDesc *desc = tp->RxDescRing + entry;
		u32 status;

		if (le32_to_cpu(desc->status) & OWNbit)
			break;

		status = le32_to_cpu(desc->PSize);

		//netif_info(tp, intr, dev, "Rx PSize = %08x\n", status);

		if (sis190_rx_pkt_err(status, stats) < 0)
			sis190_give_to_asic(desc, tp->rx_buf_sz);
		else {
			struct sk_buff *skb = tp->Rx_skbuff[entry];
			dma_addr_t addr = le32_to_cpu(desc->addr);
			int pkt_size = (status & RxSizeMask) - 4;
			struct pci_dev *pdev = tp->pci_dev;

			if (unlikely(pkt_size > tp->rx_buf_sz)) {
				netif_info(tp, intr, dev,
					   "(frag) status = %08x\n", status);
				stats->rx_dropped++;
				stats->rx_length_errors++;
				sis190_give_to_asic(desc, tp->rx_buf_sz);
				continue;
			}

			if (sis190_try_rx_copy(tp, &skb, pkt_size, addr)) {
				pci_dma_sync_single_for_device(pdev, addr,
					tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
				sis190_give_to_asic(desc, tp->rx_buf_sz);
			} else {
				pci_unmap_single(pdev, addr, tp->rx_buf_sz,
						 PCI_DMA_FROMDEVICE);
				tp->Rx_skbuff[entry] = NULL;
				sis190_make_unusable_by_asic(desc);
			}

			skb_put(skb, pkt_size);
			skb->protocol = eth_type_trans(skb, dev);

			sis190_rx_skb(skb);

			stats->rx_packets++;
			stats->rx_bytes += pkt_size;
			if ((status & BCAST) == MCAST)
				stats->multicast++;
		}
	}
	count = cur_rx - tp->cur_rx;
	tp->cur_rx = cur_rx;

	delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
	if (!delta && count)
		netif_info(tp, intr, dev, "no Rx buffer allocated\n");
	tp->dirty_rx += delta;

	if ((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx)
		netif_emerg(tp, intr, dev, "Rx buffers exhausted\n");

	return count;
}

static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
				struct TxDesc *desc)
{
	unsigned int len;

	len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;

	pci_unmap_single(pdev, le32_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);

	memset(desc, 0x00, sizeof(*desc));
}

static inline int sis190_tx_pkt_err(u32 status, struct net_device_stats *stats)
{
#define TxErrMask	(WND | TABRT | FIFO | LINK)

	if (!unlikely(status & TxErrMask))
		return 0;

	if (status & WND)
		stats->tx_window_errors++;
	if (status & TABRT)
		stats->tx_aborted_errors++;
	if (status & FIFO)
		stats->tx_fifo_errors++;
	if (status & LINK)
		stats->tx_carrier_errors++;

	stats->tx_errors++;

	return -1;
}

static void sis190_tx_interrupt(struct net_device *dev,
				struct sis190_private *tp, void __iomem *ioaddr)
{
	struct net_device_stats *stats = &dev->stats;
	u32 pending, dirty_tx = tp->dirty_tx;
	/*
	 * It would not be needed if queueing was allowed to be enabled
	 * again too early (hint: think preempt and unclocked smp systems).
	 */
	unsigned int queue_stopped;
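	/* Pairs with the smp_wmb() issued after cur_tx is advanced in sis190_start_xmit(). */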
	smp_rmb();
	pending = tp->cur_tx - dirty_tx;
	queue_stopped = (pending == NUM_TX_DESC);

	for (; pending; pending--, dirty_tx++) {
		unsigned int entry = dirty_tx % NUM_TX_DESC;
		struct TxDesc *txd = tp->TxDescRing + entry;
		u32 status = le32_to_cpu(txd->status);
		struct sk_buff *skb;

		if (status & OWNbit)
			break;

		skb = tp->Tx_skbuff[entry];

		if (likely(sis190_tx_pkt_err(status, stats) == 0)) {
			stats->tx_packets++;
			stats->tx_bytes += skb->len;
			stats->collisions += ((status & ColCountMask) - 1);
		}

		sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
		tp->Tx_skbuff[entry] = NULL;
		dev_kfree_skb_irq(skb);
	}

	if (tp->dirty_tx != dirty_tx) {
		tp->dirty_tx = dirty_tx;
		smp_wmb();
		if (queue_stopped)
			netif_wake_queue(dev);
	}
}

/*
 * The interrupt handler does all of the Rx thread work and cleans up after
 * the Tx thread.
 */
static irqreturn_t sis190_interrupt(int irq, void *__dev)
{
	struct net_device *dev = __dev;
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int handled = 0;
	u32 status;

	status = SIS_R32(IntrStatus);

	if ((status == 0xffffffff) || !status)
		goto out;

	handled = 1;

	if (unlikely(!netif_running(dev))) {
		sis190_asic_down(ioaddr);
		goto out;
	}

	SIS_W32(IntrStatus, status);

	// netif_info(tp, intr, dev, "status = %08x\n", status);

	if (status & LinkChange) {
		netif_info(tp, intr, dev, "link change\n");
		del_timer(&tp->timer);
		schedule_work(&tp->phy_task);
	}

	if (status & RxQInt)
		sis190_rx_interrupt(dev, tp, ioaddr);

	if (status & TxQ0Int)
		sis190_tx_interrupt(dev, tp, ioaddr);
out:
	return IRQ_RETVAL(handled);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void sis190_netpoll(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	disable_irq(pdev->irq);
	sis190_interrupt(pdev->irq, dev);
	enable_irq(pdev->irq);
}
#endif

static void sis190_free_rx_skb(struct sis190_private *tp,
			       struct sk_buff **sk_buff, struct RxDesc *desc)
{
	struct pci_dev *pdev = tp->pci_dev;

	pci_unmap_single(pdev, le32_to_cpu(desc->addr), tp->rx_buf_sz,
			 PCI_DMA_FROMDEVICE);
	dev_kfree_skb(*sk_buff);
	*sk_buff = NULL;
	sis190_make_unusable_by_asic(desc);
}

static void sis190_rx_clear(struct sis190_private *tp)
{
	unsigned int i;

	for (i = 0; i < NUM_RX_DESC; i++) {
		if (!tp->Rx_skbuff[i])
			continue;
		sis190_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescRing + i);
	}
}

static void sis190_init_ring_indexes(struct sis190_private *tp)
{
	tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
}

static int sis190_init_ring(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);

	sis190_init_ring_indexes(tp);

	memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
	memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));

	if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
		goto err_rx_clear;

	sis190_mark_as_last_descriptor(tp->RxDescRing + NUM_RX_DESC - 1);

	return 0;

err_rx_clear:
	sis190_rx_clear(tp);
	return -ENOMEM;
}

static void sis190_set_rx_mode(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long flags;
	u32 mc_filter[2];	/* Multicast hash filter */
	u16 rx_mode;

	if (dev->flags & IFF_PROMISC) {
		rx_mode =
			AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
			AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter perfectly -- accept all multicasts. */
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else {
		struct netdev_hw_addr *ha;

		rx_mode = AcceptBroadcast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0;
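		/*
		 * The low 6 bits of the Ethernet CRC select one of 64 hash
		 * bins: bit_nr >> 5 picks the 32-bit RxHashTable word,
		 * bit_nr & 31 the bit within it.
		 */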
		netdev_for_each_mc_addr(ha, dev) {
			int bit_nr =
				ether_crc(ETH_ALEN, ha->addr) & 0x3f;
			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
			rx_mode |= AcceptMulticast;
		}
	}

	spin_lock_irqsave(&tp->lock, flags);

	SIS_W16(RxMacControl, rx_mode | 0x2);
	SIS_W32(RxHashTable, mc_filter[0]);
	SIS_W32(RxHashTable + 4, mc_filter[1]);

	spin_unlock_irqrestore(&tp->lock, flags);
}

static void sis190_soft_reset(void __iomem *ioaddr)
{
	SIS_W32(IntrControl, 0x8000);
	SIS_PCI_COMMIT();
	SIS_W32(IntrControl, 0x0);
	sis190_asic_down(ioaddr);
}

static void sis190_hw_start(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	sis190_soft_reset(ioaddr);

	SIS_W32(TxDescStartAddr, tp->tx_dma);
	SIS_W32(RxDescStartAddr, tp->rx_dma);

	SIS_W32(IntrStatus, 0xffffffff);
	SIS_W32(IntrMask, 0x0);
	SIS_W32(GMIIControl, 0x0);
	SIS_W32(TxMacControl, 0x60);
	SIS_W16(RxMacControl, 0x02);
	SIS_W32(RxHashTable, 0x0);
	SIS_W32(0x6c, 0x0);
	SIS_W32(RxWolCtrl, 0x0);
	SIS_W32(RxWolData, 0x0);

	SIS_PCI_COMMIT();

	sis190_set_rx_mode(dev);

	/* Enable all known interrupts by setting the interrupt mask. */
	SIS_W32(IntrMask, sis190_intr_mask);

	SIS_W32(TxControl, 0x1a00 | CmdTxEnb);
	SIS_W32(RxControl, 0x1a1d);

	netif_start_queue(dev);
}

static void sis190_phy_task(struct work_struct *work)
{
	struct sis190_private *tp =
		container_of(work, struct sis190_private, phy_task);
	struct net_device *dev = tp->dev;
	void __iomem *ioaddr = tp->mmio_addr;
	int phy_id = tp->mii_if.phy_id;
	u16 val;

	rtnl_lock();

	if (!netif_running(dev))
		goto out_unlock;

	val = mdio_read(ioaddr, phy_id, MII_BMCR);
	if (val & BMCR_RESET) {
		// FIXME: needlessly high ?  -- FR 02/07/2005
		mod_timer(&tp->timer, jiffies + HZ/10);
		goto out_unlock;
	}

	val = mdio_read_latched(ioaddr, phy_id, MII_BMSR);
	if (!(val & BMSR_ANEGCOMPLETE) && tp->link_status != LNK_AUTONEG) {
		netif_carrier_off(dev);
		netif_warn(tp, link, dev, "auto-negotiating...\n");
		tp->link_status = LNK_AUTONEG;
	} else if ((val & BMSR_LSTATUS) && tp->link_status != LNK_ON) {
		/* Rejoice ! */
		struct {
			int val;
			u32 ctl;
			const char *msg;
		} reg31[] = {
			{ LPA_1000FULL, 0x07000c00 | 0x00001000,
				"1000 Mbps Full Duplex" },
			{ LPA_1000HALF, 0x07000c00,
				"1000 Mbps Half Duplex" },
			{ LPA_100FULL, 0x04000800 | 0x00001000,
				"100 Mbps Full Duplex" },
			{ LPA_100HALF, 0x04000800,
				"100 Mbps Half Duplex" },
			{ LPA_10FULL, 0x04000400 | 0x00001000,
				"10 Mbps Full Duplex" },
			{ LPA_10HALF, 0x04000400,
				"10 Mbps Half Duplex" },
			{ 0, 0x04000400, "unknown" }
		}, *p = NULL;
		u16 adv, autoexp, gigadv, gigrec;

		val = mdio_read(ioaddr, phy_id, 0x1f);
		netif_info(tp, link, dev, "mii ext = %04x\n", val);

		val = mdio_read(ioaddr, phy_id, MII_LPA);
		adv = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
		autoexp = mdio_read(ioaddr, phy_id, MII_EXPANSION);
		netif_info(tp, link, dev, "mii lpa=%04x adv=%04x exp=%04x\n",
			   val, adv, autoexp);

		if (val & LPA_NPAGE && autoexp & EXPANSION_NWAY) {
			/* check for gigabit speed */
			gigadv = mdio_read(ioaddr, phy_id, MII_CTRL1000);
			gigrec = mdio_read(ioaddr, phy_id, MII_STAT1000);
			val = (gigadv & (gigrec >> 2));
			if (val & ADVERTISE_1000FULL)
				p = reg31;
			else if (val & ADVERTISE_1000HALF)
				p = reg31 + 1;
		}
		if (!p) {
			val &= adv;

			for (p = reg31; p->val; p++) {
				if ((val & p->val) == p->val)
					break;
			}
		}

		p->ctl |= SIS_R32(StationControl) & ~0x0f001c00;

		if ((tp->features & F_HAS_RGMII) &&
		    (tp->features & F_PHY_BCM5461)) {
			// Set Tx Delay in RGMII mode.
			mdio_write(ioaddr, phy_id, 0x18, 0xf1c7);
			udelay(200);
			mdio_write(ioaddr, phy_id, 0x1c, 0x8c00);
			p->ctl |= 0x03000000;
		}

		SIS_W32(StationControl, p->ctl);

		if (tp->features & F_HAS_RGMII) {
			SIS_W32(RGDelay, 0x0441);
			SIS_W32(RGDelay, 0x0440);
		}

		tp->negotiated_lpa = p->val;

		netif_info(tp, link, dev, "link on %s mode\n", p->msg);
		netif_carrier_on(dev);
		tp->link_status = LNK_ON;
	} else if (!(val & BMSR_LSTATUS) && tp->link_status != LNK_AUTONEG)
		tp->link_status = LNK_OFF;
	mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);

out_unlock:
	rtnl_unlock();
}

static void sis190_phy_timer(unsigned long __opaque)
{
	struct net_device *dev = (struct net_device *)__opaque;
	struct sis190_private *tp = netdev_priv(dev);

	if (likely(netif_running(dev)))
		schedule_work(&tp->phy_task);
}

static inline void sis190_delete_timer(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);

	del_timer_sync(&tp->timer);
}

static inline void sis190_request_timer(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct timer_list *timer = &tp->timer;

	init_timer(timer);
	timer->expires = jiffies + SIS190_PHY_TIMEOUT;
	timer->data = (unsigned long)dev;
	timer->function = sis190_phy_timer;
	add_timer(timer);
}

static void sis190_set_rxbufsize(struct sis190_private *tp,
				 struct net_device *dev)
{
	unsigned int mtu = dev->mtu;

	tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
	/* RxDesc->size has a licence to kill the lower bits */
	if (tp->rx_buf_sz & 0x07) {
		tp->rx_buf_sz += 8;
		tp->rx_buf_sz &= RX_BUF_MASK;
	}
}

static int sis190_open(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;
	int rc = -ENOMEM;

	sis190_set_rxbufsize(tp, dev);

	/*
	 * Rx and Tx descriptors need 256 bytes alignment.
	 * pci_alloc_consistent() guarantees a stronger alignment.
	 */
	tp->TxDescRing = pci_alloc_consistent(pdev, TX_RING_BYTES, &tp->tx_dma);
	if (!tp->TxDescRing)
		goto out;

	tp->RxDescRing = pci_alloc_consistent(pdev, RX_RING_BYTES, &tp->rx_dma);
	if (!tp->RxDescRing)
		goto err_free_tx_0;

	rc = sis190_init_ring(dev);
	if (rc < 0)
		goto err_free_rx_1;

	sis190_request_timer(dev);

	rc = request_irq(dev->irq, sis190_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc < 0)
		goto err_release_timer_2;

	sis190_hw_start(dev);
out:
	return rc;

err_release_timer_2:
	sis190_delete_timer(dev);
	sis190_rx_clear(tp);
err_free_rx_1:
	pci_free_consistent(tp->pci_dev, RX_RING_BYTES, tp->RxDescRing,
			    tp->rx_dma);
err_free_tx_0:
	pci_free_consistent(tp->pci_dev, TX_RING_BYTES, tp->TxDescRing,
			    tp->tx_dma);
	goto out;
}

static void sis190_tx_clear(struct sis190_private *tp)
{
	unsigned int i;

	for (i = 0; i < NUM_TX_DESC; i++) {
		struct sk_buff *skb = tp->Tx_skbuff[i];

		if (!skb)
			continue;

		sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i);
		tp->Tx_skbuff[i] = NULL;
		dev_kfree_skb(skb);

		tp->dev->stats.tx_dropped++;
	}
	tp->cur_tx = tp->dirty_tx = 0;
}

static void sis190_down(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int poll_locked = 0;

	sis190_delete_timer(dev);

	netif_stop_queue(dev);

	do {
		spin_lock_irq(&tp->lock);

		sis190_asic_down(ioaddr);

		spin_unlock_irq(&tp->lock);

		synchronize_irq(dev->irq);

		if (!poll_locked)
			poll_locked++;

		synchronize_sched();

	} while (SIS_R32(IntrMask));

	sis190_tx_clear(tp);
	sis190_rx_clear(tp);
}

static int sis190_close(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	sis190_down(dev);

	free_irq(dev->irq, dev);

	pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
	pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);

	tp->TxDescRing = NULL;
	tp->RxDescRing = NULL;

	return 0;
}

static netdev_tx_t sis190_start_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u32 len, entry, dirty_tx;
	struct TxDesc *desc;
	dma_addr_t mapping;

	if (unlikely(skb->len < ETH_ZLEN)) {
		if (skb_padto(skb, ETH_ZLEN)) {
			dev->stats.tx_dropped++;
			goto out;
		}
		len = ETH_ZLEN;
	} else {
		len = skb->len;
	}

	entry = tp->cur_tx % NUM_TX_DESC;
	desc = tp->TxDescRing + entry;

	if (unlikely(le32_to_cpu(desc->status) & OWNbit)) {
		netif_stop_queue(dev);
		netif_err(tp, tx_err, dev,
			  "BUG! Tx Ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pci_dev, mapping)) {
		netif_err(tp, tx_err, dev,
			  "PCI mapping failed, dropping packet");
		return NETDEV_TX_BUSY;
	}

	tp->Tx_skbuff[entry] = skb;

	desc->PSize = cpu_to_le32(len);
	desc->addr = cpu_to_le32(mapping);

	desc->size = cpu_to_le32(len);
	if (entry == (NUM_TX_DESC - 1))
		desc->size |= cpu_to_le32(RingEnd);

	wmb();

	desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit);
	if (tp->negotiated_lpa & (LPA_1000HALF | LPA_100HALF | LPA_10HALF)) {
		/* Half Duplex */
		desc->status |= cpu_to_le32(COLEN | CRSEN | BKFEN);
		if (tp->negotiated_lpa & (LPA_1000HALF | LPA_1000FULL))
			desc->status |= cpu_to_le32(EXTEN | BSTEN); /* gigabit HD */
	}

	tp->cur_tx++;

	smp_wmb();

	SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb);

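	/*
	 * If the ring just filled up, stop the queue, then re-check
	 * dirty_tx: a Tx completion racing with us may already have freed
	 * descriptors, in which case we wake the queue ourselves so the
	 * wakeup is not lost.
	 */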
	dirty_tx = tp->dirty_tx;
	if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) {
		netif_stop_queue(dev);
		smp_rmb();
		if (dirty_tx != tp->dirty_tx)
			netif_wake_queue(dev);
	}
out:
	return NETDEV_TX_OK;
}

static void sis190_free_phy(struct list_head *first_phy)
{
	struct sis190_phy *cur, *next;

	list_for_each_entry_safe(cur, next, first_phy, list) {
		kfree(cur);
	}
}

/**
 * sis190_default_phy - Select default PHY for sis190 mac.
 * @dev: the net device to probe for
 *
 * Select the first detected PHY with link as the default.
 * If none has link, select the PHY whose type is HOME as the default.
 * If HOME doesn't exist, select LAN.
 */
static u16 sis190_default_phy(struct net_device *dev)
{
	struct sis190_phy *phy, *phy_home, *phy_default, *phy_lan;
	struct sis190_private *tp = netdev_priv(dev);
	struct mii_if_info *mii_if = &tp->mii_if;
	void __iomem *ioaddr = tp->mmio_addr;
	u16 status;

	phy_home = phy_default = phy_lan = NULL;

	list_for_each_entry(phy, &tp->first_phy, list) {
		status = mdio_read_latched(ioaddr, phy->phy_id, MII_BMSR);

		// Link ON & Not select default PHY & not ghost PHY.
		if ((status & BMSR_LSTATUS) &&
		    !phy_default &&
		    (phy->type != UNKNOWN)) {
			phy_default = phy;
		} else {
			status = mdio_read(ioaddr, phy->phy_id, MII_BMCR);
			mdio_write(ioaddr, phy->phy_id, MII_BMCR,
				   status | BMCR_ANENABLE | BMCR_ISOLATE);
			if (phy->type == HOME)
				phy_home = phy;
			else if (phy->type == LAN)
				phy_lan = phy;
		}
	}

	if (!phy_default) {
		if (phy_home)
			phy_default = phy_home;
		else if (phy_lan)
			phy_default = phy_lan;
		else
			phy_default = list_first_entry(&tp->first_phy,
						       struct sis190_phy, list);
	}

	if (mii_if->phy_id != phy_default->phy_id) {
		mii_if->phy_id = phy_default->phy_id;
		if (netif_msg_probe(tp))
			pr_info("%s: Using transceiver at address %d as default\n",
				pci_name(tp->pci_dev), mii_if->phy_id);
	}

	status = mdio_read(ioaddr, mii_if->phy_id, MII_BMCR);
	status &= (~BMCR_ISOLATE);

	mdio_write(ioaddr, mii_if->phy_id, MII_BMCR, status);
	status = mdio_read_latched(ioaddr, mii_if->phy_id, MII_BMSR);

	return status;
}

static void sis190_init_phy(struct net_device *dev, struct sis190_private *tp,
			    struct sis190_phy *phy, unsigned int phy_id,
			    u16 mii_status)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct mii_chip_info *p;

	INIT_LIST_HEAD(&phy->list);
	phy->status = mii_status;
	phy->phy_id = phy_id;

	phy->id[0] = mdio_read(ioaddr, phy_id, MII_PHYSID1);
	phy->id[1] = mdio_read(ioaddr, phy_id, MII_PHYSID2);

	for (p = mii_chip_table; p->type; p++) {
		if ((p->id[0] == phy->id[0]) &&
		    (p->id[1] == (phy->id[1] & 0xfff0))) {
			break;
		}
	}

	if (p->id[1]) {
		phy->type = (p->type == MIX) ?
			((mii_status & (BMSR_100FULL | BMSR_100HALF)) ?
				LAN : HOME) : p->type;
		tp->features |= p->feature;
		if (netif_msg_probe(tp))
			pr_info("%s: %s transceiver at address %d\n",
				pci_name(tp->pci_dev), p->name, phy_id);
	} else {
		phy->type = UNKNOWN;
		if (netif_msg_probe(tp))
			pr_info("%s: unknown PHY 0x%x:0x%x transceiver at address %d\n",
				pci_name(tp->pci_dev),
				phy->id[0], (phy->id[1] & 0xfff0), phy_id);
	}
}

static void sis190_mii_probe_88e1111_fixup(struct sis190_private *tp)
{
	if (tp->features & F_PHY_88E1111) {
		void __iomem *ioaddr = tp->mmio_addr;
		int phy_id = tp->mii_if.phy_id;
		u16 reg[2][2] = {
			{ 0x808b, 0x0ce1 },
			{ 0x808f, 0x0c60 }
		}, *p;

		p = (tp->features & F_HAS_RGMII) ? reg[0] : reg[1];

		mdio_write(ioaddr, phy_id, 0x1b, p[0]);
		udelay(200);
		mdio_write(ioaddr, phy_id, 0x14, p[1]);
		udelay(200);
	}
}

/**
 * sis190_mii_probe - Probe MII PHY for sis190
 * @dev: the net device to probe for
 *
 * Search all 32 possible MII PHY addresses.
 * Identify and set the current PHY if one is found,
 * and return an error if none is found.
 */
static int __devinit sis190_mii_probe(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct mii_if_info *mii_if = &tp->mii_if;
	void __iomem *ioaddr = tp->mmio_addr;
	int phy_id;
	int rc = 0;

	INIT_LIST_HEAD(&tp->first_phy);

	for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
		struct sis190_phy *phy;
		u16 status;

		status = mdio_read_latched(ioaddr, phy_id, MII_BMSR);

		// Try next mii if the current one is not accessible.
		if (status == 0xffff || status == 0x0000)
			continue;

		phy = kmalloc(sizeof(*phy), GFP_KERNEL);
		if (!phy) {
			sis190_free_phy(&tp->first_phy);
			rc = -ENOMEM;
			goto out;
		}

		sis190_init_phy(dev, tp, phy, phy_id, status);

		list_add(&tp->first_phy, &phy->list);
	}

	if (list_empty(&tp->first_phy)) {
		if (netif_msg_probe(tp))
			pr_info("%s: No MII transceivers found!\n",
				pci_name(tp->pci_dev));
		rc = -EIO;
		goto out;
	}

	/* Select default PHY for mac */
	sis190_default_phy(dev);

	sis190_mii_probe_88e1111_fixup(tp);

	mii_if->dev = dev;
	mii_if->mdio_read = __mdio_read;
	mii_if->mdio_write = __mdio_write;
	mii_if->phy_id_mask = PHY_ID_ANY;
	mii_if->reg_num_mask = MII_REG_ANY;
out:
	return rc;
}

static void sis190_mii_remove(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);

	sis190_free_phy(&tp->first_phy);
}

static void sis190_release_board(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct sis190_private *tp = netdev_priv(dev);

	iounmap(tp->mmio_addr);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	free_netdev(dev);
}

static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
{
	struct sis190_private *tp;
	struct net_device *dev;
	void __iomem *ioaddr;
	int rc;

	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		if (netif_msg_drv(&debug))
			pr_err("unable to alloc new ethernet\n");
		rc = -ENOMEM;
		goto err_out_0;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->dev = dev;
	tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);

	rc = pci_enable_device(pdev);
	if (rc < 0) {
		if (netif_msg_probe(tp))
			pr_err("%s: enable failure\n", pci_name(pdev));
		goto err_free_dev_1;
	}

	rc = -ENODEV;

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		if (netif_msg_probe(tp))
			pr_err("%s: region #0 is no MMIO resource\n",
			       pci_name(pdev));
		goto err_pci_disable_2;
	}
	if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) {
		if (netif_msg_probe(tp))
			pr_err("%s: invalid PCI region size(s)\n",
			       pci_name(pdev));
		goto err_pci_disable_2;
	}

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc < 0) {
		if (netif_msg_probe(tp))
			pr_err("%s: could not request regions\n",
			       pci_name(pdev));
		goto err_pci_disable_2;
	}

	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc < 0) {
		if (netif_msg_probe(tp))
			pr_err("%s: DMA configuration failed\n",
			       pci_name(pdev));
		goto err_free_res_3;
	}

	pci_set_master(pdev);

	ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE);
	if (!ioaddr) {
		if (netif_msg_probe(tp))
			pr_err("%s: cannot remap MMIO, aborting\n",
			       pci_name(pdev));
		rc = -EIO;
		goto err_free_res_3;
	}

	tp->pci_dev = pdev;
	tp->mmio_addr = ioaddr;
	tp->link_status = LNK_OFF;

	sis190_irq_mask_and_ack(ioaddr);

	sis190_soft_reset(ioaddr);
out:
	return dev;

err_free_res_3:
	pci_release_regions(pdev);
err_pci_disable_2:
	pci_disable_device(pdev);
err_free_dev_1:
	free_netdev(dev);
err_out_0:
	dev = ERR_PTR(rc);
	goto out;
}

static void sis190_tx_timeout(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u8 tmp8;

	/* Disable Tx, if not already */
	tmp8 = SIS_R8(TxControl);
	if (tmp8 & CmdTxEnb)
		SIS_W8(TxControl, tmp8 & ~CmdTxEnb);

	netif_info(tp, tx_err, dev, "Transmit timeout, status %08x %08x\n",
		   SIS_R32(TxControl), SIS_R32(TxSts));

	/* Disable interrupts by clearing the interrupt mask. */
	SIS_W32(IntrMask, 0x0000);

	/* Stop a shared interrupt from scavenging while we are. */
	spin_lock_irq(&tp->lock);
	sis190_tx_clear(tp);
	spin_unlock_irq(&tp->lock);

	/* ...and finally, reset everything. */
	sis190_hw_start(dev);

	netif_wake_queue(dev);
}

static void sis190_set_rgmii(struct sis190_private *tp, u8 reg)
{
	tp->features |= (reg & 0x80) ? F_HAS_RGMII : 0;
}

static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
						     struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u16 sig;
	int i;

	if (netif_msg_probe(tp))
		pr_info("%s: Read MAC address from EEPROM\n", pci_name(pdev));

	/* Check to see if there is a sane EEPROM */
	sig = (u16) sis190_read_eeprom(ioaddr, EEPROMSignature);

	if ((sig == 0xffff) || (sig == 0x0000)) {
		if (netif_msg_probe(tp))
			pr_info("%s: Error EEPROM read %x\n",
				pci_name(pdev), sig);
		return -EIO;
	}

	/* Get MAC address from EEPROM */
	for (i = 0; i < MAC_ADDR_LEN / 2; i++) {
		u16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i);

		((__le16 *)dev->dev_addr)[i] = cpu_to_le16(w);
	}

	sis190_set_rgmii(tp, sis190_read_eeprom(ioaddr, EEPROMInfo));

	return 0;
}

/**
 * sis190_get_mac_addr_from_apc - Get MAC address for SiS96x model
 * @pdev: PCI device
 * @dev: network device to get address for
 *
 * SiS96x models store the MAC address in APC CMOS RAM, which is
 * accessed through the ISA bridge. The MAC address is read into
 * @dev->dev_addr.
 */
static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
						  struct net_device *dev)
{
	static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *isa_bridge;
	u8 reg, tmp8;
	unsigned int i;

	if (netif_msg_probe(tp))
		pr_info("%s: Read MAC address from APC\n", pci_name(pdev));

	for (i = 0; i < ARRAY_SIZE(ids); i++) {
		isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, ids[i], NULL);
		if (isa_bridge)
			break;
	}

	if (!isa_bridge) {
		if (netif_msg_probe(tp))
			pr_info("%s: Can not find ISA bridge\n",
				pci_name(pdev));
		return -EIO;
	}

	/* Enable port 78h & 79h to access APC Registers. */
	pci_read_config_byte(isa_bridge, 0x48, &tmp8);
	reg = (tmp8 & ~0x02);
	pci_write_config_byte(isa_bridge, 0x48, reg);
	udelay(50);
	pci_read_config_byte(isa_bridge, 0x48, &reg);

	for (i = 0; i < MAC_ADDR_LEN; i++) {
		outb(0x9 + i, 0x78);
		dev->dev_addr[i] = inb(0x79);
	}

	outb(0x12, 0x78);
	reg = inb(0x79);

	sis190_set_rgmii(tp, reg);

	/* Restore the value to ISA Bridge */
	pci_write_config_byte(isa_bridge, 0x48, tmp8);
	pci_dev_put(isa_bridge);

	return 0;
}

/**
 * sis190_init_rxfilter - Initialize the Rx filter
 * @dev: network device to initialize
 *
 * Set receive filter address to our MAC address
 * and enable packet filtering.
 */
static inline void sis190_init_rxfilter(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u16 ctl;
	int i;

	ctl = SIS_R16(RxMacControl);
	/*
	 * Disable packet filtering before setting filter.
	 * Note: SiS's driver writes 32 bits but RxMacControl is 16 bits
	 * only and followed by RxMacAddr (6 bytes). Strange. -- FR
	 */
	SIS_W16(RxMacControl, ctl & ~0x0f00);

	for (i = 0; i < MAC_ADDR_LEN; i++)
		SIS_W8(RxMacAddr + i, dev->dev_addr[i]);

	SIS_W16(RxMacControl, ctl);
	SIS_PCI_COMMIT();
}

static int __devinit sis190_get_mac_addr(struct pci_dev *pdev,
					 struct net_device *dev)
{
	int rc;

	rc = sis190_get_mac_addr_from_eeprom(pdev, dev);
	if (rc < 0) {
		u8 reg;

		pci_read_config_byte(pdev, 0x73, &reg);

		if (reg & 0x00000001)
			rc = sis190_get_mac_addr_from_apc(pdev, dev);
	}
	return rc;
}

static void sis190_set_speed_auto(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	int phy_id = tp->mii_if.phy_id;
	int val;

	netif_info(tp, link, dev, "Enabling Auto-negotiation\n");

	val = mdio_read(ioaddr, phy_id, MII_ADVERTISE);

	// Enable 10/100 Full/Half Mode, leave MII_ADVERTISE bit4:0
	// unchanged.
	mdio_write(ioaddr, phy_id, MII_ADVERTISE, (val & ADVERTISE_SLCT) |
		   ADVERTISE_100FULL | ADVERTISE_10FULL |
		   ADVERTISE_100HALF | ADVERTISE_10HALF);

	// Enable 1000 Full Mode.
	mdio_write(ioaddr, phy_id, MII_CTRL1000, ADVERTISE_1000FULL);

	// Enable auto-negotiation and restart auto-negotiation.
	mdio_write(ioaddr, phy_id, MII_BMCR,
		   BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET);
}

static int sis190_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct sis190_private *tp = netdev_priv(dev);

	return mii_ethtool_gset(&tp->mii_if, cmd);
}

static int sis190_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct sis190_private *tp = netdev_priv(dev);

	return mii_ethtool_sset(&tp->mii_if, cmd);
}

static void sis190_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	struct sis190_private *tp = netdev_priv(dev);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(tp->pci_dev));
}

static int sis190_get_regs_len(struct net_device *dev)
{
	return SIS190_REGS_SIZE;
}

static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			    void *p)
{
	struct sis190_private *tp = netdev_priv(dev);
	unsigned long flags;

	if (regs->len > SIS190_REGS_SIZE)
		regs->len = SIS190_REGS_SIZE;

	spin_lock_irqsave(&tp->lock, flags);
	memcpy_fromio(p, tp->mmio_addr, regs->len);
	spin_unlock_irqrestore(&tp->lock, flags);
}

static int sis190_nway_reset(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);

	return mii_nway_restart(&tp->mii_if);
}

static u32 sis190_get_msglevel(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);

	return tp->msg_enable;
}

static void sis190_set_msglevel(struct net_device *dev, u32 value)
{
	struct sis190_private *tp = netdev_priv(dev);

	tp->msg_enable = value;
}

static const struct ethtool_ops sis190_ethtool_ops = {
	.get_settings	= sis190_get_settings,
	.set_settings	= sis190_set_settings,
	.get_drvinfo	= sis190_get_drvinfo,
	.get_regs_len	= sis190_get_regs_len,
	.get_regs	= sis190_get_regs,
	.get_link	= ethtool_op_get_link,
	.get_msglevel	= sis190_get_msglevel,
	.set_msglevel	= sis190_set_msglevel,
	.nway_reset	= sis190_nway_reset,
};

static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct sis190_private *tp = netdev_priv(dev);

	return !netif_running(dev) ? -EINVAL :
		generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL);
}

static const struct net_device_ops sis190_netdev_ops = {
	.ndo_open		= sis190_open,
	.ndo_stop		= sis190_close,
	.ndo_do_ioctl		= sis190_ioctl,
	.ndo_start_xmit		= sis190_start_xmit,
	.ndo_tx_timeout		= sis190_tx_timeout,
	.ndo_set_multicast_list	= sis190_set_rx_mode,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= sis190_netpoll,
#endif
};

static int __devinit sis190_init_one(struct pci_dev *pdev,
				     const struct pci_device_id *ent)
{
	static int printed_version = 0;
	struct sis190_private *tp;
	struct net_device *dev;
	void __iomem *ioaddr;
	int rc;

	if (!printed_version) {
		if (netif_msg_drv(&debug))
			pr_info(SIS190_DRIVER_NAME " loaded\n");
		printed_version = 1;
	}

	dev = sis190_init_board(pdev);
	if (IS_ERR(dev)) {
		rc = PTR_ERR(dev);
		goto out;
	}

	pci_set_drvdata(pdev, dev);

	tp = netdev_priv(dev);
	ioaddr = tp->mmio_addr;

	rc = sis190_get_mac_addr(pdev, dev);
	if (rc < 0)
		goto err_release_board;

	sis190_init_rxfilter(dev);

	INIT_WORK(&tp->phy_task, sis190_phy_task);

	dev->netdev_ops = &sis190_netdev_ops;

	SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
	dev->irq = pdev->irq;
	dev->base_addr = (unsigned long) 0xdead;
	dev->watchdog_timeo = SIS190_TX_TIMEOUT;

	spin_lock_init(&tp->lock);

	rc = sis190_mii_probe(dev);
	if (rc < 0)
		goto err_release_board;

	rc = register_netdev(dev);
	if (rc < 0)
		goto err_remove_mii;

	if (netif_msg_probe(tp)) {
		netdev_info(dev, "%s: %s at %p (IRQ: %d), %pM\n",
			    pci_name(pdev),
			    sis_chip_info[ent->driver_data].name,
			    ioaddr, dev->irq, dev->dev_addr);
		netdev_info(dev, "%s mode.\n",
			    (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII");
	}

	netif_carrier_off(dev);

	sis190_set_speed_auto(dev);
out:
	return rc;

err_remove_mii:
	sis190_mii_remove(dev);
err_release_board:
	sis190_release_board(pdev);
	goto out;
}

static void __devexit sis190_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct sis190_private *tp = netdev_priv(dev);

	sis190_mii_remove(dev);
	cancel_work_sync(&tp->phy_task);
	unregister_netdev(dev);
	sis190_release_board(pdev);
	pci_set_drvdata(pdev, NULL);
}

static struct pci_driver sis190_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= sis190_pci_tbl,
	.probe		= sis190_init_one,
	.remove		= __devexit_p(sis190_remove_one),
};

static int __init sis190_init_module(void)
{
	return pci_register_driver(&sis190_pci_driver);
}

static void __exit sis190_cleanup_module(void)
{
	pci_unregister_driver(&sis190_pci_driver);
}

module_init(sis190_init_module);
module_exit(sis190_cleanup_module);