x86, delay: tsc based udelay should have rdtsc_barrier
[linux-2.6/mini2440.git] / drivers / net / sis190.c
blob1f040e8a000ba043d2b2f41d411e0478a7a3e4b3
1 /*
2 sis190.c: Silicon Integrated Systems SiS190 ethernet driver
4 Copyright (c) 2003 K.M. Liu <kmliu@sis.com>
5 Copyright (c) 2003, 2004 Jeff Garzik <jgarzik@pobox.com>
6 Copyright (c) 2003, 2004, 2005 Francois Romieu <romieu@fr.zoreil.com>
8 Based on r8169.c, tg3.c, 8139cp.c, skge.c, epic100.c and SiS 190/191
9 genuine driver.
11 This software may be used and distributed according to the terms of
12 the GNU General Public License (GPL), incorporated herein by reference.
13 Drivers based on or derived from this code fall under the GPL and must
14 retain the authorship, copyright and license notice. This file is not
15 a complete program and may only be used when the entire operating
16 system is licensed under the GPL.
18 See the file COPYING in this distribution for more information.
22 #include <linux/module.h>
23 #include <linux/moduleparam.h>
24 #include <linux/netdevice.h>
25 #include <linux/rtnetlink.h>
26 #include <linux/etherdevice.h>
27 #include <linux/ethtool.h>
28 #include <linux/pci.h>
29 #include <linux/mii.h>
30 #include <linux/delay.h>
31 #include <linux/crc32.h>
32 #include <linux/dma-mapping.h>
33 #include <asm/irq.h>
/*
 * Level-gated logging helpers: emit the printk only when the matching
 * netif_msg_*() bit is set in the device's msg_enable mask.
 *
 * Each body is wrapped in do { ... } while (0) so the macro behaves as a
 * single statement.  The original bare "if (...) printk(arg)" form is a
 * dangling-else hazard: used unbraced inside an if/else, a following
 * "else" silently binds to the macro's hidden "if".
 */
#define net_drv(p, arg...)	do {				\
	if (netif_msg_drv(p))					\
		printk(arg);					\
} while (0)

#define net_probe(p, arg...)	do {				\
	if (netif_msg_probe(p))					\
		printk(arg);					\
} while (0)

#define net_link(p, arg...)	do {				\
	if (netif_msg_link(p))					\
		printk(arg);					\
} while (0)

#define net_intr(p, arg...)	do {				\
	if (netif_msg_intr(p))					\
		printk(arg);					\
} while (0)

#define net_tx_err(p, arg...)	do {				\
	if (netif_msg_tx_err(p))				\
		printk(arg);					\
} while (0)
/* MII bus limits: 32 possible PHY addresses; 0x1f doubles as a wildcard. */
#define PHY_MAX_ADDR		32
#define PHY_ID_ANY		0x1f
#define MII_REG_ANY		0x1f

#define DRV_VERSION		"1.3"
#define DRV_NAME		"sis190"
#define SIS190_DRIVER_NAME	DRV_NAME " Gigabit Ethernet driver " DRV_VERSION
#define PFX			DRV_NAME ": "	/* printk message prefix */

/* No NAPI here: packets are fed straight to netif_rx, quota is ignored. */
#define sis190_rx_skb			netif_rx
#define sis190_rx_quota(count, quota)	count

#define MAC_ADDR_LEN		6

/* Ring geometry.  Hardware supports larger rings (see bracketed ranges). */
#define NUM_TX_DESC		64	/* [8..1024] */
#define NUM_RX_DESC		64	/* [8..8192] */
#define TX_RING_BYTES		(NUM_TX_DESC * sizeof(struct TxDesc))
#define RX_RING_BYTES		(NUM_RX_DESC * sizeof(struct RxDesc))
#define RX_BUF_SIZE		1536	/* default Rx buffer (standard MTU) */
#define RX_BUF_MASK		0xfff8	/* RxDesc.size ignores the low 3 bits */

#define SIS190_REGS_SIZE	0x80	/* MMIO register window size */
#define SIS190_TX_TIMEOUT	(6*HZ)
#define SIS190_PHY_TIMEOUT	(10*HZ)	/* PHY watchdog rearm interval */
#define SIS190_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
				 NETIF_MSG_LINK | NETIF_MSG_IFUP | \
				 NETIF_MSG_IFDOWN)
/* Enhanced PHY access register bit definitions (GMIIControl register). */
#define EhnMIIread	0x0000	/* direction: read from PHY */
#define EhnMIIwrite	0x0020	/* direction: write to PHY */
#define EhnMIIdataShift	16	/* 16-bit data lives in the high half */
#define EhnMIIpmdShift	6	/* PHY address field; 7016 only */
#define EhnMIIregShift	11	/* PHY register number field */
#define EhnMIIreq	0x0010	/* start the MII transaction */
#define EhnMIInotDone	0x0010	/* still set while transaction pending */

/*
 * Write/read MMIO register.  These expect a local "void __iomem *ioaddr"
 * to be in scope at every call site.
 */
#define SIS_W8(reg, val)	writeb ((val), ioaddr + (reg))
#define SIS_W16(reg, val)	writew ((val), ioaddr + (reg))
#define SIS_W32(reg, val)	writel ((val), ioaddr + (reg))
#define SIS_R8(reg)		readb (ioaddr + (reg))
#define SIS_R16(reg)		readw (ioaddr + (reg))
#define SIS_R32(reg)		readl (ioaddr + (reg))

/* Read back a register to force posted PCI writes out to the device. */
#define SIS_PCI_COMMIT()	SIS_R32(IntrControl)
/* MMIO register offsets within the SIS190_REGS_SIZE register window. */
enum sis190_registers {
	TxControl		= 0x00,
	TxDescStartAddr		= 0x04,
	rsv0			= 0x08,	// reserved
	TxSts			= 0x0c,	// unused (Control/Status)
	RxControl		= 0x10,
	RxDescStartAddr		= 0x14,
	rsv1			= 0x18,	// reserved
	RxSts			= 0x1c,	// unused
	IntrStatus		= 0x20,
	IntrMask		= 0x24,
	IntrControl		= 0x28,
	IntrTimer		= 0x2c,	// unused (Interrupt Timer)
	PMControl		= 0x30,	// unused (Power Mgmt Control/Status)
	rsv2			= 0x34,	// reserved
	ROMControl		= 0x38,
	ROMInterface		= 0x3c,
	StationControl		= 0x40,
	GMIIControl		= 0x44,
	GIoCR			= 0x48,	// unused (GMAC IO Compensation)
	GIoCtrl			= 0x4c,	// unused (GMAC IO Control)
	TxMacControl		= 0x50,
	TxLimit			= 0x54,	// unused (Tx MAC Timer/TryLimit)
	RGDelay			= 0x58,	// unused (RGMII Tx Internal Delay)
	rsv3			= 0x5c,	// reserved
	RxMacControl		= 0x60,
	RxMacAddr		= 0x62,
	RxHashTable		= 0x68,
	// Undocumented	= 0x6c,
	RxWolCtrl		= 0x70,
	RxWolData		= 0x74,	// unused (Rx WOL Data Access)
	RxMPSControl		= 0x78,	// unused (Rx MPS Control)
	rsv4			= 0x7c,	// reserved
};
/* Bit definitions for the registers above, grouped by register. */
enum sis190_register_content {
	/* IntrStatus */
	SoftInt			= 0x40000000,	// unused
	Timeup			= 0x20000000,	// unused
	PauseFrame		= 0x00080000,	// unused
	MagicPacket		= 0x00040000,	// unused
	WakeupFrame		= 0x00020000,	// unused
	LinkChange		= 0x00010000,
	RxQEmpty		= 0x00000080,
	RxQInt			= 0x00000040,
	TxQ1Empty		= 0x00000020,	// unused
	TxQ1Int			= 0x00000010,
	TxQ0Empty		= 0x00000008,	// unused
	TxQ0Int			= 0x00000004,
	RxHalt			= 0x00000002,
	TxHalt			= 0x00000001,

	/* {Rx/Tx}CmdBits */
	CmdReset		= 0x10,
	CmdRxEnb		= 0x08,		// unused
	CmdTxEnb		= 0x01,
	RxBufEmpty		= 0x01,		// unused

	/* Cfg9346Bits */
	Cfg9346_Lock		= 0x00,		// unused
	Cfg9346_Unlock		= 0xc0,		// unused

	/* RxMacControl */
	AcceptErr		= 0x20,		// unused
	AcceptRunt		= 0x10,		// unused
	AcceptBroadcast		= 0x0800,
	AcceptMulticast		= 0x0400,
	AcceptMyPhys		= 0x0200,
	AcceptAllPhys		= 0x0100,

	/* RxConfigBits */
	RxCfgFIFOShift		= 13,
	RxCfgDMAShift		= 8,		// 0x1a in RxControl ?

	/* TxConfigBits */
	TxInterFrameGapShift	= 24,
	TxDMAShift		= 8,		/* DMA burst value (0-7) is shift this many bits */

	LinkStatus		= 0x02,		// unused
	FullDup			= 0x01,		// unused

	/* TBICSRBit */
	TBILinkOK		= 0x02000000,	// unused
};
/*
 * Hardware descriptor layout, little-endian as seen by the NIC.
 * Tx and Rx descriptors share the same 16-byte shape; the meaning of the
 * status/PSize bits differs (see enum _DescStatusBit below).
 */
struct TxDesc {
	__le32 PSize;	/* packet size */
	__le32 status;	/* ownership + Tx command/completion bits */
	__le32 addr;	/* DMA address of the buffer */
	__le32 size;	/* buffer size; top bit marks end of ring */
};

struct RxDesc {
	__le32 PSize;	/* received size + Rx status bits */
	__le32 status;	/* ownership + Rx control bits */
	__le32 addr;	/* DMA address of the buffer */
	__le32 size;	/* buffer size; top bit marks end of ring */
};
/* Descriptor bit definitions, grouped by the field they apply to. */
enum _DescStatusBit {
	/* _Desc.status */
	OWNbit		= 0x80000000,	// RXOWN/TXOWN
	INTbit		= 0x40000000,	// RXINT/TXINT
	CRCbit		= 0x00020000,	// CRCOFF/CRCEN
	PADbit		= 0x00010000,	// PREADD/PADEN
	/* _Desc.size */
	RingEnd		= 0x80000000,
	/* TxDesc.status */
	LSEN		= 0x08000000,	// TSO ? -- FR
	IPCS		= 0x04000000,
	TCPCS		= 0x02000000,
	UDPCS		= 0x01000000,
	BSTEN		= 0x00800000,
	EXTEN		= 0x00400000,
	DEFEN		= 0x00200000,
	BKFEN		= 0x00100000,
	CRSEN		= 0x00080000,
	COLEN		= 0x00040000,
	THOL3		= 0x30000000,
	THOL2		= 0x20000000,
	THOL1		= 0x10000000,
	THOL0		= 0x00000000,

	WND		= 0x00080000,
	TABRT		= 0x00040000,
	FIFO		= 0x00020000,
	LINK		= 0x00010000,
	ColCountMask	= 0x0000ffff,
	/* RxDesc.status */
	IPON		= 0x20000000,
	TCPON		= 0x10000000,
	UDPON		= 0x08000000,
	Wakup		= 0x00400000,
	Magic		= 0x00200000,
	Pause		= 0x00100000,
	DEFbit		= 0x00200000,
	BCAST		= 0x000c0000,
	MCAST		= 0x00080000,
	UCAST		= 0x00040000,
	/* RxDesc.PSize */
	TAGON		= 0x80000000,
	RxDescCountMask	= 0x7f000000,	// multi-desc pkt when > 1 ? -- FR
	ABORT		= 0x00800000,
	SHORT		= 0x00400000,
	LIMIT		= 0x00200000,
	MIIER		= 0x00100000,
	OVRUN		= 0x00080000,
	NIBON		= 0x00040000,
	COLON		= 0x00020000,
	CRCOK		= 0x00010000,
	RxSizeMask	= 0x0000ffff
};

/*
 * The asic could apparently do vlan, TSO, jumbo (sis191 only) and
 * provide two (unused with Linux) Tx queues. No publicly
 * available documentation alas.
 */
/* Bits of the ROMInterface register used for EEPROM access. */
enum sis190_eeprom_access_register_bits {
	EECS	= 0x00000001,	// unused
	EECLK	= 0x00000002,	// unused
	EEDO	= 0x00000008,	// unused
	EEDI	= 0x00000004,	// unused
	EEREQ	= 0x00000080,	/* request; clears when the op completes */
	EEROP	= 0x00000200,	/* read operation */
	EEWOP	= 0x00000100	// unused
};

/* EEPROM Addresses (16-bit word offsets, shifted into ROMInterface). */
enum sis190_eeprom_address {
	EEPROMSignature	= 0x00,
	EEPROMCLK	= 0x01,	// unused
	EEPROMInfo	= 0x02,
	EEPROMMACAddr	= 0x03
};

/* Board/PHY quirk flags kept in sis190_private.features. */
enum sis190_feature {
	F_HAS_RGMII	= 1,
	F_PHY_88E1111	= 2,
	F_PHY_BCM5461	= 4
};
/* Per-adapter state, stored in netdev_priv(dev). */
struct sis190_private {
	void __iomem *mmio_addr;	/* mapped register window */
	struct pci_dev *pci_dev;
	struct net_device *dev;
	spinlock_t lock;		/* guards register access vs irq */
	u32 rx_buf_sz;			/* Rx buffer size, 8-byte aligned */
	/* Ring cursors: cur_* advances on produce, dirty_* on reclaim. */
	u32 cur_rx;
	u32 cur_tx;
	u32 dirty_rx;
	u32 dirty_tx;
	dma_addr_t rx_dma;		/* bus address of RxDescRing */
	dma_addr_t tx_dma;		/* bus address of TxDescRing */
	struct RxDesc *RxDescRing;
	struct TxDesc *TxDescRing;
	struct sk_buff *Rx_skbuff[NUM_RX_DESC];
	struct sk_buff *Tx_skbuff[NUM_TX_DESC];
	struct work_struct phy_task;	/* link (re)negotiation handler */
	struct timer_list timer;	/* periodic PHY watchdog */
	u32 msg_enable;			/* netif_msg_* verbosity bitmap */
	struct mii_if_info mii_if;
	struct list_head first_phy;	/* detected PHYs (sis190_phy list) */
	u32 features;			/* enum sis190_feature flags */
};

/* One entry per PHY found during bus scan. */
struct sis190_phy {
	struct list_head list;
	int phy_id;	/* MII bus address */
	u16 id[2];	/* PHYSID1/PHYSID2 */
	u16 status;	/* last BMSR reading */
	u8  type;	/* enum sis190_phy_type */
};

enum sis190_phy_type {
	UNKNOWN	= 0x00,
	HOME	= 0x01,
	LAN	= 0x02,
	MIX	= 0x03
};
/* Known PHYs, matched against PHYSID1/PHYSID2; NULL name terminates. */
static struct mii_chip_info {
	const char *name;
	u16 id[2];		/* PHYSID1, PHYSID2 (revision bits included) */
	unsigned int type;	/* enum sis190_phy_type */
	u32 feature;		/* enum sis190_feature quirk flags */
} mii_chip_table[] = {
	{ "Atheros PHY",          { 0x004d, 0xd010 }, LAN, 0 },
	{ "Atheros PHY AR8012",   { 0x004d, 0xd020 }, LAN, 0 },
	{ "Broadcom PHY BCM5461", { 0x0020, 0x60c0 }, LAN, F_PHY_BCM5461 },
	{ "Broadcom PHY AC131",   { 0x0143, 0xbc70 }, LAN, 0 },
	{ "Agere PHY ET1101B",    { 0x0282, 0xf010 }, LAN, 0 },
	{ "Marvell PHY 88E1111",  { 0x0141, 0x0cc0 }, LAN, F_PHY_88E1111 },
	{ "Realtek PHY RTL8201",  { 0x0000, 0x8200 }, LAN, 0 },
	{ NULL, }
};

/* Indexed by driver_data from the PCI id table below. */
static const struct {
	const char *name;
} sis_chip_info[] = {
	{ "SiS 190 PCI Fast Ethernet adapter" },
	{ "SiS 191 PCI Gigabit Ethernet adapter" },
};

static struct pci_device_id sis190_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
	{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0191), 0, 0, 1 },
	{ 0, },
};

MODULE_DEVICE_TABLE(pci, sis190_pci_tbl);
/* Rx packets smaller than this (bytes) are copied into a fresh skb. */
static int rx_copybreak = 200;

static struct {
	u32 msg_enable;
} debug = { -1 };	/* -1: all netif_msg classes enabled by default */

MODULE_DESCRIPTION("SiS sis190/191 Gigabit Ethernet driver");
module_param(rx_copybreak, int, 0);
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
module_param_named(debug, debug.msg_enable, int, 0);
MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
MODULE_AUTHOR("K.M. Liu <kmliu@sis.com>, Ueimor <romieu@fr.zoreil.com>");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");

/* Interrupt sources this driver services; everything else stays masked. */
static const u32 sis190_intr_mask =
	RxQEmpty | RxQInt | TxQ1Int | TxQ0Int | RxHalt | TxHalt | LinkChange;

/*
 * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
 * The chips use a 64 element hash table based on the Ethernet CRC.
 */
static const int multicast_filter_limit = 32;
/*
 * Kick a GMII/MDIO transaction and busy-wait (sleeping) for completion.
 * Polls EhnMIInotDone for up to ~100 ms; logs an error on timeout but
 * does not report it to the caller (callers read stale data in that case).
 * Must be called from sleepable context (uses msleep).
 */
static void __mdio_cmd(void __iomem *ioaddr, u32 ctl)
{
	unsigned int i;

	SIS_W32(GMIIControl, ctl);

	msleep(1);

	for (i = 0; i < 100; i++) {
		if (!(SIS_R32(GMIIControl) & EhnMIInotDone))
			break;
		msleep(1);
	}

	/* i == 100 means the loop never saw the transaction complete. */
	if (i > 99)
		printk(KERN_ERR PFX "PHY command failed !\n");
}
387 static void mdio_write(void __iomem *ioaddr, int phy_id, int reg, int val)
389 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIwrite |
390 (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift) |
391 (((u32) val) << EhnMIIdataShift));
394 static int mdio_read(void __iomem *ioaddr, int phy_id, int reg)
396 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIread |
397 (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift));
399 return (u16) (SIS_R32(GMIIControl) >> EhnMIIdataShift);
402 static void __mdio_write(struct net_device *dev, int phy_id, int reg, int val)
404 struct sis190_private *tp = netdev_priv(dev);
406 mdio_write(tp->mmio_addr, phy_id, reg, val);
409 static int __mdio_read(struct net_device *dev, int phy_id, int reg)
411 struct sis190_private *tp = netdev_priv(dev);
413 return mdio_read(tp->mmio_addr, phy_id, reg);
416 static u16 mdio_read_latched(void __iomem *ioaddr, int phy_id, int reg)
418 mdio_read(ioaddr, phy_id, reg);
419 return mdio_read(ioaddr, phy_id, reg);
/*
 * Read one 16-bit word from the EEPROM at word offset @reg.
 * Returns 0 when no EEPROM is present (ROMControl bit 1 clear) and
 * 0xffff when the read request never completes within ~200 ms.
 */
static u16 __devinit sis190_read_eeprom(void __iomem *ioaddr, u32 reg)
{
	u16 data = 0xffff;
	unsigned int i;

	/* Bail out early if the board has no EEPROM attached. */
	if (!(SIS_R32(ROMControl) & 0x0002))
		return 0;

	SIS_W32(ROMInterface, EEREQ | EEROP | (reg << 10));

	/* EEREQ clears once the hardware has completed the read. */
	for (i = 0; i < 200; i++) {
		if (!(SIS_R32(ROMInterface) & EEREQ)) {
			data = (SIS_R32(ROMInterface) & 0xffff0000) >> 16;
			break;
		}
		msleep(1);
	}

	return data;
}
/*
 * Mask all interrupt sources, ack anything pending, and flush the
 * posted writes so the device is quiescent when this returns.
 */
static void sis190_irq_mask_and_ack(void __iomem *ioaddr)
{
	SIS_W32(IntrMask, 0x00);
	SIS_W32(IntrStatus, 0xffffffff);	/* write-1-to-clear */
	SIS_PCI_COMMIT();
}
/* Quiesce the chip: halt both DMA engines, then mask/ack interrupts. */
static void sis190_asic_down(void __iomem *ioaddr)
{
	/* Stop the chip's Tx and Rx DMA processes. */

	SIS_W32(TxControl, 0x1a00);
	SIS_W32(RxControl, 0x1a00);

	sis190_irq_mask_and_ack(ioaddr);
}
/* Flag @desc as the final ring entry so the NIC wraps back to entry 0. */
static void sis190_mark_as_last_descriptor(struct RxDesc *desc)
{
	desc->size |= cpu_to_le32(RingEnd);
}
/*
 * Hand an Rx descriptor (back) to the NIC.  The size/PSize fields are
 * rewritten first; the wmb() guarantees they are visible before the
 * OWNbit transfer makes the descriptor live for the hardware.
 */
static inline void sis190_give_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
{
	u32 eor = le32_to_cpu(desc->size) & RingEnd;	/* preserve wrap flag */

	desc->PSize = 0x0;
	desc->size = cpu_to_le32((rx_buf_sz & RX_BUF_MASK) | eor);
	wmb();
	desc->status = cpu_to_le32(OWNbit | INTbit);
}
/* Publish a freshly mapped DMA buffer, then give the descriptor away. */
static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
				      u32 rx_buf_sz)
{
	desc->addr = cpu_to_le32(mapping);
	sis190_give_to_asic(desc, rx_buf_sz);
}
/*
 * Park an Rx descriptor so the NIC will never DMA into it: zero size,
 * poison address, and finally (after the wmb) clear OWNbit.
 */
static inline void sis190_make_unusable_by_asic(struct RxDesc *desc)
{
	desc->PSize = 0x0;
	desc->addr = cpu_to_le32(0xdeadbeef);	/* poison: catch stray DMA */
	desc->size &= cpu_to_le32(RingEnd);	/* keep only the wrap flag */
	wmb();
	desc->status = 0x0;
}
/*
 * Allocate and DMA-map one Rx buffer for @desc.
 * On success the descriptor is armed for the NIC; on allocation failure
 * it is parked via sis190_make_unusable_by_asic and NULL is returned.
 */
static struct sk_buff *sis190_alloc_rx_skb(struct sis190_private *tp,
					   struct RxDesc *desc)
{
	u32 rx_buf_sz = tp->rx_buf_sz;
	struct sk_buff *skb;

	skb = netdev_alloc_skb(tp->dev, rx_buf_sz);
	if (likely(skb)) {
		dma_addr_t mapping;

		mapping = pci_map_single(tp->pci_dev, skb->data, tp->rx_buf_sz,
					 PCI_DMA_FROMDEVICE);
		sis190_map_to_asic(desc, mapping, rx_buf_sz);
	} else
		sis190_make_unusable_by_asic(desc);

	return skb;
}
510 static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
511 u32 start, u32 end)
513 u32 cur;
515 for (cur = start; cur < end; cur++) {
516 unsigned int i = cur % NUM_RX_DESC;
518 if (tp->Rx_skbuff[i])
519 continue;
521 tp->Rx_skbuff[i] = sis190_alloc_rx_skb(tp, tp->RxDescRing + i);
523 if (!tp->Rx_skbuff[i])
524 break;
526 return cur - start;
/*
 * For small packets (< rx_copybreak), copy the payload into a fresh skb
 * so the original DMA buffer can stay in the ring.  On success *sk_buff
 * is replaced with the copy and true is returned; the caller then
 * re-arms the original buffer.  Returns false (leaving *sk_buff alone)
 * for large packets or when allocation fails.
 */
static bool sis190_try_rx_copy(struct sis190_private *tp,
			       struct sk_buff **sk_buff, int pkt_size,
			       dma_addr_t addr)
{
	struct sk_buff *skb;
	bool done = false;

	if (pkt_size >= rx_copybreak)
		goto out;

	skb = netdev_alloc_skb(tp->dev, pkt_size + 2);
	if (!skb)
		goto out;

	/* Make the DMA'd data visible to the CPU before copying. */
	pci_dma_sync_single_for_cpu(tp->pci_dev, addr, tp->rx_buf_sz,
				    PCI_DMA_FROMDEVICE);
	skb_reserve(skb, 2);	/* align the IP header */
	skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size);
	*sk_buff = skb;
	done = true;
out:
	return done;
}
553 static inline int sis190_rx_pkt_err(u32 status, struct net_device_stats *stats)
555 #define ErrMask (OVRUN | SHORT | LIMIT | MIIER | NIBON | COLON | ABORT)
557 if ((status & CRCOK) && !(status & ErrMask))
558 return 0;
560 if (!(status & CRCOK))
561 stats->rx_crc_errors++;
562 else if (status & OVRUN)
563 stats->rx_over_errors++;
564 else if (status & (SHORT | LIMIT))
565 stats->rx_length_errors++;
566 else if (status & (MIIER | NIBON | COLON))
567 stats->rx_frame_errors++;
569 stats->rx_errors++;
570 return -1;
/*
 * Service the Rx ring: walk completed descriptors starting at cur_rx,
 * deliver good frames to the stack and recycle or replace their
 * buffers, then refill the ring.  Returns the number of descriptors
 * processed.  Runs in hard-irq context (no NAPI in this driver).
 */
static int sis190_rx_interrupt(struct net_device *dev,
			       struct sis190_private *tp, void __iomem *ioaddr)
{
	struct net_device_stats *stats = &dev->stats;
	u32 rx_left, cur_rx = tp->cur_rx;
	u32 delta, count;

	/* Number of ring slots the hardware may have completed. */
	rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
	rx_left = sis190_rx_quota(rx_left, (u32) dev->quota);

	for (; rx_left > 0; rx_left--, cur_rx++) {
		unsigned int entry = cur_rx % NUM_RX_DESC;
		struct RxDesc *desc = tp->RxDescRing + entry;
		u32 status;

		/* Still owned by the NIC: nothing more to reap. */
		if (le32_to_cpu(desc->status) & OWNbit)
			break;

		status = le32_to_cpu(desc->PSize);

		// net_intr(tp, KERN_INFO "%s: Rx PSize = %08x.\n", dev->name,
		//	 status);

		if (sis190_rx_pkt_err(status, stats) < 0)
			/* Bad frame: hand the same buffer straight back. */
			sis190_give_to_asic(desc, tp->rx_buf_sz);
		else {
			struct sk_buff *skb = tp->Rx_skbuff[entry];
			dma_addr_t addr = le32_to_cpu(desc->addr);
			int pkt_size = (status & RxSizeMask) - 4; /* strip FCS */
			struct pci_dev *pdev = tp->pci_dev;

			if (unlikely(pkt_size > tp->rx_buf_sz)) {
				/* Oversized/fragmented frame: drop it. */
				net_intr(tp, KERN_INFO
					 "%s: (frag) status = %08x.\n",
					 dev->name, status);
				stats->rx_dropped++;
				stats->rx_length_errors++;
				sis190_give_to_asic(desc, tp->rx_buf_sz);
				continue;
			}

			if (sis190_try_rx_copy(tp, &skb, pkt_size, addr)) {
				/* Copied out: re-arm the original buffer. */
				pci_dma_sync_single_for_device(pdev, addr,
					tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
				sis190_give_to_asic(desc, tp->rx_buf_sz);
			} else {
				/* Pass the DMA buffer up; slot refilled later. */
				pci_unmap_single(pdev, addr, tp->rx_buf_sz,
						 PCI_DMA_FROMDEVICE);
				tp->Rx_skbuff[entry] = NULL;
				sis190_make_unusable_by_asic(desc);
			}

			skb_put(skb, pkt_size);
			skb->protocol = eth_type_trans(skb, dev);

			sis190_rx_skb(skb);

			stats->rx_packets++;
			stats->rx_bytes += pkt_size;
			if ((status & BCAST) == MCAST)
				stats->multicast++;
		}
	}
	count = cur_rx - tp->cur_rx;
	tp->cur_rx = cur_rx;

	/* Replenish any slots emptied above. */
	delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
	if (!delta && count && netif_msg_intr(tp))
		printk(KERN_INFO "%s: no Rx buffer allocated.\n", dev->name);
	tp->dirty_rx += delta;

	if (((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx) && netif_msg_intr(tp))
		printk(KERN_EMERG "%s: Rx buffers exhausted.\n", dev->name);

	return count;
}
651 static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
652 struct TxDesc *desc)
654 unsigned int len;
656 len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
658 pci_unmap_single(pdev, le32_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
660 memset(desc, 0x00, sizeof(*desc));
663 static inline int sis190_tx_pkt_err(u32 status, struct net_device_stats *stats)
665 #define TxErrMask (WND | TABRT | FIFO | LINK)
667 if (!unlikely(status & TxErrMask))
668 return 0;
670 if (status & WND)
671 stats->tx_window_errors++;
672 if (status & TABRT)
673 stats->tx_aborted_errors++;
674 if (status & FIFO)
675 stats->tx_fifo_errors++;
676 if (status & LINK)
677 stats->tx_carrier_errors++;
679 stats->tx_errors++;
681 return -1;
/*
 * Reclaim completed Tx descriptors: update stats, unmap and free the
 * skbs, and restart the queue if it had stalled on a full ring.
 * Pairs with the smp_wmb()/smp_rmb() in sis190_start_xmit.
 */
static void sis190_tx_interrupt(struct net_device *dev,
				struct sis190_private *tp, void __iomem *ioaddr)
{
	struct net_device_stats *stats = &dev->stats;
	u32 pending, dirty_tx = tp->dirty_tx;
	/*
	 * It would not be needed if queueing was allowed to be enabled
	 * again too early (hint: think preempt and unclocked smp systems).
	 */
	unsigned int queue_stopped;

	smp_rmb();	/* read cur_tx after the xmit path's smp_wmb() */
	pending = tp->cur_tx - dirty_tx;
	queue_stopped = (pending == NUM_TX_DESC);

	for (; pending; pending--, dirty_tx++) {
		unsigned int entry = dirty_tx % NUM_TX_DESC;
		struct TxDesc *txd = tp->TxDescRing + entry;
		u32 status = le32_to_cpu(txd->status);
		struct sk_buff *skb;

		/* Still owned by the NIC: stop reaping here. */
		if (status & OWNbit)
			break;

		skb = tp->Tx_skbuff[entry];

		if (likely(sis190_tx_pkt_err(status, stats) == 0)) {
			stats->tx_packets++;
			stats->tx_bytes += skb->len;
			stats->collisions += ((status & ColCountMask) - 1);
		}

		sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
		tp->Tx_skbuff[entry] = NULL;
		dev_kfree_skb_irq(skb);
	}

	if (tp->dirty_tx != dirty_tx) {
		tp->dirty_tx = dirty_tx;
		smp_wmb();	/* publish dirty_tx before waking the queue */
		if (queue_stopped)
			netif_wake_queue(dev);
	}
}
730 * The interrupt handler does all of the Rx thread work and cleans up after
731 * the Tx thread.
733 static irqreturn_t sis190_interrupt(int irq, void *__dev)
735 struct net_device *dev = __dev;
736 struct sis190_private *tp = netdev_priv(dev);
737 void __iomem *ioaddr = tp->mmio_addr;
738 unsigned int handled = 0;
739 u32 status;
741 status = SIS_R32(IntrStatus);
743 if ((status == 0xffffffff) || !status)
744 goto out;
746 handled = 1;
748 if (unlikely(!netif_running(dev))) {
749 sis190_asic_down(ioaddr);
750 goto out;
753 SIS_W32(IntrStatus, status);
755 // net_intr(tp, KERN_INFO "%s: status = %08x.\n", dev->name, status);
757 if (status & LinkChange) {
758 net_intr(tp, KERN_INFO "%s: link change.\n", dev->name);
759 schedule_work(&tp->phy_task);
762 if (status & RxQInt)
763 sis190_rx_interrupt(dev, tp, ioaddr);
765 if (status & TxQ0Int)
766 sis190_tx_interrupt(dev, tp, ioaddr);
767 out:
768 return IRQ_RETVAL(handled);
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling entry point for netconsole and friends: run the interrupt
 * handler with the device irq disabled so it cannot race itself.
 */
static void sis190_netpoll(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	disable_irq(pdev->irq);
	sis190_interrupt(pdev->irq, dev);
	enable_irq(pdev->irq);
}
#endif
783 static void sis190_free_rx_skb(struct sis190_private *tp,
784 struct sk_buff **sk_buff, struct RxDesc *desc)
786 struct pci_dev *pdev = tp->pci_dev;
788 pci_unmap_single(pdev, le32_to_cpu(desc->addr), tp->rx_buf_sz,
789 PCI_DMA_FROMDEVICE);
790 dev_kfree_skb(*sk_buff);
791 *sk_buff = NULL;
792 sis190_make_unusable_by_asic(desc);
795 static void sis190_rx_clear(struct sis190_private *tp)
797 unsigned int i;
799 for (i = 0; i < NUM_RX_DESC; i++) {
800 if (!tp->Rx_skbuff[i])
801 continue;
802 sis190_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescRing + i);
/* Reset all four ring cursors to the start of their rings. */
static void sis190_init_ring_indexes(struct sis190_private *tp)
{
	tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
}
811 static int sis190_init_ring(struct net_device *dev)
813 struct sis190_private *tp = netdev_priv(dev);
815 sis190_init_ring_indexes(tp);
817 memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
818 memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
820 if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
821 goto err_rx_clear;
823 sis190_mark_as_last_descriptor(tp->RxDescRing + NUM_RX_DESC - 1);
825 return 0;
827 err_rx_clear:
828 sis190_rx_clear(tp);
829 return -ENOMEM;
/*
 * Program the Rx filter from dev->flags and the multicast list:
 * promiscuous -> accept everything; too many groups or ALLMULTI ->
 * accept all multicast; otherwise build the 64-bit CRC hash table.
 */
static void sis190_set_rx_mode(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long flags;
	u32 mc_filter[2];	/* Multicast hash filter */
	u16 rx_mode;

	if (dev->flags & IFF_PROMISC) {
		rx_mode =
			AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
			AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((dev->mc_count > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter perfectly -- accept all multicasts. */
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else {
		struct dev_mc_list *mclist;
		unsigned int i;

		rx_mode = AcceptBroadcast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0;
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			/* Hash on the top 6 bits of the Ethernet CRC. */
			int bit_nr =
				ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
			rx_mode |= AcceptMulticast;
		}
	}

	spin_lock_irqsave(&tp->lock, flags);

	SIS_W16(RxMacControl, rx_mode | 0x2);
	SIS_W32(RxHashTable, mc_filter[0]);
	SIS_W32(RxHashTable + 4, mc_filter[1]);

	spin_unlock_irqrestore(&tp->lock, flags);
}
/*
 * Soft-reset the chip: pulse the reset bit (flushing the posted write
 * so the pulse actually reaches the device), then quiesce it.
 */
static void sis190_soft_reset(void __iomem *ioaddr)
{
	SIS_W32(IntrControl, 0x8000);
	SIS_PCI_COMMIT();
	SIS_W32(IntrControl, 0x0);
	sis190_asic_down(ioaddr);
}
/*
 * Bring the hardware to an operational state: reset, point it at the
 * descriptor rings, clear MAC/WOL state, program the Rx filter, unmask
 * interrupts and finally enable both DMA engines.
 */
static void sis190_hw_start(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	sis190_soft_reset(ioaddr);

	SIS_W32(TxDescStartAddr, tp->tx_dma);
	SIS_W32(RxDescStartAddr, tp->rx_dma);

	SIS_W32(IntrStatus, 0xffffffff);	/* ack anything pending */
	SIS_W32(IntrMask, 0x0);
	SIS_W32(GMIIControl, 0x0);
	SIS_W32(TxMacControl, 0x60);
	SIS_W16(RxMacControl, 0x02);
	SIS_W32(RxHashTable, 0x0);
	SIS_W32(0x6c, 0x0);			/* undocumented register */
	SIS_W32(RxWolCtrl, 0x0);
	SIS_W32(RxWolData, 0x0);

	SIS_PCI_COMMIT();

	sis190_set_rx_mode(dev);

	/* Enable all known interrupts by setting the interrupt mask. */
	SIS_W32(IntrMask, sis190_intr_mask);

	SIS_W32(TxControl, 0x1a00 | CmdTxEnb);
	SIS_W32(RxControl, 0x1a1d);

	netif_start_queue(dev);
}
/*
 * Deferred PHY/link handler (scheduled from the irq handler and the
 * watchdog timer).  Waits out a PHY reset, retries while autoneg is
 * incomplete, and on completion programs StationControl for the
 * negotiated speed/duplex.  Runs under rtnl_lock so it can sleep and
 * serializes against ifup/ifdown.
 */
static void sis190_phy_task(struct work_struct *work)
{
	struct sis190_private *tp =
		container_of(work, struct sis190_private, phy_task);
	struct net_device *dev = tp->dev;
	void __iomem *ioaddr = tp->mmio_addr;
	int phy_id = tp->mii_if.phy_id;
	u16 val;

	rtnl_lock();

	if (!netif_running(dev))
		goto out_unlock;

	val = mdio_read(ioaddr, phy_id, MII_BMCR);
	if (val & BMCR_RESET) {
		/* PHY still resetting: poll again shortly. */
		// FIXME: needlessly high ? -- FR 02/07/2005
		mod_timer(&tp->timer, jiffies + HZ/10);
	} else if (!(mdio_read_latched(ioaddr, phy_id, MII_BMSR) &
		     BMSR_ANEGCOMPLETE)) {
		netif_carrier_off(dev);
		net_link(tp, KERN_WARNING "%s: auto-negotiating...\n",
			 dev->name);
		mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);
	} else {
		/* Rejoice ! Autoneg finished: derive the link mode. */
		struct {
			int val;	/* LPA/resolved ability bit */
			u32 ctl;	/* StationControl speed/duplex bits */
			const char *msg;
		} reg31[] = {
			{ LPA_1000FULL, 0x07000c00 | 0x00001000,
				"1000 Mbps Full Duplex" },
			{ LPA_1000HALF, 0x07000c00,
				"1000 Mbps Half Duplex" },
			{ LPA_100FULL, 0x04000800 | 0x00001000,
				"100 Mbps Full Duplex" },
			{ LPA_100HALF, 0x04000800,
				"100 Mbps Half Duplex" },
			{ LPA_10FULL, 0x04000400 | 0x00001000,
				"10 Mbps Full Duplex" },
			{ LPA_10HALF, 0x04000400,
				"10 Mbps Half Duplex" },
			{ 0, 0x04000400, "unknown" }	/* catch-all */
		}, *p = NULL;
		u16 adv, autoexp, gigadv, gigrec;

		val = mdio_read(ioaddr, phy_id, 0x1f);
		net_link(tp, KERN_INFO "%s: mii ext = %04x.\n", dev->name, val);

		val = mdio_read(ioaddr, phy_id, MII_LPA);
		adv = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
		autoexp = mdio_read(ioaddr, phy_id, MII_EXPANSION);
		net_link(tp, KERN_INFO "%s: mii lpa=%04x adv=%04x exp=%04x.\n",
			 dev->name, val, adv, autoexp);

		if (val & LPA_NPAGE && autoexp & EXPANSION_NWAY) {
			/* check for gigabit speed */
			gigadv = mdio_read(ioaddr, phy_id, MII_CTRL1000);
			gigrec = mdio_read(ioaddr, phy_id, MII_STAT1000);
			/* STAT1000 abilities sit 2 bits above CTRL1000's. */
			val = (gigadv & (gigrec >> 2));
			if (val & ADVERTISE_1000FULL)
				p = reg31;
			else if (val & ADVERTISE_1000HALF)
				p = reg31 + 1;
		}
		if (!p) {
			/* Resolve 10/100 from the common ability mask. */
			val &= adv;

			for (p = reg31; p->val; p++) {
				if ((val & p->val) == p->val)
					break;
			}
		}

		/* Keep the non-speed bits of StationControl intact. */
		p->ctl |= SIS_R32(StationControl) & ~0x0f001c00;

		if ((tp->features & F_HAS_RGMII) &&
		    (tp->features & F_PHY_BCM5461)) {
			// Set Tx Delay in RGMII mode.
			mdio_write(ioaddr, phy_id, 0x18, 0xf1c7);
			udelay(200);
			mdio_write(ioaddr, phy_id, 0x1c, 0x8c00);
			p->ctl |= 0x03000000;
		}

		SIS_W32(StationControl, p->ctl);

		if (tp->features & F_HAS_RGMII) {
			/* Pulse the RGMII delay setting. */
			SIS_W32(RGDelay, 0x0441);
			SIS_W32(RGDelay, 0x0440);
		}

		net_link(tp, KERN_INFO "%s: link on %s mode.\n", dev->name,
			 p->msg);
		netif_carrier_on(dev);
	}

out_unlock:
	rtnl_unlock();
}
1017 static void sis190_phy_timer(unsigned long __opaque)
1019 struct net_device *dev = (struct net_device *)__opaque;
1020 struct sis190_private *tp = netdev_priv(dev);
1022 if (likely(netif_running(dev)))
1023 schedule_work(&tp->phy_task);
1026 static inline void sis190_delete_timer(struct net_device *dev)
1028 struct sis190_private *tp = netdev_priv(dev);
1030 del_timer_sync(&tp->timer);
/* Arm the PHY watchdog to fire SIS190_PHY_TIMEOUT from now. */
static inline void sis190_request_timer(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct timer_list *timer = &tp->timer;

	init_timer(timer);
	timer->expires = jiffies + SIS190_PHY_TIMEOUT;
	timer->data = (unsigned long)dev;	/* recovered in the handler */
	timer->function = sis190_phy_timer;
	add_timer(timer);
}
1045 static void sis190_set_rxbufsize(struct sis190_private *tp,
1046 struct net_device *dev)
1048 unsigned int mtu = dev->mtu;
1050 tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
1051 /* RxDesc->size has a licence to kill the lower bits */
1052 if (tp->rx_buf_sz & 0x07) {
1053 tp->rx_buf_sz += 8;
1054 tp->rx_buf_sz &= RX_BUF_MASK;
/*
 * ndo_open: allocate both descriptor rings, populate the Rx ring, arm
 * the PHY watchdog, grab the irq and start the hardware.  Uses the
 * classic goto-unwind scheme so each failure releases exactly what was
 * acquired before it.
 */
static int sis190_open(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;
	int rc = -ENOMEM;

	sis190_set_rxbufsize(tp, dev);

	/*
	 * Rx and Tx descriptors need 256 bytes alignment.
	 * pci_alloc_consistent() guarantees a stronger alignment.
	 */
	tp->TxDescRing = pci_alloc_consistent(pdev, TX_RING_BYTES, &tp->tx_dma);
	if (!tp->TxDescRing)
		goto out;

	tp->RxDescRing = pci_alloc_consistent(pdev, RX_RING_BYTES, &tp->rx_dma);
	if (!tp->RxDescRing)
		goto err_free_tx_0;

	rc = sis190_init_ring(dev);
	if (rc < 0)
		goto err_free_rx_1;

	sis190_request_timer(dev);

	rc = request_irq(dev->irq, sis190_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc < 0)
		goto err_release_timer_2;

	sis190_hw_start(dev);
out:
	return rc;

err_release_timer_2:
	sis190_delete_timer(dev);
	sis190_rx_clear(tp);
err_free_rx_1:
	pci_free_consistent(tp->pci_dev, RX_RING_BYTES, tp->RxDescRing,
			    tp->rx_dma);
err_free_tx_0:
	pci_free_consistent(tp->pci_dev, TX_RING_BYTES, tp->TxDescRing,
			    tp->tx_dma);
	goto out;
}
/*
 * Drop every in-flight Tx buffer (used on shutdown/reset): unmap and
 * free each queued skb, counting them as dropped, then rewind the Tx
 * ring cursors.
 */
static void sis190_tx_clear(struct sis190_private *tp)
{
	unsigned int i;

	for (i = 0; i < NUM_TX_DESC; i++) {
		struct sk_buff *skb = tp->Tx_skbuff[i];

		if (!skb)
			continue;

		sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i);
		tp->Tx_skbuff[i] = NULL;
		dev_kfree_skb(skb);

		tp->dev->stats.tx_dropped++;
	}
	tp->cur_tx = tp->dirty_tx = 0;
}
/*
 * Take the interface down: stop the watchdog and the queue, then
 * repeatedly quiesce the chip until IntrMask reads back zero — the
 * loop guards against a concurrent irq handler re-enabling things
 * between our asic_down and its completion.  Finally drop all buffers.
 */
static void sis190_down(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int poll_locked = 0;

	sis190_delete_timer(dev);

	netif_stop_queue(dev);

	do {
		spin_lock_irq(&tp->lock);

		sis190_asic_down(ioaddr);

		spin_unlock_irq(&tp->lock);

		/* Wait for any in-flight irq handler to finish. */
		synchronize_irq(dev->irq);

		if (!poll_locked)
			poll_locked++;

		synchronize_sched();

	} while (SIS_R32(IntrMask));

	sis190_tx_clear(tp);
	sis190_rx_clear(tp);
}
1153 static int sis190_close(struct net_device *dev)
1155 struct sis190_private *tp = netdev_priv(dev);
1156 struct pci_dev *pdev = tp->pci_dev;
1158 sis190_down(dev);
1160 free_irq(dev->irq, dev);
1162 pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
1163 pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);
1165 tp->TxDescRing = NULL;
1166 tp->RxDescRing = NULL;
1168 return 0;
/*
 * ndo_start_xmit: queue one skb on the Tx ring and kick the DMA engine.
 * Runt frames are padded to ETH_ZLEN first.  The wmb() orders the
 * descriptor body before the OWNbit handoff; the smp_wmb()/smp_rmb()
 * pair races safely with sis190_tx_interrupt over cur_tx/dirty_tx.
 */
static int sis190_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u32 len, entry, dirty_tx;
	struct TxDesc *desc;
	dma_addr_t mapping;

	if (unlikely(skb->len < ETH_ZLEN)) {
		/* skb_padto frees the skb itself on failure. */
		if (skb_padto(skb, ETH_ZLEN)) {
			dev->stats.tx_dropped++;
			goto out;
		}
		len = ETH_ZLEN;
	} else {
		len = skb->len;
	}

	entry = tp->cur_tx % NUM_TX_DESC;
	desc = tp->TxDescRing + entry;

	/* Should be impossible: the queue is stopped when the ring fills. */
	if (unlikely(le32_to_cpu(desc->status) & OWNbit)) {
		netif_stop_queue(dev);
		net_tx_err(tp, KERN_ERR PFX
			   "%s: BUG! Tx Ring full when queue awake!\n",
			   dev->name);
		return NETDEV_TX_BUSY;
	}

	mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);

	tp->Tx_skbuff[entry] = skb;

	desc->PSize = cpu_to_le32(len);
	desc->addr = cpu_to_le32(mapping);

	desc->size = cpu_to_le32(len);
	if (entry == (NUM_TX_DESC - 1))
		desc->size |= cpu_to_le32(RingEnd);

	wmb();	/* descriptor body must be visible before ownership flips */

	desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit);

	tp->cur_tx++;

	smp_wmb();	/* publish cur_tx before the doorbell / reclaim path */

	SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb);

	dirty_tx = tp->dirty_tx;
	if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) {
		netif_stop_queue(dev);
		smp_rmb();
		/* Reclaim may have progressed meanwhile: re-check. */
		if (dirty_tx != tp->dirty_tx)
			netif_wake_queue(dev);
	}
out:
	return NETDEV_TX_OK;
}
1232 static void sis190_free_phy(struct list_head *first_phy)
1234 struct sis190_phy *cur, *next;
1236 list_for_each_entry_safe(cur, next, first_phy, list) {
1237 kfree(cur);
/**
 * sis190_default_phy - Select default PHY for sis190 mac.
 * @dev: the net device to probe for
 *
 * Select the first detected PHY with a link as the default.
 * If none has a link, select the PHY whose type is HOME as the default.
 * If no HOME PHY exists, select a LAN one.
 */
static u16 sis190_default_phy(struct net_device *dev)
{
	struct sis190_phy *phy, *phy_home, *phy_default, *phy_lan;
	struct sis190_private *tp = netdev_priv(dev);
	struct mii_if_info *mii_if = &tp->mii_if;
	void __iomem *ioaddr = tp->mmio_addr;
	u16 status;

	phy_home = phy_default = phy_lan = NULL;

	/* Scan all discovered PHYs: keep the first linked, known-type one
	 * as default; every other PHY is isolated (BMCR_ISOLATE) while we
	 * remember HOME/LAN candidates as fallbacks. */
	list_for_each_entry(phy, &tp->first_phy, list) {
		status = mdio_read_latched(ioaddr, phy->phy_id, MII_BMSR);

		// Link ON & Not select default PHY & not ghost PHY.
		if ((status & BMSR_LSTATUS) &&
		    !phy_default &&
		    (phy->type != UNKNOWN)) {
			phy_default = phy;
		} else {
			status = mdio_read(ioaddr, phy->phy_id, MII_BMCR);
			mdio_write(ioaddr, phy->phy_id, MII_BMCR,
				   status | BMCR_ANENABLE | BMCR_ISOLATE);
			if (phy->type == HOME)
				phy_home = phy;
			else if (phy->type == LAN)
				phy_lan = phy;
		}
	}

	/* No linked PHY: fall back to HOME, then LAN, then whatever was
	 * detected first. */
	if (!phy_default) {
		if (phy_home)
			phy_default = phy_home;
		else if (phy_lan)
			phy_default = phy_lan;
		else
			phy_default = list_first_entry(&tp->first_phy,
						 struct sis190_phy, list);
	}

	if (mii_if->phy_id != phy_default->phy_id) {
		mii_if->phy_id = phy_default->phy_id;
		net_probe(tp, KERN_INFO
		       "%s: Using transceiver at address %d as default.\n",
		       pci_name(tp->pci_dev), mii_if->phy_id);
	}

	/* Un-isolate the chosen PHY (it may have been isolated above or by
	 * an earlier run) and return its latched BMSR status. */
	status = mdio_read(ioaddr, mii_if->phy_id, MII_BMCR);
	status &= (~BMCR_ISOLATE);

	mdio_write(ioaddr, mii_if->phy_id, MII_BMCR, status);
	status = mdio_read_latched(ioaddr, mii_if->phy_id, MII_BMSR);

	return status;
}
1304 static void sis190_init_phy(struct net_device *dev, struct sis190_private *tp,
1305 struct sis190_phy *phy, unsigned int phy_id,
1306 u16 mii_status)
1308 void __iomem *ioaddr = tp->mmio_addr;
1309 struct mii_chip_info *p;
1311 INIT_LIST_HEAD(&phy->list);
1312 phy->status = mii_status;
1313 phy->phy_id = phy_id;
1315 phy->id[0] = mdio_read(ioaddr, phy_id, MII_PHYSID1);
1316 phy->id[1] = mdio_read(ioaddr, phy_id, MII_PHYSID2);
1318 for (p = mii_chip_table; p->type; p++) {
1319 if ((p->id[0] == phy->id[0]) &&
1320 (p->id[1] == (phy->id[1] & 0xfff0))) {
1321 break;
1325 if (p->id[1]) {
1326 phy->type = (p->type == MIX) ?
1327 ((mii_status & (BMSR_100FULL | BMSR_100HALF)) ?
1328 LAN : HOME) : p->type;
1329 tp->features |= p->feature;
1330 net_probe(tp, KERN_INFO "%s: %s transceiver at address %d.\n",
1331 pci_name(tp->pci_dev), p->name, phy_id);
1332 } else {
1333 phy->type = UNKNOWN;
1334 net_probe(tp, KERN_INFO
1335 "%s: unknown PHY 0x%x:0x%x transceiver at address %d\n",
1336 pci_name(tp->pci_dev),
1337 phy->id[0], (phy->id[1] & 0xfff0), phy_id);
1341 static void sis190_mii_probe_88e1111_fixup(struct sis190_private *tp)
1343 if (tp->features & F_PHY_88E1111) {
1344 void __iomem *ioaddr = tp->mmio_addr;
1345 int phy_id = tp->mii_if.phy_id;
1346 u16 reg[2][2] = {
1347 { 0x808b, 0x0ce1 },
1348 { 0x808f, 0x0c60 }
1349 }, *p;
1351 p = (tp->features & F_HAS_RGMII) ? reg[0] : reg[1];
1353 mdio_write(ioaddr, phy_id, 0x1b, p[0]);
1354 udelay(200);
1355 mdio_write(ioaddr, phy_id, 0x14, p[1]);
1356 udelay(200);
/**
 * sis190_mii_probe - Probe MII PHY for sis190
 * @dev: the net device to probe for
 *
 * Search all 32 possible MII PHY addresses.
 * Identify and set the current PHY if one is found,
 * and return an error if none is found.
 */
1368 static int __devinit sis190_mii_probe(struct net_device *dev)
1370 struct sis190_private *tp = netdev_priv(dev);
1371 struct mii_if_info *mii_if = &tp->mii_if;
1372 void __iomem *ioaddr = tp->mmio_addr;
1373 int phy_id;
1374 int rc = 0;
1376 INIT_LIST_HEAD(&tp->first_phy);
1378 for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
1379 struct sis190_phy *phy;
1380 u16 status;
1382 status = mdio_read_latched(ioaddr, phy_id, MII_BMSR);
1384 // Try next mii if the current one is not accessible.
1385 if (status == 0xffff || status == 0x0000)
1386 continue;
1388 phy = kmalloc(sizeof(*phy), GFP_KERNEL);
1389 if (!phy) {
1390 sis190_free_phy(&tp->first_phy);
1391 rc = -ENOMEM;
1392 goto out;
1395 sis190_init_phy(dev, tp, phy, phy_id, status);
1397 list_add(&tp->first_phy, &phy->list);
1400 if (list_empty(&tp->first_phy)) {
1401 net_probe(tp, KERN_INFO "%s: No MII transceivers found!\n",
1402 pci_name(tp->pci_dev));
1403 rc = -EIO;
1404 goto out;
1407 /* Select default PHY for mac */
1408 sis190_default_phy(dev);
1410 sis190_mii_probe_88e1111_fixup(tp);
1412 mii_if->dev = dev;
1413 mii_if->mdio_read = __mdio_read;
1414 mii_if->mdio_write = __mdio_write;
1415 mii_if->phy_id_mask = PHY_ID_ANY;
1416 mii_if->reg_num_mask = MII_REG_ANY;
1417 out:
1418 return rc;
/* Free every PHY descriptor collected by sis190_mii_probe(). */
static void sis190_mii_remove(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);

	sis190_free_phy(&tp->first_phy);
}
/*
 * sis190_release_board - undo sis190_init_board() in reverse order:
 * unmap MMIO, release PCI regions, disable the device, free the netdev.
 */
static void sis190_release_board(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct sis190_private *tp = netdev_priv(dev);

	iounmap(tp->mmio_addr);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	free_netdev(dev);
}
/*
 * sis190_init_board - allocate the netdev and bring the PCI device up to
 * the point where its registers are mapped and the chip is quiesced.
 *
 * Returns the new net_device on success or an ERR_PTR() error code; all
 * partially acquired resources are released on the goto-ladder error
 * paths.  Caller (sis190_init_one) completes the registration.
 */
static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
{
	struct sis190_private *tp;
	struct net_device *dev;
	void __iomem *ioaddr;
	int rc;

	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		net_drv(&debug, KERN_ERR PFX "unable to alloc new ethernet\n");
		rc = -ENOMEM;
		goto err_out_0;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->dev = dev;
	tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);

	rc = pci_enable_device(pdev);
	if (rc < 0) {
		net_probe(tp, KERN_ERR "%s: enable failure\n", pci_name(pdev));
		goto err_free_dev_1;
	}

	rc = -ENODEV;

	/* BAR 0 must be a memory resource large enough for the register
	 * block. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		net_probe(tp, KERN_ERR "%s: region #0 is no MMIO resource.\n",
			  pci_name(pdev));
		goto err_pci_disable_2;
	}
	if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) {
		net_probe(tp, KERN_ERR "%s: invalid PCI region size(s).\n",
			  pci_name(pdev));
		goto err_pci_disable_2;
	}

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc < 0) {
		net_probe(tp, KERN_ERR PFX "%s: could not request regions.\n",
			  pci_name(pdev));
		goto err_pci_disable_2;
	}

	/* The chip only does 32-bit DMA. */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc < 0) {
		net_probe(tp, KERN_ERR "%s: DMA configuration failed.\n",
			  pci_name(pdev));
		goto err_free_res_3;
	}

	pci_set_master(pdev);

	ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE);
	if (!ioaddr) {
		net_probe(tp, KERN_ERR "%s: cannot remap MMIO, aborting\n",
			  pci_name(pdev));
		rc = -EIO;
		goto err_free_res_3;
	}

	tp->pci_dev = pdev;
	tp->mmio_addr = ioaddr;

	/* Silence the chip before anything can fire an interrupt. */
	sis190_irq_mask_and_ack(ioaddr);

	sis190_soft_reset(ioaddr);
out:
	return dev;

err_free_res_3:
	pci_release_regions(pdev);
err_pci_disable_2:
	pci_disable_device(pdev);
err_free_dev_1:
	free_netdev(dev);
err_out_0:
	dev = ERR_PTR(rc);
	goto out;
}
/*
 * sis190_tx_timeout - watchdog handler: stop Tx, drop every pending Tx
 * buffer under the lock, then restart the hardware and wake the queue.
 */
static void sis190_tx_timeout(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u8 tmp8;

	/* Disable Tx, if not already */
	tmp8 = SIS_R8(TxControl);
	if (tmp8 & CmdTxEnb)
		SIS_W8(TxControl, tmp8 & ~CmdTxEnb);

	net_tx_err(tp, KERN_INFO "%s: Transmit timeout, status %08x %08x.\n",
		   dev->name, SIS_R32(TxControl), SIS_R32(TxSts));

	/* Disable interrupts by clearing the interrupt mask. */
	SIS_W32(IntrMask, 0x0000);

	/* Stop a shared interrupt from scavenging while we are. */
	spin_lock_irq(&tp->lock);
	sis190_tx_clear(tp);
	spin_unlock_irq(&tp->lock);

	/* ...and finally, reset everything. */
	sis190_hw_start(dev);

	netif_wake_queue(dev);
}
1551 static void sis190_set_rgmii(struct sis190_private *tp, u8 reg)
1553 tp->features |= (reg & 0x80) ? F_HAS_RGMII : 0;
/*
 * sis190_get_mac_addr_from_eeprom - read the MAC address (and the RGMII
 * flag) from the on-board EEPROM into dev->dev_addr.
 *
 * Returns 0 on success or -EIO when the EEPROM signature reads as
 * all-ones/all-zeroes (absent or unresponsive EEPROM).
 */
static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
						     struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u16 sig;
	int i;

	net_probe(tp, KERN_INFO "%s: Read MAC address from EEPROM\n",
		  pci_name(pdev));

	/* Check to see if there is a sane EEPROM */
	sig = (u16) sis190_read_eeprom(ioaddr, EEPROMSignature);

	if ((sig == 0xffff) || (sig == 0x0000)) {
		net_probe(tp, KERN_INFO "%s: Error EEPROM read %x.\n",
			  pci_name(pdev), sig);
		return -EIO;
	}

	/* Get MAC address from EEPROM */
	for (i = 0; i < MAC_ADDR_LEN / 2; i++) {
		u16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i);

		/* EEPROM words are stored little-endian in dev_addr. */
		((__le16 *)dev->dev_addr)[i] = cpu_to_le16(w);
	}

	sis190_set_rgmii(tp, sis190_read_eeprom(ioaddr, EEPROMInfo));

	return 0;
}
/**
 * sis190_get_mac_addr_from_apc - Get MAC address for SiS96x model
 * @pdev: PCI device
 * @dev: network device to get address for
 *
 * SiS96x models store the MAC address in APC CMOS RAM, which is
 * accessed through the ISA bridge.
 * The MAC address is read into @dev->dev_addr.
 */
static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
						  struct net_device *dev)
{
	/* Device ids of the SiS96x ISA bridges that host the APC RAM. */
	static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *isa_bridge;
	u8 reg, tmp8;
	unsigned int i;

	net_probe(tp, KERN_INFO "%s: Read MAC address from APC.\n",
		  pci_name(pdev));

	for (i = 0; i < ARRAY_SIZE(ids); i++) {
		isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, ids[i], NULL);
		if (isa_bridge)
			break;
	}

	if (!isa_bridge) {
		net_probe(tp, KERN_INFO "%s: Can not find ISA bridge.\n",
			  pci_name(pdev));
		return -EIO;
	}

	/* Enable port 78h & 79h to access APC Registers. */
	pci_read_config_byte(isa_bridge, 0x48, &tmp8);
	reg = (tmp8 & ~0x02);
	pci_write_config_byte(isa_bridge, 0x48, reg);
	udelay(50);
	/* NOTE(review): the value read back here is never used —
	 * presumably a flush/settle read; confirm before removing. */
	pci_read_config_byte(isa_bridge, 0x48, &reg);

	/* APC bytes 0x09..0x0e hold the MAC address: write the index to
	 * port 0x78, read the byte from port 0x79. */
	for (i = 0; i < MAC_ADDR_LEN; i++) {
		outb(0x9 + i, 0x78);
		dev->dev_addr[i] = inb(0x79);
	}

	/* APC byte 0x12 carries the RGMII flag (bit 7). */
	outb(0x12, 0x78);
	reg = inb(0x79);

	sis190_set_rgmii(tp, reg);

	/* Restore the value to ISA Bridge */
	pci_write_config_byte(isa_bridge, 0x48, tmp8);
	pci_dev_put(isa_bridge);

	return 0;
}
/**
 * sis190_init_rxfilter - Initialize the Rx filter
 * @dev: network device to initialize
 *
 * Set the receive filter address to our MAC address
 * and enable packet filtering.
 */
static inline void sis190_init_rxfilter(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u16 ctl;
	int i;

	ctl = SIS_R16(RxMacControl);
	/*
	 * Disable packet filtering before setting filter.
	 * Note: SiS's driver writes 32 bits but RxMacControl is 16 bits
	 * only and followed by RxMacAddr (6 bytes). Strange. -- FR
	 */
	SIS_W16(RxMacControl, ctl & ~0x0f00);

	for (i = 0; i < MAC_ADDR_LEN; i++)
		SIS_W8(RxMacAddr + i, dev->dev_addr[i]);

	/* Re-enable filtering with the original control bits and flush the
	 * posted writes. */
	SIS_W16(RxMacControl, ctl);
	SIS_PCI_COMMIT();
}
1674 static int __devinit sis190_get_mac_addr(struct pci_dev *pdev,
1675 struct net_device *dev)
1677 int rc;
1679 rc = sis190_get_mac_addr_from_eeprom(pdev, dev);
1680 if (rc < 0) {
1681 u8 reg;
1683 pci_read_config_byte(pdev, 0x73, &reg);
1685 if (reg & 0x00000001)
1686 rc = sis190_get_mac_addr_from_apc(pdev, dev);
1688 return rc;
/*
 * sis190_set_speed_auto - advertise 10/100 HD+FD plus 1000FD and
 * (re)start autonegotiation on the current PHY.
 */
static void sis190_set_speed_auto(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	int phy_id = tp->mii_if.phy_id;
	int val;

	net_link(tp, KERN_INFO "%s: Enabling Auto-negotiation.\n", dev->name);

	val = mdio_read(ioaddr, phy_id, MII_ADVERTISE);

	// Enable 10/100 Full/Half Mode, leave MII_ADVERTISE bit4:0
	// unchanged.
	mdio_write(ioaddr, phy_id, MII_ADVERTISE, (val & ADVERTISE_SLCT) |
		   ADVERTISE_100FULL | ADVERTISE_10FULL |
		   ADVERTISE_100HALF | ADVERTISE_10HALF);

	// Enable 1000 Full Mode.
	mdio_write(ioaddr, phy_id, MII_CTRL1000, ADVERTISE_1000FULL);

	// Enable auto-negotiation and restart auto-negotiation.
	mdio_write(ioaddr, phy_id, MII_BMCR,
		   BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET);
}
1716 static int sis190_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1718 struct sis190_private *tp = netdev_priv(dev);
1720 return mii_ethtool_gset(&tp->mii_if, cmd);
1723 static int sis190_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1725 struct sis190_private *tp = netdev_priv(dev);
1727 return mii_ethtool_sset(&tp->mii_if, cmd);
1730 static void sis190_get_drvinfo(struct net_device *dev,
1731 struct ethtool_drvinfo *info)
1733 struct sis190_private *tp = netdev_priv(dev);
1735 strcpy(info->driver, DRV_NAME);
1736 strcpy(info->version, DRV_VERSION);
1737 strcpy(info->bus_info, pci_name(tp->pci_dev));
/* ethtool get_regs_len: the whole mapped register block is dumpable. */
static int sis190_get_regs_len(struct net_device *dev)
{
	return SIS190_REGS_SIZE;
}
/*
 * sis190_get_regs - copy the MMIO register block into the ethtool
 * buffer, clamped to SIS190_REGS_SIZE and snapshotted under the lock.
 */
static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			    void *p)
{
	struct sis190_private *tp = netdev_priv(dev);
	unsigned long flags;

	if (regs->len > SIS190_REGS_SIZE)
		regs->len = SIS190_REGS_SIZE;

	spin_lock_irqsave(&tp->lock, flags);
	memcpy_fromio(p, tp->mmio_addr, regs->len);
	spin_unlock_irqrestore(&tp->lock, flags);
}
1759 static int sis190_nway_reset(struct net_device *dev)
1761 struct sis190_private *tp = netdev_priv(dev);
1763 return mii_nway_restart(&tp->mii_if);
1766 static u32 sis190_get_msglevel(struct net_device *dev)
1768 struct sis190_private *tp = netdev_priv(dev);
1770 return tp->msg_enable;
1773 static void sis190_set_msglevel(struct net_device *dev, u32 value)
1775 struct sis190_private *tp = netdev_priv(dev);
1777 tp->msg_enable = value;
/* ethtool entry points exposed by this driver. */
static const struct ethtool_ops sis190_ethtool_ops = {
	.get_settings	= sis190_get_settings,
	.set_settings	= sis190_set_settings,
	.get_drvinfo	= sis190_get_drvinfo,
	.get_regs_len	= sis190_get_regs_len,
	.get_regs	= sis190_get_regs,
	.get_link	= ethtool_op_get_link,
	.get_msglevel	= sis190_get_msglevel,
	.set_msglevel	= sis190_set_msglevel,
	.nway_reset	= sis190_nway_reset,
};
1792 static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1794 struct sis190_private *tp = netdev_priv(dev);
1796 return !netif_running(dev) ? -EINVAL :
1797 generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL);
/* net_device callbacks wired into the core networking stack. */
static const struct net_device_ops sis190_netdev_ops = {
	.ndo_open		= sis190_open,
	.ndo_stop		= sis190_close,
	.ndo_do_ioctl		= sis190_ioctl,
	.ndo_start_xmit		= sis190_start_xmit,
	.ndo_tx_timeout		= sis190_tx_timeout,
	.ndo_set_multicast_list = sis190_set_rx_mode,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= sis190_netpoll,
#endif
};
/*
 * sis190_init_one - PCI probe entry point.
 *
 * Brings the board up via sis190_init_board(), reads the MAC address,
 * programs the Rx filter, probes the PHYs, registers the netdev and
 * kicks off autonegotiation.  Error paths unwind through the labels at
 * the bottom.  Returns 0 or a negative errno.
 */
static int __devinit sis190_init_one(struct pci_dev *pdev,
				     const struct pci_device_id *ent)
{
	/* Print the banner only for the first probed device. */
	static int printed_version = 0;
	struct sis190_private *tp;
	struct net_device *dev;
	void __iomem *ioaddr;
	int rc;

	if (!printed_version) {
		net_drv(&debug, KERN_INFO SIS190_DRIVER_NAME " loaded.\n");
		printed_version = 1;
	}

	dev = sis190_init_board(pdev);
	if (IS_ERR(dev)) {
		rc = PTR_ERR(dev);
		goto out;
	}

	pci_set_drvdata(pdev, dev);

	tp = netdev_priv(dev);
	ioaddr = tp->mmio_addr;

	rc = sis190_get_mac_addr(pdev, dev);
	if (rc < 0)
		goto err_release_board;

	sis190_init_rxfilter(dev);

	INIT_WORK(&tp->phy_task, sis190_phy_task);

	dev->netdev_ops = &sis190_netdev_ops;

	SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
	dev->irq = pdev->irq;
	/* Placeholder: the device is MMIO-only, no legacy I/O base. */
	dev->base_addr = (unsigned long) 0xdead;
	dev->watchdog_timeo = SIS190_TX_TIMEOUT;

	spin_lock_init(&tp->lock);

	rc = sis190_mii_probe(dev);
	if (rc < 0)
		goto err_release_board;

	rc = register_netdev(dev);
	if (rc < 0)
		goto err_remove_mii;

	net_probe(tp, KERN_INFO "%s: %s at %p (IRQ: %d), %pM\n",
		  pci_name(pdev), sis_chip_info[ent->driver_data].name,
		  ioaddr, dev->irq, dev->dev_addr);

	net_probe(tp, KERN_INFO "%s: %s mode.\n", dev->name,
		  (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII");

	netif_carrier_off(dev);

	sis190_set_speed_auto(dev);
out:
	return rc;

err_remove_mii:
	sis190_mii_remove(dev);
err_release_board:
	sis190_release_board(pdev);
	goto out;
}
/*
 * sis190_remove_one - PCI remove entry point: free PHY list, flush the
 * deferred phy_task, unregister and release the board.
 *
 * NOTE(review): flush_scheduled_work() runs before unregister_netdev();
 * presumably nothing can requeue phy_task after the PHYs are removed,
 * but confirm there is no window for a late interrupt to reschedule it.
 */
static void __devexit sis190_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	sis190_mii_remove(dev);
	flush_scheduled_work();
	unregister_netdev(dev);
	sis190_release_board(pdev);
	pci_set_drvdata(pdev, NULL);
}
/* PCI driver glue: device table plus probe/remove callbacks. */
static struct pci_driver sis190_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= sis190_pci_tbl,
	.probe		= sis190_init_one,
	.remove		= __devexit_p(sis190_remove_one),
};
/* Module load: register the PCI driver with the PCI core. */
static int __init sis190_init_module(void)
{
	return pci_register_driver(&sis190_pci_driver);
}
/* Module unload: unregister the PCI driver (removes all devices). */
static void __exit sis190_cleanup_module(void)
{
	pci_unregister_driver(&sis190_pci_driver);
}
/* Module entry/exit registration. */
module_init(sis190_init_module);
module_exit(sis190_cleanup_module);