1 /*
2 sis190.c: Silicon Integrated Systems SiS190 ethernet driver
4 Copyright (c) 2003 K.M. Liu <kmliu@sis.com>
5 Copyright (c) 2003, 2004 Jeff Garzik <jgarzik@pobox.com>
6 Copyright (c) 2003, 2004, 2005 Francois Romieu <romieu@fr.zoreil.com>
8 Based on r8169.c, tg3.c, 8139cp.c, skge.c, epic100.c and SiS 190/191
9 genuine driver.
11 This software may be used and distributed according to the terms of
12 the GNU General Public License (GPL), incorporated herein by reference.
13 Drivers based on or derived from this code fall under the GPL and must
14 retain the authorship, copyright and license notice. This file is not
15 a complete program and may only be used when the entire operating
16 system is licensed under the GPL.
18 See the file COPYING in this distribution for more information.
22 #include <linux/module.h>
23 #include <linux/moduleparam.h>
24 #include <linux/netdevice.h>
25 #include <linux/rtnetlink.h>
26 #include <linux/etherdevice.h>
27 #include <linux/ethtool.h>
28 #include <linux/pci.h>
29 #include <linux/mii.h>
30 #include <linux/delay.h>
31 #include <linux/crc32.h>
32 #include <linux/dma-mapping.h>
33 #include <asm/irq.h>
35 #define net_drv(p, arg...) if (netif_msg_drv(p)) \
36 printk(arg)
37 #define net_probe(p, arg...) if (netif_msg_probe(p)) \
38 printk(arg)
39 #define net_link(p, arg...) if (netif_msg_link(p)) \
40 printk(arg)
41 #define net_intr(p, arg...) if (netif_msg_intr(p)) \
42 printk(arg)
43 #define net_tx_err(p, arg...) if (netif_msg_tx_err(p)) \
44 printk(arg)
46 #define PHY_MAX_ADDR 32
47 #define PHY_ID_ANY 0x1f
48 #define MII_REG_ANY 0x1f
50 #ifdef CONFIG_SIS190_NAPI
51 #define NAPI_SUFFIX "-NAPI"
52 #else
53 #define NAPI_SUFFIX ""
54 #endif
56 #define DRV_VERSION "1.2" NAPI_SUFFIX
57 #define DRV_NAME "sis190"
58 #define SIS190_DRIVER_NAME DRV_NAME " Gigabit Ethernet driver " DRV_VERSION
59 #define PFX DRV_NAME ": "
61 #ifdef CONFIG_SIS190_NAPI
62 #define sis190_rx_skb netif_receive_skb
63 #define sis190_rx_quota(count, quota) min(count, quota)
64 #else
65 #define sis190_rx_skb netif_rx
66 #define sis190_rx_quota(count, quota) count
67 #endif
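/*
 * With CONFIG_SIS190_NAPI the Rx path hands packets to netif_receive_skb()
 * and caps the amount of work per pass at dev->quota (see
 * sis190_rx_interrupt()); without it, packets go through netif_rx() and
 * the quota argument is ignored.
 */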
69 #define MAC_ADDR_LEN 6
71 #define NUM_TX_DESC 64 /* [8..1024] */
72 #define NUM_RX_DESC 64 /* [8..8192] */
73 #define TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
74 #define RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
75 #define RX_BUF_SIZE 1536
76 #define RX_BUF_MASK 0xfff8
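/*
 * Rx buffer sizes are kept a multiple of 8: sis190_set_rxbufsize() rounds
 * the MTU-derived size with RX_BUF_MASK, presumably because the chip only
 * honours the upper bits of the descriptor size field (see the "licence to
 * kill the lower bits" note in sis190_set_rxbufsize()).
 */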
78 #define SIS190_REGS_SIZE 0x80
79 #define SIS190_TX_TIMEOUT (6*HZ)
80 #define SIS190_PHY_TIMEOUT (10*HZ)
81 #define SIS190_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
82 NETIF_MSG_LINK | NETIF_MSG_IFUP | \
83 NETIF_MSG_IFDOWN)
85 /* Enhanced PHY access register bit definitions */
86 #define EhnMIIread 0x0000
87 #define EhnMIIwrite 0x0020
88 #define EhnMIIdataShift 16
89 #define EhnMIIpmdShift 6 /* 7016 only */
90 #define EhnMIIregShift 11
91 #define EhnMIIreq 0x0010
92 #define EhnMIInotDone 0x0010
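/*
 * Rough layout of the GMIIControl command word, inferred from the shifts
 * above and from mdio_read()/mdio_write() below (no vendor documentation):
 *
 *   ctl = op                          // EhnMIIread or EhnMIIwrite
 *       | EhnMIIreq                   // start the transaction
 *       | (reg << EhnMIIregShift)     // PHY register number
 *       | (phy_id << EhnMIIpmdShift)  // PHY address
 *       | (data << EhnMIIdataShift);  // 16 bit data, writes only
 *
 * The chip clears EhnMIInotDone once the transaction has completed, which
 * is what __mdio_cmd() polls for.
 */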
94 /* Write/read MMIO register */
95 #define SIS_W8(reg, val) writeb ((val), ioaddr + (reg))
96 #define SIS_W16(reg, val) writew ((val), ioaddr + (reg))
97 #define SIS_W32(reg, val) writel ((val), ioaddr + (reg))
98 #define SIS_R8(reg) readb (ioaddr + (reg))
99 #define SIS_R16(reg) readw (ioaddr + (reg))
100 #define SIS_R32(reg) readl (ioaddr + (reg))
102 #define SIS_PCI_COMMIT() SIS_R32(IntrControl)
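/*
 * SIS_PCI_COMMIT() reads a register purely for its side effect: the read
 * forces any posted MMIO writes out to the chip before the driver goes on.
 */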
104 enum sis190_registers {
105 TxControl = 0x00,
106 TxDescStartAddr = 0x04,
107 rsv0 = 0x08, // reserved
108 TxSts = 0x0c, // unused (Control/Status)
109 RxControl = 0x10,
110 RxDescStartAddr = 0x14,
111 rsv1 = 0x18, // reserved
112 RxSts = 0x1c, // unused
113 IntrStatus = 0x20,
114 IntrMask = 0x24,
115 IntrControl = 0x28,
116 IntrTimer = 0x2c, // unused (Interrupt Timer)
117 PMControl = 0x30, // unused (Power Mgmt Control/Status)
118 rsv2 = 0x34, // reserved
119 ROMControl = 0x38,
120 ROMInterface = 0x3c,
121 StationControl = 0x40,
122 GMIIControl = 0x44,
123 GIoCR = 0x48, // unused (GMAC IO Compensation)
124 GIoCtrl = 0x4c, // unused (GMAC IO Control)
125 TxMacControl = 0x50,
126 TxLimit = 0x54, // unused (Tx MAC Timer/TryLimit)
127 RGDelay = 0x58, // unused (RGMII Tx Internal Delay)
128 rsv3 = 0x5c, // reserved
129 RxMacControl = 0x60,
130 RxMacAddr = 0x62,
131 RxHashTable = 0x68,
132 // Undocumented = 0x6c,
133 RxWolCtrl = 0x70,
134 RxWolData = 0x74, // unused (Rx WOL Data Access)
135 RxMPSControl = 0x78, // unused (Rx MPS Control)
136 rsv4 = 0x7c, // reserved
139 enum sis190_register_content {
140 /* IntrStatus */
141 SoftInt = 0x40000000, // unused
142 Timeup = 0x20000000, // unused
143 PauseFrame = 0x00080000, // unused
144 MagicPacket = 0x00040000, // unused
145 WakeupFrame = 0x00020000, // unused
146 LinkChange = 0x00010000,
147 RxQEmpty = 0x00000080,
148 RxQInt = 0x00000040,
149 TxQ1Empty = 0x00000020, // unused
150 TxQ1Int = 0x00000010,
151 TxQ0Empty = 0x00000008, // unused
152 TxQ0Int = 0x00000004,
153 RxHalt = 0x00000002,
154 TxHalt = 0x00000001,
156 /* {Rx/Tx}CmdBits */
157 CmdReset = 0x10,
158 CmdRxEnb = 0x08, // unused
159 CmdTxEnb = 0x01,
160 RxBufEmpty = 0x01, // unused
162 /* Cfg9346Bits */
163 Cfg9346_Lock = 0x00, // unused
164 Cfg9346_Unlock = 0xc0, // unused
166 /* RxMacControl */
167 AcceptErr = 0x20, // unused
168 AcceptRunt = 0x10, // unused
169 AcceptBroadcast = 0x0800,
170 AcceptMulticast = 0x0400,
171 AcceptMyPhys = 0x0200,
172 AcceptAllPhys = 0x0100,
174 /* RxConfigBits */
175 RxCfgFIFOShift = 13,
176 RxCfgDMAShift = 8, // 0x1a in RxControl ?
178 /* TxConfigBits */
179 TxInterFrameGapShift = 24,
180 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
182 LinkStatus = 0x02, // unused
183 FullDup = 0x01, // unused
185 /* TBICSRBit */
186 TBILinkOK = 0x02000000, // unused
189 struct TxDesc {
190 __le32 PSize;
191 __le32 status;
192 __le32 addr;
193 __le32 size;
196 struct RxDesc {
197 __le32 PSize;
198 __le32 status;
199 __le32 addr;
200 __le32 size;
203 enum _DescStatusBit {
204 /* _Desc.status */
205 OWNbit = 0x80000000, // RXOWN/TXOWN
206 INTbit = 0x40000000, // RXINT/TXINT
207 CRCbit = 0x00020000, // CRCOFF/CRCEN
208 PADbit = 0x00010000, // PREADD/PADEN
209 /* _Desc.size */
210 RingEnd = 0x80000000,
211 /* TxDesc.status */
212 LSEN = 0x08000000, // TSO ? -- FR
213 IPCS = 0x04000000,
214 TCPCS = 0x02000000,
215 UDPCS = 0x01000000,
216 BSTEN = 0x00800000,
217 EXTEN = 0x00400000,
218 DEFEN = 0x00200000,
219 BKFEN = 0x00100000,
220 CRSEN = 0x00080000,
221 COLEN = 0x00040000,
222 THOL3 = 0x30000000,
223 THOL2 = 0x20000000,
224 THOL1 = 0x10000000,
225 THOL0 = 0x00000000,
226 /* RxDesc.status */
227 IPON = 0x20000000,
228 TCPON = 0x10000000,
229 UDPON = 0x08000000,
230 Wakup = 0x00400000,
231 Magic = 0x00200000,
232 Pause = 0x00100000,
233 DEFbit = 0x00200000,
234 BCAST = 0x000c0000,
235 MCAST = 0x00080000,
236 UCAST = 0x00040000,
237 /* RxDesc.PSize */
238 TAGON = 0x80000000,
239 RxDescCountMask = 0x7f000000, // multi-desc pkt when > 1 ? -- FR
240 ABORT = 0x00800000,
241 SHORT = 0x00400000,
242 LIMIT = 0x00200000,
243 MIIER = 0x00100000,
244 OVRUN = 0x00080000,
245 NIBON = 0x00040000,
246 COLON = 0x00020000,
247 CRCOK = 0x00010000,
248 RxSizeMask = 0x0000ffff
250 /* The asic could apparently do vlan, TSO, jumbo (sis191 only) and
251  * provide two (unused with Linux) Tx queues. No publicly
252  * available documentation, alas. */
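/*
 * Descriptor ownership, as implemented below (not from vendor docs): the
 * driver fills in addr/size, issues a wmb() so those stores are visible,
 * then sets OWNbit in ->status to hand the descriptor to the chip.  The
 * chip clears OWNbit when it is done, which is what the Rx/Tx interrupt
 * paths test before touching a descriptor again.
 */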
256 enum sis190_eeprom_access_register_bits {
257 EECS = 0x00000001, // unused
258 EECLK = 0x00000002, // unused
259 EEDO = 0x00000008, // unused
260 EEDI = 0x00000004, // unused
261 EEREQ = 0x00000080,
262 EEROP = 0x00000200,
263 EEWOP = 0x00000100 // unused
266 /* EEPROM Addresses */
267 enum sis190_eeprom_address {
268 EEPROMSignature = 0x00,
269 EEPROMCLK = 0x01, // unused
270 EEPROMInfo = 0x02,
271 EEPROMMACAddr = 0x03
274 enum sis190_feature {
275 F_HAS_RGMII = 1,
276 F_PHY_88E1111 = 2,
277 F_PHY_BCM5461 = 4
280 struct sis190_private {
281 void __iomem *mmio_addr;
282 struct pci_dev *pci_dev;
283 struct net_device_stats stats;
284 spinlock_t lock;
285 u32 rx_buf_sz;
286 u32 cur_rx;
287 u32 cur_tx;
288 u32 dirty_rx;
289 u32 dirty_tx;
290 dma_addr_t rx_dma;
291 dma_addr_t tx_dma;
292 struct RxDesc *RxDescRing;
293 struct TxDesc *TxDescRing;
294 struct sk_buff *Rx_skbuff[NUM_RX_DESC];
295 struct sk_buff *Tx_skbuff[NUM_TX_DESC];
296 struct work_struct phy_task;
297 struct timer_list timer;
298 u32 msg_enable;
299 struct mii_if_info mii_if;
300 struct list_head first_phy;
301 u32 features;
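/*
 * Ring bookkeeping, inferred from the Rx/Tx paths below: cur_rx/cur_tx and
 * dirty_rx/dirty_tx are free-running counters, so a descriptor index is
 * always "counter % NUM_{RX,TX}_DESC".  On Tx, cur_tx - dirty_tx is the
 * number of descriptors submitted but not yet reclaimed, and the queue is
 * stopped when it reaches NUM_TX_DESC.
 */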
304 struct sis190_phy {
305 struct list_head list;
306 int phy_id;
307 u16 id[2];
308 u16 status;
309 u8 type;
312 enum sis190_phy_type {
313 UNKNOWN = 0x00,
314 HOME = 0x01,
315 LAN = 0x02,
316 MIX = 0x03
319 static struct mii_chip_info {
320 const char *name;
321 u16 id[2];
322 unsigned int type;
323 u32 feature;
324 } mii_chip_table[] = {
325 { "Broadcom PHY BCM5461", { 0x0020, 0x60c0 }, LAN, F_PHY_BCM5461 },
326 { "Agere PHY ET1101B", { 0x0282, 0xf010 }, LAN, 0 },
327 { "Marvell PHY 88E1111", { 0x0141, 0x0cc0 }, LAN, F_PHY_88E1111 },
328 { "Realtek PHY RTL8201", { 0x0000, 0x8200 }, LAN, 0 },
329 { NULL, }
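/*
 * Note: sis190_init_phy() matches id[0] exactly but masks id[1] with
 * 0xfff0, so the PHY revision nibble of PHYSID2 is ignored when looking a
 * transceiver up in this table.
 */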
332 static const struct {
333 const char *name;
334 } sis_chip_info[] = {
335 { "SiS 190 PCI Fast Ethernet adapter" },
336 { "SiS 191 PCI Gigabit Ethernet adapter" },
339 static struct pci_device_id sis190_pci_tbl[] __devinitdata = {
340 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
341 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0191), 0, 0, 1 },
342 { 0, },
345 MODULE_DEVICE_TABLE(pci, sis190_pci_tbl);
347 static int rx_copybreak = 200;
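/*
 * Frames shorter than rx_copybreak are copied into a freshly allocated skb
 * (see sis190_try_rx_copy()) so that the original full-sized Rx buffer can
 * be handed straight back to the chip; larger frames are passed up as-is
 * and a replacement buffer is allocated later by sis190_rx_fill().
 */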
349 static struct {
350 u32 msg_enable;
351 } debug = { -1 };
353 MODULE_DESCRIPTION("SiS sis190 Gigabit Ethernet driver");
354 module_param(rx_copybreak, int, 0);
355 MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
356 module_param_named(debug, debug.msg_enable, int, 0);
357 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
358 MODULE_AUTHOR("K.M. Liu <kmliu@sis.com>, Ueimor <romieu@fr.zoreil.com>");
359 MODULE_VERSION(DRV_VERSION);
360 MODULE_LICENSE("GPL");
362 static const u32 sis190_intr_mask =
363 RxQEmpty | RxQInt | TxQ1Int | TxQ0Int | RxHalt | TxHalt | LinkChange;
366 * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
367 * The chips use a 64 element hash table based on the Ethernet CRC.
369 static const int multicast_filter_limit = 32;
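/*
 * Hashing sketch, mirroring sis190_set_rx_mode() below: the low six bits
 * of the Ethernet CRC of each multicast address select one of the 64 hash
 * bits, spread over the two 32-bit RxHashTable words:
 *
 *   bit_nr = ether_crc(ETH_ALEN, addr) & 0x3f;
 *   mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
 *
 * Past multicast_filter_limit addresses the driver gives up on the hash
 * and simply accepts all multicast frames.
 */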
371 static void __mdio_cmd(void __iomem *ioaddr, u32 ctl)
373 unsigned int i;
375 SIS_W32(GMIIControl, ctl);
377 msleep(1);
379 for (i = 0; i < 100; i++) {
380 if (!(SIS_R32(GMIIControl) & EhnMIInotDone))
381 break;
382 msleep(1);
385 if (i > 99)
386 printk(KERN_ERR PFX "PHY command failed !\n");
389 static void mdio_write(void __iomem *ioaddr, int phy_id, int reg, int val)
391 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIwrite |
392 (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift) |
393 (((u32) val) << EhnMIIdataShift));
396 static int mdio_read(void __iomem *ioaddr, int phy_id, int reg)
398 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIread |
399 (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift));
401 return (u16) (SIS_R32(GMIIControl) >> EhnMIIdataShift);
404 static void __mdio_write(struct net_device *dev, int phy_id, int reg, int val)
406 struct sis190_private *tp = netdev_priv(dev);
408 mdio_write(tp->mmio_addr, phy_id, reg, val);
411 static int __mdio_read(struct net_device *dev, int phy_id, int reg)
413 struct sis190_private *tp = netdev_priv(dev);
415 return mdio_read(tp->mmio_addr, phy_id, reg);
418 static u16 mdio_read_latched(void __iomem *ioaddr, int phy_id, int reg)
420 mdio_read(ioaddr, phy_id, reg);
421 return mdio_read(ioaddr, phy_id, reg);
424 static u16 __devinit sis190_read_eeprom(void __iomem *ioaddr, u32 reg)
426 u16 data = 0xffff;
427 unsigned int i;
429 if (!(SIS_R32(ROMControl) & 0x0002))
430 return 0;
432 SIS_W32(ROMInterface, EEREQ | EEROP | (reg << 10));
434 for (i = 0; i < 200; i++) {
435 if (!(SIS_R32(ROMInterface) & EEREQ)) {
436 data = (SIS_R32(ROMInterface) & 0xffff0000) >> 16;
437 break;
439 msleep(1);
442 return data;
445 static void sis190_irq_mask_and_ack(void __iomem *ioaddr)
447 SIS_W32(IntrMask, 0x00);
448 SIS_W32(IntrStatus, 0xffffffff);
449 SIS_PCI_COMMIT();
452 static void sis190_asic_down(void __iomem *ioaddr)
454 /* Stop the chip's Tx and Rx DMA processes. */
456 SIS_W32(TxControl, 0x1a00);
457 SIS_W32(RxControl, 0x1a00);
459 sis190_irq_mask_and_ack(ioaddr);
462 static void sis190_mark_as_last_descriptor(struct RxDesc *desc)
464 desc->size |= cpu_to_le32(RingEnd);
467 static inline void sis190_give_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
469 u32 eor = le32_to_cpu(desc->size) & RingEnd;
471 desc->PSize = 0x0;
472 desc->size = cpu_to_le32((rx_buf_sz & RX_BUF_MASK) | eor);
473 wmb();
474 desc->status = cpu_to_le32(OWNbit | INTbit);
477 static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
478 u32 rx_buf_sz)
480 desc->addr = cpu_to_le32(mapping);
481 sis190_give_to_asic(desc, rx_buf_sz);
484 static inline void sis190_make_unusable_by_asic(struct RxDesc *desc)
486 desc->PSize = 0x0;
487 desc->addr = 0xdeadbeef;
488 desc->size &= cpu_to_le32(RingEnd);
489 wmb();
490 desc->status = 0x0;
493 static int sis190_alloc_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff,
494 struct RxDesc *desc, u32 rx_buf_sz)
496 struct sk_buff *skb;
497 dma_addr_t mapping;
498 int ret = 0;
500 skb = dev_alloc_skb(rx_buf_sz);
501 if (!skb)
502 goto err_out;
504 *sk_buff = skb;
506 mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
507 PCI_DMA_FROMDEVICE);
509 sis190_map_to_asic(desc, mapping, rx_buf_sz);
510 out:
511 return ret;
513 err_out:
514 ret = -ENOMEM;
515 sis190_make_unusable_by_asic(desc);
516 goto out;
519 static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
520 u32 start, u32 end)
522 u32 cur;
524 for (cur = start; cur < end; cur++) {
525 int ret, i = cur % NUM_RX_DESC;
527 if (tp->Rx_skbuff[i])
528 continue;
530 ret = sis190_alloc_rx_skb(tp->pci_dev, tp->Rx_skbuff + i,
531 tp->RxDescRing + i, tp->rx_buf_sz);
532 if (ret < 0)
533 break;
535 return cur - start;
538 static inline int sis190_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
539 struct RxDesc *desc, int rx_buf_sz)
541 int ret = -1;
543 if (pkt_size < rx_copybreak) {
544 struct sk_buff *skb;
546 skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN);
547 if (skb) {
548 skb_reserve(skb, NET_IP_ALIGN);
549 eth_copy_and_sum(skb, sk_buff[0]->data, pkt_size, 0);
550 *sk_buff = skb;
551 sis190_give_to_asic(desc, rx_buf_sz);
552 ret = 0;
555 return ret;
558 static inline int sis190_rx_pkt_err(u32 status, struct net_device_stats *stats)
560 #define ErrMask (OVRUN | SHORT | LIMIT | MIIER | NIBON | COLON | ABORT)
562 if ((status & CRCOK) && !(status & ErrMask))
563 return 0;
565 if (!(status & CRCOK))
566 stats->rx_crc_errors++;
567 else if (status & OVRUN)
568 stats->rx_over_errors++;
569 else if (status & (SHORT | LIMIT))
570 stats->rx_length_errors++;
571 else if (status & (MIIER | NIBON | COLON))
572 stats->rx_frame_errors++;
574 stats->rx_errors++;
575 return -1;
578 static int sis190_rx_interrupt(struct net_device *dev,
579 struct sis190_private *tp, void __iomem *ioaddr)
581 struct net_device_stats *stats = &tp->stats;
582 u32 rx_left, cur_rx = tp->cur_rx;
583 u32 delta, count;
585 rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
586 rx_left = sis190_rx_quota(rx_left, (u32) dev->quota);
588 for (; rx_left > 0; rx_left--, cur_rx++) {
589 unsigned int entry = cur_rx % NUM_RX_DESC;
590 struct RxDesc *desc = tp->RxDescRing + entry;
591 u32 status;
593 if (le32_to_cpu(desc->status) & OWNbit)
594 break;
596 status = le32_to_cpu(desc->PSize);
598 // net_intr(tp, KERN_INFO "%s: Rx PSize = %08x.\n", dev->name,
599 // status);
601 if (sis190_rx_pkt_err(status, stats) < 0)
602 sis190_give_to_asic(desc, tp->rx_buf_sz);
603 else {
604 struct sk_buff *skb = tp->Rx_skbuff[entry];
605 int pkt_size = (status & RxSizeMask) - 4;
606 void (*pci_action)(struct pci_dev *, dma_addr_t,
607 size_t, int) = pci_dma_sync_single_for_device;
609 if (unlikely(pkt_size > tp->rx_buf_sz)) {
610 net_intr(tp, KERN_INFO
611 "%s: (frag) status = %08x.\n",
612 dev->name, status);
613 stats->rx_dropped++;
614 stats->rx_length_errors++;
615 sis190_give_to_asic(desc, tp->rx_buf_sz);
616 continue;
619 pci_dma_sync_single_for_cpu(tp->pci_dev,
620 le32_to_cpu(desc->addr), tp->rx_buf_sz,
621 PCI_DMA_FROMDEVICE);
623 if (sis190_try_rx_copy(&skb, pkt_size, desc,
624 tp->rx_buf_sz)) {
625 pci_action = pci_unmap_single;
626 tp->Rx_skbuff[entry] = NULL;
627 sis190_make_unusable_by_asic(desc);
630 pci_action(tp->pci_dev, le32_to_cpu(desc->addr),
631 tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
633 skb->dev = dev;
634 skb_put(skb, pkt_size);
635 skb->protocol = eth_type_trans(skb, dev);
637 sis190_rx_skb(skb);
639 dev->last_rx = jiffies;
640 stats->rx_packets++;
641 stats->rx_bytes += pkt_size;
642 if ((status & BCAST) == MCAST)
643 stats->multicast++;
646 count = cur_rx - tp->cur_rx;
647 tp->cur_rx = cur_rx;
649 delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
650 if (!delta && count && netif_msg_intr(tp))
651 printk(KERN_INFO "%s: no Rx buffer allocated.\n", dev->name);
652 tp->dirty_rx += delta;
654 if (((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx) && netif_msg_intr(tp))
655 printk(KERN_EMERG "%s: Rx buffers exhausted.\n", dev->name);
657 return count;
660 static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
661 struct TxDesc *desc)
663 unsigned int len;
665 len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
667 pci_unmap_single(pdev, le32_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
669 memset(desc, 0x00, sizeof(*desc));
672 static void sis190_tx_interrupt(struct net_device *dev,
673 struct sis190_private *tp, void __iomem *ioaddr)
675 u32 pending, dirty_tx = tp->dirty_tx;
677 * The queue_stopped snapshot would not be needed if it were harmless
678 * to re-enable queueing too early (hint: think preempt and unclocked smp systems).
680 unsigned int queue_stopped;
682 smp_rmb();
683 pending = tp->cur_tx - dirty_tx;
684 queue_stopped = (pending == NUM_TX_DESC);
686 for (; pending; pending--, dirty_tx++) {
687 unsigned int entry = dirty_tx % NUM_TX_DESC;
688 struct TxDesc *txd = tp->TxDescRing + entry;
689 struct sk_buff *skb;
691 if (le32_to_cpu(txd->status) & OWNbit)
692 break;
694 skb = tp->Tx_skbuff[entry];
696 tp->stats.tx_packets++;
697 tp->stats.tx_bytes += skb->len;
699 sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
700 tp->Tx_skbuff[entry] = NULL;
701 dev_kfree_skb_irq(skb);
704 if (tp->dirty_tx != dirty_tx) {
705 tp->dirty_tx = dirty_tx;
706 smp_wmb();
707 if (queue_stopped)
708 netif_wake_queue(dev);
713 * The interrupt handler does all of the Rx thread work and cleans up after
714 * the Tx thread.
716 static irqreturn_t sis190_interrupt(int irq, void *__dev, struct pt_regs *regs)
718 struct net_device *dev = __dev;
719 struct sis190_private *tp = netdev_priv(dev);
720 void __iomem *ioaddr = tp->mmio_addr;
721 unsigned int handled = 0;
722 u32 status;
724 status = SIS_R32(IntrStatus);
726 if ((status == 0xffffffff) || !status)
727 goto out;
729 handled = 1;
731 if (unlikely(!netif_running(dev))) {
732 sis190_asic_down(ioaddr);
733 goto out;
736 SIS_W32(IntrStatus, status);
738 // net_intr(tp, KERN_INFO "%s: status = %08x.\n", dev->name, status);
740 if (status & LinkChange) {
741 net_intr(tp, KERN_INFO "%s: link change.\n", dev->name);
742 schedule_work(&tp->phy_task);
745 if (status & RxQInt)
746 sis190_rx_interrupt(dev, tp, ioaddr);
748 if (status & TxQ0Int)
749 sis190_tx_interrupt(dev, tp, ioaddr);
750 out:
751 return IRQ_RETVAL(handled);
754 #ifdef CONFIG_NET_POLL_CONTROLLER
755 static void sis190_netpoll(struct net_device *dev)
757 struct sis190_private *tp = netdev_priv(dev);
758 struct pci_dev *pdev = tp->pci_dev;
760 disable_irq(pdev->irq);
761 sis190_interrupt(pdev->irq, dev, NULL);
762 enable_irq(pdev->irq);
764 #endif
766 static void sis190_free_rx_skb(struct sis190_private *tp,
767 struct sk_buff **sk_buff, struct RxDesc *desc)
769 struct pci_dev *pdev = tp->pci_dev;
771 pci_unmap_single(pdev, le32_to_cpu(desc->addr), tp->rx_buf_sz,
772 PCI_DMA_FROMDEVICE);
773 dev_kfree_skb(*sk_buff);
774 *sk_buff = NULL;
775 sis190_make_unusable_by_asic(desc);
778 static void sis190_rx_clear(struct sis190_private *tp)
780 unsigned int i;
782 for (i = 0; i < NUM_RX_DESC; i++) {
783 if (!tp->Rx_skbuff[i])
784 continue;
785 sis190_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescRing + i);
789 static void sis190_init_ring_indexes(struct sis190_private *tp)
791 tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
794 static int sis190_init_ring(struct net_device *dev)
796 struct sis190_private *tp = netdev_priv(dev);
798 sis190_init_ring_indexes(tp);
800 memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
801 memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
803 if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
804 goto err_rx_clear;
806 sis190_mark_as_last_descriptor(tp->RxDescRing + NUM_RX_DESC - 1);
808 return 0;
810 err_rx_clear:
811 sis190_rx_clear(tp);
812 return -ENOMEM;
815 static void sis190_set_rx_mode(struct net_device *dev)
817 struct sis190_private *tp = netdev_priv(dev);
818 void __iomem *ioaddr = tp->mmio_addr;
819 unsigned long flags;
820 u32 mc_filter[2]; /* Multicast hash filter */
821 u16 rx_mode;
823 if (dev->flags & IFF_PROMISC) {
824 rx_mode =
825 AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
826 AcceptAllPhys;
827 mc_filter[1] = mc_filter[0] = 0xffffffff;
828 } else if ((dev->mc_count > multicast_filter_limit) ||
829 (dev->flags & IFF_ALLMULTI)) {
830 /* Too many to filter perfectly -- accept all multicasts. */
831 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
832 mc_filter[1] = mc_filter[0] = 0xffffffff;
833 } else {
834 struct dev_mc_list *mclist;
835 unsigned int i;
837 rx_mode = AcceptBroadcast | AcceptMyPhys;
838 mc_filter[1] = mc_filter[0] = 0;
839 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
840 i++, mclist = mclist->next) {
841 int bit_nr =
842 ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
843 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
844 rx_mode |= AcceptMulticast;
848 spin_lock_irqsave(&tp->lock, flags);
850 SIS_W16(RxMacControl, rx_mode | 0x2);
851 SIS_W32(RxHashTable, mc_filter[0]);
852 SIS_W32(RxHashTable + 4, mc_filter[1]);
854 spin_unlock_irqrestore(&tp->lock, flags);
857 static void sis190_soft_reset(void __iomem *ioaddr)
859 SIS_W32(IntrControl, 0x8000);
860 SIS_PCI_COMMIT();
861 msleep(1);
862 SIS_W32(IntrControl, 0x0);
863 sis190_asic_down(ioaddr);
864 msleep(1);
867 static void sis190_hw_start(struct net_device *dev)
869 struct sis190_private *tp = netdev_priv(dev);
870 void __iomem *ioaddr = tp->mmio_addr;
872 sis190_soft_reset(ioaddr);
874 SIS_W32(TxDescStartAddr, tp->tx_dma);
875 SIS_W32(RxDescStartAddr, tp->rx_dma);
877 SIS_W32(IntrStatus, 0xffffffff);
878 SIS_W32(IntrMask, 0x0);
879 SIS_W32(GMIIControl, 0x0);
880 SIS_W32(TxMacControl, 0x60);
881 SIS_W16(RxMacControl, 0x02);
882 SIS_W32(RxHashTable, 0x0);
883 SIS_W32(0x6c, 0x0);
884 SIS_W32(RxWolCtrl, 0x0);
885 SIS_W32(RxWolData, 0x0);
887 SIS_PCI_COMMIT();
889 sis190_set_rx_mode(dev);
891 /* Enable all known interrupts by setting the interrupt mask. */
892 SIS_W32(IntrMask, sis190_intr_mask);
894 SIS_W32(TxControl, 0x1a00 | CmdTxEnb);
895 SIS_W32(RxControl, 0x1a1d);
897 netif_start_queue(dev);
900 static void sis190_phy_task(void * data)
902 struct net_device *dev = data;
903 struct sis190_private *tp = netdev_priv(dev);
904 void __iomem *ioaddr = tp->mmio_addr;
905 int phy_id = tp->mii_if.phy_id;
906 u16 val;
908 rtnl_lock();
910 val = mdio_read(ioaddr, phy_id, MII_BMCR);
911 if (val & BMCR_RESET) {
912 // FIXME: needlessly high ? -- FR 02/07/2005
913 mod_timer(&tp->timer, jiffies + HZ/10);
914 } else if (!(mdio_read_latched(ioaddr, phy_id, MII_BMSR) &
915 BMSR_ANEGCOMPLETE)) {
916 net_link(tp, KERN_WARNING "%s: PHY reset until link up.\n",
917 dev->name);
918 netif_carrier_off(dev);
919 mdio_write(ioaddr, phy_id, MII_BMCR, val | BMCR_RESET);
920 mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);
921 } else {
922 /* Rejoice ! */
923 struct {
924 int val;
925 u32 ctl;
926 const char *msg;
927 } reg31[] = {
928 { LPA_1000XFULL | LPA_SLCT, 0x07000c00 | 0x00001000,
929 "1000 Mbps Full Duplex" },
930 { LPA_1000XHALF | LPA_SLCT, 0x07000c00,
931 "1000 Mbps Half Duplex" },
932 { LPA_100FULL, 0x04000800 | 0x00001000,
933 "100 Mbps Full Duplex" },
934 { LPA_100HALF, 0x04000800,
935 "100 Mbps Half Duplex" },
936 { LPA_10FULL, 0x04000400 | 0x00001000,
937 "10 Mbps Full Duplex" },
938 { LPA_10HALF, 0x04000400,
939 "10 Mbps Half Duplex" },
940 { 0, 0x04000400, "unknown" }
941 }, *p;
942 u16 adv;
944 val = mdio_read(ioaddr, phy_id, 0x1f);
945 net_link(tp, KERN_INFO "%s: mii ext = %04x.\n", dev->name, val);
947 val = mdio_read(ioaddr, phy_id, MII_LPA);
948 adv = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
949 net_link(tp, KERN_INFO "%s: mii lpa = %04x adv = %04x.\n",
950 dev->name, val, adv);
952 val &= adv;
954 for (p = reg31; p->val; p++) {
955 if ((val & p->val) == p->val)
956 break;
959 p->ctl |= SIS_R32(StationControl) & ~0x0f001c00;
961 if ((tp->features & F_HAS_RGMII) &&
962 (tp->features & F_PHY_BCM5461)) {
963 // Set Tx Delay in RGMII mode.
964 mdio_write(ioaddr, phy_id, 0x18, 0xf1c7);
965 udelay(200);
966 mdio_write(ioaddr, phy_id, 0x1c, 0x8c00);
967 p->ctl |= 0x03000000;
970 SIS_W32(StationControl, p->ctl);
972 if (tp->features & F_HAS_RGMII) {
973 SIS_W32(RGDelay, 0x0441);
974 SIS_W32(RGDelay, 0x0440);
977 net_link(tp, KERN_INFO "%s: link on %s mode.\n", dev->name,
978 p->msg);
979 netif_carrier_on(dev);
982 rtnl_unlock();
985 static void sis190_phy_timer(unsigned long __opaque)
987 struct net_device *dev = (struct net_device *)__opaque;
988 struct sis190_private *tp = netdev_priv(dev);
990 if (likely(netif_running(dev)))
991 schedule_work(&tp->phy_task);
994 static inline void sis190_delete_timer(struct net_device *dev)
996 struct sis190_private *tp = netdev_priv(dev);
998 del_timer_sync(&tp->timer);
1001 static inline void sis190_request_timer(struct net_device *dev)
1003 struct sis190_private *tp = netdev_priv(dev);
1004 struct timer_list *timer = &tp->timer;
1006 init_timer(timer);
1007 timer->expires = jiffies + SIS190_PHY_TIMEOUT;
1008 timer->data = (unsigned long)dev;
1009 timer->function = sis190_phy_timer;
1010 add_timer(timer);
1013 static void sis190_set_rxbufsize(struct sis190_private *tp,
1014 struct net_device *dev)
1016 unsigned int mtu = dev->mtu;
1018 tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
1019 /* RxDesc->size has a licence to kill the lower bits */
1020 if (tp->rx_buf_sz & 0x07) {
1021 tp->rx_buf_sz += 8;
1022 tp->rx_buf_sz &= RX_BUF_MASK;
1026 static int sis190_open(struct net_device *dev)
1028 struct sis190_private *tp = netdev_priv(dev);
1029 struct pci_dev *pdev = tp->pci_dev;
1030 int rc = -ENOMEM;
1032 sis190_set_rxbufsize(tp, dev);
1035 * Rx and Tx descriptors need 256 bytes alignment.
1036 * pci_alloc_consistent() guarantees a stronger alignment.
1038 tp->TxDescRing = pci_alloc_consistent(pdev, TX_RING_BYTES, &tp->tx_dma);
1039 if (!tp->TxDescRing)
1040 goto out;
1042 tp->RxDescRing = pci_alloc_consistent(pdev, RX_RING_BYTES, &tp->rx_dma);
1043 if (!tp->RxDescRing)
1044 goto err_free_tx_0;
1046 rc = sis190_init_ring(dev);
1047 if (rc < 0)
1048 goto err_free_rx_1;
1050 INIT_WORK(&tp->phy_task, sis190_phy_task, dev);
1052 sis190_request_timer(dev);
1054 rc = request_irq(dev->irq, sis190_interrupt, IRQF_SHARED, dev->name, dev);
1055 if (rc < 0)
1056 goto err_release_timer_2;
1058 sis190_hw_start(dev);
1059 out:
1060 return rc;
1062 err_release_timer_2:
1063 sis190_delete_timer(dev);
1064 sis190_rx_clear(tp);
1065 err_free_rx_1:
1066 pci_free_consistent(tp->pci_dev, RX_RING_BYTES, tp->RxDescRing,
1067 tp->rx_dma);
1068 err_free_tx_0:
1069 pci_free_consistent(tp->pci_dev, TX_RING_BYTES, tp->TxDescRing,
1070 tp->tx_dma);
1071 goto out;
1074 static void sis190_tx_clear(struct sis190_private *tp)
1076 unsigned int i;
1078 for (i = 0; i < NUM_TX_DESC; i++) {
1079 struct sk_buff *skb = tp->Tx_skbuff[i];
1081 if (!skb)
1082 continue;
1084 sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i);
1085 tp->Tx_skbuff[i] = NULL;
1086 dev_kfree_skb(skb);
1088 tp->stats.tx_dropped++;
1090 tp->cur_tx = tp->dirty_tx = 0;
1093 static void sis190_down(struct net_device *dev)
1095 struct sis190_private *tp = netdev_priv(dev);
1096 void __iomem *ioaddr = tp->mmio_addr;
1097 unsigned int poll_locked = 0;
1099 sis190_delete_timer(dev);
1101 netif_stop_queue(dev);
1103 flush_scheduled_work();
1105 do {
1106 spin_lock_irq(&tp->lock);
1108 sis190_asic_down(ioaddr);
1110 spin_unlock_irq(&tp->lock);
1112 synchronize_irq(dev->irq);
1114 if (!poll_locked) {
1115 netif_poll_disable(dev);
1116 poll_locked++;
1119 synchronize_sched();
1121 } while (SIS_R32(IntrMask));
1123 sis190_tx_clear(tp);
1124 sis190_rx_clear(tp);
1127 static int sis190_close(struct net_device *dev)
1129 struct sis190_private *tp = netdev_priv(dev);
1130 struct pci_dev *pdev = tp->pci_dev;
1132 sis190_down(dev);
1134 free_irq(dev->irq, dev);
1136 netif_poll_enable(dev);
1138 pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
1139 pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);
1141 tp->TxDescRing = NULL;
1142 tp->RxDescRing = NULL;
1144 return 0;
1147 static int sis190_start_xmit(struct sk_buff *skb, struct net_device *dev)
1149 struct sis190_private *tp = netdev_priv(dev);
1150 void __iomem *ioaddr = tp->mmio_addr;
1151 u32 len, entry, dirty_tx;
1152 struct TxDesc *desc;
1153 dma_addr_t mapping;
1155 if (unlikely(skb->len < ETH_ZLEN)) {
1156 if (skb_padto(skb, ETH_ZLEN)) {
1157 tp->stats.tx_dropped++;
1158 goto out;
1160 len = ETH_ZLEN;
1161 } else {
1162 len = skb->len;
1165 entry = tp->cur_tx % NUM_TX_DESC;
1166 desc = tp->TxDescRing + entry;
1168 if (unlikely(le32_to_cpu(desc->status) & OWNbit)) {
1169 netif_stop_queue(dev);
1170 net_tx_err(tp, KERN_ERR PFX
1171 "%s: BUG! Tx Ring full when queue awake!\n",
1172 dev->name);
1173 return NETDEV_TX_BUSY;
1176 mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
1178 tp->Tx_skbuff[entry] = skb;
1180 desc->PSize = cpu_to_le32(len);
1181 desc->addr = cpu_to_le32(mapping);
1183 desc->size = cpu_to_le32(len);
1184 if (entry == (NUM_TX_DESC - 1))
1185 desc->size |= cpu_to_le32(RingEnd);
1187 wmb();
1189 desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit);
1191 tp->cur_tx++;
1193 smp_wmb();
1195 SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb);
1197 dev->trans_start = jiffies;
1199 dirty_tx = tp->dirty_tx;
1200 if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) {
1201 netif_stop_queue(dev);
1202 smp_rmb();
1203 if (dirty_tx != tp->dirty_tx)
1204 netif_wake_queue(dev);
1206 out:
1207 return NETDEV_TX_OK;
1210 static struct net_device_stats *sis190_get_stats(struct net_device *dev)
1212 struct sis190_private *tp = netdev_priv(dev);
1214 return &tp->stats;
1217 static void sis190_free_phy(struct list_head *first_phy)
1219 struct sis190_phy *cur, *next;
1221 list_for_each_entry_safe(cur, next, first_phy, list) {
1222 kfree(cur);
1227 * sis190_default_phy - Select default PHY for sis190 mac.
1228 * @dev: the net device to probe for
1230 * Select the first detected PHY with link as the default.
1231 * If none has link, select the PHY whose type is HOME as the default.
1232 * If no HOME PHY exists, select a LAN PHY.
1234 static u16 sis190_default_phy(struct net_device *dev)
1236 struct sis190_phy *phy, *phy_home, *phy_default, *phy_lan;
1237 struct sis190_private *tp = netdev_priv(dev);
1238 struct mii_if_info *mii_if = &tp->mii_if;
1239 void __iomem *ioaddr = tp->mmio_addr;
1240 u16 status;
1242 phy_home = phy_default = phy_lan = NULL;
1244 list_for_each_entry(phy, &tp->first_phy, list) {
1245 status = mdio_read_latched(ioaddr, phy->phy_id, MII_BMSR);
1247 // Link up, no default PHY selected yet and not a ghost PHY.
1248 if ((status & BMSR_LSTATUS) &&
1249 !phy_default &&
1250 (phy->type != UNKNOWN)) {
1251 phy_default = phy;
1252 } else {
1253 status = mdio_read(ioaddr, phy->phy_id, MII_BMCR);
1254 mdio_write(ioaddr, phy->phy_id, MII_BMCR,
1255 status | BMCR_ANENABLE | BMCR_ISOLATE);
1256 if (phy->type == HOME)
1257 phy_home = phy;
1258 else if (phy->type == LAN)
1259 phy_lan = phy;
1263 if (!phy_default) {
1264 if (phy_home)
1265 phy_default = phy_home;
1266 else if (phy_lan)
1267 phy_default = phy_lan;
1268 else
1269 phy_default = list_entry(&tp->first_phy,
1270 struct sis190_phy, list);
1273 if (mii_if->phy_id != phy_default->phy_id) {
1274 mii_if->phy_id = phy_default->phy_id;
1275 net_probe(tp, KERN_INFO
1276 "%s: Using transceiver at address %d as default.\n",
1277 pci_name(tp->pci_dev), mii_if->phy_id);
1280 status = mdio_read(ioaddr, mii_if->phy_id, MII_BMCR);
1281 status &= (~BMCR_ISOLATE);
1283 mdio_write(ioaddr, mii_if->phy_id, MII_BMCR, status);
1284 status = mdio_read_latched(ioaddr, mii_if->phy_id, MII_BMSR);
1286 return status;
1289 static void sis190_init_phy(struct net_device *dev, struct sis190_private *tp,
1290 struct sis190_phy *phy, unsigned int phy_id,
1291 u16 mii_status)
1293 void __iomem *ioaddr = tp->mmio_addr;
1294 struct mii_chip_info *p;
1296 INIT_LIST_HEAD(&phy->list);
1297 phy->status = mii_status;
1298 phy->phy_id = phy_id;
1300 phy->id[0] = mdio_read(ioaddr, phy_id, MII_PHYSID1);
1301 phy->id[1] = mdio_read(ioaddr, phy_id, MII_PHYSID2);
1303 for (p = mii_chip_table; p->type; p++) {
1304 if ((p->id[0] == phy->id[0]) &&
1305 (p->id[1] == (phy->id[1] & 0xfff0))) {
1306 break;
1310 if (p->id[1]) {
1311 phy->type = (p->type == MIX) ?
1312 ((mii_status & (BMSR_100FULL | BMSR_100HALF)) ?
1313 LAN : HOME) : p->type;
1314 tp->features |= p->feature;
1315 } else
1316 phy->type = UNKNOWN;
1318 net_probe(tp, KERN_INFO "%s: %s transceiver at address %d.\n",
1319 pci_name(tp->pci_dev),
1320 (phy->type == UNKNOWN) ? "Unknown PHY" : p->name, phy_id);
1323 static void sis190_mii_probe_88e1111_fixup(struct sis190_private *tp)
1325 if (tp->features & F_PHY_88E1111) {
1326 void __iomem *ioaddr = tp->mmio_addr;
1327 int phy_id = tp->mii_if.phy_id;
1328 u16 reg[2][2] = {
1329 { 0x808b, 0x0ce1 },
1330 { 0x808f, 0x0c60 }
1331 }, *p;
1333 p = (tp->features & F_HAS_RGMII) ? reg[0] : reg[1];
1335 mdio_write(ioaddr, phy_id, 0x1b, p[0]);
1336 udelay(200);
1337 mdio_write(ioaddr, phy_id, 0x14, p[1]);
1338 udelay(200);
1343 * sis190_mii_probe - Probe MII PHY for sis190
1344 * @dev: the net device to probe for
1346 * Search the 32 possible MII PHY addresses.
1347 * Identify and select the current PHY if one is found,
1348 * return an error if none is found.
1350 static int __devinit sis190_mii_probe(struct net_device *dev)
1352 struct sis190_private *tp = netdev_priv(dev);
1353 struct mii_if_info *mii_if = &tp->mii_if;
1354 void __iomem *ioaddr = tp->mmio_addr;
1355 int phy_id;
1356 int rc = 0;
1358 INIT_LIST_HEAD(&tp->first_phy);
1360 for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
1361 struct sis190_phy *phy;
1362 u16 status;
1364 status = mdio_read_latched(ioaddr, phy_id, MII_BMSR);
1366 // Try next mii if the current one is not accessible.
1367 if (status == 0xffff || status == 0x0000)
1368 continue;
1370 phy = kmalloc(sizeof(*phy), GFP_KERNEL);
1371 if (!phy) {
1372 sis190_free_phy(&tp->first_phy);
1373 rc = -ENOMEM;
1374 goto out;
1377 sis190_init_phy(dev, tp, phy, phy_id, status);
1379 list_add(&phy->list, &tp->first_phy);
1382 if (list_empty(&tp->first_phy)) {
1383 net_probe(tp, KERN_INFO "%s: No MII transceivers found!\n",
1384 pci_name(tp->pci_dev));
1385 rc = -EIO;
1386 goto out;
1389 /* Select default PHY for mac */
1390 sis190_default_phy(dev);
1392 sis190_mii_probe_88e1111_fixup(tp);
1394 mii_if->dev = dev;
1395 mii_if->mdio_read = __mdio_read;
1396 mii_if->mdio_write = __mdio_write;
1397 mii_if->phy_id_mask = PHY_ID_ANY;
1398 mii_if->reg_num_mask = MII_REG_ANY;
1399 out:
1400 return rc;
1403 static void __devexit sis190_mii_remove(struct net_device *dev)
1405 struct sis190_private *tp = netdev_priv(dev);
1407 sis190_free_phy(&tp->first_phy);
1410 static void sis190_release_board(struct pci_dev *pdev)
1412 struct net_device *dev = pci_get_drvdata(pdev);
1413 struct sis190_private *tp = netdev_priv(dev);
1415 iounmap(tp->mmio_addr);
1416 pci_release_regions(pdev);
1417 pci_disable_device(pdev);
1418 free_netdev(dev);
1421 static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
1423 struct sis190_private *tp;
1424 struct net_device *dev;
1425 void __iomem *ioaddr;
1426 int rc;
1428 dev = alloc_etherdev(sizeof(*tp));
1429 if (!dev) {
1430 net_drv(&debug, KERN_ERR PFX "unable to alloc new ethernet\n");
1431 rc = -ENOMEM;
1432 goto err_out_0;
1435 SET_MODULE_OWNER(dev);
1436 SET_NETDEV_DEV(dev, &pdev->dev);
1438 tp = netdev_priv(dev);
1439 tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);
1441 rc = pci_enable_device(pdev);
1442 if (rc < 0) {
1443 net_probe(tp, KERN_ERR "%s: enable failure\n", pci_name(pdev));
1444 goto err_free_dev_1;
1447 rc = -ENODEV;
1449 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1450 net_probe(tp, KERN_ERR "%s: region #0 is no MMIO resource.\n",
1451 pci_name(pdev));
1452 goto err_pci_disable_2;
1454 if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) {
1455 net_probe(tp, KERN_ERR "%s: invalid PCI region size(s).\n",
1456 pci_name(pdev));
1457 goto err_pci_disable_2;
1460 rc = pci_request_regions(pdev, DRV_NAME);
1461 if (rc < 0) {
1462 net_probe(tp, KERN_ERR PFX "%s: could not request regions.\n",
1463 pci_name(pdev));
1464 goto err_pci_disable_2;
1467 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
1468 if (rc < 0) {
1469 net_probe(tp, KERN_ERR "%s: DMA configuration failed.\n",
1470 pci_name(pdev));
1471 goto err_free_res_3;
1474 pci_set_master(pdev);
1476 ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE);
1477 if (!ioaddr) {
1478 net_probe(tp, KERN_ERR "%s: cannot remap MMIO, aborting\n",
1479 pci_name(pdev));
1480 rc = -EIO;
1481 goto err_free_res_3;
1484 tp->pci_dev = pdev;
1485 tp->mmio_addr = ioaddr;
1487 sis190_irq_mask_and_ack(ioaddr);
1489 sis190_soft_reset(ioaddr);
1490 out:
1491 return dev;
1493 err_free_res_3:
1494 pci_release_regions(pdev);
1495 err_pci_disable_2:
1496 pci_disable_device(pdev);
1497 err_free_dev_1:
1498 free_netdev(dev);
1499 err_out_0:
1500 dev = ERR_PTR(rc);
1501 goto out;
1504 static void sis190_tx_timeout(struct net_device *dev)
1506 struct sis190_private *tp = netdev_priv(dev);
1507 void __iomem *ioaddr = tp->mmio_addr;
1508 u8 tmp8;
1510 /* Disable Tx, if not already */
1511 tmp8 = SIS_R8(TxControl);
1512 if (tmp8 & CmdTxEnb)
1513 SIS_W8(TxControl, tmp8 & ~CmdTxEnb);
1516 net_tx_err(tp, KERN_INFO "%s: Transmit timeout, status %08x %08x.\n",
1517 dev->name, SIS_R32(TxControl), SIS_R32(TxSts));
1519 /* Disable interrupts by clearing the interrupt mask. */
1520 SIS_W32(IntrMask, 0x0000);
1522 /* Stop a shared interrupt from scavenging while we are. */
1523 spin_lock_irq(&tp->lock);
1524 sis190_tx_clear(tp);
1525 spin_unlock_irq(&tp->lock);
1527 /* ...and finally, reset everything. */
1528 sis190_hw_start(dev);
1530 netif_wake_queue(dev);
1533 static void sis190_set_rgmii(struct sis190_private *tp, u8 reg)
1535 tp->features |= (reg & 0x80) ? F_HAS_RGMII : 0;
1538 static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
1539 struct net_device *dev)
1541 struct sis190_private *tp = netdev_priv(dev);
1542 void __iomem *ioaddr = tp->mmio_addr;
1543 u16 sig;
1544 int i;
1546 net_probe(tp, KERN_INFO "%s: Read MAC address from EEPROM\n",
1547 pci_name(pdev));
1549 /* Check to see if there is a sane EEPROM */
1550 sig = (u16) sis190_read_eeprom(ioaddr, EEPROMSignature);
1552 if ((sig == 0xffff) || (sig == 0x0000)) {
1553 net_probe(tp, KERN_INFO "%s: Error EEPROM read %x.\n",
1554 pci_name(pdev), sig);
1555 return -EIO;
1558 /* Get MAC address from EEPROM */
1559 for (i = 0; i < MAC_ADDR_LEN / 2; i++) {
1560 __le16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i);
1562 ((u16 *)dev->dev_addr)[i] = le16_to_cpu(w);
1565 sis190_set_rgmii(tp, sis190_read_eeprom(ioaddr, EEPROMInfo));
1567 return 0;
1571 * sis190_get_mac_addr_from_apc - Get MAC address for SiS965 model
1572 * @pdev: PCI device
1573 * @dev: network device to get address for
1575 * The SiS965 model stores the MAC address in APC CMOS RAM,
1576 * which is accessed through the ISA bridge.
1577 * The MAC address is read into @dev->dev_addr.
1579 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
1580 struct net_device *dev)
1582 struct sis190_private *tp = netdev_priv(dev);
1583 struct pci_dev *isa_bridge;
1584 u8 reg, tmp8;
1585 int i;
1587 net_probe(tp, KERN_INFO "%s: Read MAC address from APC.\n",
1588 pci_name(pdev));
1590 isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, 0x0965, NULL);
1591 if (!isa_bridge) {
1592 net_probe(tp, KERN_INFO "%s: Can not find ISA bridge.\n",
1593 pci_name(pdev));
1594 return -EIO;
1597 /* Enable port 78h & 79h to access APC Registers. */
1598 pci_read_config_byte(isa_bridge, 0x48, &tmp8);
1599 reg = (tmp8 & ~0x02);
1600 pci_write_config_byte(isa_bridge, 0x48, reg);
1601 udelay(50);
1602 pci_read_config_byte(isa_bridge, 0x48, &reg);
1604 for (i = 0; i < MAC_ADDR_LEN; i++) {
1605 outb(0x9 + i, 0x78);
1606 dev->dev_addr[i] = inb(0x79);
1609 outb(0x12, 0x78);
1610 reg = inb(0x79);
1612 sis190_set_rgmii(tp, reg);
1614 /* Restore the value to ISA Bridge */
1615 pci_write_config_byte(isa_bridge, 0x48, tmp8);
1616 pci_dev_put(isa_bridge);
1618 return 0;
1622 * sis190_init_rxfilter - Initialize the Rx filter
1623 * @dev: network device to initialize
1625 * Set receive filter address to our MAC address
1626 * and enable packet filtering.
1628 static inline void sis190_init_rxfilter(struct net_device *dev)
1630 struct sis190_private *tp = netdev_priv(dev);
1631 void __iomem *ioaddr = tp->mmio_addr;
1632 u16 ctl;
1633 int i;
1635 ctl = SIS_R16(RxMacControl);
1637 * Disable packet filtering before setting filter.
1638 * Note: SiS's driver writes 32 bits but RxMacControl is 16 bits
1639 * only and followed by RxMacAddr (6 bytes). Strange. -- FR
1641 SIS_W16(RxMacControl, ctl & ~0x0f00);
1643 for (i = 0; i < MAC_ADDR_LEN; i++)
1644 SIS_W8(RxMacAddr + i, dev->dev_addr[i]);
1646 SIS_W16(RxMacControl, ctl);
1647 SIS_PCI_COMMIT();
1650 static int sis190_get_mac_addr(struct pci_dev *pdev, struct net_device *dev)
1652 u8 from;
1654 pci_read_config_byte(pdev, 0x73, &from);
1656 return (from & 0x00000001) ?
1657 sis190_get_mac_addr_from_apc(pdev, dev) :
1658 sis190_get_mac_addr_from_eeprom(pdev, dev);
1661 static void sis190_set_speed_auto(struct net_device *dev)
1663 struct sis190_private *tp = netdev_priv(dev);
1664 void __iomem *ioaddr = tp->mmio_addr;
1665 int phy_id = tp->mii_if.phy_id;
1666 int val;
1668 net_link(tp, KERN_INFO "%s: Enabling Auto-negotiation.\n", dev->name);
1670 val = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
1672 // Enable 10/100 Full/Half Mode, leave MII_ADVERTISE bit4:0
1673 // unchanged.
1674 mdio_write(ioaddr, phy_id, MII_ADVERTISE, (val & ADVERTISE_SLCT) |
1675 ADVERTISE_100FULL | ADVERTISE_10FULL |
1676 ADVERTISE_100HALF | ADVERTISE_10HALF);
1678 // Enable 1000 Full Mode.
1679 mdio_write(ioaddr, phy_id, MII_CTRL1000, ADVERTISE_1000FULL);
1681 // Enable auto-negotiation and restart auto-negotiation.
1682 mdio_write(ioaddr, phy_id, MII_BMCR,
1683 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET);
1686 static int sis190_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1688 struct sis190_private *tp = netdev_priv(dev);
1690 return mii_ethtool_gset(&tp->mii_if, cmd);
1693 static int sis190_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1695 struct sis190_private *tp = netdev_priv(dev);
1697 return mii_ethtool_sset(&tp->mii_if, cmd);
1700 static void sis190_get_drvinfo(struct net_device *dev,
1701 struct ethtool_drvinfo *info)
1703 struct sis190_private *tp = netdev_priv(dev);
1705 strcpy(info->driver, DRV_NAME);
1706 strcpy(info->version, DRV_VERSION);
1707 strcpy(info->bus_info, pci_name(tp->pci_dev));
1710 static int sis190_get_regs_len(struct net_device *dev)
1712 return SIS190_REGS_SIZE;
1715 static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1716 void *p)
1718 struct sis190_private *tp = netdev_priv(dev);
1719 unsigned long flags;
1721 if (regs->len > SIS190_REGS_SIZE)
1722 regs->len = SIS190_REGS_SIZE;
1724 spin_lock_irqsave(&tp->lock, flags);
1725 memcpy_fromio(p, tp->mmio_addr, regs->len);
1726 spin_unlock_irqrestore(&tp->lock, flags);
1729 static int sis190_nway_reset(struct net_device *dev)
1731 struct sis190_private *tp = netdev_priv(dev);
1733 return mii_nway_restart(&tp->mii_if);
1736 static u32 sis190_get_msglevel(struct net_device *dev)
1738 struct sis190_private *tp = netdev_priv(dev);
1740 return tp->msg_enable;
1743 static void sis190_set_msglevel(struct net_device *dev, u32 value)
1745 struct sis190_private *tp = netdev_priv(dev);
1747 tp->msg_enable = value;
1750 static const struct ethtool_ops sis190_ethtool_ops = {
1751 .get_settings = sis190_get_settings,
1752 .set_settings = sis190_set_settings,
1753 .get_drvinfo = sis190_get_drvinfo,
1754 .get_regs_len = sis190_get_regs_len,
1755 .get_regs = sis190_get_regs,
1756 .get_link = ethtool_op_get_link,
1757 .get_msglevel = sis190_get_msglevel,
1758 .set_msglevel = sis190_set_msglevel,
1759 .nway_reset = sis190_nway_reset,
1762 static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1764 struct sis190_private *tp = netdev_priv(dev);
1766 return !netif_running(dev) ? -EINVAL :
1767 generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL);
1770 static int __devinit sis190_init_one(struct pci_dev *pdev,
1771 const struct pci_device_id *ent)
1773 static int printed_version = 0;
1774 struct sis190_private *tp;
1775 struct net_device *dev;
1776 void __iomem *ioaddr;
1777 int rc;
1779 if (!printed_version) {
1780 net_drv(&debug, KERN_INFO SIS190_DRIVER_NAME " loaded.\n");
1781 printed_version = 1;
1784 dev = sis190_init_board(pdev);
1785 if (IS_ERR(dev)) {
1786 rc = PTR_ERR(dev);
1787 goto out;
1790 pci_set_drvdata(pdev, dev);
1792 tp = netdev_priv(dev);
1793 ioaddr = tp->mmio_addr;
1795 rc = sis190_get_mac_addr(pdev, dev);
1796 if (rc < 0)
1797 goto err_release_board;
1799 sis190_init_rxfilter(dev);
1801 INIT_WORK(&tp->phy_task, sis190_phy_task, dev);
1803 dev->open = sis190_open;
1804 dev->stop = sis190_close;
1805 dev->do_ioctl = sis190_ioctl;
1806 dev->get_stats = sis190_get_stats;
1807 dev->tx_timeout = sis190_tx_timeout;
1808 dev->watchdog_timeo = SIS190_TX_TIMEOUT;
1809 dev->hard_start_xmit = sis190_start_xmit;
1810 #ifdef CONFIG_NET_POLL_CONTROLLER
1811 dev->poll_controller = sis190_netpoll;
1812 #endif
1813 dev->set_multicast_list = sis190_set_rx_mode;
1814 SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
1815 dev->irq = pdev->irq;
1816 dev->base_addr = (unsigned long) 0xdead;
1818 spin_lock_init(&tp->lock);
1820 rc = sis190_mii_probe(dev);
1821 if (rc < 0)
1822 goto err_release_board;
1824 rc = register_netdev(dev);
1825 if (rc < 0)
1826 goto err_remove_mii;
1828 net_probe(tp, KERN_INFO "%s: %s at %p (IRQ: %d), "
1829 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
1830 pci_name(pdev), sis_chip_info[ent->driver_data].name,
1831 ioaddr, dev->irq,
1832 dev->dev_addr[0], dev->dev_addr[1],
1833 dev->dev_addr[2], dev->dev_addr[3],
1834 dev->dev_addr[4], dev->dev_addr[5]);
1836 net_probe(tp, KERN_INFO "%s: %s mode.\n", dev->name,
1837 (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII");
1839 netif_carrier_off(dev);
1841 sis190_set_speed_auto(dev);
1842 out:
1843 return rc;
1845 err_remove_mii:
1846 sis190_mii_remove(dev);
1847 err_release_board:
1848 sis190_release_board(pdev);
1849 goto out;
1852 static void __devexit sis190_remove_one(struct pci_dev *pdev)
1854 struct net_device *dev = pci_get_drvdata(pdev);
1856 sis190_mii_remove(dev);
1857 unregister_netdev(dev);
1858 sis190_release_board(pdev);
1859 pci_set_drvdata(pdev, NULL);
1862 static struct pci_driver sis190_pci_driver = {
1863 .name = DRV_NAME,
1864 .id_table = sis190_pci_tbl,
1865 .probe = sis190_init_one,
1866 .remove = __devexit_p(sis190_remove_one),
1869 static int __init sis190_init_module(void)
1871 return pci_register_driver(&sis190_pci_driver);
1874 static void __exit sis190_cleanup_module(void)
1876 pci_unregister_driver(&sis190_pci_driver);
1879 module_init(sis190_init_module);
1880 module_exit(sis190_cleanup_module);