/*
   sis190.c: Silicon Integrated Systems SiS190 ethernet driver

   Copyright (c) 2003 K.M. Liu <kmliu@sis.com>
   Copyright (c) 2003, 2004 Jeff Garzik <jgarzik@pobox.com>
   Copyright (c) 2003, 2004, 2005 Francois Romieu <romieu@fr.zoreil.com>

   Based on r8169.c, tg3.c, 8139cp.c, skge.c, epic100.c and SiS 190/191
   genuine driver.

   This software may be used and distributed according to the terms of
   the GNU General Public License (GPL), incorporated herein by reference.
   Drivers based on or derived from this code fall under the GPL and must
   retain the authorship, copyright and license notice.  This file is not
   a complete program and may only be used when the entire operating
   system is licensed under the GPL.

   See the file COPYING in this distribution for more information.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/pci.h>
#include <linux/mii.h>
#include <linux/delay.h>
#include <linux/crc32.h>
#include <linux/dma-mapping.h>

#include <asm/irq.h>
#define net_drv(p, arg...)	if (netif_msg_drv(p)) \
					printk(arg)
#define net_probe(p, arg...)	if (netif_msg_probe(p)) \
					printk(arg)
#define net_link(p, arg...)	if (netif_msg_link(p)) \
					printk(arg)
#define net_intr(p, arg...)	if (netif_msg_intr(p)) \
					printk(arg)
#define net_tx_err(p, arg...)	if (netif_msg_tx_err(p)) \
					printk(arg)

#define PHY_MAX_ADDR		32
#define PHY_ID_ANY		0x1f
#define MII_REG_ANY		0x1f

#ifdef CONFIG_SIS190_NAPI
#define NAPI_SUFFIX	"-NAPI"
#else
#define NAPI_SUFFIX	""
#endif

#define DRV_VERSION		"1.2" NAPI_SUFFIX
#define DRV_NAME		"sis190"
#define SIS190_DRIVER_NAME	DRV_NAME " Gigabit Ethernet driver " DRV_VERSION
#define PFX DRV_NAME ": "
#ifdef CONFIG_SIS190_NAPI
#define sis190_rx_skb			netif_receive_skb
#define sis190_rx_quota(count, quota)	min(count, quota)
#else
#define sis190_rx_skb			netif_rx
#define sis190_rx_quota(count, quota)	count
#endif

#define MAC_ADDR_LEN		6

#define NUM_TX_DESC		64	/* [8..1024] */
#define NUM_RX_DESC		64	/* [8..8192] */
#define TX_RING_BYTES		(NUM_TX_DESC * sizeof(struct TxDesc))
#define RX_RING_BYTES		(NUM_RX_DESC * sizeof(struct RxDesc))
#define RX_BUF_SIZE		1536
#define RX_BUF_MASK		0xfff8

#define SIS190_REGS_SIZE	0x80
#define SIS190_TX_TIMEOUT	(6*HZ)
#define SIS190_PHY_TIMEOUT	(10*HZ)
#define SIS190_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
				 NETIF_MSG_LINK | NETIF_MSG_IFUP | \
				 NETIF_MSG_IFDOWN)
/* Enhanced PHY access register bit definitions */
#define EhnMIIread		0x0000
#define EhnMIIwrite		0x0020
#define EhnMIIdataShift		16
#define EhnMIIpmdShift		6	/* 7016 only */
#define EhnMIIregShift		11
#define EhnMIIreq		0x0010
#define EhnMIInotDone		0x0010

/* Write/read MMIO register */
#define SIS_W8(reg, val)	writeb ((val), ioaddr + (reg))
#define SIS_W16(reg, val)	writew ((val), ioaddr + (reg))
#define SIS_W32(reg, val)	writel ((val), ioaddr + (reg))
#define SIS_R8(reg)		readb (ioaddr + (reg))
#define SIS_R16(reg)		readw (ioaddr + (reg))
#define SIS_R32(reg)		readl (ioaddr + (reg))

#define SIS_PCI_COMMIT()	SIS_R32(IntrControl)
enum sis190_registers {
	TxControl		= 0x00,
	TxDescStartAddr		= 0x04,
	rsv0			= 0x08,	// reserved
	TxSts			= 0x0c,	// unused (Control/Status)
	RxControl		= 0x10,
	RxDescStartAddr		= 0x14,
	rsv1			= 0x18,	// reserved
	RxSts			= 0x1c,	// unused
	IntrStatus		= 0x20,
	IntrMask		= 0x24,
	IntrControl		= 0x28,
	IntrTimer		= 0x2c,	// unused (Interrupt Timer)
	PMControl		= 0x30,	// unused (Power Mgmt Control/Status)
	rsv2			= 0x34,	// reserved
	ROMControl		= 0x38,
	ROMInterface		= 0x3c,
	StationControl		= 0x40,
	GMIIControl		= 0x44,
	GIoCR			= 0x48,	// unused (GMAC IO Compensation)
	GIoCtrl			= 0x4c,	// unused (GMAC IO Control)
	TxMacControl		= 0x50,
	TxLimit			= 0x54,	// unused (Tx MAC Timer/TryLimit)
	RGDelay			= 0x58,	// unused (RGMII Tx Internal Delay)
	rsv3			= 0x5c,	// reserved
	RxMacControl		= 0x60,
	RxMacAddr		= 0x62,
	RxHashTable		= 0x68,
	// Undocumented		= 0x6c,
	RxWolCtrl		= 0x70,
	RxWolData		= 0x74,	// unused (Rx WOL Data Access)
	RxMPSControl		= 0x78,	// unused (Rx MPS Control)
	rsv4			= 0x7c,	// reserved
};
enum sis190_register_content {
	/* IntrStatus */
	SoftInt			= 0x40000000,	// unused
	Timeup			= 0x20000000,	// unused
	PauseFrame		= 0x00080000,	// unused
	MagicPacket		= 0x00040000,	// unused
	WakeupFrame		= 0x00020000,	// unused
	LinkChange		= 0x00010000,
	RxQEmpty		= 0x00000080,
	RxQInt			= 0x00000040,
	TxQ1Empty		= 0x00000020,	// unused
	TxQ1Int			= 0x00000010,
	TxQ0Empty		= 0x00000008,	// unused
	TxQ0Int			= 0x00000004,
	RxHalt			= 0x00000002,
	TxHalt			= 0x00000001,

	/* {Rx/Tx}CmdBits */
	CmdReset		= 0x10,
	CmdRxEnb		= 0x08,		// unused
	CmdTxEnb		= 0x01,
	RxBufEmpty		= 0x01,		// unused

	/* Cfg9346Bits */
	Cfg9346_Lock		= 0x00,		// unused
	Cfg9346_Unlock		= 0xc0,		// unused

	/* RxMacControl */
	AcceptErr		= 0x20,		// unused
	AcceptRunt		= 0x10,		// unused
	AcceptBroadcast		= 0x0800,
	AcceptMulticast		= 0x0400,
	AcceptMyPhys		= 0x0200,
	AcceptAllPhys		= 0x0100,

	/* RxConfigBits */
	RxCfgFIFOShift		= 13,
	RxCfgDMAShift		= 8,		// 0x1a in RxControl ?

	/* TxConfigBits */
	TxInterFrameGapShift	= 24,
	TxDMAShift		= 8, /* DMA burst value (0-7) is shift this many bits */

	LinkStatus		= 0x02,		// unused
	FullDup			= 0x01,		// unused

	/* TBICSRBit */
	TBILinkOK		= 0x02000000,	// unused
};
struct TxDesc {
	__le32 PSize;
	__le32 status;
	__le32 addr;
	__le32 size;
};

struct RxDesc {
	__le32 PSize;
	__le32 status;
	__le32 addr;
	__le32 size;
};
enum _DescStatusBit {
	/* _Desc.status */
	OWNbit		= 0x80000000,	// RXOWN/TXOWN
	INTbit		= 0x40000000,	// RXINT/TXINT
	CRCbit		= 0x00020000,	// CRCOFF/CRCEN
	PADbit		= 0x00010000,	// PREADD/PADEN
	/* _Desc.size */
	RingEnd		= 0x80000000,
	/* TxDesc.status */
	LSEN		= 0x08000000,	// TSO ? -- FR
	IPCS		= 0x04000000,
	TCPCS		= 0x02000000,
	UDPCS		= 0x01000000,
	BSTEN		= 0x00800000,
	EXTEN		= 0x00400000,
	DEFEN		= 0x00200000,
	BKFEN		= 0x00100000,
	CRSEN		= 0x00080000,
	COLEN		= 0x00040000,
	THOL3		= 0x30000000,
	THOL2		= 0x20000000,
	THOL1		= 0x10000000,
	THOL0		= 0x00000000,
	/* RxDesc.status */
	IPON		= 0x20000000,
	TCPON		= 0x10000000,
	UDPON		= 0x08000000,
	Wakup		= 0x00400000,
	Magic		= 0x00200000,
	Pause		= 0x00100000,
	DEFbit		= 0x00200000,
	BCAST		= 0x000c0000,
	MCAST		= 0x00080000,
	UCAST		= 0x00040000,
	/* RxDesc.PSize */
	TAGON		= 0x80000000,
	RxDescCountMask	= 0x7f000000,	// multi-desc pkt when > 1 ? -- FR
	ABORT		= 0x00800000,
	SHORT		= 0x00400000,
	LIMIT		= 0x00200000,
	MIIER		= 0x00100000,
	OVRUN		= 0x00080000,
	NIBON		= 0x00040000,
	COLON		= 0x00020000,
	CRCOK		= 0x00010000,
	RxSizeMask	= 0x0000ffff
	/*
	 * The asic could apparently do vlan, TSO, jumbo (sis191 only) and
	 * provide two (unused with Linux) Tx queues. No publicly
	 * available documentation alas.
	 */
};
enum sis190_eeprom_access_register_bits {
	EECS	= 0x00000001,	// unused
	EECLK	= 0x00000002,	// unused
	EEDO	= 0x00000008,	// unused
	EEDI	= 0x00000004,	// unused
	EEREQ	= 0x00000080,
	EEROP	= 0x00000200,
	EEWOP	= 0x00000100	// unused
};

/* EEPROM Addresses */
enum sis190_eeprom_address {
	EEPROMSignature	= 0x00,
	EEPROMCLK	= 0x01,	// unused
	EEPROMInfo	= 0x02,
	EEPROMMACAddr	= 0x03
};

enum sis190_feature {
	F_HAS_RGMII	= 1,
	F_PHY_88E1111	= 2,
	F_PHY_BCM5461	= 4
};
struct sis190_private {
	void __iomem *mmio_addr;
	struct pci_dev *pci_dev;
	struct net_device *dev;
	struct net_device_stats stats;
	spinlock_t lock;
	u32 rx_buf_sz;
	u32 cur_rx;
	u32 cur_tx;
	u32 dirty_rx;
	u32 dirty_tx;
	dma_addr_t rx_dma;
	dma_addr_t tx_dma;
	struct RxDesc *RxDescRing;
	struct TxDesc *TxDescRing;
	struct sk_buff *Rx_skbuff[NUM_RX_DESC];
	struct sk_buff *Tx_skbuff[NUM_TX_DESC];
	struct work_struct phy_task;
	struct timer_list timer;
	u32 msg_enable;
	struct mii_if_info mii_if;
	struct list_head first_phy;
	u32 features;
};

struct sis190_phy {
	struct list_head list;
	int phy_id;
	u16 id[2];
	u16 status;
	u8  type;
};

enum sis190_phy_type {
	UNKNOWN	= 0x00,
	HOME	= 0x01,
	LAN	= 0x02,
	MIX	= 0x03
};
static struct mii_chip_info {
	const char *name;
	u16 id[2];
	unsigned int type;
	u32 feature;
} mii_chip_table[] = {
	{ "Broadcom PHY BCM5461", { 0x0020, 0x60c0 }, LAN, F_PHY_BCM5461 },
	{ "Broadcom PHY AC131",   { 0x0143, 0xbc70 }, LAN, 0 },
	{ "Agere PHY ET1101B",    { 0x0282, 0xf010 }, LAN, 0 },
	{ "Marvell PHY 88E1111",  { 0x0141, 0x0cc0 }, LAN, F_PHY_88E1111 },
	{ "Realtek PHY RTL8201",  { 0x0000, 0x8200 }, LAN, 0 },
	{ NULL, }
};

static const struct {
	const char *name;
} sis_chip_info[] = {
	{ "SiS 190 PCI Fast Ethernet adapter" },
	{ "SiS 191 PCI Gigabit Ethernet adapter" },
};

static struct pci_device_id sis190_pci_tbl[] __devinitdata = {
	{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
	{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0191), 0, 0, 1 },
	{ 0, },
};

MODULE_DEVICE_TABLE(pci, sis190_pci_tbl);
static int rx_copybreak = 200;

static struct {
	u32 msg_enable;
} debug = { -1 };

MODULE_DESCRIPTION("SiS sis190 Gigabit Ethernet driver");
module_param(rx_copybreak, int, 0);
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
module_param_named(debug, debug.msg_enable, int, 0);
MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
MODULE_AUTHOR("K.M. Liu <kmliu@sis.com>, Ueimor <romieu@fr.zoreil.com>");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");

static const u32 sis190_intr_mask =
	RxQEmpty | RxQInt | TxQ1Int | TxQ0Int | RxHalt | TxHalt | LinkChange;

/*
 * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
 * The chips use a 64 element hash table based on the Ethernet CRC.
 */
static const int multicast_filter_limit = 32;
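/*
 * PHY (MII) access goes through the GMIIControl register: a command word
 * selecting read/write, the PHY address and the register number is written,
 * then the EhnMIInotDone bit is polled until the chip completes the cycle.
 */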
static void __mdio_cmd(void __iomem *ioaddr, u32 ctl)
{
	unsigned int i;

	SIS_W32(GMIIControl, ctl);

	msleep(1);

	for (i = 0; i < 100; i++) {
		if (!(SIS_R32(GMIIControl) & EhnMIInotDone))
			break;
		msleep(1);
	}

	if (i > 99)
		printk(KERN_ERR PFX "PHY command failed !\n");
}
static void mdio_write(void __iomem *ioaddr, int phy_id, int reg, int val)
{
	__mdio_cmd(ioaddr, EhnMIIreq | EhnMIIwrite |
		(((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift) |
		(((u32) val) << EhnMIIdataShift));
}

static int mdio_read(void __iomem *ioaddr, int phy_id, int reg)
{
	__mdio_cmd(ioaddr, EhnMIIreq | EhnMIIread |
		(((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift));

	return (u16) (SIS_R32(GMIIControl) >> EhnMIIdataShift);
}

static void __mdio_write(struct net_device *dev, int phy_id, int reg, int val)
{
	struct sis190_private *tp = netdev_priv(dev);

	mdio_write(tp->mmio_addr, phy_id, reg, val);
}

static int __mdio_read(struct net_device *dev, int phy_id, int reg)
{
	struct sis190_private *tp = netdev_priv(dev);

	return mdio_read(tp->mmio_addr, phy_id, reg);
}
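/*
 * Some MII status bits (e.g. the link status bit in BMSR) are latched:
 * the first read returns the remembered state and the second read the
 * current one, hence the back-to-back reads below.
 */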
static u16 mdio_read_latched(void __iomem *ioaddr, int phy_id, int reg)
{
	mdio_read(ioaddr, phy_id, reg);
	return mdio_read(ioaddr, phy_id, reg);
}

static u16 __devinit sis190_read_eeprom(void __iomem *ioaddr, u32 reg)
{
	u16 data = 0xffff;
	unsigned int i;

	if (!(SIS_R32(ROMControl) & 0x0002))
		return 0;

	SIS_W32(ROMInterface, EEREQ | EEROP | (reg << 10));

	for (i = 0; i < 200; i++) {
		if (!(SIS_R32(ROMInterface) & EEREQ)) {
			data = (SIS_R32(ROMInterface) & 0xffff0000) >> 16;
			break;
		}
		msleep(1);
	}

	return data;
}
static void sis190_irq_mask_and_ack(void __iomem *ioaddr)
{
	SIS_W32(IntrMask, 0x00);
	SIS_W32(IntrStatus, 0xffffffff);
	SIS_PCI_COMMIT();
}

static void sis190_asic_down(void __iomem *ioaddr)
{
	/* Stop the chip's Tx and Rx DMA processes. */

	SIS_W32(TxControl, 0x1a00);
	SIS_W32(RxControl, 0x1a00);

	sis190_irq_mask_and_ack(ioaddr);
}

static void sis190_mark_as_last_descriptor(struct RxDesc *desc)
{
	desc->size |= cpu_to_le32(RingEnd);
}
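/*
 * Descriptor ownership handshake: the size and address fields are set up
 * first, then a write barrier is issued, and only then is OWNbit set, so
 * the chip never sees a half-initialized descriptor.
 */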
static inline void sis190_give_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
{
	u32 eor = le32_to_cpu(desc->size) & RingEnd;

	desc->PSize = 0x0;
	desc->size = cpu_to_le32((rx_buf_sz & RX_BUF_MASK) | eor);
	wmb();
	desc->status = cpu_to_le32(OWNbit | INTbit);
}

static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
				      u32 rx_buf_sz)
{
	desc->addr = cpu_to_le32(mapping);
	sis190_give_to_asic(desc, rx_buf_sz);
}

static inline void sis190_make_unusable_by_asic(struct RxDesc *desc)
{
	desc->PSize = 0x0;
	desc->addr = 0xdeadbeef;
	desc->size &= cpu_to_le32(RingEnd);
	wmb();
	desc->status = 0x0;
}

static int sis190_alloc_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff,
			       struct RxDesc *desc, u32 rx_buf_sz)
{
	struct sk_buff *skb;
	dma_addr_t mapping;
	int ret = 0;

	skb = dev_alloc_skb(rx_buf_sz);
	if (!skb)
		goto err_out;

	*sk_buff = skb;

	mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
				 PCI_DMA_FROMDEVICE);

	sis190_map_to_asic(desc, mapping, rx_buf_sz);
out:
	return ret;

err_out:
	ret = -ENOMEM;
	sis190_make_unusable_by_asic(desc);
	goto out;
}
static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
			  u32 start, u32 end)
{
	u32 cur;

	for (cur = start; cur < end; cur++) {
		int ret, i = cur % NUM_RX_DESC;

		if (tp->Rx_skbuff[i])
			continue;

		ret = sis190_alloc_rx_skb(tp->pci_dev, tp->Rx_skbuff + i,
					  tp->RxDescRing + i, tp->rx_buf_sz);
		if (ret < 0)
			break;
	}
	return cur - start;
}

static inline int sis190_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
				     struct RxDesc *desc, int rx_buf_sz)
{
	int ret = -1;

	if (pkt_size < rx_copybreak) {
		struct sk_buff *skb;

		skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN);
		if (skb) {
			skb_reserve(skb, NET_IP_ALIGN);
			eth_copy_and_sum(skb, sk_buff[0]->data, pkt_size, 0);
			*sk_buff = skb;
			sis190_give_to_asic(desc, rx_buf_sz);
			ret = 0;
		}
	}
	return ret;
}
static inline int sis190_rx_pkt_err(u32 status, struct net_device_stats *stats)
{
#define ErrMask	(OVRUN | SHORT | LIMIT | MIIER | NIBON | COLON | ABORT)

	if ((status & CRCOK) && !(status & ErrMask))
		return 0;

	if (!(status & CRCOK))
		stats->rx_crc_errors++;
	else if (status & OVRUN)
		stats->rx_over_errors++;
	else if (status & (SHORT | LIMIT))
		stats->rx_length_errors++;
	else if (status & (MIIER | NIBON | COLON))
		stats->rx_frame_errors++;

	stats->rx_errors++;
	return -1;
}
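/*
 * Rx path: walk the ring from cur_rx until a descriptor still owned by the
 * chip is found. Small packets (below rx_copybreak) are copied into a fresh
 * skb so the ring buffer can be reused immediately; larger ones are handed
 * to the stack and their ring slot is refilled later by sis190_rx_fill().
 */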
static int sis190_rx_interrupt(struct net_device *dev,
			       struct sis190_private *tp, void __iomem *ioaddr)
{
	struct net_device_stats *stats = &tp->stats;
	u32 rx_left, cur_rx = tp->cur_rx;
	u32 delta, count;

	rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
	rx_left = sis190_rx_quota(rx_left, (u32) dev->quota);

	for (; rx_left > 0; rx_left--, cur_rx++) {
		unsigned int entry = cur_rx % NUM_RX_DESC;
		struct RxDesc *desc = tp->RxDescRing + entry;
		u32 status;

		if (desc->status & OWNbit)
			break;

		status = le32_to_cpu(desc->PSize);

		// net_intr(tp, KERN_INFO "%s: Rx PSize = %08x.\n", dev->name,
		//	 status);

		if (sis190_rx_pkt_err(status, stats) < 0)
			sis190_give_to_asic(desc, tp->rx_buf_sz);
		else {
			struct sk_buff *skb = tp->Rx_skbuff[entry];
			int pkt_size = (status & RxSizeMask) - 4;
			void (*pci_action)(struct pci_dev *, dma_addr_t,
				size_t, int) = pci_dma_sync_single_for_device;

			if (unlikely(pkt_size > tp->rx_buf_sz)) {
				net_intr(tp, KERN_INFO
					 "%s: (frag) status = %08x.\n",
					 dev->name, status);
				stats->rx_dropped++;
				stats->rx_length_errors++;
				sis190_give_to_asic(desc, tp->rx_buf_sz);
				continue;
			}

			pci_dma_sync_single_for_cpu(tp->pci_dev,
				le32_to_cpu(desc->addr), tp->rx_buf_sz,
				PCI_DMA_FROMDEVICE);

			if (sis190_try_rx_copy(&skb, pkt_size, desc,
					       tp->rx_buf_sz)) {
				pci_action = pci_unmap_single;
				tp->Rx_skbuff[entry] = NULL;
				sis190_make_unusable_by_asic(desc);
			}

			pci_action(tp->pci_dev, le32_to_cpu(desc->addr),
				   tp->rx_buf_sz, PCI_DMA_FROMDEVICE);

			skb->dev = dev;
			skb_put(skb, pkt_size);
			skb->protocol = eth_type_trans(skb, dev);

			sis190_rx_skb(skb);

			dev->last_rx = jiffies;
			stats->rx_packets++;
			stats->rx_bytes += pkt_size;
			if ((status & BCAST) == MCAST)
				stats->multicast++;
		}
	}
	count = cur_rx - tp->cur_rx;
	tp->cur_rx = cur_rx;

	delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
	if (!delta && count && netif_msg_intr(tp))
		printk(KERN_INFO "%s: no Rx buffer allocated.\n", dev->name);
	tp->dirty_rx += delta;

	if (((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx) && netif_msg_intr(tp))
		printk(KERN_EMERG "%s: Rx buffers exhausted.\n", dev->name);

	return count;
}
static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
				struct TxDesc *desc)
{
	unsigned int len;

	len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;

	pci_unmap_single(pdev, le32_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);

	memset(desc, 0x00, sizeof(*desc));
}
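/*
 * Tx completion: reclaim descriptors the chip has released (OWNbit cleared),
 * free the associated skbs and wake the queue if it had been stopped because
 * the ring was full.
 */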
static void sis190_tx_interrupt(struct net_device *dev,
				struct sis190_private *tp, void __iomem *ioaddr)
{
	u32 pending, dirty_tx = tp->dirty_tx;
	/*
	 * It would not be needed if queueing was allowed to be enabled
	 * again too early (hint: think preempt and unclocked smp systems).
	 */
	unsigned int queue_stopped;

	smp_rmb();
	pending = tp->cur_tx - dirty_tx;
	queue_stopped = (pending == NUM_TX_DESC);

	for (; pending; pending--, dirty_tx++) {
		unsigned int entry = dirty_tx % NUM_TX_DESC;
		struct TxDesc *txd = tp->TxDescRing + entry;
		struct sk_buff *skb;

		if (le32_to_cpu(txd->status) & OWNbit)
			break;

		skb = tp->Tx_skbuff[entry];

		tp->stats.tx_packets++;
		tp->stats.tx_bytes += skb->len;

		sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
		tp->Tx_skbuff[entry] = NULL;
		dev_kfree_skb_irq(skb);
	}

	if (tp->dirty_tx != dirty_tx) {
		tp->dirty_tx = dirty_tx;
		smp_wmb();
		if (queue_stopped)
			netif_wake_queue(dev);
	}
}
/*
 * The interrupt handler does all of the Rx thread work and cleans up after
 * the Tx thread.
 */
static irqreturn_t sis190_interrupt(int irq, void *__dev)
{
	struct net_device *dev = __dev;
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int handled = 0;
	u32 status;

	status = SIS_R32(IntrStatus);

	if ((status == 0xffffffff) || !status)
		goto out;

	handled = 1;

	if (unlikely(!netif_running(dev))) {
		sis190_asic_down(ioaddr);
		goto out;
	}

	SIS_W32(IntrStatus, status);

	// net_intr(tp, KERN_INFO "%s: status = %08x.\n", dev->name, status);

	if (status & LinkChange) {
		net_intr(tp, KERN_INFO "%s: link change.\n", dev->name);
		schedule_work(&tp->phy_task);
	}

	if (status & RxQInt)
		sis190_rx_interrupt(dev, tp, ioaddr);

	if (status & TxQ0Int)
		sis190_tx_interrupt(dev, tp, ioaddr);
out:
	return IRQ_RETVAL(handled);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void sis190_netpoll(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	disable_irq(pdev->irq);
	sis190_interrupt(pdev->irq, dev);
	enable_irq(pdev->irq);
}
#endif
static void sis190_free_rx_skb(struct sis190_private *tp,
			       struct sk_buff **sk_buff, struct RxDesc *desc)
{
	struct pci_dev *pdev = tp->pci_dev;

	pci_unmap_single(pdev, le32_to_cpu(desc->addr), tp->rx_buf_sz,
			 PCI_DMA_FROMDEVICE);
	dev_kfree_skb(*sk_buff);
	*sk_buff = NULL;
	sis190_make_unusable_by_asic(desc);
}

static void sis190_rx_clear(struct sis190_private *tp)
{
	unsigned int i;

	for (i = 0; i < NUM_RX_DESC; i++) {
		if (!tp->Rx_skbuff[i])
			continue;
		sis190_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescRing + i);
	}
}

static void sis190_init_ring_indexes(struct sis190_private *tp)
{
	tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
}

static int sis190_init_ring(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);

	sis190_init_ring_indexes(tp);

	memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
	memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));

	if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
		goto err_rx_clear;

	sis190_mark_as_last_descriptor(tp->RxDescRing + NUM_RX_DESC - 1);

	return 0;

err_rx_clear:
	sis190_rx_clear(tp);
	return -ENOMEM;
}
static void sis190_set_rx_mode(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long flags;
	u32 mc_filter[2];	/* Multicast hash filter */
	u16 rx_mode;

	if (dev->flags & IFF_PROMISC) {
		rx_mode =
			AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
			AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((dev->mc_count > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter perfectly -- accept all multicasts. */
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else {
		struct dev_mc_list *mclist;
		unsigned int i;

		rx_mode = AcceptBroadcast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0;
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			int bit_nr =
				ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
			rx_mode |= AcceptMulticast;
		}
	}

	spin_lock_irqsave(&tp->lock, flags);

	SIS_W16(RxMacControl, rx_mode | 0x2);
	SIS_W32(RxHashTable, mc_filter[0]);
	SIS_W32(RxHashTable + 4, mc_filter[1]);

	spin_unlock_irqrestore(&tp->lock, flags);
}
static void sis190_soft_reset(void __iomem *ioaddr)
{
	SIS_W32(IntrControl, 0x8000);
	SIS_PCI_COMMIT();
	msleep(1);
	SIS_W32(IntrControl, 0x0);
	sis190_asic_down(ioaddr);
	msleep(1);
}

static void sis190_hw_start(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	sis190_soft_reset(ioaddr);

	SIS_W32(TxDescStartAddr, tp->tx_dma);
	SIS_W32(RxDescStartAddr, tp->rx_dma);

	SIS_W32(IntrStatus, 0xffffffff);
	SIS_W32(IntrMask, 0x0);
	SIS_W32(GMIIControl, 0x0);
	SIS_W32(TxMacControl, 0x60);
	SIS_W16(RxMacControl, 0x02);
	SIS_W32(RxHashTable, 0x0);
	SIS_W32(0x6c, 0x0);
	SIS_W32(RxWolCtrl, 0x0);
	SIS_W32(RxWolData, 0x0);

	SIS_PCI_COMMIT();

	sis190_set_rx_mode(dev);

	/* Enable all known interrupts by setting the interrupt mask. */
	SIS_W32(IntrMask, sis190_intr_mask);

	SIS_W32(TxControl, 0x1a00 | CmdTxEnb);
	SIS_W32(RxControl, 0x1a1d);

	netif_start_queue(dev);
}
static void sis190_phy_task(struct work_struct *work)
{
	struct sis190_private *tp =
		container_of(work, struct sis190_private, phy_task);
	struct net_device *dev = tp->dev;
	void __iomem *ioaddr = tp->mmio_addr;
	int phy_id = tp->mii_if.phy_id;
	u16 val;

	rtnl_lock();

	if (!netif_running(dev))
		goto out_unlock;

	val = mdio_read(ioaddr, phy_id, MII_BMCR);
	if (val & BMCR_RESET) {
		// FIXME: needlessly high ?  -- FR 02/07/2005
		mod_timer(&tp->timer, jiffies + HZ/10);
	} else if (!(mdio_read_latched(ioaddr, phy_id, MII_BMSR) &
		     BMSR_ANEGCOMPLETE)) {
		net_link(tp, KERN_WARNING "%s: PHY reset until link up.\n",
			 dev->name);
		netif_carrier_off(dev);
		mdio_write(ioaddr, phy_id, MII_BMCR, val | BMCR_RESET);
		mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);
	} else {
		/* Rejoice ! */
		struct {
			int val;
			u32 ctl;
			const char *msg;
		} reg31[] = {
			{ LPA_1000XFULL | LPA_SLCT, 0x07000c00 | 0x00001000,
			  "1000 Mbps Full Duplex" },
			{ LPA_1000XHALF | LPA_SLCT, 0x07000c00,
			  "1000 Mbps Half Duplex" },
			{ LPA_100FULL, 0x04000800 | 0x00001000,
			  "100 Mbps Full Duplex" },
			{ LPA_100HALF, 0x04000800,
			  "100 Mbps Half Duplex" },
			{ LPA_10FULL, 0x04000400 | 0x00001000,
			  "10 Mbps Full Duplex" },
			{ LPA_10HALF, 0x04000400,
			  "10 Mbps Half Duplex" },
			{ 0, 0x04000400, "unknown" }
		}, *p;
		u16 adv;

		val = mdio_read(ioaddr, phy_id, 0x1f);
		net_link(tp, KERN_INFO "%s: mii ext = %04x.\n", dev->name, val);

		val = mdio_read(ioaddr, phy_id, MII_LPA);
		adv = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
		net_link(tp, KERN_INFO "%s: mii lpa = %04x adv = %04x.\n",
			 dev->name, val, adv);

		val &= adv;

		for (p = reg31; p->val; p++) {
			if ((val & p->val) == p->val)
				break;
		}

		p->ctl |= SIS_R32(StationControl) & ~0x0f001c00;

		if ((tp->features & F_HAS_RGMII) &&
		    (tp->features & F_PHY_BCM5461)) {
			// Set Tx Delay in RGMII mode.
			mdio_write(ioaddr, phy_id, 0x18, 0xf1c7);
			udelay(200);
			mdio_write(ioaddr, phy_id, 0x1c, 0x8c00);
			p->ctl |= 0x03000000;
		}

		SIS_W32(StationControl, p->ctl);

		if (tp->features & F_HAS_RGMII) {
			SIS_W32(RGDelay, 0x0441);
			SIS_W32(RGDelay, 0x0440);
		}

		net_link(tp, KERN_INFO "%s: link on %s mode.\n", dev->name,
			 p->msg);
		netif_carrier_on(dev);
	}

out_unlock:
	rtnl_unlock();
}
static void sis190_phy_timer(unsigned long __opaque)
{
	struct net_device *dev = (struct net_device *)__opaque;
	struct sis190_private *tp = netdev_priv(dev);

	if (likely(netif_running(dev)))
		schedule_work(&tp->phy_task);
}

static inline void sis190_delete_timer(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);

	del_timer_sync(&tp->timer);
}

static inline void sis190_request_timer(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct timer_list *timer = &tp->timer;

	init_timer(timer);
	timer->expires = jiffies + SIS190_PHY_TIMEOUT;
	timer->data = (unsigned long)dev;
	timer->function = sis190_phy_timer;
	add_timer(timer);
}

static void sis190_set_rxbufsize(struct sis190_private *tp,
				 struct net_device *dev)
{
	unsigned int mtu = dev->mtu;

	tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
	/* RxDesc->size has a licence to kill the lower bits */
	if (tp->rx_buf_sz & 0x07) {
		tp->rx_buf_sz += 8;
		tp->rx_buf_sz &= RX_BUF_MASK;
	}
}
static int sis190_open(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;
	int rc = -ENOMEM;

	sis190_set_rxbufsize(tp, dev);

	/*
	 * Rx and Tx descriptors need 256 bytes alignment.
	 * pci_alloc_consistent() guarantees a stronger alignment.
	 */
	tp->TxDescRing = pci_alloc_consistent(pdev, TX_RING_BYTES, &tp->tx_dma);
	if (!tp->TxDescRing)
		goto out;

	tp->RxDescRing = pci_alloc_consistent(pdev, RX_RING_BYTES, &tp->rx_dma);
	if (!tp->RxDescRing)
		goto err_free_tx_0;

	rc = sis190_init_ring(dev);
	if (rc < 0)
		goto err_free_rx_1;

	INIT_WORK(&tp->phy_task, sis190_phy_task);

	sis190_request_timer(dev);

	rc = request_irq(dev->irq, sis190_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc < 0)
		goto err_release_timer_2;

	sis190_hw_start(dev);
out:
	return rc;

err_release_timer_2:
	sis190_delete_timer(dev);
	sis190_rx_clear(tp);
err_free_rx_1:
	pci_free_consistent(tp->pci_dev, RX_RING_BYTES, tp->RxDescRing,
		tp->rx_dma);
err_free_tx_0:
	pci_free_consistent(tp->pci_dev, TX_RING_BYTES, tp->TxDescRing,
		tp->tx_dma);
	goto out;
}
static void sis190_tx_clear(struct sis190_private *tp)
{
	unsigned int i;

	for (i = 0; i < NUM_TX_DESC; i++) {
		struct sk_buff *skb = tp->Tx_skbuff[i];

		if (!skb)
			continue;

		sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i);
		tp->Tx_skbuff[i] = NULL;
		dev_kfree_skb(skb);
		tp->stats.tx_dropped++;
	}
	tp->cur_tx = tp->dirty_tx = 0;
}
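/*
 * Shutdown helper: stop the asic and mask interrupts, then wait out any
 * handler that may still be running; the loop re-checks IntrMask and
 * repeats the whole sequence until the mask reads back as zero.
 */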
static void sis190_down(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int poll_locked = 0;

	sis190_delete_timer(dev);

	netif_stop_queue(dev);

	do {
		spin_lock_irq(&tp->lock);

		sis190_asic_down(ioaddr);

		spin_unlock_irq(&tp->lock);

		synchronize_irq(dev->irq);

		if (!poll_locked) {
			netif_poll_disable(dev);
			poll_locked++;
		}

		synchronize_sched();

	} while (SIS_R32(IntrMask));

	sis190_tx_clear(tp);
	sis190_rx_clear(tp);
}
static int sis190_close(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	sis190_down(dev);

	free_irq(dev->irq, dev);

	netif_poll_enable(dev);

	pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
	pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);

	tp->TxDescRing = NULL;
	tp->RxDescRing = NULL;

	return 0;
}
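/*
 * Transmit path: fill in the descriptor (buffer address, length, ring-end
 * flag), issue a write barrier, then hand it to the chip by setting OWNbit
 * and kicking TxControl. The queue is stopped when the ring becomes full.
 */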
static int sis190_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u32 len, entry, dirty_tx;
	struct TxDesc *desc;
	dma_addr_t mapping;

	if (unlikely(skb->len < ETH_ZLEN)) {
		if (skb_padto(skb, ETH_ZLEN)) {
			tp->stats.tx_dropped++;
			goto out;
		}
		len = ETH_ZLEN;
	} else {
		len = skb->len;
	}

	entry = tp->cur_tx % NUM_TX_DESC;
	desc = tp->TxDescRing + entry;

	if (unlikely(le32_to_cpu(desc->status) & OWNbit)) {
		netif_stop_queue(dev);
		net_tx_err(tp, KERN_ERR PFX
			   "%s: BUG! Tx Ring full when queue awake!\n",
			   dev->name);
		return NETDEV_TX_BUSY;
	}

	mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);

	tp->Tx_skbuff[entry] = skb;

	desc->PSize = cpu_to_le32(len);
	desc->addr = cpu_to_le32(mapping);

	desc->size = cpu_to_le32(len);
	if (entry == (NUM_TX_DESC - 1))
		desc->size |= cpu_to_le32(RingEnd);

	wmb();

	desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit);

	tp->cur_tx++;

	smp_wmb();

	SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb);

	dev->trans_start = jiffies;

	dirty_tx = tp->dirty_tx;
	if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) {
		netif_stop_queue(dev);
		smp_rmb();
		if (dirty_tx != tp->dirty_tx)
			netif_wake_queue(dev);
	}
out:
	return NETDEV_TX_OK;
}
static struct net_device_stats *sis190_get_stats(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);

	return &tp->stats;
}

static void sis190_free_phy(struct list_head *first_phy)
{
	struct sis190_phy *cur, *next;

	list_for_each_entry_safe(cur, next, first_phy, list) {
		kfree(cur);
	}
}
/**
 *	sis190_default_phy - Select default PHY for sis190 mac.
 *	@dev: the net device to probe for
 *
 *	Select the first detected PHY with link as the default.
 *	If none has link, select the PHY whose type is HOME as the default.
 *	If no HOME PHY exists, select a LAN PHY.
 */
static u16 sis190_default_phy(struct net_device *dev)
{
	struct sis190_phy *phy, *phy_home, *phy_default, *phy_lan;
	struct sis190_private *tp = netdev_priv(dev);
	struct mii_if_info *mii_if = &tp->mii_if;
	void __iomem *ioaddr = tp->mmio_addr;
	u16 status;

	phy_home = phy_default = phy_lan = NULL;

	list_for_each_entry(phy, &tp->first_phy, list) {
		status = mdio_read_latched(ioaddr, phy->phy_id, MII_BMSR);

		// Link ON & Not select default PHY & not ghost PHY.
		if ((status & BMSR_LSTATUS) &&
		    !phy_default &&
		    (phy->type != UNKNOWN)) {
			phy_default = phy;
		} else {
			status = mdio_read(ioaddr, phy->phy_id, MII_BMCR);
			mdio_write(ioaddr, phy->phy_id, MII_BMCR,
				   status | BMCR_ANENABLE | BMCR_ISOLATE);
			if (phy->type == HOME)
				phy_home = phy;
			else if (phy->type == LAN)
				phy_lan = phy;
		}
	}

	if (!phy_default) {
		if (phy_home)
			phy_default = phy_home;
		else if (phy_lan)
			phy_default = phy_lan;
		else
			phy_default = list_entry(&tp->first_phy,
						 struct sis190_phy, list);
	}

	if (mii_if->phy_id != phy_default->phy_id) {
		mii_if->phy_id = phy_default->phy_id;
		net_probe(tp, KERN_INFO
			  "%s: Using transceiver at address %d as default.\n",
			  pci_name(tp->pci_dev), mii_if->phy_id);
	}

	status = mdio_read(ioaddr, mii_if->phy_id, MII_BMCR);
	status &= (~BMCR_ISOLATE);

	mdio_write(ioaddr, mii_if->phy_id, MII_BMCR, status);
	status = mdio_read_latched(ioaddr, mii_if->phy_id, MII_BMSR);

	return status;
}
static void sis190_init_phy(struct net_device *dev, struct sis190_private *tp,
			    struct sis190_phy *phy, unsigned int phy_id,
			    u16 mii_status)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct mii_chip_info *p;

	INIT_LIST_HEAD(&phy->list);
	phy->status = mii_status;
	phy->phy_id = phy_id;

	phy->id[0] = mdio_read(ioaddr, phy_id, MII_PHYSID1);
	phy->id[1] = mdio_read(ioaddr, phy_id, MII_PHYSID2);

	for (p = mii_chip_table; p->type; p++) {
		if ((p->id[0] == phy->id[0]) &&
		    (p->id[1] == (phy->id[1] & 0xfff0))) {
			break;
		}
	}

	if (p->id[1]) {
		phy->type = (p->type == MIX) ?
			((mii_status & (BMSR_100FULL | BMSR_100HALF)) ?
				LAN : HOME) : p->type;
		tp->features |= p->feature;
	} else
		phy->type = UNKNOWN;

	net_probe(tp, KERN_INFO "%s: %s transceiver at address %d.\n",
		  pci_name(tp->pci_dev),
		  (phy->type == UNKNOWN) ? "Unknown PHY" : p->name, phy_id);
}
static void sis190_mii_probe_88e1111_fixup(struct sis190_private *tp)
{
	if (tp->features & F_PHY_88E1111) {
		void __iomem *ioaddr = tp->mmio_addr;
		int phy_id = tp->mii_if.phy_id;
		u16 reg[2][2] = {
			{ 0x808b, 0x0ce1 },
			{ 0x808f, 0x0c60 }
		}, *p;

		p = (tp->features & F_HAS_RGMII) ? reg[0] : reg[1];

		mdio_write(ioaddr, phy_id, 0x1b, p[0]);
		udelay(200);
		mdio_write(ioaddr, phy_id, 0x14, p[1]);
		udelay(200);
	}
}
/**
 *	sis190_mii_probe - Probe MII PHY for sis190
 *	@dev: the net device to probe for
 *
 *	Search the 32 possible MII PHY addresses. Identify and set the
 *	current PHY if one is found; return an error if none is found.
 */
static int __devinit sis190_mii_probe(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct mii_if_info *mii_if = &tp->mii_if;
	void __iomem *ioaddr = tp->mmio_addr;
	int phy_id;
	int rc = 0;

	INIT_LIST_HEAD(&tp->first_phy);

	for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
		struct sis190_phy *phy;
		u16 status;

		status = mdio_read_latched(ioaddr, phy_id, MII_BMSR);

		// Try next mii if the current one is not accessible.
		if (status == 0xffff || status == 0x0000)
			continue;

		phy = kmalloc(sizeof(*phy), GFP_KERNEL);
		if (!phy) {
			sis190_free_phy(&tp->first_phy);
			rc = -ENOMEM;
			goto out;
		}

		sis190_init_phy(dev, tp, phy, phy_id, status);

		list_add(&tp->first_phy, &phy->list);
	}

	if (list_empty(&tp->first_phy)) {
		net_probe(tp, KERN_INFO "%s: No MII transceivers found!\n",
			  pci_name(tp->pci_dev));
		rc = -EIO;
		goto out;
	}

	/* Select default PHY for mac */
	sis190_default_phy(dev);

	sis190_mii_probe_88e1111_fixup(tp);

	mii_if->dev = dev;
	mii_if->mdio_read = __mdio_read;
	mii_if->mdio_write = __mdio_write;
	mii_if->phy_id_mask = PHY_ID_ANY;
	mii_if->reg_num_mask = MII_REG_ANY;
out:
	return rc;
}
static void __devexit sis190_mii_remove(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);

	sis190_free_phy(&tp->first_phy);
}

static void sis190_release_board(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct sis190_private *tp = netdev_priv(dev);

	iounmap(tp->mmio_addr);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	free_netdev(dev);
}
static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
{
	struct sis190_private *tp;
	struct net_device *dev;
	void __iomem *ioaddr;
	int rc;

	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		net_drv(&debug, KERN_ERR PFX "unable to alloc new ethernet\n");
		rc = -ENOMEM;
		goto err_out_0;
	}

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->dev = dev;
	tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);

	rc = pci_enable_device(pdev);
	if (rc < 0) {
		net_probe(tp, KERN_ERR "%s: enable failure\n", pci_name(pdev));
		goto err_free_dev_1;
	}

	rc = -ENODEV;

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		net_probe(tp, KERN_ERR "%s: region #0 is no MMIO resource.\n",
			  pci_name(pdev));
		goto err_pci_disable_2;
	}
	if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) {
		net_probe(tp, KERN_ERR "%s: invalid PCI region size(s).\n",
			  pci_name(pdev));
		goto err_pci_disable_2;
	}

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc < 0) {
		net_probe(tp, KERN_ERR PFX "%s: could not request regions.\n",
			  pci_name(pdev));
		goto err_pci_disable_2;
	}

	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc < 0) {
		net_probe(tp, KERN_ERR "%s: DMA configuration failed.\n",
			  pci_name(pdev));
		goto err_free_res_3;
	}

	pci_set_master(pdev);

	ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE);
	if (!ioaddr) {
		net_probe(tp, KERN_ERR "%s: cannot remap MMIO, aborting\n",
			  pci_name(pdev));
		rc = -EIO;
		goto err_free_res_3;
	}

	tp->pci_dev = pdev;
	tp->mmio_addr = ioaddr;

	sis190_irq_mask_and_ack(ioaddr);

	sis190_soft_reset(ioaddr);
out:
	return dev;

err_free_res_3:
	pci_release_regions(pdev);
err_pci_disable_2:
	pci_disable_device(pdev);
err_free_dev_1:
	free_netdev(dev);
err_out_0:
	dev = ERR_PTR(rc);
	goto out;
}
static void sis190_tx_timeout(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u8 tmp8;

	/* Disable Tx, if not already */
	tmp8 = SIS_R8(TxControl);
	if (tmp8 & CmdTxEnb)
		SIS_W8(TxControl, tmp8 & ~CmdTxEnb);

	net_tx_err(tp, KERN_INFO "%s: Transmit timeout, status %08x %08x.\n",
		   dev->name, SIS_R32(TxControl), SIS_R32(TxSts));

	/* Disable interrupts by clearing the interrupt mask. */
	SIS_W32(IntrMask, 0x0000);

	/* Stop a shared interrupt from scavenging while we are. */
	spin_lock_irq(&tp->lock);
	sis190_tx_clear(tp);
	spin_unlock_irq(&tp->lock);

	/* ...and finally, reset everything. */
	sis190_hw_start(dev);

	netif_wake_queue(dev);
}
static void sis190_set_rgmii(struct sis190_private *tp, u8 reg)
{
	tp->features |= (reg & 0x80) ? F_HAS_RGMII : 0;
}

static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
						     struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u16 sig;
	int i;

	net_probe(tp, KERN_INFO "%s: Read MAC address from EEPROM\n",
		  pci_name(pdev));

	/* Check to see if there is a sane EEPROM */
	sig = (u16) sis190_read_eeprom(ioaddr, EEPROMSignature);

	if ((sig == 0xffff) || (sig == 0x0000)) {
		net_probe(tp, KERN_INFO "%s: Error EEPROM read %x.\n",
			  pci_name(pdev), sig);
		return -EIO;
	}

	/* Get MAC address from EEPROM */
	for (i = 0; i < MAC_ADDR_LEN / 2; i++) {
		__le16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i);

		((u16 *)dev->dev_addr)[i] = le16_to_cpu(w);
	}

	sis190_set_rgmii(tp, sis190_read_eeprom(ioaddr, EEPROMInfo));

	return 0;
}
/**
 *	sis190_get_mac_addr_from_apc - Get MAC address for SiS965 model
 *	@pdev: PCI device
 *	@dev: network device to get address for
 *
 *	The SiS965 model stores the MAC address in APC CMOS RAM, which is
 *	accessed through the ISA bridge. The MAC address is read into
 *	@dev->dev_addr.
 */
static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
						  struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *isa_bridge;
	u8 reg, tmp8;
	int i;

	net_probe(tp, KERN_INFO "%s: Read MAC address from APC.\n",
		  pci_name(pdev));

	isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, 0x0965, NULL);
	if (!isa_bridge) {
		net_probe(tp, KERN_INFO "%s: Can not find ISA bridge.\n",
			  pci_name(pdev));
		return -EIO;
	}

	/* Enable port 78h & 79h to access APC Registers. */
	pci_read_config_byte(isa_bridge, 0x48, &tmp8);
	reg = (tmp8 & ~0x02);
	pci_write_config_byte(isa_bridge, 0x48, reg);
	udelay(50);
	pci_read_config_byte(isa_bridge, 0x48, &reg);

	for (i = 0; i < MAC_ADDR_LEN; i++) {
		outb(0x9 + i, 0x78);
		dev->dev_addr[i] = inb(0x79);
	}

	outb(0x12, 0x78);
	reg = inb(0x79);

	sis190_set_rgmii(tp, reg);

	/* Restore the value to ISA Bridge */
	pci_write_config_byte(isa_bridge, 0x48, tmp8);
	pci_dev_put(isa_bridge);

	return 0;
}
/**
 *	sis190_init_rxfilter - Initialize the Rx filter
 *	@dev: network device to initialize
 *
 *	Set receive filter address to our MAC address
 *	and enable packet filtering.
 */
static inline void sis190_init_rxfilter(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u16 ctl;
	int i;

	ctl = SIS_R16(RxMacControl);
	/*
	 * Disable packet filtering before setting filter.
	 * Note: SiS's driver writes 32 bits but RxMacControl is 16 bits
	 * only and followed by RxMacAddr (6 bytes). Strange. -- FR
	 */
	SIS_W16(RxMacControl, ctl & ~0x0f00);

	for (i = 0; i < MAC_ADDR_LEN; i++)
		SIS_W8(RxMacAddr + i, dev->dev_addr[i]);

	SIS_W16(RxMacControl, ctl);
	SIS_PCI_COMMIT();
}

static int sis190_get_mac_addr(struct pci_dev *pdev, struct net_device *dev)
{
	u8 from;

	pci_read_config_byte(pdev, 0x73, &from);

	return (from & 0x00000001) ?
		sis190_get_mac_addr_from_apc(pdev, dev) :
		sis190_get_mac_addr_from_eeprom(pdev, dev);
}
static void sis190_set_speed_auto(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	int phy_id = tp->mii_if.phy_id;
	int val;

	net_link(tp, KERN_INFO "%s: Enabling Auto-negotiation.\n", dev->name);

	val = mdio_read(ioaddr, phy_id, MII_ADVERTISE);

	// Enable 10/100 Full/Half Mode, leave MII_ADVERTISE bit4:0
	// unchanged.
	mdio_write(ioaddr, phy_id, MII_ADVERTISE, (val & ADVERTISE_SLCT) |
		   ADVERTISE_100FULL | ADVERTISE_10FULL |
		   ADVERTISE_100HALF | ADVERTISE_10HALF);

	// Enable 1000 Full Mode.
	mdio_write(ioaddr, phy_id, MII_CTRL1000, ADVERTISE_1000FULL);

	// Enable auto-negotiation and restart auto-negotiation.
	mdio_write(ioaddr, phy_id, MII_BMCR,
		   BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET);
}
static int sis190_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct sis190_private *tp = netdev_priv(dev);

	return mii_ethtool_gset(&tp->mii_if, cmd);
}

static int sis190_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct sis190_private *tp = netdev_priv(dev);

	return mii_ethtool_sset(&tp->mii_if, cmd);
}

static void sis190_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	struct sis190_private *tp = netdev_priv(dev);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(tp->pci_dev));
}

static int sis190_get_regs_len(struct net_device *dev)
{
	return SIS190_REGS_SIZE;
}

static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			    void *p)
{
	struct sis190_private *tp = netdev_priv(dev);
	unsigned long flags;

	if (regs->len > SIS190_REGS_SIZE)
		regs->len = SIS190_REGS_SIZE;

	spin_lock_irqsave(&tp->lock, flags);
	memcpy_fromio(p, tp->mmio_addr, regs->len);
	spin_unlock_irqrestore(&tp->lock, flags);
}

static int sis190_nway_reset(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);

	return mii_nway_restart(&tp->mii_if);
}

static u32 sis190_get_msglevel(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);

	return tp->msg_enable;
}

static void sis190_set_msglevel(struct net_device *dev, u32 value)
{
	struct sis190_private *tp = netdev_priv(dev);

	tp->msg_enable = value;
}

static const struct ethtool_ops sis190_ethtool_ops = {
	.get_settings	= sis190_get_settings,
	.set_settings	= sis190_set_settings,
	.get_drvinfo	= sis190_get_drvinfo,
	.get_regs_len	= sis190_get_regs_len,
	.get_regs	= sis190_get_regs,
	.get_link	= ethtool_op_get_link,
	.get_msglevel	= sis190_get_msglevel,
	.set_msglevel	= sis190_set_msglevel,
	.nway_reset	= sis190_nway_reset,
};

static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct sis190_private *tp = netdev_priv(dev);

	return !netif_running(dev) ? -EINVAL :
		generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL);
}
static int __devinit sis190_init_one(struct pci_dev *pdev,
				     const struct pci_device_id *ent)
{
	static int printed_version = 0;
	struct sis190_private *tp;
	struct net_device *dev;
	void __iomem *ioaddr;
	int rc;

	if (!printed_version) {
		net_drv(&debug, KERN_INFO SIS190_DRIVER_NAME " loaded.\n");
		printed_version = 1;
	}

	dev = sis190_init_board(pdev);
	if (IS_ERR(dev)) {
		rc = PTR_ERR(dev);
		goto out;
	}

	pci_set_drvdata(pdev, dev);

	tp = netdev_priv(dev);
	ioaddr = tp->mmio_addr;

	rc = sis190_get_mac_addr(pdev, dev);
	if (rc < 0)
		goto err_release_board;

	sis190_init_rxfilter(dev);

	INIT_WORK(&tp->phy_task, sis190_phy_task);

	dev->open = sis190_open;
	dev->stop = sis190_close;
	dev->do_ioctl = sis190_ioctl;
	dev->get_stats = sis190_get_stats;
	dev->tx_timeout = sis190_tx_timeout;
	dev->watchdog_timeo = SIS190_TX_TIMEOUT;
	dev->hard_start_xmit = sis190_start_xmit;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = sis190_netpoll;
#endif
	dev->set_multicast_list = sis190_set_rx_mode;
	SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
	dev->irq = pdev->irq;
	dev->base_addr = (unsigned long) 0xdead;

	spin_lock_init(&tp->lock);

	rc = sis190_mii_probe(dev);
	if (rc < 0)
		goto err_release_board;

	rc = register_netdev(dev);
	if (rc < 0)
		goto err_remove_mii;

	net_probe(tp, KERN_INFO "%s: %s at %p (IRQ: %d), "
		  "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
		  pci_name(pdev), sis_chip_info[ent->driver_data].name,
		  ioaddr, dev->irq,
		  dev->dev_addr[0], dev->dev_addr[1],
		  dev->dev_addr[2], dev->dev_addr[3],
		  dev->dev_addr[4], dev->dev_addr[5]);

	net_probe(tp, KERN_INFO "%s: %s mode.\n", dev->name,
		  (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII");

	netif_carrier_off(dev);

	sis190_set_speed_auto(dev);
out:
	return rc;

err_remove_mii:
	sis190_mii_remove(dev);
err_release_board:
	sis190_release_board(pdev);
	goto out;
}
static void __devexit sis190_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	sis190_mii_remove(dev);
	flush_scheduled_work();
	unregister_netdev(dev);
	sis190_release_board(pdev);
	pci_set_drvdata(pdev, NULL);
}

static struct pci_driver sis190_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= sis190_pci_tbl,
	.probe		= sis190_init_one,
	.remove		= __devexit_p(sis190_remove_one),
};

static int __init sis190_init_module(void)
{
	return pci_register_driver(&sis190_pci_driver);
}

static void __exit sis190_cleanup_module(void)
{
	pci_unregister_driver(&sis190_pci_driver);
}

module_init(sis190_init_module);
module_exit(sis190_cleanup_module);