sis190: add cmos ram access code for the SiS19x/968 chipset pair
[linux-2.6/sactl.git] / drivers / net / sis190.c
blob: c63f484f9072730a0c92f352f0dc7df766bbc524
1 /*
2 sis190.c: Silicon Integrated Systems SiS190 ethernet driver
4 Copyright (c) 2003 K.M. Liu <kmliu@sis.com>
5 Copyright (c) 2003, 2004 Jeff Garzik <jgarzik@pobox.com>
6 Copyright (c) 2003, 2004, 2005 Francois Romieu <romieu@fr.zoreil.com>
8 Based on r8169.c, tg3.c, 8139cp.c, skge.c, epic100.c and SiS 190/191
9 genuine driver.
11 This software may be used and distributed according to the terms of
12 the GNU General Public License (GPL), incorporated herein by reference.
13 Drivers based on or derived from this code fall under the GPL and must
14 retain the authorship, copyright and license notice. This file is not
15 a complete program and may only be used when the entire operating
16 system is licensed under the GPL.
18 See the file COPYING in this distribution for more information.
22 #include <linux/module.h>
23 #include <linux/moduleparam.h>
24 #include <linux/netdevice.h>
25 #include <linux/rtnetlink.h>
26 #include <linux/etherdevice.h>
27 #include <linux/ethtool.h>
28 #include <linux/pci.h>
29 #include <linux/mii.h>
30 #include <linux/delay.h>
31 #include <linux/crc32.h>
32 #include <linux/dma-mapping.h>
33 #include <asm/irq.h>
35 #define net_drv(p, arg...) if (netif_msg_drv(p)) \
36 printk(arg)
37 #define net_probe(p, arg...) if (netif_msg_probe(p)) \
38 printk(arg)
39 #define net_link(p, arg...) if (netif_msg_link(p)) \
40 printk(arg)
41 #define net_intr(p, arg...) if (netif_msg_intr(p)) \
42 printk(arg)
43 #define net_tx_err(p, arg...) if (netif_msg_tx_err(p)) \
44 printk(arg)
46 #define PHY_MAX_ADDR 32
47 #define PHY_ID_ANY 0x1f
48 #define MII_REG_ANY 0x1f
50 #define DRV_VERSION "1.2"
51 #define DRV_NAME "sis190"
52 #define SIS190_DRIVER_NAME DRV_NAME " Gigabit Ethernet driver " DRV_VERSION
53 #define PFX DRV_NAME ": "
55 #define sis190_rx_skb netif_rx
56 #define sis190_rx_quota(count, quota) count
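/*
 * Rx packets are handed straight to netif_rx() from the interrupt
 * handler and the quota argument is ignored: no NAPI polling here.
 * These wrappers apparently exist to ease a later NAPI conversion.
 */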
58 #define MAC_ADDR_LEN 6
60 #define NUM_TX_DESC 64 /* [8..1024] */
61 #define NUM_RX_DESC 64 /* [8..8192] */
62 #define TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
63 #define RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
64 #define RX_BUF_SIZE 1536
65 #define RX_BUF_MASK 0xfff8
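/*
 * RX_BUF_MASK keeps the Rx buffer size a multiple of 8 bytes, as
 * needed when it is written into RxDesc->size (see
 * sis190_set_rxbufsize and sis190_give_to_asic).
 */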
67 #define SIS190_REGS_SIZE 0x80
68 #define SIS190_TX_TIMEOUT (6*HZ)
69 #define SIS190_PHY_TIMEOUT (10*HZ)
70 #define SIS190_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
71 NETIF_MSG_LINK | NETIF_MSG_IFUP | \
72 NETIF_MSG_IFDOWN)
74 /* Enhanced PHY access register bit definitions */
75 #define EhnMIIread 0x0000
76 #define EhnMIIwrite 0x0020
77 #define EhnMIIdataShift 16
78 #define EhnMIIpmdShift 6 /* 7016 only */
79 #define EhnMIIregShift 11
80 #define EhnMIIreq 0x0010
81 #define EhnMIInotDone 0x0010
83 /* Write/read MMIO register */
84 #define SIS_W8(reg, val) writeb ((val), ioaddr + (reg))
85 #define SIS_W16(reg, val) writew ((val), ioaddr + (reg))
86 #define SIS_W32(reg, val) writel ((val), ioaddr + (reg))
87 #define SIS_R8(reg) readb (ioaddr + (reg))
88 #define SIS_R16(reg) readw (ioaddr + (reg))
89 #define SIS_R32(reg) readl (ioaddr + (reg))
91 #define SIS_PCI_COMMIT() SIS_R32(IntrControl)
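/* Reading IntrControl back forces any posted MMIO writes to complete. */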
93 enum sis190_registers {
94 TxControl = 0x00,
95 TxDescStartAddr = 0x04,
96 rsv0 = 0x08, // reserved
97 TxSts = 0x0c, // unused (Control/Status)
98 RxControl = 0x10,
99 RxDescStartAddr = 0x14,
100 rsv1 = 0x18, // reserved
101 RxSts = 0x1c, // unused
102 IntrStatus = 0x20,
103 IntrMask = 0x24,
104 IntrControl = 0x28,
105 IntrTimer = 0x2c, // unused (Interrupt Timer)
106 PMControl = 0x30, // unused (Power Mgmt Control/Status)
107 rsv2 = 0x34, // reserved
108 ROMControl = 0x38,
109 ROMInterface = 0x3c,
110 StationControl = 0x40,
111 GMIIControl = 0x44,
112 GIoCR = 0x48, // unused (GMAC IO Compensation)
113 GIoCtrl = 0x4c, // unused (GMAC IO Control)
114 TxMacControl = 0x50,
115 TxLimit = 0x54, // unused (Tx MAC Timer/TryLimit)
116 RGDelay = 0x58, // unused (RGMII Tx Internal Delay)
117 rsv3 = 0x5c, // reserved
118 RxMacControl = 0x60,
119 RxMacAddr = 0x62,
120 RxHashTable = 0x68,
121 // Undocumented = 0x6c,
122 RxWolCtrl = 0x70,
123 RxWolData = 0x74, // unused (Rx WOL Data Access)
124 RxMPSControl = 0x78, // unused (Rx MPS Control)
125 rsv4 = 0x7c, // reserved
128 enum sis190_register_content {
129 /* IntrStatus */
130 SoftInt = 0x40000000, // unused
131 Timeup = 0x20000000, // unused
132 PauseFrame = 0x00080000, // unused
133 MagicPacket = 0x00040000, // unused
134 WakeupFrame = 0x00020000, // unused
135 LinkChange = 0x00010000,
136 RxQEmpty = 0x00000080,
137 RxQInt = 0x00000040,
138 TxQ1Empty = 0x00000020, // unused
139 TxQ1Int = 0x00000010,
140 TxQ0Empty = 0x00000008, // unused
141 TxQ0Int = 0x00000004,
142 RxHalt = 0x00000002,
143 TxHalt = 0x00000001,
145 /* {Rx/Tx}CmdBits */
146 CmdReset = 0x10,
147 CmdRxEnb = 0x08, // unused
148 CmdTxEnb = 0x01,
149 RxBufEmpty = 0x01, // unused
151 /* Cfg9346Bits */
152 Cfg9346_Lock = 0x00, // unused
153 Cfg9346_Unlock = 0xc0, // unused
155 /* RxMacControl */
156 AcceptErr = 0x20, // unused
157 AcceptRunt = 0x10, // unused
158 AcceptBroadcast = 0x0800,
159 AcceptMulticast = 0x0400,
160 AcceptMyPhys = 0x0200,
161 AcceptAllPhys = 0x0100,
163 /* RxConfigBits */
164 RxCfgFIFOShift = 13,
165 RxCfgDMAShift = 8, // 0x1a in RxControl ?
167 /* TxConfigBits */
168 TxInterFrameGapShift = 24,
169 TxDMAShift = 8, /* DMA burst value (0-7) is shifted by this many bits */
171 LinkStatus = 0x02, // unused
172 FullDup = 0x01, // unused
174 /* TBICSRBit */
175 TBILinkOK = 0x02000000, // unused
178 struct TxDesc {
179 __le32 PSize;
180 __le32 status;
181 __le32 addr;
182 __le32 size;
185 struct RxDesc {
186 __le32 PSize;
187 __le32 status;
188 __le32 addr;
189 __le32 size;
192 enum _DescStatusBit {
193 /* _Desc.status */
194 OWNbit = 0x80000000, // RXOWN/TXOWN
195 INTbit = 0x40000000, // RXINT/TXINT
196 CRCbit = 0x00020000, // CRCOFF/CRCEN
197 PADbit = 0x00010000, // PREADD/PADEN
198 /* _Desc.size */
199 RingEnd = 0x80000000,
200 /* TxDesc.status */
201 LSEN = 0x08000000, // TSO ? -- FR
202 IPCS = 0x04000000,
203 TCPCS = 0x02000000,
204 UDPCS = 0x01000000,
205 BSTEN = 0x00800000,
206 EXTEN = 0x00400000,
207 DEFEN = 0x00200000,
208 BKFEN = 0x00100000,
209 CRSEN = 0x00080000,
210 COLEN = 0x00040000,
211 THOL3 = 0x30000000,
212 THOL2 = 0x20000000,
213 THOL1 = 0x10000000,
214 THOL0 = 0x00000000,
215 /* RxDesc.status */
216 IPON = 0x20000000,
217 TCPON = 0x10000000,
218 UDPON = 0x08000000,
219 Wakup = 0x00400000,
220 Magic = 0x00200000,
221 Pause = 0x00100000,
222 DEFbit = 0x00200000,
223 BCAST = 0x000c0000,
224 MCAST = 0x00080000,
225 UCAST = 0x00040000,
226 /* RxDesc.PSize */
227 TAGON = 0x80000000,
228 RxDescCountMask = 0x7f000000, // multi-desc pkt when > 1 ? -- FR
229 ABORT = 0x00800000,
230 SHORT = 0x00400000,
231 LIMIT = 0x00200000,
232 MIIER = 0x00100000,
233 OVRUN = 0x00080000,
234 NIBON = 0x00040000,
235 COLON = 0x00020000,
236 CRCOK = 0x00010000,
237 RxSizeMask = 0x0000ffff
239 * The asic could apparently do vlan, TSO, jumbo (sis191 only) and
240 * provide two (unused with Linux) Tx queues. No publicly
241 * available documentation, alas.
245 enum sis190_eeprom_access_register_bits {
246 EECS = 0x00000001, // unused
247 EECLK = 0x00000002, // unused
248 EEDO = 0x00000008, // unused
249 EEDI = 0x00000004, // unused
250 EEREQ = 0x00000080,
251 EEROP = 0x00000200,
252 EEWOP = 0x00000100 // unused
255 /* EEPROM Addresses */
256 enum sis190_eeprom_address {
257 EEPROMSignature = 0x00,
258 EEPROMCLK = 0x01, // unused
259 EEPROMInfo = 0x02,
260 EEPROMMACAddr = 0x03
263 enum sis190_feature {
264 F_HAS_RGMII = 1,
265 F_PHY_88E1111 = 2,
266 F_PHY_BCM5461 = 4
269 struct sis190_private {
270 void __iomem *mmio_addr;
271 struct pci_dev *pci_dev;
272 struct net_device *dev;
273 spinlock_t lock;
274 u32 rx_buf_sz;
275 u32 cur_rx;
276 u32 cur_tx;
277 u32 dirty_rx;
278 u32 dirty_tx;
279 dma_addr_t rx_dma;
280 dma_addr_t tx_dma;
281 struct RxDesc *RxDescRing;
282 struct TxDesc *TxDescRing;
283 struct sk_buff *Rx_skbuff[NUM_RX_DESC];
284 struct sk_buff *Tx_skbuff[NUM_TX_DESC];
285 struct work_struct phy_task;
286 struct timer_list timer;
287 u32 msg_enable;
288 struct mii_if_info mii_if;
289 struct list_head first_phy;
290 u32 features;
293 struct sis190_phy {
294 struct list_head list;
295 int phy_id;
296 u16 id[2];
297 u16 status;
298 u8 type;
301 enum sis190_phy_type {
302 UNKNOWN = 0x00,
303 HOME = 0x01,
304 LAN = 0x02,
305 MIX = 0x03
308 static struct mii_chip_info {
309 const char *name;
310 u16 id[2];
311 unsigned int type;
312 u32 feature;
313 } mii_chip_table[] = {
314 { "Broadcom PHY BCM5461", { 0x0020, 0x60c0 }, LAN, F_PHY_BCM5461 },
315 { "Broadcom PHY AC131", { 0x0143, 0xbc70 }, LAN, 0 },
316 { "Agere PHY ET1101B", { 0x0282, 0xf010 }, LAN, 0 },
317 { "Marvell PHY 88E1111", { 0x0141, 0x0cc0 }, LAN, F_PHY_88E1111 },
318 { "Realtek PHY RTL8201", { 0x0000, 0x8200 }, LAN, 0 },
319 { NULL, }
322 static const struct {
323 const char *name;
324 } sis_chip_info[] = {
325 { "SiS 190 PCI Fast Ethernet adapter" },
326 { "SiS 191 PCI Gigabit Ethernet adapter" },
329 static struct pci_device_id sis190_pci_tbl[] __devinitdata = {
330 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
331 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0191), 0, 0, 1 },
332 { 0, },
335 MODULE_DEVICE_TABLE(pci, sis190_pci_tbl);
337 static int rx_copybreak = 200;
339 static struct {
340 u32 msg_enable;
341 } debug = { -1 };
343 MODULE_DESCRIPTION("SiS sis190 Gigabit Ethernet driver");
344 module_param(rx_copybreak, int, 0);
345 MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
346 module_param_named(debug, debug.msg_enable, int, 0);
347 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
348 MODULE_AUTHOR("K.M. Liu <kmliu@sis.com>, Ueimor <romieu@fr.zoreil.com>");
349 MODULE_VERSION(DRV_VERSION);
350 MODULE_LICENSE("GPL");
352 static const u32 sis190_intr_mask =
353 RxQEmpty | RxQInt | TxQ1Int | TxQ0Int | RxHalt | TxHalt | LinkChange;
356 * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
357 * The chips use a 64-element hash table based on the Ethernet CRC.
359 static const int multicast_filter_limit = 32;
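/*
 * The hash filter itself is the pair of 32-bit words at RxHashTable;
 * sis190_set_rx_mode() selects a bit with the low 6 bits of the
 * Ethernet CRC of each multicast address.
 */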
361 static void __mdio_cmd(void __iomem *ioaddr, u32 ctl)
363 unsigned int i;
365 SIS_W32(GMIIControl, ctl);
367 msleep(1);
369 for (i = 0; i < 100; i++) {
370 if (!(SIS_R32(GMIIControl) & EhnMIInotDone))
371 break;
372 msleep(1);
375 if (i > 99)
376 printk(KERN_ERR PFX "PHY command failed !\n");
379 static void mdio_write(void __iomem *ioaddr, int phy_id, int reg, int val)
381 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIwrite |
382 (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift) |
383 (((u32) val) << EhnMIIdataShift));
386 static int mdio_read(void __iomem *ioaddr, int phy_id, int reg)
388 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIread |
389 (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift));
391 return (u16) (SIS_R32(GMIIControl) >> EhnMIIdataShift);
394 static void __mdio_write(struct net_device *dev, int phy_id, int reg, int val)
396 struct sis190_private *tp = netdev_priv(dev);
398 mdio_write(tp->mmio_addr, phy_id, reg, val);
401 static int __mdio_read(struct net_device *dev, int phy_id, int reg)
403 struct sis190_private *tp = netdev_priv(dev);
405 return mdio_read(tp->mmio_addr, phy_id, reg);
408 static u16 mdio_read_latched(void __iomem *ioaddr, int phy_id, int reg)
410 mdio_read(ioaddr, phy_id, reg);
411 return mdio_read(ioaddr, phy_id, reg);
414 static u16 __devinit sis190_read_eeprom(void __iomem *ioaddr, u32 reg)
416 u16 data = 0xffff;
417 unsigned int i;
419 if (!(SIS_R32(ROMControl) & 0x0002))
420 return 0;
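/*
 * Issue a read request: the word address goes in bits 10 and up and
 * the result shows up in the upper 16 bits of ROMInterface once the
 * chip clears EEREQ.
 */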
422 SIS_W32(ROMInterface, EEREQ | EEROP | (reg << 10));
424 for (i = 0; i < 200; i++) {
425 if (!(SIS_R32(ROMInterface) & EEREQ)) {
426 data = (SIS_R32(ROMInterface) & 0xffff0000) >> 16;
427 break;
429 msleep(1);
432 return data;
435 static void sis190_irq_mask_and_ack(void __iomem *ioaddr)
437 SIS_W32(IntrMask, 0x00);
438 SIS_W32(IntrStatus, 0xffffffff);
439 SIS_PCI_COMMIT();
442 static void sis190_asic_down(void __iomem *ioaddr)
444 /* Stop the chip's Tx and Rx DMA processes. */
446 SIS_W32(TxControl, 0x1a00);
447 SIS_W32(RxControl, 0x1a00);
449 sis190_irq_mask_and_ack(ioaddr);
452 static void sis190_mark_as_last_descriptor(struct RxDesc *desc)
454 desc->size |= cpu_to_le32(RingEnd);
457 static inline void sis190_give_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
459 u32 eor = le32_to_cpu(desc->size) & RingEnd;
461 desc->PSize = 0x0;
462 desc->size = cpu_to_le32((rx_buf_sz & RX_BUF_MASK) | eor);
463 wmb();
464 desc->status = cpu_to_le32(OWNbit | INTbit);
467 static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
468 u32 rx_buf_sz)
470 desc->addr = cpu_to_le32(mapping);
471 sis190_give_to_asic(desc, rx_buf_sz);
474 static inline void sis190_make_unusable_by_asic(struct RxDesc *desc)
476 desc->PSize = 0x0;
477 desc->addr = cpu_to_le32(0xdeadbeef);
478 desc->size &= cpu_to_le32(RingEnd);
479 wmb();
480 desc->status = 0x0;
483 static int sis190_alloc_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff,
484 struct RxDesc *desc, u32 rx_buf_sz)
486 struct sk_buff *skb;
487 dma_addr_t mapping;
488 int ret = 0;
490 skb = dev_alloc_skb(rx_buf_sz);
491 if (!skb)
492 goto err_out;
494 *sk_buff = skb;
496 mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
497 PCI_DMA_FROMDEVICE);
499 sis190_map_to_asic(desc, mapping, rx_buf_sz);
500 out:
501 return ret;
503 err_out:
504 ret = -ENOMEM;
505 sis190_make_unusable_by_asic(desc);
506 goto out;
509 static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
510 u32 start, u32 end)
512 u32 cur;
514 for (cur = start; cur < end; cur++) {
515 int ret, i = cur % NUM_RX_DESC;
517 if (tp->Rx_skbuff[i])
518 continue;
520 ret = sis190_alloc_rx_skb(tp->pci_dev, tp->Rx_skbuff + i,
521 tp->RxDescRing + i, tp->rx_buf_sz);
522 if (ret < 0)
523 break;
525 return cur - start;
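/*
 * Copybreak: packets smaller than rx_copybreak are copied into a
 * freshly allocated skb so the original Rx buffer can be handed back
 * to the hardware immediately. Larger packets keep their buffer and a
 * replacement is allocated later by sis190_rx_fill().
 */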
528 static inline int sis190_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
529 struct RxDesc *desc, int rx_buf_sz)
531 int ret = -1;
533 if (pkt_size < rx_copybreak) {
534 struct sk_buff *skb;
536 skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN);
537 if (skb) {
538 skb_reserve(skb, NET_IP_ALIGN);
539 skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size);
540 *sk_buff = skb;
541 sis190_give_to_asic(desc, rx_buf_sz);
542 ret = 0;
545 return ret;
548 static inline int sis190_rx_pkt_err(u32 status, struct net_device_stats *stats)
550 #define ErrMask (OVRUN | SHORT | LIMIT | MIIER | NIBON | COLON | ABORT)
552 if ((status & CRCOK) && !(status & ErrMask))
553 return 0;
555 if (!(status & CRCOK))
556 stats->rx_crc_errors++;
557 else if (status & OVRUN)
558 stats->rx_over_errors++;
559 else if (status & (SHORT | LIMIT))
560 stats->rx_length_errors++;
561 else if (status & (MIIER | NIBON | COLON))
562 stats->rx_frame_errors++;
564 stats->rx_errors++;
565 return -1;
568 static int sis190_rx_interrupt(struct net_device *dev,
569 struct sis190_private *tp, void __iomem *ioaddr)
571 struct net_device_stats *stats = &dev->stats;
572 u32 rx_left, cur_rx = tp->cur_rx;
573 u32 delta, count;
575 rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
576 rx_left = sis190_rx_quota(rx_left, (u32) dev->quota);
578 for (; rx_left > 0; rx_left--, cur_rx++) {
579 unsigned int entry = cur_rx % NUM_RX_DESC;
580 struct RxDesc *desc = tp->RxDescRing + entry;
581 u32 status;
583 if (le32_to_cpu(desc->status) & OWNbit)
584 break;
586 status = le32_to_cpu(desc->PSize);
588 // net_intr(tp, KERN_INFO "%s: Rx PSize = %08x.\n", dev->name,
589 // status);
591 if (sis190_rx_pkt_err(status, stats) < 0)
592 sis190_give_to_asic(desc, tp->rx_buf_sz);
593 else {
594 struct sk_buff *skb = tp->Rx_skbuff[entry];
595 int pkt_size = (status & RxSizeMask) - 4;
596 void (*pci_action)(struct pci_dev *, dma_addr_t,
597 size_t, int) = pci_dma_sync_single_for_device;
599 if (unlikely(pkt_size > tp->rx_buf_sz)) {
600 net_intr(tp, KERN_INFO
601 "%s: (frag) status = %08x.\n",
602 dev->name, status);
603 stats->rx_dropped++;
604 stats->rx_length_errors++;
605 sis190_give_to_asic(desc, tp->rx_buf_sz);
606 continue;
609 pci_dma_sync_single_for_cpu(tp->pci_dev,
610 le32_to_cpu(desc->addr), tp->rx_buf_sz,
611 PCI_DMA_FROMDEVICE);
613 if (sis190_try_rx_copy(&skb, pkt_size, desc,
614 tp->rx_buf_sz)) {
615 pci_action = pci_unmap_single;
616 tp->Rx_skbuff[entry] = NULL;
617 sis190_make_unusable_by_asic(desc);
620 pci_action(tp->pci_dev, le32_to_cpu(desc->addr),
621 tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
623 skb_put(skb, pkt_size);
624 skb->protocol = eth_type_trans(skb, dev);
626 sis190_rx_skb(skb);
628 dev->last_rx = jiffies;
629 stats->rx_packets++;
630 stats->rx_bytes += pkt_size;
631 if ((status & BCAST) == MCAST)
632 stats->multicast++;
635 count = cur_rx - tp->cur_rx;
636 tp->cur_rx = cur_rx;
638 delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
639 if (!delta && count && netif_msg_intr(tp))
640 printk(KERN_INFO "%s: no Rx buffer allocated.\n", dev->name);
641 tp->dirty_rx += delta;
643 if (((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx) && netif_msg_intr(tp))
644 printk(KERN_EMERG "%s: Rx buffers exhausted.\n", dev->name);
646 return count;
649 static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
650 struct TxDesc *desc)
652 unsigned int len;
654 len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
656 pci_unmap_single(pdev, le32_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
658 memset(desc, 0x00, sizeof(*desc));
661 static void sis190_tx_interrupt(struct net_device *dev,
662 struct sis190_private *tp, void __iomem *ioaddr)
664 u32 pending, dirty_tx = tp->dirty_tx;
666 * It would not be needed if queueing was allowed to be enabled
667 * again too early (hint: think preempt and unclocked smp systems).
669 unsigned int queue_stopped;
671 smp_rmb();
672 pending = tp->cur_tx - dirty_tx;
673 queue_stopped = (pending == NUM_TX_DESC);
675 for (; pending; pending--, dirty_tx++) {
676 unsigned int entry = dirty_tx % NUM_TX_DESC;
677 struct TxDesc *txd = tp->TxDescRing + entry;
678 struct sk_buff *skb;
680 if (le32_to_cpu(txd->status) & OWNbit)
681 break;
683 skb = tp->Tx_skbuff[entry];
685 dev->stats.tx_packets++;
686 dev->stats.tx_bytes += skb->len;
688 sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
689 tp->Tx_skbuff[entry] = NULL;
690 dev_kfree_skb_irq(skb);
693 if (tp->dirty_tx != dirty_tx) {
694 tp->dirty_tx = dirty_tx;
695 smp_wmb();
696 if (queue_stopped)
697 netif_wake_queue(dev);
702 * The interrupt handler does all of the Rx thread work and cleans up after
703 * the Tx thread.
705 static irqreturn_t sis190_interrupt(int irq, void *__dev)
707 struct net_device *dev = __dev;
708 struct sis190_private *tp = netdev_priv(dev);
709 void __iomem *ioaddr = tp->mmio_addr;
710 unsigned int handled = 0;
711 u32 status;
713 status = SIS_R32(IntrStatus);
715 if ((status == 0xffffffff) || !status)
716 goto out;
718 handled = 1;
720 if (unlikely(!netif_running(dev))) {
721 sis190_asic_down(ioaddr);
722 goto out;
725 SIS_W32(IntrStatus, status);
727 // net_intr(tp, KERN_INFO "%s: status = %08x.\n", dev->name, status);
729 if (status & LinkChange) {
730 net_intr(tp, KERN_INFO "%s: link change.\n", dev->name);
731 schedule_work(&tp->phy_task);
734 if (status & RxQInt)
735 sis190_rx_interrupt(dev, tp, ioaddr);
737 if (status & TxQ0Int)
738 sis190_tx_interrupt(dev, tp, ioaddr);
739 out:
740 return IRQ_RETVAL(handled);
743 #ifdef CONFIG_NET_POLL_CONTROLLER
744 static void sis190_netpoll(struct net_device *dev)
746 struct sis190_private *tp = netdev_priv(dev);
747 struct pci_dev *pdev = tp->pci_dev;
749 disable_irq(pdev->irq);
750 sis190_interrupt(pdev->irq, dev);
751 enable_irq(pdev->irq);
753 #endif
755 static void sis190_free_rx_skb(struct sis190_private *tp,
756 struct sk_buff **sk_buff, struct RxDesc *desc)
758 struct pci_dev *pdev = tp->pci_dev;
760 pci_unmap_single(pdev, le32_to_cpu(desc->addr), tp->rx_buf_sz,
761 PCI_DMA_FROMDEVICE);
762 dev_kfree_skb(*sk_buff);
763 *sk_buff = NULL;
764 sis190_make_unusable_by_asic(desc);
767 static void sis190_rx_clear(struct sis190_private *tp)
769 unsigned int i;
771 for (i = 0; i < NUM_RX_DESC; i++) {
772 if (!tp->Rx_skbuff[i])
773 continue;
774 sis190_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescRing + i);
778 static void sis190_init_ring_indexes(struct sis190_private *tp)
780 tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
783 static int sis190_init_ring(struct net_device *dev)
785 struct sis190_private *tp = netdev_priv(dev);
787 sis190_init_ring_indexes(tp);
789 memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
790 memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
792 if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
793 goto err_rx_clear;
795 sis190_mark_as_last_descriptor(tp->RxDescRing + NUM_RX_DESC - 1);
797 return 0;
799 err_rx_clear:
800 sis190_rx_clear(tp);
801 return -ENOMEM;
804 static void sis190_set_rx_mode(struct net_device *dev)
806 struct sis190_private *tp = netdev_priv(dev);
807 void __iomem *ioaddr = tp->mmio_addr;
808 unsigned long flags;
809 u32 mc_filter[2]; /* Multicast hash filter */
810 u16 rx_mode;
812 if (dev->flags & IFF_PROMISC) {
813 rx_mode =
814 AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
815 AcceptAllPhys;
816 mc_filter[1] = mc_filter[0] = 0xffffffff;
817 } else if ((dev->mc_count > multicast_filter_limit) ||
818 (dev->flags & IFF_ALLMULTI)) {
819 /* Too many to filter perfectly -- accept all multicasts. */
820 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
821 mc_filter[1] = mc_filter[0] = 0xffffffff;
822 } else {
823 struct dev_mc_list *mclist;
824 unsigned int i;
826 rx_mode = AcceptBroadcast | AcceptMyPhys;
827 mc_filter[1] = mc_filter[0] = 0;
828 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
829 i++, mclist = mclist->next) {
830 int bit_nr =
831 ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
832 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
833 rx_mode |= AcceptMulticast;
837 spin_lock_irqsave(&tp->lock, flags);
839 SIS_W16(RxMacControl, rx_mode | 0x2);
840 SIS_W32(RxHashTable, mc_filter[0]);
841 SIS_W32(RxHashTable + 4, mc_filter[1]);
843 spin_unlock_irqrestore(&tp->lock, flags);
846 static void sis190_soft_reset(void __iomem *ioaddr)
848 SIS_W32(IntrControl, 0x8000);
849 SIS_PCI_COMMIT();
850 msleep(1);
851 SIS_W32(IntrControl, 0x0);
852 sis190_asic_down(ioaddr);
853 msleep(1);
856 static void sis190_hw_start(struct net_device *dev)
858 struct sis190_private *tp = netdev_priv(dev);
859 void __iomem *ioaddr = tp->mmio_addr;
861 sis190_soft_reset(ioaddr);
863 SIS_W32(TxDescStartAddr, tp->tx_dma);
864 SIS_W32(RxDescStartAddr, tp->rx_dma);
866 SIS_W32(IntrStatus, 0xffffffff);
867 SIS_W32(IntrMask, 0x0);
868 SIS_W32(GMIIControl, 0x0);
869 SIS_W32(TxMacControl, 0x60);
870 SIS_W16(RxMacControl, 0x02);
871 SIS_W32(RxHashTable, 0x0);
872 SIS_W32(0x6c, 0x0);
873 SIS_W32(RxWolCtrl, 0x0);
874 SIS_W32(RxWolData, 0x0);
876 SIS_PCI_COMMIT();
878 sis190_set_rx_mode(dev);
880 /* Enable all known interrupts by setting the interrupt mask. */
881 SIS_W32(IntrMask, sis190_intr_mask);
883 SIS_W32(TxControl, 0x1a00 | CmdTxEnb);
884 SIS_W32(RxControl, 0x1a1d);
886 netif_start_queue(dev);
889 static void sis190_phy_task(struct work_struct *work)
891 struct sis190_private *tp =
892 container_of(work, struct sis190_private, phy_task);
893 struct net_device *dev = tp->dev;
894 void __iomem *ioaddr = tp->mmio_addr;
895 int phy_id = tp->mii_if.phy_id;
896 u16 val;
898 rtnl_lock();
900 if (!netif_running(dev))
901 goto out_unlock;
903 val = mdio_read(ioaddr, phy_id, MII_BMCR);
904 if (val & BMCR_RESET) {
905 // FIXME: needlessly high ? -- FR 02/07/2005
906 mod_timer(&tp->timer, jiffies + HZ/10);
907 } else if (!(mdio_read_latched(ioaddr, phy_id, MII_BMSR) &
908 BMSR_ANEGCOMPLETE)) {
909 net_link(tp, KERN_WARNING "%s: PHY reset until link up.\n",
910 dev->name);
911 netif_carrier_off(dev);
912 mdio_write(ioaddr, phy_id, MII_BMCR, val | BMCR_RESET);
913 mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);
914 } else {
915 /* Rejoice ! */
916 struct {
917 int val;
918 u32 ctl;
919 const char *msg;
920 } reg31[] = {
921 { LPA_1000XFULL | LPA_SLCT, 0x07000c00 | 0x00001000,
922 "1000 Mbps Full Duplex" },
923 { LPA_1000XHALF | LPA_SLCT, 0x07000c00,
924 "1000 Mbps Half Duplex" },
925 { LPA_100FULL, 0x04000800 | 0x00001000,
926 "100 Mbps Full Duplex" },
927 { LPA_100HALF, 0x04000800,
928 "100 Mbps Half Duplex" },
929 { LPA_10FULL, 0x04000400 | 0x00001000,
930 "10 Mbps Full Duplex" },
931 { LPA_10HALF, 0x04000400,
932 "10 Mbps Half Duplex" },
933 { 0, 0x04000400, "unknown" }
934 }, *p;
935 u16 adv;
937 val = mdio_read(ioaddr, phy_id, 0x1f);
938 net_link(tp, KERN_INFO "%s: mii ext = %04x.\n", dev->name, val);
940 val = mdio_read(ioaddr, phy_id, MII_LPA);
941 adv = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
942 net_link(tp, KERN_INFO "%s: mii lpa = %04x adv = %04x.\n",
943 dev->name, val, adv);
945 val &= adv;
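/*
 * reg31[] is ordered fastest mode first, so the first entry whose bits
 * are all present in the common (local & partner) abilities wins;
 * p->ctl then supplies the matching StationControl bits.
 */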
947 for (p = reg31; p->val; p++) {
948 if ((val & p->val) == p->val)
949 break;
952 p->ctl |= SIS_R32(StationControl) & ~0x0f001c00;
954 if ((tp->features & F_HAS_RGMII) &&
955 (tp->features & F_PHY_BCM5461)) {
956 // Set Tx Delay in RGMII mode.
957 mdio_write(ioaddr, phy_id, 0x18, 0xf1c7);
958 udelay(200);
959 mdio_write(ioaddr, phy_id, 0x1c, 0x8c00);
960 p->ctl |= 0x03000000;
963 SIS_W32(StationControl, p->ctl);
965 if (tp->features & F_HAS_RGMII) {
966 SIS_W32(RGDelay, 0x0441);
967 SIS_W32(RGDelay, 0x0440);
970 net_link(tp, KERN_INFO "%s: link on %s mode.\n", dev->name,
971 p->msg);
972 netif_carrier_on(dev);
975 out_unlock:
976 rtnl_unlock();
979 static void sis190_phy_timer(unsigned long __opaque)
981 struct net_device *dev = (struct net_device *)__opaque;
982 struct sis190_private *tp = netdev_priv(dev);
984 if (likely(netif_running(dev)))
985 schedule_work(&tp->phy_task);
988 static inline void sis190_delete_timer(struct net_device *dev)
990 struct sis190_private *tp = netdev_priv(dev);
992 del_timer_sync(&tp->timer);
995 static inline void sis190_request_timer(struct net_device *dev)
997 struct sis190_private *tp = netdev_priv(dev);
998 struct timer_list *timer = &tp->timer;
1000 init_timer(timer);
1001 timer->expires = jiffies + SIS190_PHY_TIMEOUT;
1002 timer->data = (unsigned long)dev;
1003 timer->function = sis190_phy_timer;
1004 add_timer(timer);
1007 static void sis190_set_rxbufsize(struct sis190_private *tp,
1008 struct net_device *dev)
1010 unsigned int mtu = dev->mtu;
1012 tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
1013 /* RxDesc->size has a licence to kill the lower bits */
1014 if (tp->rx_buf_sz & 0x07) {
1015 tp->rx_buf_sz += 8;
1016 tp->rx_buf_sz &= RX_BUF_MASK;
1020 static int sis190_open(struct net_device *dev)
1022 struct sis190_private *tp = netdev_priv(dev);
1023 struct pci_dev *pdev = tp->pci_dev;
1024 int rc = -ENOMEM;
1026 sis190_set_rxbufsize(tp, dev);
1029 * Rx and Tx descriptors need 256 bytes alignment.
1030 * pci_alloc_consistent() guarantees a stronger alignment.
1032 tp->TxDescRing = pci_alloc_consistent(pdev, TX_RING_BYTES, &tp->tx_dma);
1033 if (!tp->TxDescRing)
1034 goto out;
1036 tp->RxDescRing = pci_alloc_consistent(pdev, RX_RING_BYTES, &tp->rx_dma);
1037 if (!tp->RxDescRing)
1038 goto err_free_tx_0;
1040 rc = sis190_init_ring(dev);
1041 if (rc < 0)
1042 goto err_free_rx_1;
1044 INIT_WORK(&tp->phy_task, sis190_phy_task);
1046 sis190_request_timer(dev);
1048 rc = request_irq(dev->irq, sis190_interrupt, IRQF_SHARED, dev->name, dev);
1049 if (rc < 0)
1050 goto err_release_timer_2;
1052 sis190_hw_start(dev);
1053 out:
1054 return rc;
1056 err_release_timer_2:
1057 sis190_delete_timer(dev);
1058 sis190_rx_clear(tp);
1059 err_free_rx_1:
1060 pci_free_consistent(tp->pci_dev, RX_RING_BYTES, tp->RxDescRing,
1061 tp->rx_dma);
1062 err_free_tx_0:
1063 pci_free_consistent(tp->pci_dev, TX_RING_BYTES, tp->TxDescRing,
1064 tp->tx_dma);
1065 goto out;
1068 static void sis190_tx_clear(struct sis190_private *tp)
1070 unsigned int i;
1072 for (i = 0; i < NUM_TX_DESC; i++) {
1073 struct sk_buff *skb = tp->Tx_skbuff[i];
1075 if (!skb)
1076 continue;
1078 sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i);
1079 tp->Tx_skbuff[i] = NULL;
1080 dev_kfree_skb(skb);
1082 tp->dev->stats.tx_dropped++;
1084 tp->cur_tx = tp->dirty_tx = 0;
1087 static void sis190_down(struct net_device *dev)
1089 struct sis190_private *tp = netdev_priv(dev);
1090 void __iomem *ioaddr = tp->mmio_addr;
1091 unsigned int poll_locked = 0;
1093 sis190_delete_timer(dev);
1095 netif_stop_queue(dev);
1097 do {
1098 spin_lock_irq(&tp->lock);
1100 sis190_asic_down(ioaddr);
1102 spin_unlock_irq(&tp->lock);
1104 synchronize_irq(dev->irq);
1106 if (!poll_locked)
1107 poll_locked++;
1109 synchronize_sched();
1111 } while (SIS_R32(IntrMask));
1113 sis190_tx_clear(tp);
1114 sis190_rx_clear(tp);
1117 static int sis190_close(struct net_device *dev)
1119 struct sis190_private *tp = netdev_priv(dev);
1120 struct pci_dev *pdev = tp->pci_dev;
1122 sis190_down(dev);
1124 free_irq(dev->irq, dev);
1126 pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
1127 pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);
1129 tp->TxDescRing = NULL;
1130 tp->RxDescRing = NULL;
1132 return 0;
1135 static int sis190_start_xmit(struct sk_buff *skb, struct net_device *dev)
1137 struct sis190_private *tp = netdev_priv(dev);
1138 void __iomem *ioaddr = tp->mmio_addr;
1139 u32 len, entry, dirty_tx;
1140 struct TxDesc *desc;
1141 dma_addr_t mapping;
1143 if (unlikely(skb->len < ETH_ZLEN)) {
1144 if (skb_padto(skb, ETH_ZLEN)) {
1145 dev->stats.tx_dropped++;
1146 goto out;
1148 len = ETH_ZLEN;
1149 } else {
1150 len = skb->len;
1153 entry = tp->cur_tx % NUM_TX_DESC;
1154 desc = tp->TxDescRing + entry;
1156 if (unlikely(le32_to_cpu(desc->status) & OWNbit)) {
1157 netif_stop_queue(dev);
1158 net_tx_err(tp, KERN_ERR PFX
1159 "%s: BUG! Tx Ring full when queue awake!\n",
1160 dev->name);
1161 return NETDEV_TX_BUSY;
1164 mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
1166 tp->Tx_skbuff[entry] = skb;
1168 desc->PSize = cpu_to_le32(len);
1169 desc->addr = cpu_to_le32(mapping);
1171 desc->size = cpu_to_le32(len);
1172 if (entry == (NUM_TX_DESC - 1))
1173 desc->size |= cpu_to_le32(RingEnd);
1175 wmb();
1177 desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit);
1179 tp->cur_tx++;
1181 smp_wmb();
1183 SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb);
1185 dev->trans_start = jiffies;
1187 dirty_tx = tp->dirty_tx;
1188 if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) {
1189 netif_stop_queue(dev);
1190 smp_rmb();
1191 if (dirty_tx != tp->dirty_tx)
1192 netif_wake_queue(dev);
1194 out:
1195 return NETDEV_TX_OK;
1198 static void sis190_free_phy(struct list_head *first_phy)
1200 struct sis190_phy *cur, *next;
1202 list_for_each_entry_safe(cur, next, first_phy, list) {
1203 kfree(cur);
1208 * sis190_default_phy - Select default PHY for sis190 mac.
1209 * @dev: the net device to probe for
1211 * Select the first detected PHY with link as default.
1212 * If none has link, select the PHY whose type is HOME as default.
1213 * If no HOME PHY exists, select LAN.
1215 static u16 sis190_default_phy(struct net_device *dev)
1217 struct sis190_phy *phy, *phy_home, *phy_default, *phy_lan;
1218 struct sis190_private *tp = netdev_priv(dev);
1219 struct mii_if_info *mii_if = &tp->mii_if;
1220 void __iomem *ioaddr = tp->mmio_addr;
1221 u16 status;
1223 phy_home = phy_default = phy_lan = NULL;
1225 list_for_each_entry(phy, &tp->first_phy, list) {
1226 status = mdio_read_latched(ioaddr, phy->phy_id, MII_BMSR);
1228 // Link up, no default PHY selected yet, and not a ghost PHY.
1229 if ((status & BMSR_LSTATUS) &&
1230 !phy_default &&
1231 (phy->type != UNKNOWN)) {
1232 phy_default = phy;
1233 } else {
1234 status = mdio_read(ioaddr, phy->phy_id, MII_BMCR);
1235 mdio_write(ioaddr, phy->phy_id, MII_BMCR,
1236 status | BMCR_ANENABLE | BMCR_ISOLATE);
1237 if (phy->type == HOME)
1238 phy_home = phy;
1239 else if (phy->type == LAN)
1240 phy_lan = phy;
1244 if (!phy_default) {
1245 if (phy_home)
1246 phy_default = phy_home;
1247 else if (phy_lan)
1248 phy_default = phy_lan;
1249 else
1250 phy_default = list_entry(&tp->first_phy,
1251 struct sis190_phy, list);
1254 if (mii_if->phy_id != phy_default->phy_id) {
1255 mii_if->phy_id = phy_default->phy_id;
1256 net_probe(tp, KERN_INFO
1257 "%s: Using transceiver at address %d as default.\n",
1258 pci_name(tp->pci_dev), mii_if->phy_id);
1261 status = mdio_read(ioaddr, mii_if->phy_id, MII_BMCR);
1262 status &= (~BMCR_ISOLATE);
1264 mdio_write(ioaddr, mii_if->phy_id, MII_BMCR, status);
1265 status = mdio_read_latched(ioaddr, mii_if->phy_id, MII_BMSR);
1267 return status;
1270 static void sis190_init_phy(struct net_device *dev, struct sis190_private *tp,
1271 struct sis190_phy *phy, unsigned int phy_id,
1272 u16 mii_status)
1274 void __iomem *ioaddr = tp->mmio_addr;
1275 struct mii_chip_info *p;
1277 INIT_LIST_HEAD(&phy->list);
1278 phy->status = mii_status;
1279 phy->phy_id = phy_id;
1281 phy->id[0] = mdio_read(ioaddr, phy_id, MII_PHYSID1);
1282 phy->id[1] = mdio_read(ioaddr, phy_id, MII_PHYSID2);
1284 for (p = mii_chip_table; p->type; p++) {
1285 if ((p->id[0] == phy->id[0]) &&
1286 (p->id[1] == (phy->id[1] & 0xfff0))) {
1287 break;
1291 if (p->id[1]) {
1292 phy->type = (p->type == MIX) ?
1293 ((mii_status & (BMSR_100FULL | BMSR_100HALF)) ?
1294 LAN : HOME) : p->type;
1295 tp->features |= p->feature;
1296 } else
1297 phy->type = UNKNOWN;
1299 net_probe(tp, KERN_INFO "%s: %s transceiver at address %d.\n",
1300 pci_name(tp->pci_dev),
1301 (phy->type == UNKNOWN) ? "Unknown PHY" : p->name, phy_id);
1304 static void sis190_mii_probe_88e1111_fixup(struct sis190_private *tp)
1306 if (tp->features & F_PHY_88E1111) {
1307 void __iomem *ioaddr = tp->mmio_addr;
1308 int phy_id = tp->mii_if.phy_id;
1309 u16 reg[2][2] = {
1310 { 0x808b, 0x0ce1 },
1311 { 0x808f, 0x0c60 }
1312 }, *p;
1314 p = (tp->features & F_HAS_RGMII) ? reg[0] : reg[1];
1316 mdio_write(ioaddr, phy_id, 0x1b, p[0]);
1317 udelay(200);
1318 mdio_write(ioaddr, phy_id, 0x14, p[1]);
1319 udelay(200);
1324 * sis190_mii_probe - Probe MII PHY for sis190
1325 * @dev: the net device to probe for
1327 * Search all 32 possible MII PHY addresses.
1328 * Identify and set the current PHY if one is found,
1329 * return an error if none is found.
1331 static int __devinit sis190_mii_probe(struct net_device *dev)
1333 struct sis190_private *tp = netdev_priv(dev);
1334 struct mii_if_info *mii_if = &tp->mii_if;
1335 void __iomem *ioaddr = tp->mmio_addr;
1336 int phy_id;
1337 int rc = 0;
1339 INIT_LIST_HEAD(&tp->first_phy);
1341 for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
1342 struct sis190_phy *phy;
1343 u16 status;
1345 status = mdio_read_latched(ioaddr, phy_id, MII_BMSR);
1347 // Try next mii if the current one is not accessible.
1348 if (status == 0xffff || status == 0x0000)
1349 continue;
1351 phy = kmalloc(sizeof(*phy), GFP_KERNEL);
1352 if (!phy) {
1353 sis190_free_phy(&tp->first_phy);
1354 rc = -ENOMEM;
1355 goto out;
1358 sis190_init_phy(dev, tp, phy, phy_id, status);
1360 list_add(&tp->first_phy, &phy->list);
1363 if (list_empty(&tp->first_phy)) {
1364 net_probe(tp, KERN_INFO "%s: No MII transceivers found!\n",
1365 pci_name(tp->pci_dev));
1366 rc = -EIO;
1367 goto out;
1370 /* Select default PHY for mac */
1371 sis190_default_phy(dev);
1373 sis190_mii_probe_88e1111_fixup(tp);
1375 mii_if->dev = dev;
1376 mii_if->mdio_read = __mdio_read;
1377 mii_if->mdio_write = __mdio_write;
1378 mii_if->phy_id_mask = PHY_ID_ANY;
1379 mii_if->reg_num_mask = MII_REG_ANY;
1380 out:
1381 return rc;
1384 static void sis190_mii_remove(struct net_device *dev)
1386 struct sis190_private *tp = netdev_priv(dev);
1388 sis190_free_phy(&tp->first_phy);
1391 static void sis190_release_board(struct pci_dev *pdev)
1393 struct net_device *dev = pci_get_drvdata(pdev);
1394 struct sis190_private *tp = netdev_priv(dev);
1396 iounmap(tp->mmio_addr);
1397 pci_release_regions(pdev);
1398 pci_disable_device(pdev);
1399 free_netdev(dev);
1402 static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
1404 struct sis190_private *tp;
1405 struct net_device *dev;
1406 void __iomem *ioaddr;
1407 int rc;
1409 dev = alloc_etherdev(sizeof(*tp));
1410 if (!dev) {
1411 net_drv(&debug, KERN_ERR PFX "unable to alloc new ethernet\n");
1412 rc = -ENOMEM;
1413 goto err_out_0;
1416 SET_NETDEV_DEV(dev, &pdev->dev);
1418 tp = netdev_priv(dev);
1419 tp->dev = dev;
1420 tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);
1422 rc = pci_enable_device(pdev);
1423 if (rc < 0) {
1424 net_probe(tp, KERN_ERR "%s: enable failure\n", pci_name(pdev));
1425 goto err_free_dev_1;
1428 rc = -ENODEV;
1430 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1431 net_probe(tp, KERN_ERR "%s: region #0 is no MMIO resource.\n",
1432 pci_name(pdev));
1433 goto err_pci_disable_2;
1435 if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) {
1436 net_probe(tp, KERN_ERR "%s: invalid PCI region size(s).\n",
1437 pci_name(pdev));
1438 goto err_pci_disable_2;
1441 rc = pci_request_regions(pdev, DRV_NAME);
1442 if (rc < 0) {
1443 net_probe(tp, KERN_ERR PFX "%s: could not request regions.\n",
1444 pci_name(pdev));
1445 goto err_pci_disable_2;
1448 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
1449 if (rc < 0) {
1450 net_probe(tp, KERN_ERR "%s: DMA configuration failed.\n",
1451 pci_name(pdev));
1452 goto err_free_res_3;
1455 pci_set_master(pdev);
1457 ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE);
1458 if (!ioaddr) {
1459 net_probe(tp, KERN_ERR "%s: cannot remap MMIO, aborting\n",
1460 pci_name(pdev));
1461 rc = -EIO;
1462 goto err_free_res_3;
1465 tp->pci_dev = pdev;
1466 tp->mmio_addr = ioaddr;
1468 sis190_irq_mask_and_ack(ioaddr);
1470 sis190_soft_reset(ioaddr);
1471 out:
1472 return dev;
1474 err_free_res_3:
1475 pci_release_regions(pdev);
1476 err_pci_disable_2:
1477 pci_disable_device(pdev);
1478 err_free_dev_1:
1479 free_netdev(dev);
1480 err_out_0:
1481 dev = ERR_PTR(rc);
1482 goto out;
1485 static void sis190_tx_timeout(struct net_device *dev)
1487 struct sis190_private *tp = netdev_priv(dev);
1488 void __iomem *ioaddr = tp->mmio_addr;
1489 u8 tmp8;
1491 /* Disable Tx, if not already */
1492 tmp8 = SIS_R8(TxControl);
1493 if (tmp8 & CmdTxEnb)
1494 SIS_W8(TxControl, tmp8 & ~CmdTxEnb);
1497 net_tx_err(tp, KERN_INFO "%s: Transmit timeout, status %08x %08x.\n",
1498 dev->name, SIS_R32(TxControl), SIS_R32(TxSts));
1500 /* Disable interrupts by clearing the interrupt mask. */
1501 SIS_W32(IntrMask, 0x0000);
1503 /* Stop a shared interrupt from scavenging while we are. */
1504 spin_lock_irq(&tp->lock);
1505 sis190_tx_clear(tp);
1506 spin_unlock_irq(&tp->lock);
1508 /* ...and finally, reset everything. */
1509 sis190_hw_start(dev);
1511 netif_wake_queue(dev);
1514 static void sis190_set_rgmii(struct sis190_private *tp, u8 reg)
1516 tp->features |= (reg & 0x80) ? F_HAS_RGMII : 0;
1519 static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
1520 struct net_device *dev)
1522 struct sis190_private *tp = netdev_priv(dev);
1523 void __iomem *ioaddr = tp->mmio_addr;
1524 u16 sig;
1525 int i;
1527 net_probe(tp, KERN_INFO "%s: Read MAC address from EEPROM\n",
1528 pci_name(pdev));
1530 /* Check to see if there is a sane EEPROM */
1531 sig = (u16) sis190_read_eeprom(ioaddr, EEPROMSignature);
1533 if ((sig == 0xffff) || (sig == 0x0000)) {
1534 net_probe(tp, KERN_INFO "%s: Error EEPROM read %x.\n",
1535 pci_name(pdev), sig);
1536 return -EIO;
1539 /* Get MAC address from EEPROM */
1540 for (i = 0; i < MAC_ADDR_LEN / 2; i++) {
1541 u16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i);
1543 ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(w);
1546 sis190_set_rgmii(tp, sis190_read_eeprom(ioaddr, EEPROMInfo));
1548 return 0;
1552 * sis190_get_mac_addr_from_apc - Get MAC address for SiS96x model
1553 * @pdev: PCI device
1554 * @dev: network device to get address for
1556 * SiS96x models store the MAC address in APC CMOS RAM.
1557 * The APC CMOS RAM is accessed through the ISA bridge.
1558 * The MAC address is read into @dev->dev_addr.
1560 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
1561 struct net_device *dev)
1563 static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
1564 struct sis190_private *tp = netdev_priv(dev);
1565 struct pci_dev *isa_bridge;
1566 u8 reg, tmp8;
1567 unsigned int i;
1569 net_probe(tp, KERN_INFO "%s: Read MAC address from APC.\n",
1570 pci_name(pdev));
1572 for (i = 0; i < ARRAY_SIZE(ids); i++) {
1573 isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, ids[i], NULL);
1574 if (isa_bridge)
1575 break;
1578 if (!isa_bridge) {
1579 net_probe(tp, KERN_INFO "%s: Can not find ISA bridge.\n",
1580 pci_name(pdev));
1581 return -EIO;
1584 /* Enable port 78h & 79h to access APC Registers. */
1585 pci_read_config_byte(isa_bridge, 0x48, &tmp8);
1586 reg = (tmp8 & ~0x02);
1587 pci_write_config_byte(isa_bridge, 0x48, reg);
1588 udelay(50);
1589 pci_read_config_byte(isa_bridge, 0x48, &reg);
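/*
 * The APC CMOS RAM is reached through an index/data pair at I/O ports
 * 0x78/0x79 once bit 1 of the bridge's register 0x48 is cleared. The
 * MAC address lives at APC offsets 0x09..0x0e and offset 0x12, bit 7,
 * selects RGMII vs GMII (see sis190_set_rgmii).
 */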
1591 for (i = 0; i < MAC_ADDR_LEN; i++) {
1592 outb(0x9 + i, 0x78);
1593 dev->dev_addr[i] = inb(0x79);
1596 outb(0x12, 0x78);
1597 reg = inb(0x79);
1599 sis190_set_rgmii(tp, reg);
1601 /* Restore the value to ISA Bridge */
1602 pci_write_config_byte(isa_bridge, 0x48, tmp8);
1603 pci_dev_put(isa_bridge);
1605 return 0;
1609 * sis190_init_rxfilter - Initialize the Rx filter
1610 * @dev: network device to initialize
1612 * Set receive filter address to our MAC address
1613 * and enable packet filtering.
1615 static inline void sis190_init_rxfilter(struct net_device *dev)
1617 struct sis190_private *tp = netdev_priv(dev);
1618 void __iomem *ioaddr = tp->mmio_addr;
1619 u16 ctl;
1620 int i;
1622 ctl = SIS_R16(RxMacControl);
1624 * Disable packet filtering before setting filter.
1625 * Note: SiS's driver writes 32 bits but RxMacControl is 16 bits
1626 * only and followed by RxMacAddr (6 bytes). Strange. -- FR
1628 SIS_W16(RxMacControl, ctl & ~0x0f00);
1630 for (i = 0; i < MAC_ADDR_LEN; i++)
1631 SIS_W8(RxMacAddr + i, dev->dev_addr[i]);
1633 SIS_W16(RxMacControl, ctl);
1634 SIS_PCI_COMMIT();
1637 static int sis190_get_mac_addr(struct pci_dev *pdev, struct net_device *dev)
1639 u8 from;
1641 pci_read_config_byte(pdev, 0x73, &from);
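/*
 * Bit 0 of PCI config register 0x73 tells whether the MAC address is
 * stored in the chipset's APC CMOS RAM (bit set) or in an external
 * EEPROM (bit clear).
 */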
1643 return (from & 0x00000001) ?
1644 sis190_get_mac_addr_from_apc(pdev, dev) :
1645 sis190_get_mac_addr_from_eeprom(pdev, dev);
1648 static void sis190_set_speed_auto(struct net_device *dev)
1650 struct sis190_private *tp = netdev_priv(dev);
1651 void __iomem *ioaddr = tp->mmio_addr;
1652 int phy_id = tp->mii_if.phy_id;
1653 int val;
1655 net_link(tp, KERN_INFO "%s: Enabling Auto-negotiation.\n", dev->name);
1657 val = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
1659 // Enable 10/100 Full/Half Mode, leave MII_ADVERTISE bits 4:0
1660 // unchanged.
1661 mdio_write(ioaddr, phy_id, MII_ADVERTISE, (val & ADVERTISE_SLCT) |
1662 ADVERTISE_100FULL | ADVERTISE_10FULL |
1663 ADVERTISE_100HALF | ADVERTISE_10HALF);
1665 // Enable 1000 Full Mode.
1666 mdio_write(ioaddr, phy_id, MII_CTRL1000, ADVERTISE_1000FULL);
1668 // Enable auto-negotiation and restart auto-negotiation.
1669 mdio_write(ioaddr, phy_id, MII_BMCR,
1670 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET);
1673 static int sis190_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1675 struct sis190_private *tp = netdev_priv(dev);
1677 return mii_ethtool_gset(&tp->mii_if, cmd);
1680 static int sis190_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1682 struct sis190_private *tp = netdev_priv(dev);
1684 return mii_ethtool_sset(&tp->mii_if, cmd);
1687 static void sis190_get_drvinfo(struct net_device *dev,
1688 struct ethtool_drvinfo *info)
1690 struct sis190_private *tp = netdev_priv(dev);
1692 strcpy(info->driver, DRV_NAME);
1693 strcpy(info->version, DRV_VERSION);
1694 strcpy(info->bus_info, pci_name(tp->pci_dev));
1697 static int sis190_get_regs_len(struct net_device *dev)
1699 return SIS190_REGS_SIZE;
1702 static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1703 void *p)
1705 struct sis190_private *tp = netdev_priv(dev);
1706 unsigned long flags;
1708 if (regs->len > SIS190_REGS_SIZE)
1709 regs->len = SIS190_REGS_SIZE;
1711 spin_lock_irqsave(&tp->lock, flags);
1712 memcpy_fromio(p, tp->mmio_addr, regs->len);
1713 spin_unlock_irqrestore(&tp->lock, flags);
1716 static int sis190_nway_reset(struct net_device *dev)
1718 struct sis190_private *tp = netdev_priv(dev);
1720 return mii_nway_restart(&tp->mii_if);
1723 static u32 sis190_get_msglevel(struct net_device *dev)
1725 struct sis190_private *tp = netdev_priv(dev);
1727 return tp->msg_enable;
1730 static void sis190_set_msglevel(struct net_device *dev, u32 value)
1732 struct sis190_private *tp = netdev_priv(dev);
1734 tp->msg_enable = value;
1737 static const struct ethtool_ops sis190_ethtool_ops = {
1738 .get_settings = sis190_get_settings,
1739 .set_settings = sis190_set_settings,
1740 .get_drvinfo = sis190_get_drvinfo,
1741 .get_regs_len = sis190_get_regs_len,
1742 .get_regs = sis190_get_regs,
1743 .get_link = ethtool_op_get_link,
1744 .get_msglevel = sis190_get_msglevel,
1745 .set_msglevel = sis190_set_msglevel,
1746 .nway_reset = sis190_nway_reset,
1749 static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1751 struct sis190_private *tp = netdev_priv(dev);
1753 return !netif_running(dev) ? -EINVAL :
1754 generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL);
1757 static int __devinit sis190_init_one(struct pci_dev *pdev,
1758 const struct pci_device_id *ent)
1760 static int printed_version = 0;
1761 struct sis190_private *tp;
1762 struct net_device *dev;
1763 void __iomem *ioaddr;
1764 int rc;
1765 DECLARE_MAC_BUF(mac);
1767 if (!printed_version) {
1768 net_drv(&debug, KERN_INFO SIS190_DRIVER_NAME " loaded.\n");
1769 printed_version = 1;
1772 dev = sis190_init_board(pdev);
1773 if (IS_ERR(dev)) {
1774 rc = PTR_ERR(dev);
1775 goto out;
1778 pci_set_drvdata(pdev, dev);
1780 tp = netdev_priv(dev);
1781 ioaddr = tp->mmio_addr;
1783 rc = sis190_get_mac_addr(pdev, dev);
1784 if (rc < 0)
1785 goto err_release_board;
1787 sis190_init_rxfilter(dev);
1789 INIT_WORK(&tp->phy_task, sis190_phy_task);
1791 dev->open = sis190_open;
1792 dev->stop = sis190_close;
1793 dev->do_ioctl = sis190_ioctl;
1794 dev->tx_timeout = sis190_tx_timeout;
1795 dev->watchdog_timeo = SIS190_TX_TIMEOUT;
1796 dev->hard_start_xmit = sis190_start_xmit;
1797 #ifdef CONFIG_NET_POLL_CONTROLLER
1798 dev->poll_controller = sis190_netpoll;
1799 #endif
1800 dev->set_multicast_list = sis190_set_rx_mode;
1801 SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
1802 dev->irq = pdev->irq;
1803 dev->base_addr = (unsigned long) 0xdead;
1805 spin_lock_init(&tp->lock);
1807 rc = sis190_mii_probe(dev);
1808 if (rc < 0)
1809 goto err_release_board;
1811 rc = register_netdev(dev);
1812 if (rc < 0)
1813 goto err_remove_mii;
1815 net_probe(tp, KERN_INFO "%s: %s at %p (IRQ: %d), "
1816 "%s\n",
1817 pci_name(pdev), sis_chip_info[ent->driver_data].name,
1818 ioaddr, dev->irq, print_mac(mac, dev->dev_addr));
1820 net_probe(tp, KERN_INFO "%s: %s mode.\n", dev->name,
1821 (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII");
1823 netif_carrier_off(dev);
1825 sis190_set_speed_auto(dev);
1826 out:
1827 return rc;
1829 err_remove_mii:
1830 sis190_mii_remove(dev);
1831 err_release_board:
1832 sis190_release_board(pdev);
1833 goto out;
1836 static void __devexit sis190_remove_one(struct pci_dev *pdev)
1838 struct net_device *dev = pci_get_drvdata(pdev);
1840 sis190_mii_remove(dev);
1841 flush_scheduled_work();
1842 unregister_netdev(dev);
1843 sis190_release_board(pdev);
1844 pci_set_drvdata(pdev, NULL);
1847 static struct pci_driver sis190_pci_driver = {
1848 .name = DRV_NAME,
1849 .id_table = sis190_pci_tbl,
1850 .probe = sis190_init_one,
1851 .remove = __devexit_p(sis190_remove_one),
1854 static int __init sis190_init_module(void)
1856 return pci_register_driver(&sis190_pci_driver);
1859 static void __exit sis190_cleanup_module(void)
1861 pci_unregister_driver(&sis190_pci_driver);
1864 module_init(sis190_init_module);
1865 module_exit(sis190_cleanup_module);