[PATCH] sis190: extract bits definition from SiS driver.
drivers/net/sis190.c
/*
   sis190.c: Silicon Integrated Systems SiS190 ethernet driver

   Copyright (c) 2003 K.M. Liu <kmliu@sis.com>
   Copyright (c) 2003, 2004 Jeff Garzik <jgarzik@pobox.com>
   Copyright (c) 2003, 2004, 2005 Francois Romieu <romieu@fr.zoreil.com>

   Based on r8169.c, tg3.c, 8139cp.c, skge.c, epic100.c and SiS 190/191
   genuine driver.

   This software may be used and distributed according to the terms of
   the GNU General Public License (GPL), incorporated herein by reference.
   Drivers based on or derived from this code fall under the GPL and must
   retain the authorship, copyright and license notice.  This file is not
   a complete program and may only be used when the entire operating
   system is licensed under the GPL.

   See the file COPYING in this distribution for more information.

 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/pci.h>
#include <linux/mii.h>
#include <linux/delay.h>
#include <linux/crc32.h>
#include <linux/dma-mapping.h>

#include <asm/irq.h>
#define net_drv(p, arg...)	if (netif_msg_drv(p)) \
					printk(arg)
#define net_probe(p, arg...)	if (netif_msg_probe(p)) \
					printk(arg)
#define net_link(p, arg...)	if (netif_msg_link(p)) \
					printk(arg)
#define net_intr(p, arg...)	if (netif_msg_intr(p)) \
					printk(arg)
#define net_tx_err(p, arg...)	if (netif_msg_tx_err(p)) \
					printk(arg)
#ifdef CONFIG_SIS190_NAPI
#define NAPI_SUFFIX	"-NAPI"
#else
#define NAPI_SUFFIX	""
#endif

#define DRV_VERSION		"1.2" NAPI_SUFFIX
#define DRV_NAME		"sis190"
#define SIS190_DRIVER_NAME	DRV_NAME " Gigabit Ethernet driver " DRV_VERSION
#define PFX DRV_NAME ": "

#ifdef CONFIG_SIS190_NAPI
#define sis190_rx_skb			netif_receive_skb
#define sis190_rx_quota(count, quota)	min(count, quota)
#else
#define sis190_rx_skb			netif_rx
#define sis190_rx_quota(count, quota)	count
#endif
#define MAC_ADDR_LEN		6

#define NUM_TX_DESC		64	/* [8..1024] */
#define NUM_RX_DESC		64	/* [8..8192] */
#define TX_RING_BYTES		(NUM_TX_DESC * sizeof(struct TxDesc))
#define RX_RING_BYTES		(NUM_RX_DESC * sizeof(struct RxDesc))
#define RX_BUF_SIZE		1536
#define RX_BUF_MASK		0xfff8

#define SIS190_REGS_SIZE	0x80
#define SIS190_TX_TIMEOUT	(6*HZ)
#define SIS190_PHY_TIMEOUT	(10*HZ)
#define SIS190_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
				 NETIF_MSG_LINK | NETIF_MSG_IFUP | \
				 NETIF_MSG_IFDOWN)
/* Enhanced PHY access register bit definitions */
#define EhnMIIread	0x0000
#define EhnMIIwrite	0x0020
#define EhnMIIdataShift	16
#define EhnMIIpmdShift	6	/* 7016 only */
#define EhnMIIregShift	11
#define EhnMIIreq	0x0010
#define EhnMIInotDone	0x0010
/* Write/read MMIO register */
#define SIS_W8(reg, val)	writeb ((val), ioaddr + (reg))
#define SIS_W16(reg, val)	writew ((val), ioaddr + (reg))
#define SIS_W32(reg, val)	writel ((val), ioaddr + (reg))
#define SIS_R8(reg)		readb (ioaddr + (reg))
#define SIS_R16(reg)		readw (ioaddr + (reg))
#define SIS_R32(reg)		readl (ioaddr + (reg))

#define SIS_PCI_COMMIT()	SIS_R32(IntrControl)
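/*
 * Reading IntrControl back forces any posted MMIO writes out to the chip,
 * so SIS_PCI_COMMIT() is used wherever a write must have taken effect
 * before the code proceeds.
 */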
enum sis190_registers {
	TxControl	= 0x00,
	TxDescStartAddr	= 0x04,
	rsv0		= 0x08,	// reserved
	TxSts		= 0x0c,	// unused (Control/Status)
	RxControl	= 0x10,
	RxDescStartAddr	= 0x14,
	rsv1		= 0x18,	// reserved
	RxSts		= 0x1c,	// unused
	IntrStatus	= 0x20,
	IntrMask	= 0x24,
	IntrControl	= 0x28,
	IntrTimer	= 0x2c,	// unused (Interrupt Timer)
	PMControl	= 0x30,	// unused (Power Mgmt Control/Status)
	rsv2		= 0x34,	// reserved
	ROMControl	= 0x38,
	ROMInterface	= 0x3c,
	StationControl	= 0x40,
	GMIIControl	= 0x44,
	GIoCR		= 0x48,	// unused (GMAC IO Compensation)
	GIoCtrl		= 0x4c,	// unused (GMAC IO Control)
	TxMacControl	= 0x50,
	TxLimit		= 0x54,	// unused (Tx MAC Timer/TryLimit)
	RGDelay		= 0x58,	// unused (RGMII Tx Internal Delay)
	rsv3		= 0x5c,	// reserved
	RxMacControl	= 0x60,
	RxMacAddr	= 0x62,
	RxHashTable	= 0x68,
	// Undocumented	= 0x6c,
	RxWolCtrl	= 0x70,
	RxWolData	= 0x74,	// unused (Rx WOL Data Access)
	RxMPSControl	= 0x78,	// unused (Rx MPS Control)
	rsv4		= 0x7c,	// reserved
};
enum sis190_register_content {
	/* IntrStatus */
	SoftInt			= 0x40000000,	// unused
	Timeup			= 0x20000000,	// unused
	PauseFrame		= 0x00080000,	// unused
	MagicPacket		= 0x00040000,	// unused
	WakeupFrame		= 0x00020000,	// unused
	LinkChange		= 0x00010000,
	RxQEmpty		= 0x00000080,
	RxQInt			= 0x00000040,
	TxQ1Empty		= 0x00000020,	// unused
	TxQ1Int			= 0x00000010,
	TxQ0Empty		= 0x00000008,	// unused
	TxQ0Int			= 0x00000004,
	RxHalt			= 0x00000002,
	TxHalt			= 0x00000001,

	/* {Rx/Tx}CmdBits */
	CmdReset		= 0x10,
	CmdRxEnb		= 0x08,		// unused
	CmdTxEnb		= 0x01,
	RxBufEmpty		= 0x01,		// unused

	/* Cfg9346Bits */
	Cfg9346_Lock		= 0x00,		// unused
	Cfg9346_Unlock		= 0xc0,		// unused

	/* RxMacControl */
	AcceptErr		= 0x20,		// unused
	AcceptRunt		= 0x10,		// unused
	AcceptBroadcast		= 0x0800,
	AcceptMulticast		= 0x0400,
	AcceptMyPhys		= 0x0200,
	AcceptAllPhys		= 0x0100,

	/* RxConfigBits */
	RxCfgFIFOShift		= 13,
	RxCfgDMAShift		= 8,		// 0x1a in RxControl ?

	/* TxConfigBits */
	TxInterFrameGapShift	= 24,
	TxDMAShift		= 8,	/* DMA burst value (0-7) is shifted this many bits */

	/* StationControl */
	_1000bpsF		= 0x1c00,
	_1000bpsH		= 0x0c00,
	_100bpsF		= 0x1800,
	_100bpsH		= 0x0800,
	_10bpsF			= 0x1400,
	_10bpsH			= 0x0400,

	LinkStatus		= 0x02,		// unused
	FullDup			= 0x01,		// unused

	/* TBICSRBit */
	TBILinkOK		= 0x02000000,	// unused
};
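/*
 * The _1000bpsF.._10bpsH values above are what sis190_phy_task() writes
 * into StationControl once autonegotiation has resolved the link speed
 * and duplex.
 */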
struct TxDesc {
	u32 PSize;
	u32 status;
	u32 addr;
	u32 size;
};

struct RxDesc {
	u32 PSize;
	u32 status;
	u32 addr;
	u32 size;
};
enum _DescStatusBit {
	/* _Desc.status */
	OWNbit		= 0x80000000,	// RXOWN/TXOWN
	INTbit		= 0x40000000,	// RXINT/TXINT
	CRCbit		= 0x00020000,	// CRCOFF/CRCEN
	PADbit		= 0x00010000,	// PREADD/PADEN
	/* _Desc.size */
	RingEnd		= 0x80000000,
	/* TxDesc.status */
	LSEN		= 0x08000000,	// TSO ? -- FR
	IPCS		= 0x04000000,
	TCPCS		= 0x02000000,
	UDPCS		= 0x01000000,
	BSTEN		= 0x00800000,
	EXTEN		= 0x00400000,
	DEFEN		= 0x00200000,
	BKFEN		= 0x00100000,
	CRSEN		= 0x00080000,
	COLEN		= 0x00040000,
	THOL3		= 0x30000000,
	THOL2		= 0x20000000,
	THOL1		= 0x10000000,
	THOL0		= 0x00000000,
	/* RxDesc.status */
	IPON		= 0x20000000,
	TCPON		= 0x10000000,
	UDPON		= 0x08000000,
	Wakup		= 0x00400000,
	Magic		= 0x00200000,
	Pause		= 0x00100000,
	DEFbit		= 0x00200000,
	BCAST		= 0x000c0000,
	MCAST		= 0x00080000,
	UCAST		= 0x00040000,
	/* RxDesc.PSize */
	TAGON		= 0x80000000,
	RxDescCountMask	= 0x7f000000,	// multi-desc pkt when > 1 ? -- FR
	ABORT		= 0x00800000,
	SHORT		= 0x00400000,
	LIMIT		= 0x00200000,
	MIIER		= 0x00100000,
	OVRUN		= 0x00080000,
	NIBON		= 0x00040000,
	COLON		= 0x00020000,
	CRCOK		= 0x00010000,
	RxSizeMask	= 0x0000ffff
	/*
	 * The asic could apparently do vlan, TSO, jumbo (sis191 only) and
	 * provide two (unused with Linux) Tx queues. No publicly
	 * available documentation alas.
	 */
};
enum sis190_eeprom_access_register_bits {
	EECS	= 0x00000001,	// unused
	EECLK	= 0x00000002,	// unused
	EEDO	= 0x00000008,	// unused
	EEDI	= 0x00000004,	// unused
	EEREQ	= 0x00000080,
	EEROP	= 0x00000200,
	EEWOP	= 0x00000100	// unused
};

/* EEPROM Addresses */
enum sis190_eeprom_address {
	EEPROMSignature	= 0x00,
	EEPROMCLK	= 0x01,	// unused
	EEPROMInfo	= 0x02,
	EEPROMMACAddr	= 0x03
};
struct sis190_private {
	void __iomem *mmio_addr;
	struct pci_dev *pci_dev;
	struct net_device_stats stats;
	spinlock_t lock;
	u32 rx_buf_sz;
	u32 cur_rx;
	u32 cur_tx;
	u32 dirty_rx;
	u32 dirty_tx;
	dma_addr_t rx_dma;
	dma_addr_t tx_dma;
	struct RxDesc *RxDescRing;
	struct TxDesc *TxDescRing;
	struct sk_buff *Rx_skbuff[NUM_RX_DESC];
	struct sk_buff *Tx_skbuff[NUM_TX_DESC];
	struct work_struct phy_task;
	struct timer_list timer;
	u32 msg_enable;
	struct mii_if_info mii_if;
};
static const struct {
	const char *name;
	u8 version;		/* depend on docs */
	u32 RxConfigMask;	/* clear the bits supported by this chip */
} sis_chip_info[] = {
	{ DRV_NAME, 0x00, 0xff7e1880, },
};

static struct pci_device_id sis190_pci_tbl[] __devinitdata = {
	{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
	{ 0, },
};

MODULE_DEVICE_TABLE(pci, sis190_pci_tbl);

static int rx_copybreak = 200;

static struct {
	u32 msg_enable;
} debug = { -1 };

MODULE_DESCRIPTION("SiS sis190 Gigabit Ethernet driver");
module_param(rx_copybreak, int, 0);
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
module_param_named(debug, debug.msg_enable, int, 0);
MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
MODULE_AUTHOR("K.M. Liu <kmliu@sis.com>, Ueimor <romieu@fr.zoreil.com>");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
static const u32 sis190_intr_mask =
	RxQEmpty | RxQInt | TxQ1Int | TxQ0Int | RxHalt | TxHalt;

/*
 * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
 * The chips use a 64 element hash table based on the Ethernet CRC.
 */
static int multicast_filter_limit = 32;
static void __mdio_cmd(void __iomem *ioaddr, u32 ctl)
{
	unsigned int i;

	SIS_W32(GMIIControl, ctl);

	msleep(1);

	for (i = 0; i < 100; i++) {
		if (!(SIS_R32(GMIIControl) & EhnMIInotDone))
			break;
		msleep(1);
	}

	if (i > 99)
		printk(KERN_ERR PFX "PHY command failed !\n");
}
static void mdio_write(void __iomem *ioaddr, int reg, int val)
{
	u32 pmd = 1;

	__mdio_cmd(ioaddr, EhnMIIreq | EhnMIIwrite |
		(((u32) reg) << EhnMIIregShift) | (pmd << EhnMIIpmdShift) |
		(((u32) val) << EhnMIIdataShift));
}

static int mdio_read(void __iomem *ioaddr, int reg)
{
	u32 pmd = 1;

	__mdio_cmd(ioaddr, EhnMIIreq | EhnMIIread |
		(((u32) reg) << EhnMIIregShift) | (pmd << EhnMIIpmdShift));

	return (u16) (SIS_R32(GMIIControl) >> EhnMIIdataShift);
}

static void __mdio_write(struct net_device *dev, int phy_id, int reg, int val)
{
	struct sis190_private *tp = netdev_priv(dev);

	mdio_write(tp->mmio_addr, reg, val);
}

static int __mdio_read(struct net_device *dev, int phy_id, int reg)
{
	struct sis190_private *tp = netdev_priv(dev);

	return mdio_read(tp->mmio_addr, reg);
}
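/*
 * EEPROM access: request a read with EEREQ | EEROP and the word address,
 * then poll ROMInterface until the chip clears EEREQ; the 16 bit result
 * sits in the upper half of the register. The early return presumably
 * means "no EEPROM attached" when bit 1 of ROMControl is clear.
 */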
static u16 __devinit sis190_read_eeprom(void __iomem *ioaddr, u32 reg)
{
	u16 data = 0xffff;
	unsigned int i;

	if (!(SIS_R32(ROMControl) & 0x0002))
		return 0;

	SIS_W32(ROMInterface, EEREQ | EEROP | (reg << 10));

	for (i = 0; i < 200; i++) {
		if (!(SIS_R32(ROMInterface) & EEREQ)) {
			data = (SIS_R32(ROMInterface) & 0xffff0000) >> 16;
			break;
		}
		msleep(1);
	}

	return data;
}
static void sis190_irq_mask_and_ack(void __iomem *ioaddr)
{
	SIS_W32(IntrMask, 0x00);
	SIS_W32(IntrStatus, 0xffffffff);
	SIS_PCI_COMMIT();
}

static void sis190_asic_down(void __iomem *ioaddr)
{
	/* Stop the chip's Tx and Rx DMA processes. */

	SIS_W32(TxControl, 0x1a00);
	SIS_W32(RxControl, 0x1a00);

	sis190_irq_mask_and_ack(ioaddr);
}

static void sis190_mark_as_last_descriptor(struct RxDesc *desc)
{
	desc->size |= cpu_to_le32(RingEnd);
}
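/*
 * Rx descriptor hand-off: the descriptor is fully initialized (address,
 * size) before the wmb(), and only then is OWNbit set, so the chip can
 * never see a half-built descriptor.
 */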
static inline void sis190_give_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
{
	u32 eor = le32_to_cpu(desc->size) & RingEnd;

	desc->PSize = 0x0;
	desc->size = cpu_to_le32((rx_buf_sz & RX_BUF_MASK) | eor);
	wmb();
	desc->status = cpu_to_le32(OWNbit | INTbit);
}

static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
				      u32 rx_buf_sz)
{
	desc->addr = cpu_to_le32(mapping);
	sis190_give_to_asic(desc, rx_buf_sz);
}

static inline void sis190_make_unusable_by_asic(struct RxDesc *desc)
{
	desc->PSize = 0x0;
	desc->addr = 0xdeadbeef;
	desc->size &= cpu_to_le32(RingEnd);
	wmb();
	desc->status = 0x0;
}
static int sis190_alloc_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff,
			       struct RxDesc *desc, u32 rx_buf_sz)
{
	struct sk_buff *skb;
	dma_addr_t mapping;
	int ret = 0;

	skb = dev_alloc_skb(rx_buf_sz);
	if (!skb)
		goto err_out;

	*sk_buff = skb;

	mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
				 PCI_DMA_FROMDEVICE);

	sis190_map_to_asic(desc, mapping, rx_buf_sz);
out:
	return ret;

err_out:
	ret = -ENOMEM;
	sis190_make_unusable_by_asic(desc);
	goto out;
}
static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
			  u32 start, u32 end)
{
	u32 cur;

	for (cur = start; cur < end; cur++) {
		int ret, i = cur % NUM_RX_DESC;

		if (tp->Rx_skbuff[i])
			continue;

		ret = sis190_alloc_rx_skb(tp->pci_dev, tp->Rx_skbuff + i,
					  tp->RxDescRing + i, tp->rx_buf_sz);
		if (ret < 0)
			break;
	}
	return cur - start;
}
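/*
 * For packets smaller than rx_copybreak the data is copied into a fresh
 * skb and the original DMA buffer is handed straight back to the chip;
 * larger packets give up their buffer and a replacement is allocated
 * later by sis190_rx_fill().
 */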
static inline int sis190_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
				     struct RxDesc *desc, int rx_buf_sz)
{
	int ret = -1;

	if (pkt_size < rx_copybreak) {
		struct sk_buff *skb;

		skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN);
		if (skb) {
			skb_reserve(skb, NET_IP_ALIGN);
			eth_copy_and_sum(skb, sk_buff[0]->data, pkt_size, 0);
			*sk_buff = skb;
			sis190_give_to_asic(desc, rx_buf_sz);
			ret = 0;
		}
	}
	return ret;
}
static inline int sis190_rx_pkt_err(u32 status, struct net_device_stats *stats)
{
#define ErrMask	(OVRUN | SHORT | LIMIT | MIIER | NIBON | COLON | ABORT)

	if ((status & CRCOK) && !(status & ErrMask))
		return 0;

	if (!(status & CRCOK))
		stats->rx_crc_errors++;
	else if (status & OVRUN)
		stats->rx_over_errors++;
	else if (status & (SHORT | LIMIT))
		stats->rx_length_errors++;
	else if (status & (MIIER | NIBON | COLON))
		stats->rx_frame_errors++;

	stats->rx_errors++;
	return -1;
}
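/*
 * cur_rx and dirty_rx only ever increase; they are reduced modulo
 * NUM_RX_DESC when indexing the ring. dirty_rx trails cur_rx by the
 * number of descriptors still waiting for a replacement skb.
 */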
static int sis190_rx_interrupt(struct net_device *dev,
			       struct sis190_private *tp, void __iomem *ioaddr)
{
	struct net_device_stats *stats = &tp->stats;
	u32 rx_left, cur_rx = tp->cur_rx;
	u32 delta, count;

	rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
	rx_left = sis190_rx_quota(rx_left, (u32) dev->quota);

	for (; rx_left > 0; rx_left--, cur_rx++) {
		unsigned int entry = cur_rx % NUM_RX_DESC;
		struct RxDesc *desc = tp->RxDescRing + entry;
		u32 status;

		if (desc->status & OWNbit)
			break;

		status = le32_to_cpu(desc->PSize);

		// net_intr(tp, KERN_INFO "%s: Rx PSize = %08x.\n", dev->name,
		//	 status);

		if (sis190_rx_pkt_err(status, stats) < 0)
			sis190_give_to_asic(desc, tp->rx_buf_sz);
		else {
			struct sk_buff *skb = tp->Rx_skbuff[entry];
			int pkt_size = (status & RxSizeMask) - 4;
			void (*pci_action)(struct pci_dev *, dma_addr_t,
				size_t, int) = pci_dma_sync_single_for_device;

			if (unlikely(pkt_size > tp->rx_buf_sz)) {
				net_intr(tp, KERN_INFO
					 "%s: (frag) status = %08x.\n",
					 dev->name, status);
				stats->rx_dropped++;
				stats->rx_length_errors++;
				sis190_give_to_asic(desc, tp->rx_buf_sz);
				continue;
			}

			pci_dma_sync_single_for_cpu(tp->pci_dev,
				le32_to_cpu(desc->addr), tp->rx_buf_sz,
				PCI_DMA_FROMDEVICE);

			if (sis190_try_rx_copy(&skb, pkt_size, desc,
					       tp->rx_buf_sz)) {
				pci_action = pci_unmap_single;
				tp->Rx_skbuff[entry] = NULL;
				sis190_make_unusable_by_asic(desc);
			}

			pci_action(tp->pci_dev, le32_to_cpu(desc->addr),
				   tp->rx_buf_sz, PCI_DMA_FROMDEVICE);

			skb->dev = dev;
			skb_put(skb, pkt_size);
			skb->protocol = eth_type_trans(skb, dev);

			sis190_rx_skb(skb);

			dev->last_rx = jiffies;
			stats->rx_packets++;
			stats->rx_bytes += pkt_size;
			if ((status & BCAST) == MCAST)
				stats->multicast++;
		}
	}
	count = cur_rx - tp->cur_rx;
	tp->cur_rx = cur_rx;

	delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
	if (!delta && count && netif_msg_intr(tp))
		printk(KERN_INFO "%s: no Rx buffer allocated.\n", dev->name);
	tp->dirty_rx += delta;

	if (((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx) && netif_msg_intr(tp))
		printk(KERN_EMERG "%s: Rx buffers exhausted.\n", dev->name);

	return count;
}
static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
				struct TxDesc *desc)
{
	unsigned int len;

	len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;

	pci_unmap_single(pdev, le32_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);

	memset(desc, 0x00, sizeof(*desc));
}
static void sis190_tx_interrupt(struct net_device *dev,
				struct sis190_private *tp, void __iomem *ioaddr)
{
	u32 pending, dirty_tx = tp->dirty_tx;
	/*
	 * It would not be needed if queueing was allowed to be enabled
	 * again too early (hint: think preempt and unclocked smp systems).
	 */
	unsigned int queue_stopped;

	smp_rmb();
	pending = tp->cur_tx - dirty_tx;
	queue_stopped = (pending == NUM_TX_DESC);

	for (; pending; pending--, dirty_tx++) {
		unsigned int entry = dirty_tx % NUM_TX_DESC;
		struct TxDesc *txd = tp->TxDescRing + entry;
		struct sk_buff *skb;

		if (le32_to_cpu(txd->status) & OWNbit)
			break;

		skb = tp->Tx_skbuff[entry];

		tp->stats.tx_packets++;
		tp->stats.tx_bytes += skb->len;

		sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
		tp->Tx_skbuff[entry] = NULL;
		dev_kfree_skb_irq(skb);
	}

	if (tp->dirty_tx != dirty_tx) {
		tp->dirty_tx = dirty_tx;
		smp_wmb();
		if (queue_stopped)
			netif_wake_queue(dev);
	}
}
/*
 * The interrupt handler does all of the Rx thread work and cleans up after
 * the Tx thread.
 */
static irqreturn_t sis190_interrupt(int irq, void *__dev, struct pt_regs *regs)
{
	struct net_device *dev = __dev;
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int handled = 0;
	u32 status;

	status = SIS_R32(IntrStatus);

	if ((status == 0xffffffff) || !status)
		goto out;

	handled = 1;

	if (unlikely(!netif_running(dev))) {
		sis190_asic_down(ioaddr);
		goto out;
	}

	SIS_W32(IntrStatus, status);

	// net_intr(tp, KERN_INFO "%s: status = %08x.\n", dev->name, status);

	if (status & LinkChange) {
		net_intr(tp, KERN_INFO "%s: link change.\n", dev->name);
		schedule_work(&tp->phy_task);
	}

	if (status & RxQInt)
		sis190_rx_interrupt(dev, tp, ioaddr);

	if (status & TxQ0Int)
		sis190_tx_interrupt(dev, tp, ioaddr);
out:
	return IRQ_RETVAL(handled);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void sis190_netpoll(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	disable_irq(pdev->irq);
	sis190_interrupt(pdev->irq, dev, NULL);
	enable_irq(pdev->irq);
}
#endif
static void sis190_free_rx_skb(struct sis190_private *tp,
			       struct sk_buff **sk_buff, struct RxDesc *desc)
{
	struct pci_dev *pdev = tp->pci_dev;

	pci_unmap_single(pdev, le32_to_cpu(desc->addr), tp->rx_buf_sz,
			 PCI_DMA_FROMDEVICE);
	dev_kfree_skb(*sk_buff);
	*sk_buff = NULL;
	sis190_make_unusable_by_asic(desc);
}

static void sis190_rx_clear(struct sis190_private *tp)
{
	unsigned int i;

	for (i = 0; i < NUM_RX_DESC; i++) {
		if (!tp->Rx_skbuff[i])
			continue;
		sis190_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescRing + i);
	}
}

static void sis190_init_ring_indexes(struct sis190_private *tp)
{
	tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
}

static int sis190_init_ring(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);

	sis190_init_ring_indexes(tp);

	memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
	memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));

	if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
		goto err_rx_clear;

	sis190_mark_as_last_descriptor(tp->RxDescRing + NUM_RX_DESC - 1);

	return 0;

err_rx_clear:
	sis190_rx_clear(tp);
	return -ENOMEM;
}
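/*
 * Multicast filtering uses a 64 bit hash: the bit index is the top six
 * bits of the Ethernet CRC of the address, spread over the two 32 bit
 * RxHashTable words.
 */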
static void sis190_set_rx_mode(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long flags;
	u32 mc_filter[2];	/* Multicast hash filter */
	u16 rx_mode;

	if (dev->flags & IFF_PROMISC) {
		/* Unconditionally log net taps. */
		net_drv(tp, KERN_NOTICE "%s: Promiscuous mode enabled.\n",
			dev->name);
		rx_mode =
			AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
			AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((dev->mc_count > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter perfectly -- accept all multicasts. */
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else {
		struct dev_mc_list *mclist;
		unsigned int i;

		rx_mode = AcceptBroadcast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0;
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			int bit_nr =
				ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
			rx_mode |= AcceptMulticast;
		}
	}

	spin_lock_irqsave(&tp->lock, flags);

	SIS_W16(RxMacControl, rx_mode | 0x2);
	SIS_W32(RxHashTable, mc_filter[0]);
	SIS_W32(RxHashTable + 4, mc_filter[1]);

	spin_unlock_irqrestore(&tp->lock, flags);
}
static void sis190_soft_reset(void __iomem *ioaddr)
{
	SIS_W32(IntrControl, 0x8000);
	SIS_PCI_COMMIT();
	msleep(1);
	SIS_W32(IntrControl, 0x0);
	sis190_asic_down(ioaddr);
	msleep(1);
}
static void sis190_hw_start(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	sis190_soft_reset(ioaddr);

	SIS_W32(TxDescStartAddr, tp->tx_dma);
	SIS_W32(RxDescStartAddr, tp->rx_dma);

	SIS_W32(IntrStatus, 0xffffffff);
	SIS_W32(IntrMask, 0x0);
	/*
	 * Default is 100Mbps.
	 * A bit strange: 100Mbps is 0x1801 elsewhere -- FR 2005/06/09
	 */
	SIS_W16(StationControl, 0x1901);
	SIS_W32(GMIIControl, 0x0);
	SIS_W32(TxMacControl, 0x60);
	SIS_W16(RxMacControl, 0x02);
	SIS_W32(RxHashTable, 0x0);
	SIS_W32(0x6c, 0x0);
	SIS_W32(RxWolCtrl, 0x0);
	SIS_W32(RxWolData, 0x0);

	SIS_PCI_COMMIT();

	sis190_set_rx_mode(dev);

	/* Enable all known interrupts by setting the interrupt mask. */
	SIS_W32(IntrMask, sis190_intr_mask);

	SIS_W32(TxControl, 0x1a00 | CmdTxEnb);
	SIS_W32(RxControl, 0x1a1d);

	netif_start_queue(dev);
}
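/*
 * sis190_phy_task() runs from the shared workqueue, scheduled either by
 * the link change interrupt or by the PHY timer. It waits out a pending
 * BMCR reset, keeps resetting the PHY while autonegotiation has not
 * completed, and finally programs StationControl from the link partner
 * ability word.
 */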
static void sis190_phy_task(void * data)
{
	struct net_device *dev = data;
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u16 val;

	rtnl_lock();

	val = mdio_read(ioaddr, MII_BMCR);
	if (val & BMCR_RESET) {
		// FIXME: needlessly high ? -- FR 02/07/2005
		mod_timer(&tp->timer, jiffies + HZ/10);
	} else if (!(mdio_read(ioaddr, MII_BMSR) & BMSR_ANEGCOMPLETE)) {
		net_link(tp, KERN_WARNING "%s: PHY reset until link up.\n",
			 dev->name);
		mdio_write(ioaddr, MII_BMCR, val | BMCR_RESET);
		mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);
	} else {
		/* Rejoice ! */
		struct {
			int val;
			const char *msg;
			u16 ctl;
		} reg31[] = {
			{ LPA_1000XFULL | LPA_SLCT,
			  "1000 Mbps Full Duplex",
			  0x01 | _1000bpsF },
			{ LPA_1000XHALF | LPA_SLCT,
			  "1000 Mbps Half Duplex",
			  0x01 | _1000bpsH },
			{ LPA_100FULL,
			  "100 Mbps Full Duplex",
			  0x01 | _100bpsF },
			{ LPA_100HALF,
			  "100 Mbps Half Duplex",
			  0x01 | _100bpsH },
			{ LPA_10FULL,
			  "10 Mbps Full Duplex",
			  0x01 | _10bpsF },
			{ LPA_10HALF,
			  "10 Mbps Half Duplex",
			  0x01 | _10bpsH },
			{ 0, "unknown", 0x0000 }
		}, *p;

		val = mdio_read(ioaddr, 0x1f);
		net_link(tp, KERN_INFO "%s: mii ext = %04x.\n", dev->name, val);

		val = mdio_read(ioaddr, MII_LPA);
		net_link(tp, KERN_INFO "%s: mii lpa = %04x.\n", dev->name, val);

		for (p = reg31; p->ctl; p++) {
			if ((val & p->val) == p->val)
				break;
		}
		if (p->ctl)
			SIS_W16(StationControl, p->ctl);
		net_link(tp, KERN_INFO "%s: link on %s mode.\n", dev->name,
			 p->msg);
		netif_carrier_on(dev);
	}

	rtnl_unlock();
}
static void sis190_phy_timer(unsigned long __opaque)
{
	struct net_device *dev = (struct net_device *)__opaque;
	struct sis190_private *tp = netdev_priv(dev);

	if (likely(netif_running(dev)))
		schedule_work(&tp->phy_task);
}

static inline void sis190_delete_timer(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);

	del_timer_sync(&tp->timer);
}

static inline void sis190_request_timer(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct timer_list *timer = &tp->timer;

	init_timer(timer);
	timer->expires = jiffies + SIS190_PHY_TIMEOUT;
	timer->data = (unsigned long)dev;
	timer->function = sis190_phy_timer;
	add_timer(timer);
}
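/*
 * Rx buffers must be a multiple of 8 bytes: sis190_give_to_asic() masks
 * the size with RX_BUF_MASK, so anything not 8-byte aligned is rounded
 * up here first.
 */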
static void sis190_set_rxbufsize(struct sis190_private *tp,
				 struct net_device *dev)
{
	unsigned int mtu = dev->mtu;

	tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
	/* RxDesc->size has a licence to kill the lower bits */
	if (tp->rx_buf_sz & 0x07) {
		tp->rx_buf_sz += 8;
		tp->rx_buf_sz &= RX_BUF_MASK;
	}
}
static int sis190_open(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;
	int rc = -ENOMEM;

	sis190_set_rxbufsize(tp, dev);

	/*
	 * Rx and Tx descriptors need 256 bytes alignment.
	 * pci_alloc_consistent() guarantees a stronger alignment.
	 */
	tp->TxDescRing = pci_alloc_consistent(pdev, TX_RING_BYTES, &tp->tx_dma);
	if (!tp->TxDescRing)
		goto out;

	tp->RxDescRing = pci_alloc_consistent(pdev, RX_RING_BYTES, &tp->rx_dma);
	if (!tp->RxDescRing)
		goto err_free_tx_0;

	rc = sis190_init_ring(dev);
	if (rc < 0)
		goto err_free_rx_1;

	INIT_WORK(&tp->phy_task, sis190_phy_task, dev);

	sis190_request_timer(dev);

	rc = request_irq(dev->irq, sis190_interrupt, SA_SHIRQ, dev->name, dev);
	if (rc < 0)
		goto err_release_timer_2;

	sis190_hw_start(dev);
out:
	return rc;

err_release_timer_2:
	sis190_delete_timer(dev);
	sis190_rx_clear(tp);
err_free_rx_1:
	pci_free_consistent(tp->pci_dev, RX_RING_BYTES, tp->RxDescRing,
		tp->rx_dma);
err_free_tx_0:
	pci_free_consistent(tp->pci_dev, TX_RING_BYTES, tp->TxDescRing,
		tp->tx_dma);
	goto out;
}
static void sis190_tx_clear(struct sis190_private *tp)
{
	unsigned int i;

	for (i = 0; i < NUM_TX_DESC; i++) {
		struct sk_buff *skb = tp->Tx_skbuff[i];

		if (!skb)
			continue;

		sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i);
		tp->Tx_skbuff[i] = NULL;
		dev_kfree_skb(skb);

		tp->stats.tx_dropped++;
	}
	tp->cur_tx = tp->dirty_tx = 0;
}
static void sis190_down(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int poll_locked = 0;

	sis190_delete_timer(dev);

	netif_stop_queue(dev);

	flush_scheduled_work();

	do {
		spin_lock_irq(&tp->lock);

		sis190_asic_down(ioaddr);

		spin_unlock_irq(&tp->lock);

		synchronize_irq(dev->irq);

		if (!poll_locked) {
			netif_poll_disable(dev);
			poll_locked++;
		}

		synchronize_sched();

	} while (SIS_R32(IntrMask));

	sis190_tx_clear(tp);
	sis190_rx_clear(tp);
}

static int sis190_close(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	sis190_down(dev);

	free_irq(dev->irq, dev);

	netif_poll_enable(dev);

	pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
	pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);

	tp->TxDescRing = NULL;
	tp->RxDescRing = NULL;

	return 0;
}
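/*
 * Queue stop/wake race: after stopping the queue the transmit path
 * re-checks dirty_tx, so a sis190_tx_interrupt() that completed between
 * the ring-full test and netif_stop_queue() cannot leave the queue
 * stopped forever (hence the smp_rmb()/smp_wmb() pairing with the
 * interrupt handler).
 */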
static int sis190_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u32 len, entry, dirty_tx;
	struct TxDesc *desc;
	dma_addr_t mapping;

	if (unlikely(skb->len < ETH_ZLEN)) {
		skb = skb_padto(skb, ETH_ZLEN);
		if (!skb) {
			tp->stats.tx_dropped++;
			goto out;
		}
		len = ETH_ZLEN;
	} else {
		len = skb->len;
	}

	entry = tp->cur_tx % NUM_TX_DESC;
	desc = tp->TxDescRing + entry;

	if (unlikely(le32_to_cpu(desc->status) & OWNbit)) {
		netif_stop_queue(dev);
		net_tx_err(tp, KERN_ERR PFX
			   "%s: BUG! Tx Ring full when queue awake!\n",
			   dev->name);
		return NETDEV_TX_BUSY;
	}

	mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);

	tp->Tx_skbuff[entry] = skb;

	desc->PSize = cpu_to_le32(len);
	desc->addr = cpu_to_le32(mapping);

	desc->size = cpu_to_le32(len);
	if (entry == (NUM_TX_DESC - 1))
		desc->size |= cpu_to_le32(RingEnd);

	wmb();

	desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit);

	tp->cur_tx++;

	smp_wmb();

	SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb);

	dev->trans_start = jiffies;

	dirty_tx = tp->dirty_tx;
	if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) {
		netif_stop_queue(dev);
		smp_rmb();
		if (dirty_tx != tp->dirty_tx)
			netif_wake_queue(dev);
	}
out:
	return NETDEV_TX_OK;
}
static struct net_device_stats *sis190_get_stats(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);

	return &tp->stats;
}

static void sis190_release_board(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct sis190_private *tp = netdev_priv(dev);

	iounmap(tp->mmio_addr);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	free_netdev(dev);
}
static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
{
	struct sis190_private *tp;
	struct net_device *dev;
	void __iomem *ioaddr;
	int rc;

	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		net_drv(&debug, KERN_ERR PFX "unable to alloc new ethernet\n");
		rc = -ENOMEM;
		goto err_out_0;
	}

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);

	rc = pci_enable_device(pdev);
	if (rc < 0) {
		net_probe(tp, KERN_ERR "%s: enable failure\n", pci_name(pdev));
		goto err_free_dev_1;
	}

	rc = -ENODEV;

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		net_probe(tp, KERN_ERR "%s: region #0 is no MMIO resource.\n",
			  pci_name(pdev));
		goto err_pci_disable_2;
	}
	if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) {
		net_probe(tp, KERN_ERR "%s: invalid PCI region size(s).\n",
			  pci_name(pdev));
		goto err_pci_disable_2;
	}

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc < 0) {
		net_probe(tp, KERN_ERR PFX "%s: could not request regions.\n",
			  pci_name(pdev));
		goto err_pci_disable_2;
	}

	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc < 0) {
		net_probe(tp, KERN_ERR "%s: DMA configuration failed.\n",
			  pci_name(pdev));
		goto err_free_res_3;
	}

	pci_set_master(pdev);

	ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE);
	if (!ioaddr) {
		net_probe(tp, KERN_ERR "%s: cannot remap MMIO, aborting\n",
			  pci_name(pdev));
		rc = -EIO;
		goto err_free_res_3;
	}

	tp->pci_dev = pdev;
	tp->mmio_addr = ioaddr;

	tp->mii_if.dev = dev;
	tp->mii_if.mdio_read = __mdio_read;
	tp->mii_if.mdio_write = __mdio_write;
	// tp->mii_if.phy_id = XXX;
	tp->mii_if.phy_id_mask = 0x1f;
	tp->mii_if.reg_num_mask = 0x1f;

	sis190_irq_mask_and_ack(ioaddr);

	sis190_soft_reset(ioaddr);
out:
	return dev;

err_free_res_3:
	pci_release_regions(pdev);
err_pci_disable_2:
	pci_disable_device(pdev);
err_free_dev_1:
	free_netdev(dev);
err_out_0:
	dev = ERR_PTR(rc);
	goto out;
}
static void sis190_tx_timeout(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u8 tmp8;

	/* Disable Tx, if not already */
	tmp8 = SIS_R8(TxControl);
	if (tmp8 & CmdTxEnb)
		SIS_W8(TxControl, tmp8 & ~CmdTxEnb);

	net_tx_err(tp, KERN_INFO "%s: Transmit timeout, status %08x %08x.\n",
		   dev->name, SIS_R32(TxControl), SIS_R32(TxSts));

	/* Disable interrupts by clearing the interrupt mask. */
	SIS_W32(IntrMask, 0x0000);

	/* Stop a shared interrupt from scavenging while we are. */
	spin_lock_irq(&tp->lock);
	sis190_tx_clear(tp);
	spin_unlock_irq(&tp->lock);

	/* ...and finally, reset everything. */
	sis190_hw_start(dev);

	netif_wake_queue(dev);
}
static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
						     struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u16 sig;
	int i;

	net_probe(tp, KERN_INFO "%s: Read MAC address from EEPROM\n",
		  pci_name(pdev));

	/* Check to see if there is a sane EEPROM */
	sig = (u16) sis190_read_eeprom(ioaddr, EEPROMSignature);

	if ((sig == 0xffff) || (sig == 0x0000)) {
		net_probe(tp, KERN_INFO "%s: Error EEPROM read %x.\n",
			  pci_name(pdev), sig);
		return -EIO;
	}

	/* Get MAC address from EEPROM */
	for (i = 0; i < MAC_ADDR_LEN / 2; i++) {
		u16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i);

		((u16 *)dev->dev_addr)[i] = le16_to_cpu(w);
	}

	return 0;
}
/**
 *	sis190_get_mac_addr_from_apc - Get MAC address for SiS965 model
 *	@pdev: PCI device
 *	@dev:  network device to get address for
 *
 *	SiS965 model, use APC CMOS RAM to store MAC address.
 *	APC CMOS RAM is accessed through ISA bridge.
 *	MAC address is read into @net_dev->dev_addr.
 */
static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
						  struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *isa_bridge;
	u8 reg, tmp8;
	int i;

	net_probe(tp, KERN_INFO "%s: Read MAC address from APC.\n",
		  pci_name(pdev));

	isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, 0x0965, NULL);
	if (!isa_bridge) {
		net_probe(tp, KERN_INFO "%s: Can not find ISA bridge.\n",
			  pci_name(pdev));
		return -EIO;
	}

	/* Enable port 78h & 79h to access APC Registers. */
	pci_read_config_byte(isa_bridge, 0x48, &tmp8);
	reg = (tmp8 & ~0x02);
	pci_write_config_byte(isa_bridge, 0x48, reg);
	udelay(50);
	pci_read_config_byte(isa_bridge, 0x48, &reg);

	for (i = 0; i < MAC_ADDR_LEN; i++) {
		outb(0x9 + i, 0x78);
		dev->dev_addr[i] = inb(0x79);
	}

	outb(0x12, 0x78);
	reg = inb(0x79);

	/* Restore the value to ISA Bridge */
	pci_write_config_byte(isa_bridge, 0x48, tmp8);
	pci_dev_put(isa_bridge);

	return 0;
}
/**
 *	sis190_init_rxfilter - Initialize the Rx filter
 *	@dev: network device to initialize
 *
 *	Set receive filter address to our MAC address
 *	and enable packet filtering.
 */
static inline void sis190_init_rxfilter(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u16 ctl;
	int i;

	ctl = SIS_R16(RxMacControl);

	/*
	 * Disable packet filtering before setting filter.
	 * Note: SiS's driver writes 32 bits but RxMacControl is 16 bits
	 * only and followed by RxMacAddr (6 bytes). Strange. -- FR
	 */
	SIS_W16(RxMacControl, ctl & ~0x0f00);

	for (i = 0; i < MAC_ADDR_LEN; i++)
		SIS_W8(RxMacAddr + i, dev->dev_addr[i]);

	SIS_W16(RxMacControl, ctl);
	SIS_PCI_COMMIT();
}
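/*
 * Bit 0 of PCI config register 0x73 apparently selects where the MAC
 * address lives: the APC CMOS RAM behind the SiS965 ISA bridge, or an
 * external EEPROM.
 */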
static int sis190_get_mac_addr(struct pci_dev *pdev, struct net_device *dev)
{
	u8 from;

	pci_read_config_byte(pdev, 0x73, &from);

	return (from & 0x00000001) ?
		sis190_get_mac_addr_from_apc(pdev, dev) :
		sis190_get_mac_addr_from_eeprom(pdev, dev);
}

static void sis190_set_speed_auto(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	int val;

	net_link(tp, KERN_INFO "%s: Enabling Auto-negotiation.\n", dev->name);

	val = mdio_read(ioaddr, MII_ADVERTISE);

	// Enable 10/100 Full/Half Mode, leave MII_ADVERTISE bit4:0
	// unchanged.
	mdio_write(ioaddr, MII_ADVERTISE, (val & ADVERTISE_SLCT) |
		   ADVERTISE_100FULL | ADVERTISE_10FULL |
		   ADVERTISE_100HALF | ADVERTISE_10HALF);

	// Enable 1000 Full Mode.
	mdio_write(ioaddr, MII_CTRL1000, ADVERTISE_1000FULL);

	// Enable auto-negotiation and restart auto-negotiation.
	mdio_write(ioaddr, MII_BMCR,
		   BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET);
}
static int sis190_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct sis190_private *tp = netdev_priv(dev);

	return mii_ethtool_gset(&tp->mii_if, cmd);
}

static int sis190_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct sis190_private *tp = netdev_priv(dev);

	return mii_ethtool_sset(&tp->mii_if, cmd);
}

static void sis190_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	struct sis190_private *tp = netdev_priv(dev);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(tp->pci_dev));
}

static int sis190_get_regs_len(struct net_device *dev)
{
	return SIS190_REGS_SIZE;
}

static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			    void *p)
{
	struct sis190_private *tp = netdev_priv(dev);
	unsigned long flags;

	if (regs->len > SIS190_REGS_SIZE)
		regs->len = SIS190_REGS_SIZE;

	spin_lock_irqsave(&tp->lock, flags);
	memcpy_fromio(p, tp->mmio_addr, regs->len);
	spin_unlock_irqrestore(&tp->lock, flags);
}

static int sis190_nway_reset(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);

	return mii_nway_restart(&tp->mii_if);
}

static u32 sis190_get_msglevel(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);

	return tp->msg_enable;
}

static void sis190_set_msglevel(struct net_device *dev, u32 value)
{
	struct sis190_private *tp = netdev_priv(dev);

	tp->msg_enable = value;
}

static struct ethtool_ops sis190_ethtool_ops = {
	.get_settings	= sis190_get_settings,
	.set_settings	= sis190_set_settings,
	.get_drvinfo	= sis190_get_drvinfo,
	.get_regs_len	= sis190_get_regs_len,
	.get_regs	= sis190_get_regs,
	.get_link	= ethtool_op_get_link,
	.get_msglevel	= sis190_get_msglevel,
	.set_msglevel	= sis190_set_msglevel,
	.nway_reset	= sis190_nway_reset,
};
static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct sis190_private *tp = netdev_priv(dev);

	return !netif_running(dev) ? -EINVAL :
		generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL);
}
static int __devinit sis190_init_one(struct pci_dev *pdev,
				     const struct pci_device_id *ent)
{
	static int printed_version = 0;
	struct sis190_private *tp;
	struct net_device *dev;
	void __iomem *ioaddr;
	int rc;

	if (!printed_version) {
		net_drv(&debug, KERN_INFO SIS190_DRIVER_NAME " loaded.\n");
		printed_version = 1;
	}

	dev = sis190_init_board(pdev);
	if (IS_ERR(dev)) {
		rc = PTR_ERR(dev);
		goto out;
	}

	tp = netdev_priv(dev);
	ioaddr = tp->mmio_addr;

	rc = sis190_get_mac_addr(pdev, dev);
	if (rc < 0)
		goto err_release_board;

	sis190_init_rxfilter(dev);

	INIT_WORK(&tp->phy_task, sis190_phy_task, dev);

	dev->open		= sis190_open;
	dev->stop		= sis190_close;
	dev->do_ioctl		= sis190_ioctl;
	dev->get_stats		= sis190_get_stats;
	dev->tx_timeout		= sis190_tx_timeout;
	dev->watchdog_timeo	= SIS190_TX_TIMEOUT;
	dev->hard_start_xmit	= sis190_start_xmit;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller	= sis190_netpoll;
#endif
	dev->set_multicast_list	= sis190_set_rx_mode;
	SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
	dev->irq		= pdev->irq;
	dev->base_addr		= (unsigned long) 0xdead;

	spin_lock_init(&tp->lock);
	rc = register_netdev(dev);
	if (rc < 0)
		goto err_release_board;

	pci_set_drvdata(pdev, dev);

	net_probe(tp, KERN_INFO "%s: %s at %p (IRQ: %d), "
		  "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
		  pci_name(pdev), sis_chip_info[ent->driver_data].name,
		  ioaddr, dev->irq,
		  dev->dev_addr[0], dev->dev_addr[1],
		  dev->dev_addr[2], dev->dev_addr[3],
		  dev->dev_addr[4], dev->dev_addr[5]);

	netif_carrier_off(dev);

	sis190_set_speed_auto(dev);
out:
	return rc;

err_release_board:
	sis190_release_board(pdev);
	goto out;
}
static void __devexit sis190_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	unregister_netdev(dev);
	sis190_release_board(pdev);
	pci_set_drvdata(pdev, NULL);
}

static struct pci_driver sis190_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= sis190_pci_tbl,
	.probe		= sis190_init_one,
	.remove		= __devexit_p(sis190_remove_one),
};

static int __init sis190_init_module(void)
{
	return pci_module_init(&sis190_pci_driver);
}

static void __exit sis190_cleanup_module(void)
{
	pci_unregister_driver(&sis190_pci_driver);
}

module_init(sis190_init_module);
module_exit(sis190_cleanup_module);