/*
   sis190.c: Silicon Integrated Systems SiS190 ethernet driver

   Copyright (c) 2003 K.M. Liu <kmliu@sis.com>
   Copyright (c) 2003, 2004 Jeff Garzik <jgarzik@pobox.com>
   Copyright (c) 2003, 2004, 2005 Francois Romieu <romieu@fr.zoreil.com>

   Based on r8169.c, tg3.c, 8139cp.c, skge.c, epic100.c and SiS 190/191
   genuine driver.

   This software may be used and distributed according to the terms of
   the GNU General Public License (GPL), incorporated herein by reference.
   Drivers based on or derived from this code fall under the GPL and must
   retain the authorship, copyright and license notice. This file is not
   a complete program and may only be used when the entire operating
   system is licensed under the GPL.

   See the file COPYING in this distribution for more information.
 */
22 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
24 #include <linux/interrupt.h>
25 #include <linux/module.h>
26 #include <linux/moduleparam.h>
27 #include <linux/netdevice.h>
28 #include <linux/rtnetlink.h>
29 #include <linux/etherdevice.h>
30 #include <linux/ethtool.h>
31 #include <linux/pci.h>
32 #include <linux/mii.h>
33 #include <linux/delay.h>
34 #include <linux/crc32.h>
35 #include <linux/dma-mapping.h>
36 #include <linux/slab.h>
37 #include <asm/irq.h>
39 #define PHY_MAX_ADDR 32
40 #define PHY_ID_ANY 0x1f
41 #define MII_REG_ANY 0x1f
43 #define DRV_VERSION "1.4"
44 #define DRV_NAME "sis190"
45 #define SIS190_DRIVER_NAME DRV_NAME " Gigabit Ethernet driver " DRV_VERSION
47 #define sis190_rx_skb netif_rx
48 #define sis190_rx_quota(count, quota) count
50 #define MAC_ADDR_LEN 6
52 #define NUM_TX_DESC 64 /* [8..1024] */
53 #define NUM_RX_DESC 64 /* [8..8192] */
54 #define TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
55 #define RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
56 #define RX_BUF_SIZE 1536
57 #define RX_BUF_MASK 0xfff8
59 #define SIS190_REGS_SIZE 0x80
60 #define SIS190_TX_TIMEOUT (6*HZ)
61 #define SIS190_PHY_TIMEOUT (10*HZ)
62 #define SIS190_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
63 NETIF_MSG_LINK | NETIF_MSG_IFUP | \
64 NETIF_MSG_IFDOWN)
66 /* Enhanced PHY access register bit definitions */
67 #define EhnMIIread 0x0000
68 #define EhnMIIwrite 0x0020
69 #define EhnMIIdataShift 16
70 #define EhnMIIpmdShift 6 /* 7016 only */
71 #define EhnMIIregShift 11
72 #define EhnMIIreq 0x0010
73 #define EhnMIInotDone 0x0010
75 /* Write/read MMIO register */
76 #define SIS_W8(reg, val) writeb ((val), ioaddr + (reg))
77 #define SIS_W16(reg, val) writew ((val), ioaddr + (reg))
78 #define SIS_W32(reg, val) writel ((val), ioaddr + (reg))
79 #define SIS_R8(reg) readb (ioaddr + (reg))
80 #define SIS_R16(reg) readw (ioaddr + (reg))
81 #define SIS_R32(reg) readl (ioaddr + (reg))
83 #define SIS_PCI_COMMIT() SIS_R32(IntrControl)
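/*
 * Reading IntrControl back forces any posted MMIO writes out to the chip
 * before the driver proceeds (a plain read acts as a flush).
 */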
85 enum sis190_registers {
86 TxControl = 0x00,
87 TxDescStartAddr = 0x04,
88 rsv0 = 0x08, // reserved
89 TxSts = 0x0c, // unused (Control/Status)
90 RxControl = 0x10,
91 RxDescStartAddr = 0x14,
92 rsv1 = 0x18, // reserved
93 RxSts = 0x1c, // unused
94 IntrStatus = 0x20,
95 IntrMask = 0x24,
96 IntrControl = 0x28,
97 IntrTimer = 0x2c, // unused (Interrupt Timer)
98 PMControl = 0x30, // unused (Power Mgmt Control/Status)
99 rsv2 = 0x34, // reserved
100 ROMControl = 0x38,
101 ROMInterface = 0x3c,
102 StationControl = 0x40,
103 GMIIControl = 0x44,
104 GIoCR = 0x48, // unused (GMAC IO Compensation)
105 GIoCtrl = 0x4c, // unused (GMAC IO Control)
106 TxMacControl = 0x50,
107 TxLimit = 0x54, // unused (Tx MAC Timer/TryLimit)
108 RGDelay = 0x58, // unused (RGMII Tx Internal Delay)
109 rsv3 = 0x5c, // reserved
110 RxMacControl = 0x60,
111 RxMacAddr = 0x62,
112 RxHashTable = 0x68,
113 // Undocumented = 0x6c,
114 RxWolCtrl = 0x70,
115 RxWolData = 0x74, // unused (Rx WOL Data Access)
116 RxMPSControl = 0x78, // unused (Rx MPS Control)
117 rsv4 = 0x7c, // reserved
120 enum sis190_register_content {
121 /* IntrStatus */
122 SoftInt = 0x40000000, // unused
123 Timeup = 0x20000000, // unused
124 PauseFrame = 0x00080000, // unused
125 MagicPacket = 0x00040000, // unused
126 WakeupFrame = 0x00020000, // unused
127 LinkChange = 0x00010000,
128 RxQEmpty = 0x00000080,
129 RxQInt = 0x00000040,
130 TxQ1Empty = 0x00000020, // unused
131 TxQ1Int = 0x00000010,
132 TxQ0Empty = 0x00000008, // unused
133 TxQ0Int = 0x00000004,
134 RxHalt = 0x00000002,
135 TxHalt = 0x00000001,
137 /* {Rx/Tx}CmdBits */
138 CmdReset = 0x10,
139 CmdRxEnb = 0x08, // unused
140 CmdTxEnb = 0x01,
141 RxBufEmpty = 0x01, // unused
143 /* Cfg9346Bits */
144 Cfg9346_Lock = 0x00, // unused
145 Cfg9346_Unlock = 0xc0, // unused
147 /* RxMacControl */
148 AcceptErr = 0x20, // unused
149 AcceptRunt = 0x10, // unused
150 AcceptBroadcast = 0x0800,
151 AcceptMulticast = 0x0400,
152 AcceptMyPhys = 0x0200,
153 AcceptAllPhys = 0x0100,
155 /* RxConfigBits */
156 RxCfgFIFOShift = 13,
157 RxCfgDMAShift = 8, // 0x1a in RxControl ?
159 /* TxConfigBits */
160 TxInterFrameGapShift = 24,
161 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
163 LinkStatus = 0x02, // unused
164 FullDup = 0x01, // unused
166 /* TBICSRBit */
167 TBILinkOK = 0x02000000, // unused
170 struct TxDesc {
171 __le32 PSize;
172 __le32 status;
173 __le32 addr;
174 __le32 size;
177 struct RxDesc {
178 __le32 PSize;
179 __le32 status;
180 __le32 addr;
181 __le32 size;
184 enum _DescStatusBit {
185 /* _Desc.status */
186 OWNbit = 0x80000000, // RXOWN/TXOWN
187 INTbit = 0x40000000, // RXINT/TXINT
188 CRCbit = 0x00020000, // CRCOFF/CRCEN
189 PADbit = 0x00010000, // PREADD/PADEN
190 /* _Desc.size */
191 RingEnd = 0x80000000,
192 /* TxDesc.status */
193 LSEN = 0x08000000, // TSO ? -- FR
194 IPCS = 0x04000000,
195 TCPCS = 0x02000000,
196 UDPCS = 0x01000000,
197 BSTEN = 0x00800000,
198 EXTEN = 0x00400000,
199 DEFEN = 0x00200000,
200 BKFEN = 0x00100000,
201 CRSEN = 0x00080000,
202 COLEN = 0x00040000,
203 THOL3 = 0x30000000,
204 THOL2 = 0x20000000,
205 THOL1 = 0x10000000,
206 THOL0 = 0x00000000,
208 WND = 0x00080000,
209 TABRT = 0x00040000,
210 FIFO = 0x00020000,
211 LINK = 0x00010000,
212 ColCountMask = 0x0000ffff,
213 /* RxDesc.status */
214 IPON = 0x20000000,
215 TCPON = 0x10000000,
216 UDPON = 0x08000000,
217 Wakup = 0x00400000,
218 Magic = 0x00200000,
219 Pause = 0x00100000,
220 DEFbit = 0x00200000,
221 BCAST = 0x000c0000,
222 MCAST = 0x00080000,
223 UCAST = 0x00040000,
224 /* RxDesc.PSize */
225 TAGON = 0x80000000,
226 RxDescCountMask = 0x7f000000, // multi-desc pkt when > 1 ? -- FR
227 ABORT = 0x00800000,
228 SHORT = 0x00400000,
229 LIMIT = 0x00200000,
230 MIIER = 0x00100000,
231 OVRUN = 0x00080000,
232 NIBON = 0x00040000,
233 COLON = 0x00020000,
234 CRCOK = 0x00010000,
235 RxSizeMask = 0x0000ffff
/*
 * The asic could apparently do vlan, TSO, jumbo (sis191 only) and
 * provide two (unused with Linux) Tx queues. No publicly
 * available documentation alas.
 */
243 enum sis190_eeprom_access_register_bits {
244 EECS = 0x00000001, // unused
245 EECLK = 0x00000002, // unused
246 EEDO = 0x00000008, // unused
247 EEDI = 0x00000004, // unused
248 EEREQ = 0x00000080,
249 EEROP = 0x00000200,
250 EEWOP = 0x00000100 // unused
253 /* EEPROM Addresses */
254 enum sis190_eeprom_address {
255 EEPROMSignature = 0x00,
256 EEPROMCLK = 0x01, // unused
257 EEPROMInfo = 0x02,
258 EEPROMMACAddr = 0x03
261 enum sis190_feature {
262 F_HAS_RGMII = 1,
263 F_PHY_88E1111 = 2,
264 F_PHY_BCM5461 = 4
267 struct sis190_private {
268 void __iomem *mmio_addr;
269 struct pci_dev *pci_dev;
270 struct net_device *dev;
271 spinlock_t lock;
272 u32 rx_buf_sz;
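/*
 * cur_rx/cur_tx and dirty_rx/dirty_tx are free-running ring indexes;
 * the descriptor slot actually used is index % NUM_{RX,TX}_DESC.
 */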
273 u32 cur_rx;
274 u32 cur_tx;
275 u32 dirty_rx;
276 u32 dirty_tx;
277 dma_addr_t rx_dma;
278 dma_addr_t tx_dma;
279 struct RxDesc *RxDescRing;
280 struct TxDesc *TxDescRing;
281 struct sk_buff *Rx_skbuff[NUM_RX_DESC];
282 struct sk_buff *Tx_skbuff[NUM_TX_DESC];
283 struct work_struct phy_task;
284 struct timer_list timer;
285 u32 msg_enable;
286 struct mii_if_info mii_if;
287 struct list_head first_phy;
288 u32 features;
289 u32 negotiated_lpa;
290 enum {
291 LNK_OFF,
292 LNK_ON,
293 LNK_AUTONEG,
294 } link_status;
297 struct sis190_phy {
298 struct list_head list;
299 int phy_id;
300 u16 id[2];
301 u16 status;
302 u8 type;
305 enum sis190_phy_type {
306 UNKNOWN = 0x00,
307 HOME = 0x01,
308 LAN = 0x02,
309 MIX = 0x03
312 static struct mii_chip_info {
313 const char *name;
314 u16 id[2];
315 unsigned int type;
316 u32 feature;
317 } mii_chip_table[] = {
318 { "Atheros PHY", { 0x004d, 0xd010 }, LAN, 0 },
319 { "Atheros PHY AR8012", { 0x004d, 0xd020 }, LAN, 0 },
320 { "Broadcom PHY BCM5461", { 0x0020, 0x60c0 }, LAN, F_PHY_BCM5461 },
321 { "Broadcom PHY AC131", { 0x0143, 0xbc70 }, LAN, 0 },
322 { "Agere PHY ET1101B", { 0x0282, 0xf010 }, LAN, 0 },
323 { "Marvell PHY 88E1111", { 0x0141, 0x0cc0 }, LAN, F_PHY_88E1111 },
324 { "Realtek PHY RTL8201", { 0x0000, 0x8200 }, LAN, 0 },
325 { NULL, }
328 static const struct {
329 const char *name;
330 } sis_chip_info[] = {
331 { "SiS 190 PCI Fast Ethernet adapter" },
332 { "SiS 191 PCI Gigabit Ethernet adapter" },
335 static DEFINE_PCI_DEVICE_TABLE(sis190_pci_tbl) = {
336 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
337 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0191), 0, 0, 1 },
338 { 0, },
341 MODULE_DEVICE_TABLE(pci, sis190_pci_tbl);
343 static int rx_copybreak = 200;
345 static struct {
346 u32 msg_enable;
347 } debug = { -1 };
349 MODULE_DESCRIPTION("SiS sis190/191 Gigabit Ethernet driver");
350 module_param(rx_copybreak, int, 0);
351 MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
352 module_param_named(debug, debug.msg_enable, int, 0);
353 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
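/*
 * Example usage (parameter values are only illustrative):
 *	modprobe sis190 debug=5 rx_copybreak=256
 */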
354 MODULE_AUTHOR("K.M. Liu <kmliu@sis.com>, Ueimor <romieu@fr.zoreil.com>");
355 MODULE_VERSION(DRV_VERSION);
356 MODULE_LICENSE("GPL");
358 static const u32 sis190_intr_mask =
359 RxQEmpty | RxQInt | TxQ1Int | TxQ0Int | RxHalt | TxHalt | LinkChange;
/*
 * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
 * The chips use a 64 element hash table based on the Ethernet CRC.
 */
static const int multicast_filter_limit = 32;
367 static void __mdio_cmd(void __iomem *ioaddr, u32 ctl)
369 unsigned int i;
371 SIS_W32(GMIIControl, ctl);
373 msleep(1);
375 for (i = 0; i < 100; i++) {
376 if (!(SIS_R32(GMIIControl) & EhnMIInotDone))
377 break;
378 msleep(1);
381 if (i > 99)
382 pr_err("PHY command failed !\n");
385 static void mdio_write(void __iomem *ioaddr, int phy_id, int reg, int val)
387 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIwrite |
388 (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift) |
389 (((u32) val) << EhnMIIdataShift));
392 static int mdio_read(void __iomem *ioaddr, int phy_id, int reg)
394 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIread |
395 (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift));
397 return (u16) (SIS_R32(GMIIControl) >> EhnMIIdataShift);
400 static void __mdio_write(struct net_device *dev, int phy_id, int reg, int val)
402 struct sis190_private *tp = netdev_priv(dev);
404 mdio_write(tp->mmio_addr, phy_id, reg, val);
407 static int __mdio_read(struct net_device *dev, int phy_id, int reg)
409 struct sis190_private *tp = netdev_priv(dev);
411 return mdio_read(tp->mmio_addr, phy_id, reg);
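/*
 * Some MII status bits (e.g. the BMSR link bit) are latched: the first
 * read returns the latched value and refreshes it, so read twice to get
 * the current state.
 */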
414 static u16 mdio_read_latched(void __iomem *ioaddr, int phy_id, int reg)
416 mdio_read(ioaddr, phy_id, reg);
417 return mdio_read(ioaddr, phy_id, reg);
420 static u16 __devinit sis190_read_eeprom(void __iomem *ioaddr, u32 reg)
422 u16 data = 0xffff;
423 unsigned int i;
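/*
 * Serial EEPROM access: post the read opcode and word address through
 * ROMInterface, then poll until the chip clears EEREQ; the result is
 * returned in the upper 16 bits of the register. The early return below
 * presumably means no usable EEPROM is attached (an assumption based on
 * how the ROMControl bit is used here).
 */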
425 if (!(SIS_R32(ROMControl) & 0x0002))
426 return 0;
428 SIS_W32(ROMInterface, EEREQ | EEROP | (reg << 10));
430 for (i = 0; i < 200; i++) {
431 if (!(SIS_R32(ROMInterface) & EEREQ)) {
432 data = (SIS_R32(ROMInterface) & 0xffff0000) >> 16;
433 break;
435 msleep(1);
438 return data;
441 static void sis190_irq_mask_and_ack(void __iomem *ioaddr)
443 SIS_W32(IntrMask, 0x00);
444 SIS_W32(IntrStatus, 0xffffffff);
445 SIS_PCI_COMMIT();
448 static void sis190_asic_down(void __iomem *ioaddr)
450 /* Stop the chip's Tx and Rx DMA processes. */
452 SIS_W32(TxControl, 0x1a00);
453 SIS_W32(RxControl, 0x1a00);
455 sis190_irq_mask_and_ack(ioaddr);
458 static void sis190_mark_as_last_descriptor(struct RxDesc *desc)
460 desc->size |= cpu_to_le32(RingEnd);
463 static inline void sis190_give_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
465 u32 eor = le32_to_cpu(desc->size) & RingEnd;
467 desc->PSize = 0x0;
468 desc->size = cpu_to_le32((rx_buf_sz & RX_BUF_MASK) | eor);
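/* Make addr/size visible before OWNbit hands the descriptor to the NIC. */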
469 wmb();
470 desc->status = cpu_to_le32(OWNbit | INTbit);
473 static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
474 u32 rx_buf_sz)
476 desc->addr = cpu_to_le32(mapping);
477 sis190_give_to_asic(desc, rx_buf_sz);
480 static inline void sis190_make_unusable_by_asic(struct RxDesc *desc)
482 desc->PSize = 0x0;
483 desc->addr = cpu_to_le32(0xdeadbeef);
484 desc->size &= cpu_to_le32(RingEnd);
485 wmb();
486 desc->status = 0x0;
489 static struct sk_buff *sis190_alloc_rx_skb(struct sis190_private *tp,
490 struct RxDesc *desc)
492 u32 rx_buf_sz = tp->rx_buf_sz;
493 struct sk_buff *skb;
494 dma_addr_t mapping;
496 skb = netdev_alloc_skb(tp->dev, rx_buf_sz);
497 if (unlikely(!skb))
498 goto skb_alloc_failed;
499 mapping = pci_map_single(tp->pci_dev, skb->data, tp->rx_buf_sz,
500 PCI_DMA_FROMDEVICE);
501 if (pci_dma_mapping_error(tp->pci_dev, mapping))
502 goto out;
503 sis190_map_to_asic(desc, mapping, rx_buf_sz);
505 return skb;
507 out:
508 dev_kfree_skb_any(skb);
509 skb_alloc_failed:
510 sis190_make_unusable_by_asic(desc);
511 return NULL;
514 static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
515 u32 start, u32 end)
517 u32 cur;
519 for (cur = start; cur < end; cur++) {
520 unsigned int i = cur % NUM_RX_DESC;
522 if (tp->Rx_skbuff[i])
523 continue;
525 tp->Rx_skbuff[i] = sis190_alloc_rx_skb(tp, tp->RxDescRing + i);
527 if (!tp->Rx_skbuff[i])
528 break;
530 return cur - start;
533 static bool sis190_try_rx_copy(struct sis190_private *tp,
534 struct sk_buff **sk_buff, int pkt_size,
535 dma_addr_t addr)
537 struct sk_buff *skb;
538 bool done = false;
540 if (pkt_size >= rx_copybreak)
541 goto out;
543 skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
544 if (!skb)
545 goto out;
547 pci_dma_sync_single_for_cpu(tp->pci_dev, addr, tp->rx_buf_sz,
548 PCI_DMA_FROMDEVICE);
549 skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size);
550 *sk_buff = skb;
551 done = true;
552 out:
553 return done;
556 static inline int sis190_rx_pkt_err(u32 status, struct net_device_stats *stats)
558 #define ErrMask (OVRUN | SHORT | LIMIT | MIIER | NIBON | COLON | ABORT)
560 if ((status & CRCOK) && !(status & ErrMask))
561 return 0;
563 if (!(status & CRCOK))
564 stats->rx_crc_errors++;
565 else if (status & OVRUN)
566 stats->rx_over_errors++;
567 else if (status & (SHORT | LIMIT))
568 stats->rx_length_errors++;
569 else if (status & (MIIER | NIBON | COLON))
570 stats->rx_frame_errors++;
572 stats->rx_errors++;
573 return -1;
576 static int sis190_rx_interrupt(struct net_device *dev,
577 struct sis190_private *tp, void __iomem *ioaddr)
579 struct net_device_stats *stats = &dev->stats;
580 u32 rx_left, cur_rx = tp->cur_rx;
581 u32 delta, count;
583 rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
584 rx_left = sis190_rx_quota(rx_left, (u32) dev->quota);
586 for (; rx_left > 0; rx_left--, cur_rx++) {
587 unsigned int entry = cur_rx % NUM_RX_DESC;
588 struct RxDesc *desc = tp->RxDescRing + entry;
589 u32 status;
591 if (le32_to_cpu(desc->status) & OWNbit)
592 break;
594 status = le32_to_cpu(desc->PSize);
596 //netif_info(tp, intr, dev, "Rx PSize = %08x\n", status);
598 if (sis190_rx_pkt_err(status, stats) < 0)
599 sis190_give_to_asic(desc, tp->rx_buf_sz);
600 else {
601 struct sk_buff *skb = tp->Rx_skbuff[entry];
602 dma_addr_t addr = le32_to_cpu(desc->addr);
603 int pkt_size = (status & RxSizeMask) - 4;
604 struct pci_dev *pdev = tp->pci_dev;
606 if (unlikely(pkt_size > tp->rx_buf_sz)) {
607 netif_info(tp, intr, dev,
608 "(frag) status = %08x\n", status);
609 stats->rx_dropped++;
610 stats->rx_length_errors++;
611 sis190_give_to_asic(desc, tp->rx_buf_sz);
612 continue;
616 if (sis190_try_rx_copy(tp, &skb, pkt_size, addr)) {
617 pci_dma_sync_single_for_device(pdev, addr,
618 tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
619 sis190_give_to_asic(desc, tp->rx_buf_sz);
620 } else {
621 pci_unmap_single(pdev, addr, tp->rx_buf_sz,
622 PCI_DMA_FROMDEVICE);
623 tp->Rx_skbuff[entry] = NULL;
624 sis190_make_unusable_by_asic(desc);
627 skb_put(skb, pkt_size);
628 skb->protocol = eth_type_trans(skb, dev);
630 sis190_rx_skb(skb);
632 stats->rx_packets++;
633 stats->rx_bytes += pkt_size;
634 if ((status & BCAST) == MCAST)
635 stats->multicast++;
638 count = cur_rx - tp->cur_rx;
639 tp->cur_rx = cur_rx;
641 delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
642 if (!delta && count)
643 netif_info(tp, intr, dev, "no Rx buffer allocated\n");
644 tp->dirty_rx += delta;
646 if ((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx)
647 netif_emerg(tp, intr, dev, "Rx buffers exhausted\n");
649 return count;
652 static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
653 struct TxDesc *desc)
655 unsigned int len;
657 len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
659 pci_unmap_single(pdev, le32_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
661 memset(desc, 0x00, sizeof(*desc));
664 static inline int sis190_tx_pkt_err(u32 status, struct net_device_stats *stats)
666 #define TxErrMask (WND | TABRT | FIFO | LINK)
668 if (!unlikely(status & TxErrMask))
669 return 0;
671 if (status & WND)
672 stats->tx_window_errors++;
673 if (status & TABRT)
674 stats->tx_aborted_errors++;
675 if (status & FIFO)
676 stats->tx_fifo_errors++;
677 if (status & LINK)
678 stats->tx_carrier_errors++;
680 stats->tx_errors++;
682 return -1;
685 static void sis190_tx_interrupt(struct net_device *dev,
686 struct sis190_private *tp, void __iomem *ioaddr)
688 struct net_device_stats *stats = &dev->stats;
689 u32 pending, dirty_tx = tp->dirty_tx;
/*
 * It would not be needed if queueing was allowed to be enabled
 * again too early (hint: think preempt and unclocked smp systems).
 */
unsigned int queue_stopped;
696 smp_rmb();
697 pending = tp->cur_tx - dirty_tx;
698 queue_stopped = (pending == NUM_TX_DESC);
700 for (; pending; pending--, dirty_tx++) {
701 unsigned int entry = dirty_tx % NUM_TX_DESC;
702 struct TxDesc *txd = tp->TxDescRing + entry;
703 u32 status = le32_to_cpu(txd->status);
704 struct sk_buff *skb;
706 if (status & OWNbit)
707 break;
709 skb = tp->Tx_skbuff[entry];
711 if (likely(sis190_tx_pkt_err(status, stats) == 0)) {
712 stats->tx_packets++;
713 stats->tx_bytes += skb->len;
714 stats->collisions += ((status & ColCountMask) - 1);
717 sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
718 tp->Tx_skbuff[entry] = NULL;
719 dev_kfree_skb_irq(skb);
722 if (tp->dirty_tx != dirty_tx) {
723 tp->dirty_tx = dirty_tx;
724 smp_wmb();
725 if (queue_stopped)
726 netif_wake_queue(dev);
/*
 * The interrupt handler does all of the Rx thread work and cleans up after
 * the Tx thread.
 */
734 static irqreturn_t sis190_interrupt(int irq, void *__dev)
736 struct net_device *dev = __dev;
737 struct sis190_private *tp = netdev_priv(dev);
738 void __iomem *ioaddr = tp->mmio_addr;
739 unsigned int handled = 0;
740 u32 status;
742 status = SIS_R32(IntrStatus);
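/* 0xffffffff: card gone (e.g. surprise removal); 0: not our interrupt. */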
744 if ((status == 0xffffffff) || !status)
745 goto out;
747 handled = 1;
749 if (unlikely(!netif_running(dev))) {
750 sis190_asic_down(ioaddr);
751 goto out;
754 SIS_W32(IntrStatus, status);
756 // netif_info(tp, intr, dev, "status = %08x\n", status);
758 if (status & LinkChange) {
759 netif_info(tp, intr, dev, "link change\n");
760 del_timer(&tp->timer);
761 schedule_work(&tp->phy_task);
764 if (status & RxQInt)
765 sis190_rx_interrupt(dev, tp, ioaddr);
767 if (status & TxQ0Int)
768 sis190_tx_interrupt(dev, tp, ioaddr);
769 out:
770 return IRQ_RETVAL(handled);
773 #ifdef CONFIG_NET_POLL_CONTROLLER
774 static void sis190_netpoll(struct net_device *dev)
776 struct sis190_private *tp = netdev_priv(dev);
777 struct pci_dev *pdev = tp->pci_dev;
779 disable_irq(pdev->irq);
780 sis190_interrupt(pdev->irq, dev);
781 enable_irq(pdev->irq);
783 #endif
785 static void sis190_free_rx_skb(struct sis190_private *tp,
786 struct sk_buff **sk_buff, struct RxDesc *desc)
788 struct pci_dev *pdev = tp->pci_dev;
790 pci_unmap_single(pdev, le32_to_cpu(desc->addr), tp->rx_buf_sz,
791 PCI_DMA_FROMDEVICE);
792 dev_kfree_skb(*sk_buff);
793 *sk_buff = NULL;
794 sis190_make_unusable_by_asic(desc);
797 static void sis190_rx_clear(struct sis190_private *tp)
799 unsigned int i;
801 for (i = 0; i < NUM_RX_DESC; i++) {
802 if (!tp->Rx_skbuff[i])
803 continue;
804 sis190_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescRing + i);
808 static void sis190_init_ring_indexes(struct sis190_private *tp)
810 tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
813 static int sis190_init_ring(struct net_device *dev)
815 struct sis190_private *tp = netdev_priv(dev);
817 sis190_init_ring_indexes(tp);
819 memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
820 memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
822 if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
823 goto err_rx_clear;
825 sis190_mark_as_last_descriptor(tp->RxDescRing + NUM_RX_DESC - 1);
827 return 0;
829 err_rx_clear:
830 sis190_rx_clear(tp);
831 return -ENOMEM;
834 static void sis190_set_rx_mode(struct net_device *dev)
836 struct sis190_private *tp = netdev_priv(dev);
837 void __iomem *ioaddr = tp->mmio_addr;
838 unsigned long flags;
839 u32 mc_filter[2]; /* Multicast hash filter */
840 u16 rx_mode;
842 if (dev->flags & IFF_PROMISC) {
843 rx_mode =
844 AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
845 AcceptAllPhys;
846 mc_filter[1] = mc_filter[0] = 0xffffffff;
847 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
848 (dev->flags & IFF_ALLMULTI)) {
849 /* Too many to filter perfectly -- accept all multicasts. */
850 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
851 mc_filter[1] = mc_filter[0] = 0xffffffff;
852 } else {
853 struct netdev_hw_addr *ha;
855 rx_mode = AcceptBroadcast | AcceptMyPhys;
856 mc_filter[1] = mc_filter[0] = 0;
857 netdev_for_each_mc_addr(ha, dev) {
858 int bit_nr =
859 ether_crc(ETH_ALEN, ha->addr) & 0x3f;
860 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
861 rx_mode |= AcceptMulticast;
865 spin_lock_irqsave(&tp->lock, flags);
867 SIS_W16(RxMacControl, rx_mode | 0x2);
868 SIS_W32(RxHashTable, mc_filter[0]);
869 SIS_W32(RxHashTable + 4, mc_filter[1]);
871 spin_unlock_irqrestore(&tp->lock, flags);
874 static void sis190_soft_reset(void __iomem *ioaddr)
876 SIS_W32(IntrControl, 0x8000);
877 SIS_PCI_COMMIT();
878 SIS_W32(IntrControl, 0x0);
879 sis190_asic_down(ioaddr);
882 static void sis190_hw_start(struct net_device *dev)
884 struct sis190_private *tp = netdev_priv(dev);
885 void __iomem *ioaddr = tp->mmio_addr;
887 sis190_soft_reset(ioaddr);
889 SIS_W32(TxDescStartAddr, tp->tx_dma);
890 SIS_W32(RxDescStartAddr, tp->rx_dma);
892 SIS_W32(IntrStatus, 0xffffffff);
893 SIS_W32(IntrMask, 0x0);
894 SIS_W32(GMIIControl, 0x0);
895 SIS_W32(TxMacControl, 0x60);
896 SIS_W16(RxMacControl, 0x02);
897 SIS_W32(RxHashTable, 0x0);
898 SIS_W32(0x6c, 0x0);
899 SIS_W32(RxWolCtrl, 0x0);
900 SIS_W32(RxWolData, 0x0);
902 SIS_PCI_COMMIT();
904 sis190_set_rx_mode(dev);
906 /* Enable all known interrupts by setting the interrupt mask. */
907 SIS_W32(IntrMask, sis190_intr_mask);
909 SIS_W32(TxControl, 0x1a00 | CmdTxEnb);
910 SIS_W32(RxControl, 0x1a1d);
912 netif_start_queue(dev);
915 static void sis190_phy_task(struct work_struct *work)
917 struct sis190_private *tp =
918 container_of(work, struct sis190_private, phy_task);
919 struct net_device *dev = tp->dev;
920 void __iomem *ioaddr = tp->mmio_addr;
921 int phy_id = tp->mii_if.phy_id;
922 u16 val;
924 rtnl_lock();
926 if (!netif_running(dev))
927 goto out_unlock;
929 val = mdio_read(ioaddr, phy_id, MII_BMCR);
930 if (val & BMCR_RESET) {
931 // FIXME: needlessly high ? -- FR 02/07/2005
932 mod_timer(&tp->timer, jiffies + HZ/10);
933 goto out_unlock;
936 val = mdio_read_latched(ioaddr, phy_id, MII_BMSR);
937 if (!(val & BMSR_ANEGCOMPLETE) && tp->link_status != LNK_AUTONEG) {
938 netif_carrier_off(dev);
939 netif_warn(tp, link, dev, "auto-negotiating...\n");
940 tp->link_status = LNK_AUTONEG;
941 } else if ((val & BMSR_LSTATUS) && tp->link_status != LNK_ON) {
942 /* Rejoice ! */
943 struct {
944 int val;
945 u32 ctl;
946 const char *msg;
947 } reg31[] = {
948 { LPA_1000FULL, 0x07000c00 | 0x00001000,
949 "1000 Mbps Full Duplex" },
950 { LPA_1000HALF, 0x07000c00,
951 "1000 Mbps Half Duplex" },
952 { LPA_100FULL, 0x04000800 | 0x00001000,
953 "100 Mbps Full Duplex" },
954 { LPA_100HALF, 0x04000800,
955 "100 Mbps Half Duplex" },
956 { LPA_10FULL, 0x04000400 | 0x00001000,
957 "10 Mbps Full Duplex" },
958 { LPA_10HALF, 0x04000400,
959 "10 Mbps Half Duplex" },
960 { 0, 0x04000400, "unknown" }
961 }, *p = NULL;
962 u16 adv, autoexp, gigadv, gigrec;
964 val = mdio_read(ioaddr, phy_id, 0x1f);
965 netif_info(tp, link, dev, "mii ext = %04x\n", val);
967 val = mdio_read(ioaddr, phy_id, MII_LPA);
968 adv = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
969 autoexp = mdio_read(ioaddr, phy_id, MII_EXPANSION);
970 netif_info(tp, link, dev, "mii lpa=%04x adv=%04x exp=%04x\n",
971 val, adv, autoexp);
973 if (val & LPA_NPAGE && autoexp & EXPANSION_NWAY) {
974 /* check for gigabit speed */
975 gigadv = mdio_read(ioaddr, phy_id, MII_CTRL1000);
976 gigrec = mdio_read(ioaddr, phy_id, MII_STAT1000);
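/*
 * Link partner 1000BASE-T abilities in MII_STAT1000 sit two bits above
 * our advertisement bits in MII_CTRL1000, hence the >> 2 before masking.
 */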
977 val = (gigadv & (gigrec >> 2));
978 if (val & ADVERTISE_1000FULL)
979 p = reg31;
980 else if (val & ADVERTISE_1000HALF)
981 p = reg31 + 1;
983 if (!p) {
984 val &= adv;
986 for (p = reg31; p->val; p++) {
987 if ((val & p->val) == p->val)
988 break;
992 p->ctl |= SIS_R32(StationControl) & ~0x0f001c00;
994 if ((tp->features & F_HAS_RGMII) &&
995 (tp->features & F_PHY_BCM5461)) {
996 // Set Tx Delay in RGMII mode.
997 mdio_write(ioaddr, phy_id, 0x18, 0xf1c7);
998 udelay(200);
999 mdio_write(ioaddr, phy_id, 0x1c, 0x8c00);
1000 p->ctl |= 0x03000000;
1003 SIS_W32(StationControl, p->ctl);
1005 if (tp->features & F_HAS_RGMII) {
1006 SIS_W32(RGDelay, 0x0441);
1007 SIS_W32(RGDelay, 0x0440);
1010 tp->negotiated_lpa = p->val;
1012 netif_info(tp, link, dev, "link on %s mode\n", p->msg);
1013 netif_carrier_on(dev);
1014 tp->link_status = LNK_ON;
1015 } else if (!(val & BMSR_LSTATUS) && tp->link_status != LNK_AUTONEG)
1016 tp->link_status = LNK_OFF;
1017 mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);
1019 out_unlock:
1020 rtnl_unlock();
1023 static void sis190_phy_timer(unsigned long __opaque)
1025 struct net_device *dev = (struct net_device *)__opaque;
1026 struct sis190_private *tp = netdev_priv(dev);
1028 if (likely(netif_running(dev)))
1029 schedule_work(&tp->phy_task);
1032 static inline void sis190_delete_timer(struct net_device *dev)
1034 struct sis190_private *tp = netdev_priv(dev);
1036 del_timer_sync(&tp->timer);
1039 static inline void sis190_request_timer(struct net_device *dev)
1041 struct sis190_private *tp = netdev_priv(dev);
1042 struct timer_list *timer = &tp->timer;
1044 init_timer(timer);
1045 timer->expires = jiffies + SIS190_PHY_TIMEOUT;
1046 timer->data = (unsigned long)dev;
1047 timer->function = sis190_phy_timer;
1048 add_timer(timer);
1051 static void sis190_set_rxbufsize(struct sis190_private *tp,
1052 struct net_device *dev)
1054 unsigned int mtu = dev->mtu;
1056 tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
1057 /* RxDesc->size has a licence to kill the lower bits */
1058 if (tp->rx_buf_sz & 0x07) {
1059 tp->rx_buf_sz += 8;
1060 tp->rx_buf_sz &= RX_BUF_MASK;
1064 static int sis190_open(struct net_device *dev)
1066 struct sis190_private *tp = netdev_priv(dev);
1067 struct pci_dev *pdev = tp->pci_dev;
1068 int rc = -ENOMEM;
1070 sis190_set_rxbufsize(tp, dev);
/*
 * Rx and Tx descriptors need 256 bytes alignment.
 * pci_alloc_consistent() guarantees a stronger alignment.
 */
1076 tp->TxDescRing = pci_alloc_consistent(pdev, TX_RING_BYTES, &tp->tx_dma);
1077 if (!tp->TxDescRing)
1078 goto out;
1080 tp->RxDescRing = pci_alloc_consistent(pdev, RX_RING_BYTES, &tp->rx_dma);
1081 if (!tp->RxDescRing)
1082 goto err_free_tx_0;
1084 rc = sis190_init_ring(dev);
1085 if (rc < 0)
1086 goto err_free_rx_1;
1088 sis190_request_timer(dev);
1090 rc = request_irq(dev->irq, sis190_interrupt, IRQF_SHARED, dev->name, dev);
1091 if (rc < 0)
1092 goto err_release_timer_2;
1094 sis190_hw_start(dev);
1095 out:
1096 return rc;
1098 err_release_timer_2:
1099 sis190_delete_timer(dev);
1100 sis190_rx_clear(tp);
1101 err_free_rx_1:
1102 pci_free_consistent(tp->pci_dev, RX_RING_BYTES, tp->RxDescRing,
1103 tp->rx_dma);
1104 err_free_tx_0:
1105 pci_free_consistent(tp->pci_dev, TX_RING_BYTES, tp->TxDescRing,
1106 tp->tx_dma);
1107 goto out;
1110 static void sis190_tx_clear(struct sis190_private *tp)
1112 unsigned int i;
1114 for (i = 0; i < NUM_TX_DESC; i++) {
1115 struct sk_buff *skb = tp->Tx_skbuff[i];
1117 if (!skb)
1118 continue;
1120 sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i);
1121 tp->Tx_skbuff[i] = NULL;
1122 dev_kfree_skb(skb);
1124 tp->dev->stats.tx_dropped++;
1126 tp->cur_tx = tp->dirty_tx = 0;
1129 static void sis190_down(struct net_device *dev)
1131 struct sis190_private *tp = netdev_priv(dev);
1132 void __iomem *ioaddr = tp->mmio_addr;
1133 unsigned int poll_locked = 0;
1135 sis190_delete_timer(dev);
1137 netif_stop_queue(dev);
1139 do {
1140 spin_lock_irq(&tp->lock);
1142 sis190_asic_down(ioaddr);
1144 spin_unlock_irq(&tp->lock);
1146 synchronize_irq(dev->irq);
1148 if (!poll_locked)
1149 poll_locked++;
1151 synchronize_sched();
1153 } while (SIS_R32(IntrMask));
1155 sis190_tx_clear(tp);
1156 sis190_rx_clear(tp);
1159 static int sis190_close(struct net_device *dev)
1161 struct sis190_private *tp = netdev_priv(dev);
1162 struct pci_dev *pdev = tp->pci_dev;
1164 sis190_down(dev);
1166 free_irq(dev->irq, dev);
1168 pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
1169 pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);
1171 tp->TxDescRing = NULL;
1172 tp->RxDescRing = NULL;
1174 return 0;
1177 static netdev_tx_t sis190_start_xmit(struct sk_buff *skb,
1178 struct net_device *dev)
1180 struct sis190_private *tp = netdev_priv(dev);
1181 void __iomem *ioaddr = tp->mmio_addr;
1182 u32 len, entry, dirty_tx;
1183 struct TxDesc *desc;
1184 dma_addr_t mapping;
1186 if (unlikely(skb->len < ETH_ZLEN)) {
1187 if (skb_padto(skb, ETH_ZLEN)) {
1188 dev->stats.tx_dropped++;
1189 goto out;
1191 len = ETH_ZLEN;
1192 } else {
1193 len = skb->len;
1196 entry = tp->cur_tx % NUM_TX_DESC;
1197 desc = tp->TxDescRing + entry;
1199 if (unlikely(le32_to_cpu(desc->status) & OWNbit)) {
1200 netif_stop_queue(dev);
1201 netif_err(tp, tx_err, dev,
1202 "BUG! Tx Ring full when queue awake!\n");
1203 return NETDEV_TX_BUSY;
1206 mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
1207 if (pci_dma_mapping_error(tp->pci_dev, mapping)) {
1208 netif_err(tp, tx_err, dev,
1209 "PCI mapping failed, dropping packet");
1210 return NETDEV_TX_BUSY;
1213 tp->Tx_skbuff[entry] = skb;
1215 desc->PSize = cpu_to_le32(len);
1216 desc->addr = cpu_to_le32(mapping);
1218 desc->size = cpu_to_le32(len);
1219 if (entry == (NUM_TX_DESC - 1))
1220 desc->size |= cpu_to_le32(RingEnd);
1222 wmb();
1224 desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit);
1225 if (tp->negotiated_lpa & (LPA_1000HALF | LPA_100HALF | LPA_10HALF)) {
1226 /* Half Duplex */
1227 desc->status |= cpu_to_le32(COLEN | CRSEN | BKFEN);
1228 if (tp->negotiated_lpa & (LPA_1000HALF | LPA_1000FULL))
1229 desc->status |= cpu_to_le32(EXTEN | BSTEN); /* gigabit HD */
1232 tp->cur_tx++;
1234 smp_wmb();
1236 SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb);
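/*
 * If the ring just became full, stop the queue, then re-check dirty_tx:
 * a Tx completion may have freed slots in between, in which case the
 * queue is woken again so it does not stall.
 */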
1238 dirty_tx = tp->dirty_tx;
1239 if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) {
1240 netif_stop_queue(dev);
1241 smp_rmb();
1242 if (dirty_tx != tp->dirty_tx)
1243 netif_wake_queue(dev);
1245 out:
1246 return NETDEV_TX_OK;
1249 static void sis190_free_phy(struct list_head *first_phy)
1251 struct sis190_phy *cur, *next;
1253 list_for_each_entry_safe(cur, next, first_phy, list) {
1254 kfree(cur);
/**
 * sis190_default_phy - Select default PHY for sis190 mac.
 * @dev: the net device to probe for
 *
 * Select the first detected PHY with link up as default.
 * If no PHY has link, select a PHY of type HOME as default.
 * If no HOME PHY exists, select a LAN PHY.
 */
1266 static u16 sis190_default_phy(struct net_device *dev)
1268 struct sis190_phy *phy, *phy_home, *phy_default, *phy_lan;
1269 struct sis190_private *tp = netdev_priv(dev);
1270 struct mii_if_info *mii_if = &tp->mii_if;
1271 void __iomem *ioaddr = tp->mmio_addr;
1272 u16 status;
1274 phy_home = phy_default = phy_lan = NULL;
1276 list_for_each_entry(phy, &tp->first_phy, list) {
1277 status = mdio_read_latched(ioaddr, phy->phy_id, MII_BMSR);
1279 // Link ON & Not select default PHY & not ghost PHY.
1280 if ((status & BMSR_LSTATUS) &&
1281 !phy_default &&
1282 (phy->type != UNKNOWN)) {
1283 phy_default = phy;
1284 } else {
1285 status = mdio_read(ioaddr, phy->phy_id, MII_BMCR);
1286 mdio_write(ioaddr, phy->phy_id, MII_BMCR,
1287 status | BMCR_ANENABLE | BMCR_ISOLATE);
1288 if (phy->type == HOME)
1289 phy_home = phy;
1290 else if (phy->type == LAN)
1291 phy_lan = phy;
1295 if (!phy_default) {
1296 if (phy_home)
1297 phy_default = phy_home;
1298 else if (phy_lan)
1299 phy_default = phy_lan;
1300 else
1301 phy_default = list_first_entry(&tp->first_phy,
1302 struct sis190_phy, list);
1305 if (mii_if->phy_id != phy_default->phy_id) {
1306 mii_if->phy_id = phy_default->phy_id;
1307 if (netif_msg_probe(tp))
1308 pr_info("%s: Using transceiver at address %d as default\n",
1309 pci_name(tp->pci_dev), mii_if->phy_id);
1312 status = mdio_read(ioaddr, mii_if->phy_id, MII_BMCR);
1313 status &= (~BMCR_ISOLATE);
1315 mdio_write(ioaddr, mii_if->phy_id, MII_BMCR, status);
1316 status = mdio_read_latched(ioaddr, mii_if->phy_id, MII_BMSR);
1318 return status;
1321 static void sis190_init_phy(struct net_device *dev, struct sis190_private *tp,
1322 struct sis190_phy *phy, unsigned int phy_id,
1323 u16 mii_status)
1325 void __iomem *ioaddr = tp->mmio_addr;
1326 struct mii_chip_info *p;
1328 INIT_LIST_HEAD(&phy->list);
1329 phy->status = mii_status;
1330 phy->phy_id = phy_id;
1332 phy->id[0] = mdio_read(ioaddr, phy_id, MII_PHYSID1);
1333 phy->id[1] = mdio_read(ioaddr, phy_id, MII_PHYSID2);
1335 for (p = mii_chip_table; p->type; p++) {
1336 if ((p->id[0] == phy->id[0]) &&
1337 (p->id[1] == (phy->id[1] & 0xfff0))) {
1338 break;
1342 if (p->id[1]) {
1343 phy->type = (p->type == MIX) ?
1344 ((mii_status & (BMSR_100FULL | BMSR_100HALF)) ?
1345 LAN : HOME) : p->type;
1346 tp->features |= p->feature;
1347 if (netif_msg_probe(tp))
1348 pr_info("%s: %s transceiver at address %d\n",
1349 pci_name(tp->pci_dev), p->name, phy_id);
1350 } else {
1351 phy->type = UNKNOWN;
1352 if (netif_msg_probe(tp))
1353 pr_info("%s: unknown PHY 0x%x:0x%x transceiver at address %d\n",
1354 pci_name(tp->pci_dev),
1355 phy->id[0], (phy->id[1] & 0xfff0), phy_id);
1359 static void sis190_mii_probe_88e1111_fixup(struct sis190_private *tp)
1361 if (tp->features & F_PHY_88E1111) {
1362 void __iomem *ioaddr = tp->mmio_addr;
1363 int phy_id = tp->mii_if.phy_id;
1364 u16 reg[2][2] = {
1365 { 0x808b, 0x0ce1 },
1366 { 0x808f, 0x0c60 }
1367 }, *p;
1369 p = (tp->features & F_HAS_RGMII) ? reg[0] : reg[1];
1371 mdio_write(ioaddr, phy_id, 0x1b, p[0]);
1372 udelay(200);
1373 mdio_write(ioaddr, phy_id, 0x14, p[1]);
1374 udelay(200);
/**
 * sis190_mii_probe - Probe MII PHY for sis190
 * @dev: the net device to probe for
 *
 * Search all 32 possible MII PHY addresses.
 * Identify and select the current PHY if one is found,
 * return an error if none is found.
 */
1386 static int __devinit sis190_mii_probe(struct net_device *dev)
1388 struct sis190_private *tp = netdev_priv(dev);
1389 struct mii_if_info *mii_if = &tp->mii_if;
1390 void __iomem *ioaddr = tp->mmio_addr;
1391 int phy_id;
1392 int rc = 0;
1394 INIT_LIST_HEAD(&tp->first_phy);
1396 for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
1397 struct sis190_phy *phy;
1398 u16 status;
1400 status = mdio_read_latched(ioaddr, phy_id, MII_BMSR);
1402 // Try next mii if the current one is not accessible.
1403 if (status == 0xffff || status == 0x0000)
1404 continue;
1406 phy = kmalloc(sizeof(*phy), GFP_KERNEL);
1407 if (!phy) {
1408 sis190_free_phy(&tp->first_phy);
1409 rc = -ENOMEM;
1410 goto out;
1413 sis190_init_phy(dev, tp, phy, phy_id, status);
1415 list_add(&tp->first_phy, &phy->list);
1418 if (list_empty(&tp->first_phy)) {
1419 if (netif_msg_probe(tp))
1420 pr_info("%s: No MII transceivers found!\n",
1421 pci_name(tp->pci_dev));
1422 rc = -EIO;
1423 goto out;
1426 /* Select default PHY for mac */
1427 sis190_default_phy(dev);
1429 sis190_mii_probe_88e1111_fixup(tp);
1431 mii_if->dev = dev;
1432 mii_if->mdio_read = __mdio_read;
1433 mii_if->mdio_write = __mdio_write;
1434 mii_if->phy_id_mask = PHY_ID_ANY;
1435 mii_if->reg_num_mask = MII_REG_ANY;
1436 out:
1437 return rc;
1440 static void sis190_mii_remove(struct net_device *dev)
1442 struct sis190_private *tp = netdev_priv(dev);
1444 sis190_free_phy(&tp->first_phy);
1447 static void sis190_release_board(struct pci_dev *pdev)
1449 struct net_device *dev = pci_get_drvdata(pdev);
1450 struct sis190_private *tp = netdev_priv(dev);
1452 iounmap(tp->mmio_addr);
1453 pci_release_regions(pdev);
1454 pci_disable_device(pdev);
1455 free_netdev(dev);
1458 static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
1460 struct sis190_private *tp;
1461 struct net_device *dev;
1462 void __iomem *ioaddr;
1463 int rc;
1465 dev = alloc_etherdev(sizeof(*tp));
1466 if (!dev) {
1467 if (netif_msg_drv(&debug))
1468 pr_err("unable to alloc new ethernet\n");
1469 rc = -ENOMEM;
1470 goto err_out_0;
1473 SET_NETDEV_DEV(dev, &pdev->dev);
1475 tp = netdev_priv(dev);
1476 tp->dev = dev;
1477 tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);
1479 rc = pci_enable_device(pdev);
1480 if (rc < 0) {
1481 if (netif_msg_probe(tp))
1482 pr_err("%s: enable failure\n", pci_name(pdev));
1483 goto err_free_dev_1;
1486 rc = -ENODEV;
1488 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1489 if (netif_msg_probe(tp))
1490 pr_err("%s: region #0 is no MMIO resource\n",
1491 pci_name(pdev));
1492 goto err_pci_disable_2;
1494 if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) {
1495 if (netif_msg_probe(tp))
1496 pr_err("%s: invalid PCI region size(s)\n",
1497 pci_name(pdev));
1498 goto err_pci_disable_2;
1501 rc = pci_request_regions(pdev, DRV_NAME);
1502 if (rc < 0) {
1503 if (netif_msg_probe(tp))
1504 pr_err("%s: could not request regions\n",
1505 pci_name(pdev));
1506 goto err_pci_disable_2;
1509 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1510 if (rc < 0) {
1511 if (netif_msg_probe(tp))
1512 pr_err("%s: DMA configuration failed\n",
1513 pci_name(pdev));
1514 goto err_free_res_3;
1517 pci_set_master(pdev);
1519 ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE);
1520 if (!ioaddr) {
1521 if (netif_msg_probe(tp))
1522 pr_err("%s: cannot remap MMIO, aborting\n",
1523 pci_name(pdev));
1524 rc = -EIO;
1525 goto err_free_res_3;
1528 tp->pci_dev = pdev;
1529 tp->mmio_addr = ioaddr;
1530 tp->link_status = LNK_OFF;
1532 sis190_irq_mask_and_ack(ioaddr);
1534 sis190_soft_reset(ioaddr);
1535 out:
1536 return dev;
1538 err_free_res_3:
1539 pci_release_regions(pdev);
1540 err_pci_disable_2:
1541 pci_disable_device(pdev);
1542 err_free_dev_1:
1543 free_netdev(dev);
1544 err_out_0:
1545 dev = ERR_PTR(rc);
1546 goto out;
1549 static void sis190_tx_timeout(struct net_device *dev)
1551 struct sis190_private *tp = netdev_priv(dev);
1552 void __iomem *ioaddr = tp->mmio_addr;
1553 u8 tmp8;
1555 /* Disable Tx, if not already */
1556 tmp8 = SIS_R8(TxControl);
1557 if (tmp8 & CmdTxEnb)
1558 SIS_W8(TxControl, tmp8 & ~CmdTxEnb);
1560 netif_info(tp, tx_err, dev, "Transmit timeout, status %08x %08x\n",
1561 SIS_R32(TxControl), SIS_R32(TxSts));
1563 /* Disable interrupts by clearing the interrupt mask. */
1564 SIS_W32(IntrMask, 0x0000);
1566 /* Stop a shared interrupt from scavenging while we are. */
1567 spin_lock_irq(&tp->lock);
1568 sis190_tx_clear(tp);
1569 spin_unlock_irq(&tp->lock);
1571 /* ...and finally, reset everything. */
1572 sis190_hw_start(dev);
1574 netif_wake_queue(dev);
1577 static void sis190_set_rgmii(struct sis190_private *tp, u8 reg)
1579 tp->features |= (reg & 0x80) ? F_HAS_RGMII : 0;
1582 static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
1583 struct net_device *dev)
1585 struct sis190_private *tp = netdev_priv(dev);
1586 void __iomem *ioaddr = tp->mmio_addr;
1587 u16 sig;
1588 int i;
1590 if (netif_msg_probe(tp))
1591 pr_info("%s: Read MAC address from EEPROM\n", pci_name(pdev));
1593 /* Check to see if there is a sane EEPROM */
1594 sig = (u16) sis190_read_eeprom(ioaddr, EEPROMSignature);
1596 if ((sig == 0xffff) || (sig == 0x0000)) {
1597 if (netif_msg_probe(tp))
1598 pr_info("%s: Error EEPROM read %x\n",
1599 pci_name(pdev), sig);
1600 return -EIO;
1603 /* Get MAC address from EEPROM */
1604 for (i = 0; i < MAC_ADDR_LEN / 2; i++) {
1605 u16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i);
1607 ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(w);
1610 sis190_set_rgmii(tp, sis190_read_eeprom(ioaddr, EEPROMInfo));
1612 return 0;
/**
 * sis190_get_mac_addr_from_apc - Get MAC address for SiS96x model
 * @pdev: PCI device
 * @dev: network device to get address for
 *
 * SiS96x models store the MAC address in APC CMOS RAM, which is
 * accessed through the ISA bridge.
 * The MAC address is read into @dev->dev_addr.
 */
1624 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
1625 struct net_device *dev)
1627 static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
1628 struct sis190_private *tp = netdev_priv(dev);
1629 struct pci_dev *isa_bridge;
1630 u8 reg, tmp8;
1631 unsigned int i;
1633 if (netif_msg_probe(tp))
1634 pr_info("%s: Read MAC address from APC\n", pci_name(pdev));
1636 for (i = 0; i < ARRAY_SIZE(ids); i++) {
1637 isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, ids[i], NULL);
1638 if (isa_bridge)
1639 break;
1642 if (!isa_bridge) {
1643 if (netif_msg_probe(tp))
1644 pr_info("%s: Can not find ISA bridge\n",
1645 pci_name(pdev));
1646 return -EIO;
1649 /* Enable port 78h & 79h to access APC Registers. */
1650 pci_read_config_byte(isa_bridge, 0x48, &tmp8);
1651 reg = (tmp8 & ~0x02);
1652 pci_write_config_byte(isa_bridge, 0x48, reg);
1653 udelay(50);
1654 pci_read_config_byte(isa_bridge, 0x48, &reg);
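/*
 * APC CMOS RAM is an index/data pair on I/O ports 0x78/0x79; the MAC
 * address occupies offsets 0x9..0xe and offset 0x12 carries the byte
 * used below to detect RGMII.
 */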
1656 for (i = 0; i < MAC_ADDR_LEN; i++) {
1657 outb(0x9 + i, 0x78);
1658 dev->dev_addr[i] = inb(0x79);
1661 outb(0x12, 0x78);
1662 reg = inb(0x79);
1664 sis190_set_rgmii(tp, reg);
1666 /* Restore the value to ISA Bridge */
1667 pci_write_config_byte(isa_bridge, 0x48, tmp8);
1668 pci_dev_put(isa_bridge);
1670 return 0;
/**
 * sis190_init_rxfilter - Initialize the Rx filter
 * @dev: network device to initialize
 *
 * Set receive filter address to our MAC address
 * and enable packet filtering.
 */
1680 static inline void sis190_init_rxfilter(struct net_device *dev)
1682 struct sis190_private *tp = netdev_priv(dev);
1683 void __iomem *ioaddr = tp->mmio_addr;
1684 u16 ctl;
1685 int i;
1687 ctl = SIS_R16(RxMacControl);
/*
 * Disable packet filtering before setting filter.
 * Note: SiS's driver writes 32 bits but RxMacControl is 16 bits
 * only and followed by RxMacAddr (6 bytes). Strange. -- FR
 */
1693 SIS_W16(RxMacControl, ctl & ~0x0f00);
1695 for (i = 0; i < MAC_ADDR_LEN; i++)
1696 SIS_W8(RxMacAddr + i, dev->dev_addr[i]);
1698 SIS_W16(RxMacControl, ctl);
1699 SIS_PCI_COMMIT();
1702 static int __devinit sis190_get_mac_addr(struct pci_dev *pdev,
1703 struct net_device *dev)
1705 int rc;
1707 rc = sis190_get_mac_addr_from_eeprom(pdev, dev);
1708 if (rc < 0) {
1709 u8 reg;
1711 pci_read_config_byte(pdev, 0x73, &reg);
1713 if (reg & 0x00000001)
1714 rc = sis190_get_mac_addr_from_apc(pdev, dev);
1716 return rc;
1719 static void sis190_set_speed_auto(struct net_device *dev)
1721 struct sis190_private *tp = netdev_priv(dev);
1722 void __iomem *ioaddr = tp->mmio_addr;
1723 int phy_id = tp->mii_if.phy_id;
1724 int val;
1726 netif_info(tp, link, dev, "Enabling Auto-negotiation\n");
1728 val = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
1730 // Enable 10/100 Full/Half Mode, leave MII_ADVERTISE bit4:0
1731 // unchanged.
1732 mdio_write(ioaddr, phy_id, MII_ADVERTISE, (val & ADVERTISE_SLCT) |
1733 ADVERTISE_100FULL | ADVERTISE_10FULL |
1734 ADVERTISE_100HALF | ADVERTISE_10HALF);
1736 // Enable 1000 Full Mode.
1737 mdio_write(ioaddr, phy_id, MII_CTRL1000, ADVERTISE_1000FULL);
1739 // Enable auto-negotiation and restart auto-negotiation.
1740 mdio_write(ioaddr, phy_id, MII_BMCR,
1741 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET);
1744 static int sis190_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1746 struct sis190_private *tp = netdev_priv(dev);
1748 return mii_ethtool_gset(&tp->mii_if, cmd);
1751 static int sis190_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1753 struct sis190_private *tp = netdev_priv(dev);
1755 return mii_ethtool_sset(&tp->mii_if, cmd);
1758 static void sis190_get_drvinfo(struct net_device *dev,
1759 struct ethtool_drvinfo *info)
1761 struct sis190_private *tp = netdev_priv(dev);
1763 strcpy(info->driver, DRV_NAME);
1764 strcpy(info->version, DRV_VERSION);
1765 strcpy(info->bus_info, pci_name(tp->pci_dev));
1768 static int sis190_get_regs_len(struct net_device *dev)
1770 return SIS190_REGS_SIZE;
1773 static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1774 void *p)
1776 struct sis190_private *tp = netdev_priv(dev);
1777 unsigned long flags;
1779 if (regs->len > SIS190_REGS_SIZE)
1780 regs->len = SIS190_REGS_SIZE;
1782 spin_lock_irqsave(&tp->lock, flags);
1783 memcpy_fromio(p, tp->mmio_addr, regs->len);
1784 spin_unlock_irqrestore(&tp->lock, flags);
1787 static int sis190_nway_reset(struct net_device *dev)
1789 struct sis190_private *tp = netdev_priv(dev);
1791 return mii_nway_restart(&tp->mii_if);
1794 static u32 sis190_get_msglevel(struct net_device *dev)
1796 struct sis190_private *tp = netdev_priv(dev);
1798 return tp->msg_enable;
1801 static void sis190_set_msglevel(struct net_device *dev, u32 value)
1803 struct sis190_private *tp = netdev_priv(dev);
1805 tp->msg_enable = value;
1808 static const struct ethtool_ops sis190_ethtool_ops = {
1809 .get_settings = sis190_get_settings,
1810 .set_settings = sis190_set_settings,
1811 .get_drvinfo = sis190_get_drvinfo,
1812 .get_regs_len = sis190_get_regs_len,
1813 .get_regs = sis190_get_regs,
1814 .get_link = ethtool_op_get_link,
1815 .get_msglevel = sis190_get_msglevel,
1816 .set_msglevel = sis190_set_msglevel,
1817 .nway_reset = sis190_nway_reset,
1820 static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1822 struct sis190_private *tp = netdev_priv(dev);
1824 return !netif_running(dev) ? -EINVAL :
1825 generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL);
1828 static int sis190_mac_addr(struct net_device *dev, void *p)
1830 int rc;
1832 rc = eth_mac_addr(dev, p);
1833 if (!rc)
1834 sis190_init_rxfilter(dev);
1835 return rc;
1838 static const struct net_device_ops sis190_netdev_ops = {
1839 .ndo_open = sis190_open,
1840 .ndo_stop = sis190_close,
1841 .ndo_do_ioctl = sis190_ioctl,
1842 .ndo_start_xmit = sis190_start_xmit,
1843 .ndo_tx_timeout = sis190_tx_timeout,
1844 .ndo_set_multicast_list = sis190_set_rx_mode,
1845 .ndo_change_mtu = eth_change_mtu,
1846 .ndo_set_mac_address = sis190_mac_addr,
1847 .ndo_validate_addr = eth_validate_addr,
1848 #ifdef CONFIG_NET_POLL_CONTROLLER
1849 .ndo_poll_controller = sis190_netpoll,
1850 #endif
1853 static int __devinit sis190_init_one(struct pci_dev *pdev,
1854 const struct pci_device_id *ent)
1856 static int printed_version = 0;
1857 struct sis190_private *tp;
1858 struct net_device *dev;
1859 void __iomem *ioaddr;
1860 int rc;
1862 if (!printed_version) {
1863 if (netif_msg_drv(&debug))
1864 pr_info(SIS190_DRIVER_NAME " loaded\n");
1865 printed_version = 1;
1868 dev = sis190_init_board(pdev);
1869 if (IS_ERR(dev)) {
1870 rc = PTR_ERR(dev);
1871 goto out;
1874 pci_set_drvdata(pdev, dev);
1876 tp = netdev_priv(dev);
1877 ioaddr = tp->mmio_addr;
1879 rc = sis190_get_mac_addr(pdev, dev);
1880 if (rc < 0)
1881 goto err_release_board;
1883 sis190_init_rxfilter(dev);
1885 INIT_WORK(&tp->phy_task, sis190_phy_task);
1887 dev->netdev_ops = &sis190_netdev_ops;
1889 SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
1890 dev->irq = pdev->irq;
1891 dev->base_addr = (unsigned long) 0xdead;
1892 dev->watchdog_timeo = SIS190_TX_TIMEOUT;
1894 spin_lock_init(&tp->lock);
1896 rc = sis190_mii_probe(dev);
1897 if (rc < 0)
1898 goto err_release_board;
1900 rc = register_netdev(dev);
1901 if (rc < 0)
1902 goto err_remove_mii;
1904 if (netif_msg_probe(tp)) {
1905 netdev_info(dev, "%s: %s at %p (IRQ: %d), %pM\n",
1906 pci_name(pdev),
1907 sis_chip_info[ent->driver_data].name,
1908 ioaddr, dev->irq, dev->dev_addr);
1909 netdev_info(dev, "%s mode.\n",
1910 (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII");
1913 netif_carrier_off(dev);
1915 sis190_set_speed_auto(dev);
1916 out:
1917 return rc;
1919 err_remove_mii:
1920 sis190_mii_remove(dev);
1921 err_release_board:
1922 sis190_release_board(pdev);
1923 goto out;
1926 static void __devexit sis190_remove_one(struct pci_dev *pdev)
1928 struct net_device *dev = pci_get_drvdata(pdev);
1929 struct sis190_private *tp = netdev_priv(dev);
1931 sis190_mii_remove(dev);
1932 cancel_work_sync(&tp->phy_task);
1933 unregister_netdev(dev);
1934 sis190_release_board(pdev);
1935 pci_set_drvdata(pdev, NULL);
1938 static struct pci_driver sis190_pci_driver = {
1939 .name = DRV_NAME,
1940 .id_table = sis190_pci_tbl,
1941 .probe = sis190_init_one,
1942 .remove = __devexit_p(sis190_remove_one),
1945 static int __init sis190_init_module(void)
1947 return pci_register_driver(&sis190_pci_driver);
1950 static void __exit sis190_cleanup_module(void)
1952 pci_unregister_driver(&sis190_pci_driver);
1955 module_init(sis190_init_module);
1956 module_exit(sis190_cleanup_module);