/*
 * New driver for Marvell Yukon chipset and SysKonnect Gigabit
 * Ethernet adapters. Based on earlier sk98lin, e100 and
 * FreeBSD if_sk drivers.
 *
 * This driver intentionally does not support all the features
 * of the original driver such as link fail-over and link management because
 * those should be done at higher levels.
 *
 * Copyright (C) 2004, Stephen Hemminger <shemminger@osdl.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/pci.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/crc32.h>
#include <linux/dma-mapping.h>

#include "skge.h"
#define DRV_NAME		"skge"
#define DRV_VERSION		"0.6"
#define PFX			DRV_NAME " "

#define DEFAULT_TX_RING_SIZE	128
#define DEFAULT_RX_RING_SIZE	512
#define MAX_TX_RING_SIZE	1024
#define MAX_RX_RING_SIZE	4096
#define PHY_RETRIES		1000
#define ETH_JUMBO_MTU		9000
#define TX_WATCHDOG		(5 * HZ)
#define NAPI_WEIGHT		64
#define BLINK_HZ		(HZ/4)
#define LINK_POLL_HZ		(HZ/10)
MODULE_DESCRIPTION("SysKonnect Gigabit Ethernet driver");
MODULE_AUTHOR("Stephen Hemminger <shemminger@osdl.org>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
static const u32 default_msg
	= NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
	| NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
static int debug = -1;	/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
static const struct pci_device_id skge_id_table[] = {
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940,
	  PCI_ANY_ID, PCI_ANY_ID },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940B,
	  PCI_ANY_ID, PCI_ANY_ID },
	{ PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_GE,
	  PCI_ANY_ID, PCI_ANY_ID },
	{ PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_YU,
	  PCI_ANY_ID, PCI_ANY_ID },
	{ PCI_VENDOR_ID_SYSKONNECT, 0x9E00,	/* SK-9Exx */
	  PCI_ANY_ID, PCI_ANY_ID },
	{ PCI_VENDOR_ID_DLINK, PCI_DEVICE_ID_DLINK_DGE510T,
	  PCI_ANY_ID, PCI_ANY_ID },
	{ PCI_VENDOR_ID_MARVELL, 0x4320,	/* Gigabit Ethernet Controller */
	  PCI_ANY_ID, PCI_ANY_ID },
	{ PCI_VENDOR_ID_MARVELL, 0x5005,	/* Marvell (11ab), Belkin */
	  PCI_ANY_ID, PCI_ANY_ID },
	{ PCI_VENDOR_ID_CNET, PCI_DEVICE_ID_CNET_GIGACARD,
	  PCI_ANY_ID, PCI_ANY_ID },
	{ PCI_VENDOR_ID_LINKSYS, PCI_DEVICE_ID_LINKSYS_EG1032,
	  PCI_ANY_ID, PCI_ANY_ID },
	{ PCI_VENDOR_ID_LINKSYS, PCI_DEVICE_ID_LINKSYS_EG1064,
	  PCI_ANY_ID, PCI_ANY_ID },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, skge_id_table);
static int skge_up(struct net_device *dev);
static int skge_down(struct net_device *dev);
static void skge_tx_clean(struct skge_port *skge);
static void skge_xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
static void skge_gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
static void genesis_get_stats(struct skge_port *skge, u64 *data);
static void yukon_get_stats(struct skge_port *skge, u64 *data);
static void yukon_init(struct skge_hw *hw, int port);
static void yukon_reset(struct skge_hw *hw, int port);
static void genesis_mac_init(struct skge_hw *hw, int port);
static void genesis_reset(struct skge_hw *hw, int port);
static const int txqaddr[] = { Q_XA1, Q_XA2 };
static const int rxqaddr[] = { Q_R1, Q_R2 };
static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F };
static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F };
/* Don't need to look at whole 16K.
 * last interesting register is descriptor poll timer.
 */
#define SKGE_REGS_LEN	(29*128)

static int skge_get_regs_len(struct net_device *dev)
{
	return SKGE_REGS_LEN;
}
/*
 * Returns copy of control register region
 * I/O region is divided into banks and certain regions are unreadable
 */
static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			  void *p)
{
	const struct skge_port *skge = netdev_priv(dev);
	unsigned long offs;
	const void __iomem *io = skge->hw->regs;
	static const unsigned long bankmap
		= (1<<0) | (1<<2) | (1<<8) | (1<<9)
		| (1<<12) | (1<<13) | (1<<14) | (1<<15) | (1<<16)
		| (1<<17) | (1<<20) | (1<<21) | (1<<22) | (1<<23)
		| (1<<24) | (1<<25) | (1<<26) | (1<<27) | (1<<28);

	regs->version = 1;
	for (offs = 0; offs < regs->len; offs += 128) {
		u32 len = min_t(u32, 128, regs->len - offs);

		if (bankmap & (1<<(offs/128)))
			memcpy_fromio(p + offs, io + offs, len);
		else
			memset(p + offs, 0, len);
	}
}
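/*
 * Worked example of the bank arithmetic above: each bank is 128 bytes,
 * so offset 0x400 falls in bank 0x400/128 = 8, which the bankmap marks
 * readable; offset 0x080 (bank 1) is not, and is returned zero-filled.
 */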
/* Wake on Lan only supported on Yukon chips with rev 1 or above */
static int wol_supported(const struct skge_hw *hw)
{
	return !((hw->chip_id == CHIP_ID_GENESIS ||
		  (hw->chip_id == CHIP_ID_YUKON && chip_rev(hw) == 0)));
}
static void skge_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct skge_port *skge = netdev_priv(dev);

	wol->supported = wol_supported(skge->hw) ? WAKE_MAGIC : 0;
	wol->wolopts = skge->wol ? WAKE_MAGIC : 0;
}
static int skge_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;

	if (wol->wolopts != WAKE_MAGIC && wol->wolopts != 0)
		return -EOPNOTSUPP;

	if (wol->wolopts == WAKE_MAGIC && !wol_supported(hw))
		return -EOPNOTSUPP;

	skge->wol = wol->wolopts == WAKE_MAGIC;

	if (skge->wol) {
		memcpy_toio(hw->regs + WOL_MAC_ADDR, dev->dev_addr, ETH_ALEN);

		skge_write16(hw, WOL_CTRL_STAT,
			     WOL_CTL_ENA_PME_ON_MAGIC_PKT |
			     WOL_CTL_ENA_MAGIC_PKT_UNIT);
	} else
		skge_write16(hw, WOL_CTRL_STAT, WOL_CTL_DEFAULT);

	return 0;
}
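/*
 * Only magic-packet wake is supported, so from userspace this maps to
 * e.g. "ethtool -s eth0 wol g" to enable and "ethtool -s eth0 wol d"
 * to disable (illustrative invocations; the device name will vary).
 */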
static int skge_get_settings(struct net_device *dev,
			     struct ethtool_cmd *ecmd)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;

	ecmd->transceiver = XCVR_INTERNAL;

	if (iscopper(hw)) {
		if (hw->chip_id == CHIP_ID_GENESIS)
			ecmd->supported = SUPPORTED_1000baseT_Full
				| SUPPORTED_1000baseT_Half
				| SUPPORTED_Autoneg | SUPPORTED_TP;
		else {
			ecmd->supported = SUPPORTED_10baseT_Half
				| SUPPORTED_10baseT_Full
				| SUPPORTED_100baseT_Half
				| SUPPORTED_100baseT_Full
				| SUPPORTED_1000baseT_Half
				| SUPPORTED_1000baseT_Full
				| SUPPORTED_Autoneg | SUPPORTED_TP;

			if (hw->chip_id == CHIP_ID_YUKON)
				ecmd->supported &= ~SUPPORTED_1000baseT_Half;

			else if (hw->chip_id == CHIP_ID_YUKON_FE)
				ecmd->supported &= ~(SUPPORTED_1000baseT_Half
						     | SUPPORTED_1000baseT_Full);
		}

		ecmd->port = PORT_TP;
		ecmd->phy_address = hw->phy_addr;
	} else {
		ecmd->supported = SUPPORTED_1000baseT_Full
			| SUPPORTED_FIBRE
			| SUPPORTED_Autoneg;

		ecmd->port = PORT_FIBRE;
	}

	ecmd->advertising = skge->advertising;
	ecmd->autoneg = skge->autoneg;
	ecmd->speed = skge->speed;
	ecmd->duplex = skge->duplex;
	return 0;
}
static u32 skge_modes(const struct skge_hw *hw)
{
	u32 modes = ADVERTISED_Autoneg
		| ADVERTISED_1000baseT_Full | ADVERTISED_1000baseT_Half
		| ADVERTISED_100baseT_Full | ADVERTISED_100baseT_Half
		| ADVERTISED_10baseT_Full | ADVERTISED_10baseT_Half;

	if (iscopper(hw)) {
		modes |= ADVERTISED_TP;
		switch (hw->chip_id) {
		case CHIP_ID_GENESIS:
			modes &= ~(ADVERTISED_100baseT_Full
				   | ADVERTISED_100baseT_Half
				   | ADVERTISED_10baseT_Full
				   | ADVERTISED_10baseT_Half);
			break;

		case CHIP_ID_YUKON:
			modes &= ~ADVERTISED_1000baseT_Half;
			break;

		case CHIP_ID_YUKON_FE:
			modes &= ~(ADVERTISED_1000baseT_Half
				   | ADVERTISED_1000baseT_Full);
			break;
		}
	} else {
		modes |= ADVERTISED_FIBRE;
		modes &= ~ADVERTISED_1000baseT_Half;
	}
	return modes;
}
static int skge_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct skge_port *skge = netdev_priv(dev);
	const struct skge_hw *hw = skge->hw;

	if (ecmd->autoneg == AUTONEG_ENABLE) {
		if (ecmd->advertising & ~skge_modes(hw))
			return -EINVAL;
	} else {
		switch (ecmd->speed) {
		case SPEED_1000:
			if (hw->chip_id == CHIP_ID_YUKON_FE)
				return -EINVAL;
			break;
		case SPEED_100:
		case SPEED_10:
			if (!iscopper(hw) || hw->chip_id == CHIP_ID_GENESIS)
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}
	}

	skge->autoneg = ecmd->autoneg;
	skge->speed = ecmd->speed;
	skge->duplex = ecmd->duplex;
	skge->advertising = ecmd->advertising;

	if (netif_running(dev)) {
		skge_down(dev);
		skge_up(dev);
	}
	return 0;
}
static void skge_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	struct skge_port *skge = netdev_priv(dev);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->fw_version, "N/A");
	strcpy(info->bus_info, pci_name(skge->hw->pdev));
}
static const struct skge_stat {
	char	name[ETH_GSTRING_LEN];
	u16	xmac_offset;
	u16	gma_offset;
} skge_stats[] = {
	{ "tx_bytes",		XM_TXO_OK_HI,	GM_TXO_OK_HI },
	{ "rx_bytes",		XM_RXO_OK_HI,	GM_RXO_OK_HI },

	{ "tx_broadcast",	XM_TXF_BC_OK,	GM_TXF_BC_OK },
	{ "rx_broadcast",	XM_RXF_BC_OK,	GM_RXF_BC_OK },
	{ "tx_multicast",	XM_TXF_MC_OK,	GM_TXF_MC_OK },
	{ "rx_multicast",	XM_RXF_MC_OK,	GM_RXF_MC_OK },
	{ "tx_unicast",		XM_TXF_UC_OK,	GM_TXF_UC_OK },
	{ "rx_unicast",		XM_RXF_UC_OK,	GM_RXF_UC_OK },
	{ "tx_mac_pause",	XM_TXF_MPAUSE,	GM_TXF_MPAUSE },
	{ "rx_mac_pause",	XM_RXF_MPAUSE,	GM_RXF_MPAUSE },

	{ "collisions",		XM_TXF_SNG_COL,	GM_TXF_SNG_COL },
	{ "multi_collisions",	XM_TXF_MUL_COL,	GM_TXF_MUL_COL },
	{ "aborted",		XM_TXF_ABO_COL,	GM_TXF_ABO_COL },
	{ "late_collision",	XM_TXF_LAT_COL,	GM_TXF_LAT_COL },
	{ "fifo_underrun",	XM_TXE_FIFO_UR,	GM_TXE_FIFO_UR },
	{ "fifo_overflow",	XM_RXE_FIFO_OV,	GM_RXE_FIFO_OV },

	{ "rx_toolong",		XM_RXF_LNG_ERR,	GM_RXF_LNG_ERR },
	{ "rx_jabber",		XM_RXF_JAB_PKT,	GM_RXF_JAB_PKT },
	{ "rx_runt",		XM_RXE_RUNT,	GM_RXE_FRAG },
	{ "rx_too_long",	XM_RXF_LNG_ERR,	GM_RXF_LNG_ERR },
	{ "rx_fcs_error",	XM_RXF_FCS_ERR,	GM_RXF_FCS_ERR },
};
static int skge_get_stats_count(struct net_device *dev)
{
	return ARRAY_SIZE(skge_stats);
}
static void skge_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *stats, u64 *data)
{
	struct skge_port *skge = netdev_priv(dev);

	if (skge->hw->chip_id == CHIP_ID_GENESIS)
		genesis_get_stats(skge, data);
	else
		yukon_get_stats(skge, data);
}
/* Use hardware MIB variables for critical path statistics and
 * transmit feedback not reported at interrupt.
 * Other errors are accounted for in interrupt handler.
 */
static struct net_device_stats *skge_get_stats(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	u64 data[ARRAY_SIZE(skge_stats)];

	if (skge->hw->chip_id == CHIP_ID_GENESIS)
		genesis_get_stats(skge, data);
	else
		yukon_get_stats(skge, data);

	skge->net_stats.tx_bytes = data[0];
	skge->net_stats.rx_bytes = data[1];
	skge->net_stats.tx_packets = data[2] + data[4] + data[6];
	skge->net_stats.rx_packets = data[3] + data[5] + data[7];
	skge->net_stats.multicast = data[5] + data[7];
	skge->net_stats.collisions = data[10];
	skge->net_stats.tx_aborted_errors = data[12];

	return &skge->net_stats;
}
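/*
 * The data[] indices above follow the order of the skge_stats[] table:
 * data[0]/data[1] are the tx/rx octet counters, and tx_packets sums the
 * broadcast, multicast and unicast frame counts (indices 2, 4 and 6).
 */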
static void skge_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(skge_stats); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       skge_stats[i].name, ETH_GSTRING_LEN);
		break;
	}
}
static void skge_get_ring_param(struct net_device *dev,
				struct ethtool_ringparam *p)
{
	struct skge_port *skge = netdev_priv(dev);

	p->rx_max_pending = MAX_RX_RING_SIZE;
	p->tx_max_pending = MAX_TX_RING_SIZE;
	p->rx_mini_max_pending = 0;
	p->rx_jumbo_max_pending = 0;

	p->rx_pending = skge->rx_ring.count;
	p->tx_pending = skge->tx_ring.count;
	p->rx_mini_pending = 0;
	p->rx_jumbo_pending = 0;
}
static int skge_set_ring_param(struct net_device *dev,
			       struct ethtool_ringparam *p)
{
	struct skge_port *skge = netdev_priv(dev);

	if (p->rx_pending == 0 || p->rx_pending > MAX_RX_RING_SIZE ||
	    p->tx_pending == 0 || p->tx_pending > MAX_TX_RING_SIZE)
		return -EINVAL;

	skge->rx_ring.count = p->rx_pending;
	skge->tx_ring.count = p->tx_pending;

	if (netif_running(dev)) {
		skge_down(dev);
		skge_up(dev);
	}

	return 0;
}
static u32 skge_get_msglevel(struct net_device *netdev)
{
	struct skge_port *skge = netdev_priv(netdev);
	return skge->msg_enable;
}

static void skge_set_msglevel(struct net_device *netdev, u32 value)
{
	struct skge_port *skge = netdev_priv(netdev);
	skge->msg_enable = value;
}
static int skge_nway_reset(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	int port = skge->port;

	if (skge->autoneg != AUTONEG_ENABLE || !netif_running(dev))
		return -EINVAL;

	spin_lock_bh(&hw->phy_lock);
	if (hw->chip_id == CHIP_ID_GENESIS) {
		genesis_reset(hw, port);
		genesis_mac_init(hw, port);
	} else {
		yukon_reset(hw, port);
		yukon_init(hw, port);
	}
	spin_unlock_bh(&hw->phy_lock);
	return 0;
}
static int skge_set_sg(struct net_device *dev, u32 data)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;

	if (hw->chip_id == CHIP_ID_GENESIS && data)
		return -EOPNOTSUPP;
	return ethtool_op_set_sg(dev, data);
}
static int skge_set_tx_csum(struct net_device *dev, u32 data)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;

	if (hw->chip_id == CHIP_ID_GENESIS && data)
		return -EOPNOTSUPP;

	return ethtool_op_set_tx_csum(dev, data);
}
static u32 skge_get_rx_csum(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);

	return skge->rx_csum;
}
/* Only Yukon supports checksum offload. */
static int skge_set_rx_csum(struct net_device *dev, u32 data)
{
	struct skge_port *skge = netdev_priv(dev);

	if (skge->hw->chip_id == CHIP_ID_GENESIS && data)
		return -EOPNOTSUPP;

	skge->rx_csum = data;

	return 0;
}
/* Only Yukon II supports TSO (not implemented yet) */
static int skge_set_tso(struct net_device *dev, u32 data)
{
	if (data)
		return -EOPNOTSUPP;
	return 0;
}
static void skge_get_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *ecmd)
{
	struct skge_port *skge = netdev_priv(dev);

	ecmd->tx_pause = (skge->flow_control == FLOW_MODE_LOC_SEND)
		|| (skge->flow_control == FLOW_MODE_SYMMETRIC);
	ecmd->rx_pause = (skge->flow_control == FLOW_MODE_REM_SEND)
		|| (skge->flow_control == FLOW_MODE_SYMMETRIC);

	ecmd->autoneg = skge->autoneg;
}
static int skge_set_pauseparam(struct net_device *dev,
			       struct ethtool_pauseparam *ecmd)
{
	struct skge_port *skge = netdev_priv(dev);

	skge->autoneg = ecmd->autoneg;
	if (ecmd->rx_pause && ecmd->tx_pause)
		skge->flow_control = FLOW_MODE_SYMMETRIC;
	else if (ecmd->rx_pause && !ecmd->tx_pause)
		skge->flow_control = FLOW_MODE_REM_SEND;
	else if (!ecmd->rx_pause && ecmd->tx_pause)
		skge->flow_control = FLOW_MODE_LOC_SEND;
	else
		skge->flow_control = FLOW_MODE_NONE;

	if (netif_running(dev)) {
		skge_down(dev);
		skge_up(dev);
	}
	return 0;
}
/* Chip internal frequency for clock calculations */
static inline u32 hwkhz(const struct skge_hw *hw)
{
	if (hw->chip_id == CHIP_ID_GENESIS)
		return 53215;	/* or:  53.125 MHz */
	else if (hw->chip_id == CHIP_ID_YUKON_EC)
		return 125000;	/* or: 125.000 MHz */
	else
		return 78215;	/* or:  78.125 MHz */
}
/* Chip hz to microseconds */
static inline u32 skge_clk2usec(const struct skge_hw *hw, u32 ticks)
{
	return (ticks * 1000) / hwkhz(hw);
}

/* Microseconds to chip hz */
static inline u32 skge_usecs2clk(const struct skge_hw *hw, u32 usec)
{
	return hwkhz(hw) * usec / 1000;
}
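/*
 * Example of the conversion: on Genesis, hwkhz() reports 53215 kHz, so
 * skge_usecs2clk(hw, 25) = 53215 * 25 / 1000 = 1330 timer ticks, and
 * skge_clk2usec(hw, 1330) maps back to (1330 * 1000) / 53215 ~= 24 usec
 * (integer division loses the remainder).
 */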
static int skge_get_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *ecmd)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	int port = skge->port;

	ecmd->rx_coalesce_usecs = 0;
	ecmd->tx_coalesce_usecs = 0;

	if (skge_read32(hw, B2_IRQM_CTRL) & TIM_START) {
		u32 delay = skge_clk2usec(hw, skge_read32(hw, B2_IRQM_INI));
		u32 msk = skge_read32(hw, B2_IRQM_MSK);

		if (msk & rxirqmask[port])
			ecmd->rx_coalesce_usecs = delay;
		if (msk & txirqmask[port])
			ecmd->tx_coalesce_usecs = delay;
	}

	return 0;
}
/* Note: interrupt timer is per board, but can turn on/off per port */
static int skge_set_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *ecmd)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	u32 msk = skge_read32(hw, B2_IRQM_MSK);
	u32 delay = 25;

	if (ecmd->rx_coalesce_usecs == 0)
		msk &= ~rxirqmask[port];
	else if (ecmd->rx_coalesce_usecs < 25 ||
		 ecmd->rx_coalesce_usecs > 33333)
		return -EINVAL;
	else {
		msk |= rxirqmask[port];
		delay = ecmd->rx_coalesce_usecs;
	}

	if (ecmd->tx_coalesce_usecs == 0)
		msk &= ~txirqmask[port];
	else if (ecmd->tx_coalesce_usecs < 25 ||
		 ecmd->tx_coalesce_usecs > 33333)
		return -EINVAL;
	else {
		msk |= txirqmask[port];
		delay = min(delay, ecmd->tx_coalesce_usecs);
	}

	skge_write32(hw, B2_IRQM_MSK, msk);
	if (msk == 0)
		skge_write32(hw, B2_IRQM_CTRL, TIM_STOP);
	else {
		skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, delay));
		skge_write32(hw, B2_IRQM_CTRL, TIM_START);
	}
	return 0;
}
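/*
 * Because the interrupt moderation timer is shared by the whole board,
 * requesting different rx/tx values results in the smaller delay being
 * programmed. A typical invocation (illustrative):
 *	ethtool -C eth0 rx-usecs 50 tx-usecs 50
 */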
static void skge_led_on(struct skge_hw *hw, int port)
{
	if (hw->chip_id == CHIP_ID_GENESIS) {
		skge_write8(hw, SKGEMAC_REG(port, LNK_LED_REG), LINKLED_ON);
		skge_write8(hw, B0_LED, LED_STAT_ON);

		skge_write8(hw, SKGEMAC_REG(port, RX_LED_TST), LED_T_ON);
		skge_write32(hw, SKGEMAC_REG(port, RX_LED_VAL), 100);
		skge_write8(hw, SKGEMAC_REG(port, RX_LED_CTRL), LED_START);

		switch (hw->phy_type) {
		case SK_PHY_BCOM:
			skge_xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL,
					  PHY_B_PEC_LED_ON);
			break;
		case SK_PHY_LONE:
			skge_xm_phy_write(hw, port, PHY_LONE_LED_CFG,
					  0x0800);
			break;
		default:
			skge_write8(hw, SKGEMAC_REG(port, TX_LED_TST), LED_T_ON);
			skge_write32(hw, SKGEMAC_REG(port, TX_LED_VAL), 100);
			skge_write8(hw, SKGEMAC_REG(port, TX_LED_CTRL), LED_START);
		}
	} else {
		skge_gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
		skge_gm_phy_write(hw, port, PHY_MARV_LED_OVER,
				  PHY_M_LED_MO_DUP(MO_LED_ON)  |
				  PHY_M_LED_MO_10(MO_LED_ON)   |
				  PHY_M_LED_MO_100(MO_LED_ON)  |
				  PHY_M_LED_MO_1000(MO_LED_ON) |
				  PHY_M_LED_MO_RX(MO_LED_ON));
	}
}
static void skge_led_off(struct skge_hw *hw, int port)
{
	if (hw->chip_id == CHIP_ID_GENESIS) {
		skge_write8(hw, SKGEMAC_REG(port, LNK_LED_REG), LINKLED_OFF);
		skge_write8(hw, B0_LED, LED_STAT_OFF);

		skge_write32(hw, SKGEMAC_REG(port, RX_LED_VAL), 0);
		skge_write8(hw, SKGEMAC_REG(port, RX_LED_CTRL), LED_T_OFF);

		switch (hw->phy_type) {
		case SK_PHY_BCOM:
			skge_xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL,
					  PHY_B_PEC_LED_OFF);
			break;
		case SK_PHY_LONE:
			skge_xm_phy_write(hw, port, PHY_LONE_LED_CFG,
					  0x0400);
			break;
		default:
			skge_write32(hw, SKGEMAC_REG(port, TX_LED_VAL), 0);
			skge_write8(hw, SKGEMAC_REG(port, TX_LED_CTRL), LED_T_OFF);
		}
	} else {
		skge_gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
		skge_gm_phy_write(hw, port, PHY_MARV_LED_OVER,
				  PHY_M_LED_MO_DUP(MO_LED_OFF)  |
				  PHY_M_LED_MO_10(MO_LED_OFF)   |
				  PHY_M_LED_MO_100(MO_LED_OFF)  |
				  PHY_M_LED_MO_1000(MO_LED_OFF) |
				  PHY_M_LED_MO_RX(MO_LED_OFF));
	}
}
static void skge_blink_timer(unsigned long data)
{
	struct skge_port *skge = (struct skge_port *) data;
	struct skge_hw *hw = skge->hw;
	unsigned long flags;

	spin_lock_irqsave(&hw->phy_lock, flags);
	if (skge->blink_on)
		skge_led_on(hw, skge->port);
	else
		skge_led_off(hw, skge->port);
	spin_unlock_irqrestore(&hw->phy_lock, flags);

	skge->blink_on = !skge->blink_on;
	mod_timer(&skge->led_blink, jiffies + BLINK_HZ);
}
/* blink LED's for finding board */
static int skge_phys_id(struct net_device *dev, u32 data)
{
	struct skge_port *skge = netdev_priv(dev);

	if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
		data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);

	/* start blinking */
	skge->blink_on = 1;
	mod_timer(&skge->led_blink, jiffies+1);

	msleep_interruptible(data * 1000);
	del_timer_sync(&skge->led_blink);

	skge_led_off(skge->hw, skge->port);

	return 0;
}
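/*
 * Triggered from userspace by e.g. "ethtool -p eth0 5" (illustrative),
 * which blinks the LEDs for 5 seconds using the timer above.
 */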
static struct ethtool_ops skge_ethtool_ops = {
	.get_settings	= skge_get_settings,
	.set_settings	= skge_set_settings,
	.get_drvinfo	= skge_get_drvinfo,
	.get_regs_len	= skge_get_regs_len,
	.get_regs	= skge_get_regs,
	.get_wol	= skge_get_wol,
	.set_wol	= skge_set_wol,
	.get_msglevel	= skge_get_msglevel,
	.set_msglevel	= skge_set_msglevel,
	.nway_reset	= skge_nway_reset,
	.get_link	= ethtool_op_get_link,
	.get_ringparam	= skge_get_ring_param,
	.set_ringparam	= skge_set_ring_param,
	.get_pauseparam	= skge_get_pauseparam,
	.set_pauseparam	= skge_set_pauseparam,
	.get_coalesce	= skge_get_coalesce,
	.set_coalesce	= skge_set_coalesce,
	.get_tso	= ethtool_op_get_tso,
	.set_tso	= skge_set_tso,
	.get_sg		= ethtool_op_get_sg,
	.set_sg		= skge_set_sg,
	.get_tx_csum	= ethtool_op_get_tx_csum,
	.set_tx_csum	= skge_set_tx_csum,
	.get_rx_csum	= skge_get_rx_csum,
	.set_rx_csum	= skge_set_rx_csum,
	.get_strings	= skge_get_strings,
	.phys_id	= skge_phys_id,
	.get_stats_count = skge_get_stats_count,
	.get_ethtool_stats = skge_get_ethtool_stats,
};
/*
 * Allocate ring elements and chain them together
 * One-to-one association of board descriptors with ring elements
 */
static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u64 base)
{
	struct skge_tx_desc *d;
	struct skge_element *e;
	int i;

	ring->start = kmalloc(sizeof(*e)*ring->count, GFP_KERNEL);
	if (!ring->start)
		return -ENOMEM;

	for (i = 0, e = ring->start, d = vaddr; i < ring->count; i++, e++, d++) {
		e->desc = d;
		if (i == ring->count - 1) {
			e->next = ring->start;
			d->next_offset = base;
		} else {
			e->next = e + 1;
			d->next_offset = base + (i+1) * sizeof(*d);
		}
	}
	ring->to_use = ring->to_clean = ring->start;

	return 0;
}
/* Setup buffer for receiving */
static inline int skge_rx_alloc(struct skge_port *skge,
				struct skge_element *e)
{
	unsigned long bufsize = skge->netdev->mtu + ETH_HLEN; /* VLAN? */
	struct skge_rx_desc *rd = e->desc;
	struct sk_buff *skb;
	u64 map;

	skb = dev_alloc_skb(bufsize + NET_IP_ALIGN);
	if (unlikely(!skb)) {
		printk(KERN_DEBUG PFX "%s: out of memory for receive\n",
		       skge->netdev->name);
		return -ENOMEM;
	}

	skb->dev = skge->netdev;
	skb_reserve(skb, NET_IP_ALIGN);

	map = pci_map_single(skge->hw->pdev, skb->data, bufsize,
			     PCI_DMA_FROMDEVICE);

	rd->dma_lo = map;
	rd->dma_hi = map >> 32;
	e->skb = skb;
	rd->csum1_start = ETH_HLEN;
	rd->csum2_start = ETH_HLEN;
	rd->csum1 = 0;
	rd->csum2 = 0;

	wmb();

	rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize;
	pci_unmap_addr_set(e, mapaddr, map);
	pci_unmap_len_set(e, maplen, bufsize);
	return 0;
}
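/*
 * Note on the descriptor fill above: the bus address is split into
 * dma_lo/dma_hi 32-bit halves; on a 32-bit platform the high half is
 * simply zero. The control word is written last, after the barrier,
 * so the chip never sees BMU_OWN before the rest of the descriptor
 * is valid.
 */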
/* Free all unused buffers in receive ring, assumes receiver stopped */
static void skge_rx_clean(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	struct skge_ring *ring = &skge->rx_ring;
	struct skge_element *e;

	for (e = ring->to_clean; e != ring->to_use; e = e->next) {
		struct skge_rx_desc *rd = e->desc;
		rd->control = 0;

		pci_unmap_single(hw->pdev,
				 pci_unmap_addr(e, mapaddr),
				 pci_unmap_len(e, maplen),
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb(e->skb);
		e->skb = NULL;
	}
	ring->to_clean = e;
}
/* Allocate buffers for receive ring
 * For receive: to_use   is refill location
 *              to_clean is next received frame.
 *
 * if (to_use == to_clean)
 *	then all frames in ring need buffers
 * if (to_use->next == to_clean)
 *	then all frames in ring have buffers
 */
static int skge_rx_fill(struct skge_port *skge)
{
	struct skge_ring *ring = &skge->rx_ring;
	struct skge_element *e;
	int ret = 0;

	for (e = ring->to_use; e->next != ring->to_clean; e = e->next) {
		if (skge_rx_alloc(skge, e)) {
			ret = 1;
			break;
		}
	}
	ring->to_use = e;

	return ret;
}
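/*
 * Example of the invariant: the loop stops at e->next == to_clean, so
 * one slot is always left unfilled; that gap is what distinguishes a
 * completely full ring from a completely empty one.
 */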
static void skge_link_up(struct skge_port *skge)
{
	netif_carrier_on(skge->netdev);
	if (skge->tx_avail > MAX_SKB_FRAGS + 1)
		netif_wake_queue(skge->netdev);

	if (netif_msg_link(skge))
		printk(KERN_INFO PFX
		       "%s: Link is up at %d Mbps, %s duplex, flow control %s\n",
		       skge->netdev->name, skge->speed,
		       skge->duplex == DUPLEX_FULL ? "full" : "half",
		       (skge->flow_control == FLOW_MODE_NONE) ? "none" :
		       (skge->flow_control == FLOW_MODE_LOC_SEND) ? "tx only" :
		       (skge->flow_control == FLOW_MODE_REM_SEND) ? "rx only" :
		       (skge->flow_control == FLOW_MODE_SYMMETRIC) ? "tx and rx" :
		       "unknown");
}
static void skge_link_down(struct skge_port *skge)
{
	netif_carrier_off(skge->netdev);
	netif_stop_queue(skge->netdev);

	if (netif_msg_link(skge))
		printk(KERN_INFO PFX "%s: Link is down.\n", skge->netdev->name);
}
static u16 skge_xm_phy_read(struct skge_hw *hw, int port, u16 reg)
{
	u16 v = 0;

	skge_xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
	v = skge_xm_read16(hw, port, XM_PHY_DATA);
	if (hw->phy_type != SK_PHY_XMAC) {
		int i;

		for (i = 0; i < PHY_RETRIES; i++) {
			udelay(1);
			if (skge_xm_read16(hw, port, XM_MMU_CMD)
			    & XM_MMU_PHY_RDY)
				goto ready;
		}

		printk(KERN_WARNING PFX "%s: phy read timed out\n",
		       hw->dev[port]->name);
		return 0;
	ready:
		v = skge_xm_read16(hw, port, XM_PHY_DATA);
	}

	return v;
}
static void skge_xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
{
	int i;

	skge_xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
	for (i = 0; i < PHY_RETRIES; i++) {
		if (!(skge_xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY))
			goto ready;
		cpu_relax();
	}
	printk(KERN_WARNING PFX "%s: phy write failed to come ready\n",
	       hw->dev[port]->name);
	return;

 ready:
	skge_xm_write16(hw, port, XM_PHY_DATA, val);
	for (i = 0; i < PHY_RETRIES; i++) {
		udelay(1);
		if (!(skge_xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY))
			return;
	}
	printk(KERN_WARNING PFX "%s: phy write timed out\n",
	       hw->dev[port]->name);
}
static void genesis_init(struct skge_hw *hw)
{
	/* set blink source counter */
	skge_write32(hw, B2_BSC_INI, (SK_BLK_DUR * SK_FACT_53) / 100);
	skge_write8(hw, B2_BSC_CTRL, BSC_START);

	/* configure mac arbiter */
	skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR);

	/* configure mac arbiter timeout values */
	skge_write8(hw, B3_MA_TOINI_RX1, SK_MAC_TO_53);
	skge_write8(hw, B3_MA_TOINI_RX2, SK_MAC_TO_53);
	skge_write8(hw, B3_MA_TOINI_TX1, SK_MAC_TO_53);
	skge_write8(hw, B3_MA_TOINI_TX2, SK_MAC_TO_53);

	skge_write8(hw, B3_MA_RCINI_RX1, 0);
	skge_write8(hw, B3_MA_RCINI_RX2, 0);
	skge_write8(hw, B3_MA_RCINI_TX1, 0);
	skge_write8(hw, B3_MA_RCINI_TX2, 0);

	/* configure packet arbiter timeout */
	skge_write16(hw, B3_PA_CTRL, PA_RST_CLR);
	skge_write16(hw, B3_PA_TOINI_RX1, SK_PKT_TO_MAX);
	skge_write16(hw, B3_PA_TOINI_TX1, SK_PKT_TO_MAX);
	skge_write16(hw, B3_PA_TOINI_RX2, SK_PKT_TO_MAX);
	skge_write16(hw, B3_PA_TOINI_TX2, SK_PKT_TO_MAX);
}
static void genesis_reset(struct skge_hw *hw, int port)
{
	int i;
	u64 zero = 0;

	/* reset the statistics module */
	skge_xm_write32(hw, port, XM_GP_PORT, XM_GP_RES_STAT);
	skge_xm_write16(hw, port, XM_IMSK, 0xffff);	/* disable XMAC IRQs */
	skge_xm_write32(hw, port, XM_MODE, 0);		/* clear Mode Reg */
	skge_xm_write16(hw, port, XM_TX_CMD, 0);	/* reset TX CMD Reg */
	skge_xm_write16(hw, port, XM_RX_CMD, 0);	/* reset RX CMD Reg */

	/* disable all PHY IRQs */
	if (hw->phy_type == SK_PHY_BCOM)
		skge_xm_write16(hw, port, PHY_BCOM_INT_MASK, 0xffff);

	skge_xm_outhash(hw, port, XM_HSM, (u8 *) &zero);
	for (i = 0; i < 15; i++)
		skge_xm_outaddr(hw, port, XM_EXM(i), (u8 *) &zero);
	skge_xm_outhash(hw, port, XM_SRC_CHK, (u8 *) &zero);
}
static void genesis_mac_init(struct skge_hw *hw, int port)
{
	struct skge_port *skge = netdev_priv(hw->dev[port]);
	int i;
	u32 r;
	u16 id1;
	u16 ctrl1, ctrl2, ctrl3, ctrl4, ctrl5;

	/* magic workaround patterns for Broadcom */
	static const struct {
		u16 reg;
		u16 val;
	} A1hack[] = {
		{ 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 },
		{ 0x17, 0x0013 }, { 0x15, 0x0404 }, { 0x17, 0x8006 },
		{ 0x15, 0x0132 }, { 0x17, 0x8006 }, { 0x15, 0x0232 },
		{ 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
	}, C0hack[] = {
		{ 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1204 },
		{ 0x17, 0x0013 }, { 0x15, 0x0A04 }, { 0x18, 0x0420 },
	};
	/* initialize Rx, Tx and Link LED */
	skge_write8(hw, SKGEMAC_REG(port, LNK_LED_REG), LINKLED_ON);
	skge_write8(hw, SKGEMAC_REG(port, LNK_LED_REG), LINKLED_LINKSYNC_ON);

	skge_write8(hw, SKGEMAC_REG(port, RX_LED_CTRL), LED_START);
	skge_write8(hw, SKGEMAC_REG(port, TX_LED_CTRL), LED_START);

	/* Unreset the XMAC. */
	skge_write16(hw, SKGEMAC_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST);

	/*
	 * Perform additional initialization for external PHYs,
	 * namely for the 1000baseTX cards that use the XMAC's
	 * GMII mode.
	 */
	spin_lock_bh(&hw->phy_lock);
	if (hw->phy_type != SK_PHY_XMAC) {
		/* Take PHY out of reset. */
		r = skge_read32(hw, B2_GP_IO);
		if (port == 0)
			r |= GP_DIR_0|GP_IO_0;
		else
			r |= GP_DIR_2|GP_IO_2;

		skge_write32(hw, B2_GP_IO, r);
		skge_read32(hw, B2_GP_IO);

		/* Enable GMII mode on the XMAC. */
		skge_xm_write16(hw, port, XM_HW_CFG, XM_HW_GMII_MD);

		id1 = skge_xm_phy_read(hw, port, PHY_XMAC_ID1);

		/* Optimize MDIO transfer by suppressing preamble. */
		skge_xm_write16(hw, port, XM_MMU_CMD,
				skge_xm_read16(hw, port, XM_MMU_CMD)
				| XM_MMU_NO_PRE);

		if (id1 == PHY_BCOM_ID1_C0) {
			/*
			 * Workaround BCOM Errata for the C0 type.
			 * Write magic patterns to reserved registers.
			 */
			for (i = 0; i < ARRAY_SIZE(C0hack); i++)
				skge_xm_phy_write(hw, port,
						  C0hack[i].reg, C0hack[i].val);

		} else if (id1 == PHY_BCOM_ID1_A1) {
			/*
			 * Workaround BCOM Errata for the A1 type.
			 * Write magic patterns to reserved registers.
			 */
			for (i = 0; i < ARRAY_SIZE(A1hack); i++)
				skge_xm_phy_write(hw, port,
						  A1hack[i].reg, A1hack[i].val);
		}

		/*
		 * Workaround BCOM Errata (#10523) for all BCom PHYs.
		 * Disable Power Management after reset.
		 */
		r = skge_xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL);
		skge_xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, r | PHY_B_AC_DIS_PM);
	}

	/* dummy read to clear pending interrupts */
	skge_xm_read16(hw, port, XM_ISRC);
	r = skge_xm_read32(hw, port, XM_MODE);
	skge_xm_write32(hw, port, XM_MODE, r|XM_MD_CSA);

	/* We don't need the FCS appended to the packet. */
	r = skge_xm_read16(hw, port, XM_RX_CMD);
	skge_xm_write16(hw, port, XM_RX_CMD, r | XM_RX_STRIP_FCS);

	/* We want short frames padded to 60 bytes. */
	r = skge_xm_read16(hw, port, XM_TX_CMD);
	skge_xm_write16(hw, port, XM_TX_CMD, r | XM_TX_AUTO_PAD);

	/*
	 * Enable the reception of all error frames. This is
	 * a necessary evil due to the design of the XMAC. The
	 * XMAC's receive FIFO is only 8K in size, however jumbo
	 * frames can be up to 9000 bytes in length. When bad
	 * frame filtering is enabled, the XMAC's RX FIFO operates
	 * in 'store and forward' mode. For this to work, the
	 * entire frame has to fit into the FIFO, but that means
	 * that jumbo frames larger than 8192 bytes will be
	 * truncated. Disabling all bad frame filtering causes
	 * the RX FIFO to operate in streaming mode, in which
	 * case the XMAC will start transferring frames out of the
	 * RX FIFO as soon as the FIFO threshold is reached.
	 */
	r = skge_xm_read32(hw, port, XM_MODE);
	skge_xm_write32(hw, port, XM_MODE,
			XM_MD_RX_CRCE|XM_MD_RX_LONG|XM_MD_RX_RUNT|
			XM_MD_RX_ERR|XM_MD_RX_IRLE);

	skge_xm_outaddr(hw, port, XM_SA, hw->dev[port]->dev_addr);
	skge_xm_outaddr(hw, port, XM_EXM(0), hw->dev[port]->dev_addr);

	/*
	 * Bump up the transmit threshold. This helps hold off transmit
	 * underruns when we're blasting traffic from both ports at once.
	 */
	skge_xm_write16(hw, port, XM_TX_THR, 512);
	/* Configure MAC arbiter */
	skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR);

	/* configure timeout values */
	skge_write8(hw, B3_MA_TOINI_RX1, 72);
	skge_write8(hw, B3_MA_TOINI_RX2, 72);
	skge_write8(hw, B3_MA_TOINI_TX1, 72);
	skge_write8(hw, B3_MA_TOINI_TX2, 72);

	skge_write8(hw, B3_MA_RCINI_RX1, 0);
	skge_write8(hw, B3_MA_RCINI_RX2, 0);
	skge_write8(hw, B3_MA_RCINI_TX1, 0);
	skge_write8(hw, B3_MA_RCINI_TX2, 0);

	/* Configure Rx MAC FIFO */
	skge_write8(hw, SKGEMAC_REG(port, RX_MFF_CTRL2), MFF_RST_CLR);
	skge_write16(hw, SKGEMAC_REG(port, RX_MFF_CTRL1), MFF_ENA_TIM_PAT);
	skge_write8(hw, SKGEMAC_REG(port, RX_MFF_CTRL2), MFF_ENA_OP_MD);

	/* Configure Tx MAC FIFO */
	skge_write8(hw, SKGEMAC_REG(port, TX_MFF_CTRL2), MFF_RST_CLR);
	skge_write16(hw, SKGEMAC_REG(port, TX_MFF_CTRL1), MFF_TX_CTRL_DEF);
	skge_write8(hw, SKGEMAC_REG(port, TX_MFF_CTRL2), MFF_ENA_OP_MD);

	if (hw->dev[port]->mtu > ETH_DATA_LEN) {
		/* Enable frame flushing if jumbo frames used */
		skge_write16(hw, SKGEMAC_REG(port, RX_MFF_CTRL1), MFF_ENA_FLUSH);
	} else {
		/* enable timeout timers if normal frames */
		skge_write16(hw, B3_PA_CTRL,
			     port == 0 ? PA_ENA_TO_TX1 : PA_ENA_TO_TX2);
	}

	r = skge_xm_read16(hw, port, XM_RX_CMD);
	if (hw->dev[port]->mtu > ETH_DATA_LEN)
		skge_xm_write16(hw, port, XM_RX_CMD, r | XM_RX_BIG_PK_OK);
	else
		skge_xm_write16(hw, port, XM_RX_CMD, r & ~(XM_RX_BIG_PK_OK));
	switch (hw->phy_type) {
	case SK_PHY_XMAC:
		if (skge->autoneg == AUTONEG_ENABLE) {
			ctrl1 = PHY_X_AN_FD | PHY_X_AN_HD;

			switch (skge->flow_control) {
			case FLOW_MODE_NONE:
				ctrl1 |= PHY_X_P_NO_PAUSE;
				break;
			case FLOW_MODE_LOC_SEND:
				ctrl1 |= PHY_X_P_ASYM_MD;
				break;
			case FLOW_MODE_SYMMETRIC:
				ctrl1 |= PHY_X_P_SYM_MD;
				break;
			case FLOW_MODE_REM_SEND:
				ctrl1 |= PHY_X_P_BOTH_MD;
				break;
			}

			skge_xm_phy_write(hw, port, PHY_XMAC_AUNE_ADV, ctrl1);
			ctrl2 = PHY_CT_ANE | PHY_CT_RE_CFG;
		} else {
			ctrl2 = 0;
			if (skge->duplex == DUPLEX_FULL)
				ctrl2 |= PHY_CT_DUP_MD;
		}

		skge_xm_phy_write(hw, port, PHY_XMAC_CTRL, ctrl2);
		break;

	case SK_PHY_BCOM:
		ctrl1 = PHY_CT_SP1000;
		ctrl2 = 0;
		ctrl3 = PHY_SEL_TYPE;
		ctrl4 = PHY_B_PEC_EN_LTR;
		ctrl5 = PHY_B_AC_TX_TST;

		if (skge->autoneg == AUTONEG_ENABLE) {
			/*
			 * Workaround BCOM Errata #1 for the C5 type.
			 * 1000Base-T Link Acquisition Failure in Slave Mode
			 * Set Repeater/DTE bit 10 of the 1000Base-T Control Register
			 */
			ctrl2 |= PHY_B_1000C_RD;
			if (skge->advertising & ADVERTISED_1000baseT_Half)
				ctrl2 |= PHY_B_1000C_AHD;
			if (skge->advertising & ADVERTISED_1000baseT_Full)
				ctrl2 |= PHY_B_1000C_AFD;

			/* Set Flow-control capabilities */
			switch (skge->flow_control) {
			case FLOW_MODE_NONE:
				ctrl3 |= PHY_B_P_NO_PAUSE;
				break;
			case FLOW_MODE_LOC_SEND:
				ctrl3 |= PHY_B_P_ASYM_MD;
				break;
			case FLOW_MODE_SYMMETRIC:
				ctrl3 |= PHY_B_P_SYM_MD;
				break;
			case FLOW_MODE_REM_SEND:
				ctrl3 |= PHY_B_P_BOTH_MD;
				break;
			}

			/* Restart Auto-negotiation */
			ctrl1 |= PHY_CT_ANE | PHY_CT_RE_CFG;
		} else {
			if (skge->duplex == DUPLEX_FULL)
				ctrl1 |= PHY_CT_DUP_MD;

			ctrl2 |= PHY_B_1000C_MSE;	/* set it to Slave */
		}

		skge_xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, ctrl2);
		skge_xm_phy_write(hw, port, PHY_BCOM_AUNE_ADV, ctrl3);

		if (skge->netdev->mtu > ETH_DATA_LEN) {
			ctrl4 |= PHY_B_PEC_HIGH_LA;
			ctrl5 |= PHY_B_AC_LONG_PACK;

			skge_xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, ctrl5);
		}

		skge_xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, ctrl4);
		skge_xm_phy_write(hw, port, PHY_BCOM_CTRL, ctrl1);
		break;
	}
	spin_unlock_bh(&hw->phy_lock);

	/* Clear MIB counters */
	skge_xm_write16(hw, port, XM_STAT_CMD,
			XM_SC_CLR_RXC | XM_SC_CLR_TXC);
	/* Clear two times according to Errata #3 */
	skge_xm_write16(hw, port, XM_STAT_CMD,
			XM_SC_CLR_RXC | XM_SC_CLR_TXC);

	/* Start polling for link status */
	mod_timer(&skge->link_check, jiffies + LINK_POLL_HZ);
}
static void genesis_stop(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;

	/* Clear Tx packet arbiter timeout IRQ */
	skge_write16(hw, B3_PA_CTRL,
		     port == 0 ? PA_CLR_TO_TX1 : PA_CLR_TO_TX2);

	/*
	 * If the transfer gets stuck at the MAC the STOP command will not
	 * terminate if we don't flush the XMAC's transmit FIFO !
	 */
	skge_xm_write32(hw, port, XM_MODE,
			skge_xm_read32(hw, port, XM_MODE)|XM_MD_FTF);

	skge_write16(hw, SKGEMAC_REG(port, TX_MFF_CTRL1), MFF_SET_MAC_RST);

	/* For external PHYs there must be special handling */
	if (hw->phy_type != SK_PHY_XMAC) {
		u32 reg = skge_read32(hw, B2_GP_IO);

		if (port == 0) {
			reg |= GP_DIR_0;
			reg &= ~GP_IO_0;
		} else {
			reg |= GP_DIR_2;
			reg &= ~GP_IO_2;
		}
		skge_write32(hw, B2_GP_IO, reg);
		skge_read32(hw, B2_GP_IO);
	}

	skge_xm_write16(hw, port, XM_MMU_CMD,
			skge_xm_read16(hw, port, XM_MMU_CMD)
			& ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX));

	skge_xm_read16(hw, port, XM_MMU_CMD);
}
static void genesis_get_stats(struct skge_port *skge, u64 *data)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	int i;
	unsigned long timeout = jiffies + HZ;

	skge_xm_write16(hw, port,
			XM_STAT_CMD, XM_SC_SNP_TXC | XM_SC_SNP_RXC);

	/* wait for update to complete */
	while (skge_xm_read16(hw, port, XM_STAT_CMD)
	       & (XM_SC_SNP_TXC | XM_SC_SNP_RXC)) {
		if (time_after(jiffies, timeout))
			break;
		udelay(10);
	}

	/* special case for 64 bit octet counter */
	data[0] = (u64) skge_xm_read32(hw, port, XM_TXO_OK_HI) << 32
		| skge_xm_read32(hw, port, XM_TXO_OK_LO);
	data[1] = (u64) skge_xm_read32(hw, port, XM_RXO_OK_HI) << 32
		| skge_xm_read32(hw, port, XM_RXO_OK_LO);

	for (i = 2; i < ARRAY_SIZE(skge_stats); i++)
		data[i] = skge_xm_read32(hw, port, skge_stats[i].xmac_offset);
}
static void genesis_mac_intr(struct skge_hw *hw, int port)
{
	struct skge_port *skge = netdev_priv(hw->dev[port]);
	u16 status = skge_xm_read16(hw, port, XM_ISRC);

	pr_debug("genesis_intr status %x\n", status);
	if (hw->phy_type == SK_PHY_XMAC) {
		/* Link down, start polling for state change */
		if (status & XM_IS_INP_ASS) {
			skge_xm_write16(hw, port, XM_IMSK,
					skge_xm_read16(hw, port, XM_IMSK) | XM_IS_INP_ASS);
			mod_timer(&skge->link_check, jiffies + LINK_POLL_HZ);
		}
		else if (status & XM_IS_AND)
			mod_timer(&skge->link_check, jiffies + LINK_POLL_HZ);
	}

	if (status & XM_IS_TXF_UR) {
		skge_xm_write32(hw, port, XM_MODE, XM_MD_FTF);
		++skge->net_stats.tx_fifo_errors;
	}
	if (status & XM_IS_RXF_OV) {
		skge_xm_write32(hw, port, XM_MODE, XM_MD_FRF);
		++skge->net_stats.rx_fifo_errors;
	}
}
static void skge_gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
{
	int i;

	skge_gma_write16(hw, port, GM_SMI_DATA, val);
	skge_gma_write16(hw, port, GM_SMI_CTRL,
			 GM_SMI_CT_PHY_AD(hw->phy_addr) | GM_SMI_CT_REG_AD(reg));
	for (i = 0; i < PHY_RETRIES; i++) {
		udelay(1);

		if (!(skge_gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_BUSY))
			break;
	}
}
static u16 skge_gm_phy_read(struct skge_hw *hw, int port, u16 reg)
{
	int i;

	skge_gma_write16(hw, port, GM_SMI_CTRL,
			 GM_SMI_CT_PHY_AD(hw->phy_addr)
			 | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);

	for (i = 0; i < PHY_RETRIES; i++) {
		udelay(1);
		if (skge_gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL)
			goto ready;
	}

	printk(KERN_WARNING PFX "%s: phy read timeout\n",
	       hw->dev[port]->name);
	return 0;
 ready:
	return skge_gma_read16(hw, port, GM_SMI_DATA);
}
static void genesis_link_down(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;

	pr_debug("genesis_link_down\n");

	skge_xm_write16(hw, port, XM_MMU_CMD,
			skge_xm_read16(hw, port, XM_MMU_CMD)
			& ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX));

	/* dummy read to ensure writing */
	(void) skge_xm_read16(hw, port, XM_MMU_CMD);

	skge_link_down(skge);
}
static void genesis_link_up(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	u16 cmd;
	u32 mode, msk = XM_DEF_MSK;

	pr_debug("genesis_link_up\n");
	cmd = skge_xm_read16(hw, port, XM_MMU_CMD);

	/*
	 * enabling pause frame reception is required for 1000BT
	 * because the XMAC is not reset if the link is going down
	 */
	if (skge->flow_control == FLOW_MODE_NONE ||
	    skge->flow_control == FLOW_MODE_LOC_SEND)
		cmd |= XM_MMU_IGN_PF;
	else
		/* Enable Pause Frame Reception */
		cmd &= ~XM_MMU_IGN_PF;

	skge_xm_write16(hw, port, XM_MMU_CMD, cmd);

	mode = skge_xm_read32(hw, port, XM_MODE);
	if (skge->flow_control == FLOW_MODE_SYMMETRIC ||
	    skge->flow_control == FLOW_MODE_LOC_SEND) {
		/*
		 * Configure Pause Frame Generation
		 * Use internal and external Pause Frame Generation.
		 * Sending pause frames is edge triggered.
		 * Send a Pause frame with the maximum pause time if
		 * internal or external FIFO full condition occurs.
		 * Send a zero pause time frame to re-start transmission.
		 */
		/* XM_PAUSE_DA = '010000C28001' (default) */
		/* XM_MAC_PTIME = 0xffff (maximum) */
		/* remember this value is defined in big endian (!) */
		skge_xm_write16(hw, port, XM_MAC_PTIME, 0xffff);

		mode |= XM_PAUSE_MODE;
		skge_write16(hw, SKGEMAC_REG(port, RX_MFF_CTRL1), MFF_ENA_PAUSE);
	} else {
		/*
		 * disabling pause frame generation is required for 1000BT
		 * because the XMAC is not reset if the link is going down
		 */
		/* Disable Pause Mode in Mode Register */
		mode &= ~XM_PAUSE_MODE;

		skge_write16(hw, SKGEMAC_REG(port, RX_MFF_CTRL1), MFF_DIS_PAUSE);
	}

	skge_xm_write32(hw, port, XM_MODE, mode);

	if (hw->phy_type != SK_PHY_XMAC)
		msk |= XM_IS_INP_ASS;	/* disable GP0 interrupt bit */

	skge_xm_write16(hw, port, XM_IMSK, msk);
	skge_xm_read16(hw, port, XM_ISRC);

	/* get MMU Command Reg. */
	cmd = skge_xm_read16(hw, port, XM_MMU_CMD);
	if (hw->phy_type != SK_PHY_XMAC && skge->duplex == DUPLEX_FULL)
		cmd |= XM_MMU_GMII_FD;

	if (hw->phy_type == SK_PHY_BCOM) {
		/*
		 * Workaround BCOM Errata (#10523) for all BCom Phys
		 * Enable Power Management after link up
		 */
		skge_xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL,
				  skge_xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL)
				  & ~PHY_B_AC_DIS_PM);
		skge_xm_phy_write(hw, port, PHY_BCOM_INT_MASK,
				  PHY_B_DEF_MSK);
	}

	/* enable Rx/Tx */
	skge_xm_write16(hw, port, XM_MMU_CMD,
			cmd | XM_MMU_ENA_RX | XM_MMU_ENA_TX);
	skge_link_up(skge);
}
static void genesis_bcom_intr(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	u16 stat = skge_xm_phy_read(hw, port, PHY_BCOM_INT_STAT);

	pr_debug("genesis_bcom intr stat=%x\n", stat);

	/* Workaround BCom Errata:
	 *	enable and disable loopback mode if "NO HCD" occurs.
	 */
	if (stat & PHY_B_IS_NO_HDCL) {
		u16 ctrl = skge_xm_phy_read(hw, port, PHY_BCOM_CTRL);
		skge_xm_phy_write(hw, port, PHY_BCOM_CTRL,
				  ctrl | PHY_CT_LOOP);
		skge_xm_phy_write(hw, port, PHY_BCOM_CTRL,
				  ctrl & ~PHY_CT_LOOP);
	}

	stat = skge_xm_phy_read(hw, port, PHY_BCOM_STAT);
	if (stat & (PHY_B_IS_AN_PR | PHY_B_IS_LST_CHANGE)) {
		u16 aux = skge_xm_phy_read(hw, port, PHY_BCOM_AUX_STAT);
		if ( !(aux & PHY_B_AS_LS) && netif_carrier_ok(skge->netdev))
			genesis_link_down(skge);

		else if (stat & PHY_B_IS_LST_CHANGE) {
			if (aux & PHY_B_AS_AN_C) {
				switch (aux & PHY_B_AS_AN_RES_MSK) {
				case PHY_B_RES_1000FD:
					skge->duplex = DUPLEX_FULL;
					break;
				case PHY_B_RES_1000HD:
					skge->duplex = DUPLEX_HALF;
					break;
				}

				switch (aux & PHY_B_AS_PAUSE_MSK) {
				case PHY_B_AS_PAUSE_MSK:
					skge->flow_control = FLOW_MODE_SYMMETRIC;
					break;
				case PHY_B_AS_PRR:
					skge->flow_control = FLOW_MODE_REM_SEND;
					break;
				case PHY_B_AS_PRT:
					skge->flow_control = FLOW_MODE_LOC_SEND;
					break;
				default:
					skge->flow_control = FLOW_MODE_NONE;
				}
				skge->speed = SPEED_1000;
			}
			genesis_link_up(skge);
		}
		else
			mod_timer(&skge->link_check, jiffies + LINK_POLL_HZ);
	}
}
/* Periodic poll of phy status to check for link transition */
static void skge_link_timer(unsigned long __arg)
{
	struct skge_port *skge = (struct skge_port *) __arg;
	struct skge_hw *hw = skge->hw;
	int port = skge->port;

	if (hw->chip_id != CHIP_ID_GENESIS || !netif_running(skge->netdev))
		return;

	spin_lock_bh(&hw->phy_lock);
	if (hw->phy_type == SK_PHY_BCOM)
		genesis_bcom_intr(skge);
	else {
		int i;
		for (i = 0; i < 3; i++)
			if (skge_xm_read16(hw, port, XM_ISRC) & XM_IS_INP_ASS)
				break;

		if (i == 3)
			mod_timer(&skge->link_check, jiffies + LINK_POLL_HZ);
		else
			genesis_link_up(skge);
	}
	spin_unlock_bh(&hw->phy_lock);
}
/* Marvell Phy Initialization */
static void yukon_init(struct skge_hw *hw, int port)
{
	struct skge_port *skge = netdev_priv(hw->dev[port]);
	u16 ctrl, ct1000, adv;
	u16 ledctrl, ledover;

	pr_debug("yukon_init\n");
	if (skge->autoneg == AUTONEG_ENABLE) {
		u16 ectrl = skge_gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);

		ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
			   PHY_M_EC_MAC_S_MSK);
		ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ);

		/* on PHY 88E1111 there is a change for downshift control */
		if (hw->chip_id == CHIP_ID_YUKON_EC)
			ectrl |= PHY_M_EC_M_DSC_2(0) | PHY_M_EC_DOWN_S_ENA;
		else
			ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1);

		skge_gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl);
	}

	ctrl = skge_gm_phy_read(hw, port, PHY_MARV_CTRL);
	if (skge->autoneg == AUTONEG_DISABLE)
		ctrl &= ~PHY_CT_ANE;

	ctrl |= PHY_CT_RESET;
	skge_gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);

	ctrl = 0;
	ct1000 = 0;
	adv = PHY_SEL_TYPE;
	if (skge->autoneg == AUTONEG_ENABLE) {
		if (iscopper(hw)) {
			if (skge->advertising & ADVERTISED_1000baseT_Full)
				ct1000 |= PHY_M_1000C_AFD;
			if (skge->advertising & ADVERTISED_1000baseT_Half)
				ct1000 |= PHY_M_1000C_AHD;
			if (skge->advertising & ADVERTISED_100baseT_Full)
				adv |= PHY_M_AN_100_FD;
			if (skge->advertising & ADVERTISED_100baseT_Half)
				adv |= PHY_M_AN_100_HD;
			if (skge->advertising & ADVERTISED_10baseT_Full)
				adv |= PHY_M_AN_10_FD;
			if (skge->advertising & ADVERTISED_10baseT_Half)
				adv |= PHY_M_AN_10_HD;

			/* Set Flow-control capabilities */
			switch (skge->flow_control) {
			case FLOW_MODE_NONE:
				adv |= PHY_B_P_NO_PAUSE;
				break;
			case FLOW_MODE_LOC_SEND:
				adv |= PHY_B_P_ASYM_MD;
				break;
			case FLOW_MODE_SYMMETRIC:
				adv |= PHY_B_P_SYM_MD;
				break;
			case FLOW_MODE_REM_SEND:
				adv |= PHY_B_P_BOTH_MD;
				break;
			}
		} else {	/* special defines for FIBER (88E1011S only) */
			adv |= PHY_M_AN_1000X_AHD | PHY_M_AN_1000X_AFD;

			/* Set Flow-control capabilities */
			switch (skge->flow_control) {
			case FLOW_MODE_NONE:
				adv |= PHY_M_P_NO_PAUSE_X;
				break;
			case FLOW_MODE_LOC_SEND:
				adv |= PHY_M_P_ASYM_MD_X;
				break;
			case FLOW_MODE_SYMMETRIC:
				adv |= PHY_M_P_SYM_MD_X;
				break;
			case FLOW_MODE_REM_SEND:
				adv |= PHY_M_P_BOTH_MD_X;
				break;
			}
		}
		/* Restart Auto-negotiation */
		ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG;
	} else {
		/* forced speed/duplex settings */
		ct1000 = PHY_M_1000C_MSE;

		if (skge->duplex == DUPLEX_FULL)
			ctrl |= PHY_CT_DUP_MD;

		switch (skge->speed) {
		case SPEED_1000:
			ctrl |= PHY_CT_SP1000;
			break;
		case SPEED_100:
			ctrl |= PHY_CT_SP100;
			break;
		}

		ctrl |= PHY_CT_RESET;
	}

	if (hw->chip_id != CHIP_ID_YUKON_FE)
		skge_gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000);

	skge_gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv);
	skge_gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);

	/* Setup Phy LED's */
	ledctrl = PHY_M_LED_PULS_DUR(PULS_170MS);
	ledover = 0;

	if (hw->chip_id == CHIP_ID_YUKON_FE) {
		/* on 88E3082 these bits are at 11..9 (shifted left) */
		ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) << 1;

		skge_gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR,
				  ((skge_gm_phy_read(hw, port, PHY_MARV_FE_LED_PAR)
				    & ~PHY_M_FELP_LED1_MSK)
				   | PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_ACT_BL)));
	} else {
		/* set Tx LED (LED_TX) to blink mode on Rx OR Tx activity */
		ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) | PHY_M_LEDC_TX_CTRL;

		/* turn off the Rx LED (LED_RX) */
		ledover |= PHY_M_LED_MO_RX(MO_LED_OFF);
	}

	/* disable blink mode (LED_DUPLEX) on collisions */
	ctrl |= PHY_M_LEDC_DP_CTRL;
	skge_gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);

	if (skge->autoneg == AUTONEG_DISABLE || skge->speed == SPEED_100) {
		/* turn on 100 Mbps LED (LED_LINK100) */
		ledover |= PHY_M_LED_MO_100(MO_LED_ON);
	}

	skge_gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);

	/* Enable phy interrupt on autonegotiation complete (or link up) */
	if (skge->autoneg == AUTONEG_ENABLE)
		skge_gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_COMPL);
	else
		skge_gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
}
static void yukon_reset(struct skge_hw *hw, int port)
{
	skge_gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);/* disable PHY IRQs */
	skge_gma_write16(hw, port, GM_MC_ADDR_H1, 0);	/* clear MC hash */
	skge_gma_write16(hw, port, GM_MC_ADDR_H2, 0);
	skge_gma_write16(hw, port, GM_MC_ADDR_H3, 0);
	skge_gma_write16(hw, port, GM_MC_ADDR_H4, 0);

	skge_gma_write16(hw, port, GM_RX_CTRL,
			 skge_gma_read16(hw, port, GM_RX_CTRL)
			 | GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
}
static void yukon_mac_init(struct skge_hw *hw, int port)
{
	struct skge_port *skge = netdev_priv(hw->dev[port]);
	int i;
	u32 reg;
	const u8 *addr = hw->dev[port]->dev_addr;

	/* WA code for COMA mode -- set PHY reset */
	if (hw->chip_id == CHIP_ID_YUKON_LITE &&
	    chip_rev(hw) == CHIP_REV_YU_LITE_A3)
		skge_write32(hw, B2_GP_IO,
			     (skge_read32(hw, B2_GP_IO) | GP_DIR_9 | GP_IO_9));

	skge_write32(hw, SKGEMAC_REG(port, GPHY_CTRL), GPC_RST_SET);
	skge_write32(hw, SKGEMAC_REG(port, GMAC_CTRL), GMC_RST_SET);

	/* WA code for COMA mode -- clear PHY reset */
	if (hw->chip_id == CHIP_ID_YUKON_LITE &&
	    chip_rev(hw) == CHIP_REV_YU_LITE_A3)
		skge_write32(hw, B2_GP_IO,
			     (skge_read32(hw, B2_GP_IO) | GP_DIR_9)
			     & ~GP_IO_9);

	/* Set hardware config mode */
	reg = GPC_INT_POL_HI | GPC_DIS_FC | GPC_DIS_SLEEP |
		GPC_ENA_XC | GPC_ANEG_ADV_ALL_M | GPC_ENA_PAUSE;
	reg |= iscopper(hw) ? GPC_HWCFG_GMII_COP : GPC_HWCFG_GMII_FIB;

	/* Clear GMC reset */
	skge_write32(hw, SKGEMAC_REG(port, GPHY_CTRL), reg | GPC_RST_SET);
	skge_write32(hw, SKGEMAC_REG(port, GPHY_CTRL), reg | GPC_RST_CLR);
	skge_write32(hw, SKGEMAC_REG(port, GMAC_CTRL), GMC_PAUSE_ON | GMC_RST_CLR);
	if (skge->autoneg == AUTONEG_DISABLE) {
		reg = GM_GPCR_AU_ALL_DIS;
		skge_gma_write16(hw, port, GM_GP_CTRL,
				 skge_gma_read16(hw, port, GM_GP_CTRL) | reg);

		switch (skge->speed) {
		case SPEED_1000:
			reg |= GM_GPCR_SPEED_1000;
			/* fallthrough */
		case SPEED_100:
			reg |= GM_GPCR_SPEED_100;
		}

		if (skge->duplex == DUPLEX_FULL)
			reg |= GM_GPCR_DUP_FULL;
	} else
		reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL;
	switch (skge->flow_control) {
	case FLOW_MODE_NONE:
		skge_write32(hw, SKGEMAC_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
		reg |= GM_GPCR_FC_TX_DIS | GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
		break;
	case FLOW_MODE_LOC_SEND:
		/* disable Rx flow-control */
		reg |= GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
	}

	skge_gma_write16(hw, port, GM_GP_CTRL, reg);
	skge_read16(hw, GMAC_IRQ_SRC);

	spin_lock_bh(&hw->phy_lock);
	yukon_init(hw, port);
	spin_unlock_bh(&hw->phy_lock);
	reg = skge_gma_read16(hw, port, GM_PHY_ADDR);
	skge_gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR);

	for (i = 0; i < GM_MIB_CNT_SIZE; i++)
		skge_gma_read16(hw, port, GM_MIB_CNT_BASE + 8*i);
	skge_gma_write16(hw, port, GM_PHY_ADDR, reg);

	/* transmit control */
	skge_gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));

	/* receive control reg: unicast + multicast + no FCS */
	skge_gma_write16(hw, port, GM_RX_CTRL,
			 GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA);

	/* transmit flow control */
	skge_gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff);

	/* transmit parameter */
	skge_gma_write16(hw, port, GM_TX_PARAM,
			 TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) |
			 TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
			 TX_IPG_JAM_DATA(TX_IPG_JAM_DEF));

	/* serial mode register */
	reg = GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
	if (hw->dev[port]->mtu > 1500)
		reg |= GM_SMOD_JUMBO_ENA;

	skge_gma_write16(hw, port, GM_SERIAL_MODE, reg);

	/* physical address: used for pause frames */
	skge_gm_set_addr(hw, port, GM_SRC_ADDR_1L, addr);
	/* virtual address for data */
	skge_gm_set_addr(hw, port, GM_SRC_ADDR_2L, addr);

	/* enable interrupt mask for counter overflows */
	skge_gma_write16(hw, port, GM_TX_IRQ_MSK, 0);
	skge_gma_write16(hw, port, GM_RX_IRQ_MSK, 0);
	skge_gma_write16(hw, port, GM_TR_IRQ_MSK, 0);

	/* Initialize Mac Fifo */

	/* Configure Rx MAC FIFO */
	skge_write16(hw, SKGEMAC_REG(port, RX_GMF_FL_MSK), RX_FF_FL_DEF_MSK);
	reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
	if (hw->chip_id == CHIP_ID_YUKON_LITE &&
	    chip_rev(hw) == CHIP_REV_YU_LITE_A3)
		reg &= ~GMF_RX_F_FL_ON;
	skge_write8(hw, SKGEMAC_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
	skge_write16(hw, SKGEMAC_REG(port, RX_GMF_CTRL_T), reg);
	skge_write16(hw, SKGEMAC_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF);

	/* Configure Tx MAC FIFO */
	skge_write8(hw, SKGEMAC_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
	skge_write16(hw, SKGEMAC_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
}
static void yukon_stop(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;

	if (hw->chip_id == CHIP_ID_YUKON_LITE &&
	    chip_rev(hw) == CHIP_REV_YU_LITE_A3) {
		skge_write32(hw, B2_GP_IO,
			     skge_read32(hw, B2_GP_IO) | GP_DIR_9 | GP_IO_9);
	}

	skge_gma_write16(hw, port, GM_GP_CTRL,
			 skge_gma_read16(hw, port, GM_GP_CTRL)
			 & ~(GM_GPCR_RX_ENA|GM_GPCR_TX_ENA));
	skge_gma_read16(hw, port, GM_GP_CTRL);

	/* set GPHY Control reset */
	skge_gma_write32(hw, port, GPHY_CTRL, GPC_RST_SET);
	skge_gma_write32(hw, port, GMAC_CTRL, GMC_RST_SET);
}
static void yukon_get_stats(struct skge_port *skge, u64 *data)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	int i;

	data[0] = (u64) skge_gma_read32(hw, port, GM_TXO_OK_HI) << 32
		| skge_gma_read32(hw, port, GM_TXO_OK_LO);
	data[1] = (u64) skge_gma_read32(hw, port, GM_RXO_OK_HI) << 32
		| skge_gma_read32(hw, port, GM_RXO_OK_LO);

	for (i = 2; i < ARRAY_SIZE(skge_stats); i++)
		data[i] = skge_gma_read32(hw, port,
					  skge_stats[i].gma_offset);
}
static void yukon_mac_intr(struct skge_hw *hw, int port)
{
	struct skge_port *skge = netdev_priv(hw->dev[port]);
	u8 status = skge_read8(hw, SKGEMAC_REG(port, GMAC_IRQ_SRC));

	pr_debug("yukon_intr status %x\n", status);
	if (status & GM_IS_RX_FF_OR) {
		++skge->net_stats.rx_fifo_errors;
		skge_gma_write8(hw, port, RX_GMF_CTRL_T, GMF_CLI_RX_FO);
	}
	if (status & GM_IS_TX_FF_UR) {
		++skge->net_stats.tx_fifo_errors;
		skge_gma_write8(hw, port, TX_GMF_CTRL_T, GMF_CLI_TX_FU);
	}
}
static u16 yukon_speed(const struct skge_hw *hw, u16 aux)
{
	if (hw->chip_id == CHIP_ID_YUKON_FE)
		return (aux & PHY_M_PS_SPEED_100) ? SPEED_100 : SPEED_10;

	switch (aux & PHY_M_PS_SPEED_MSK) {
	case PHY_M_PS_SPEED_1000:
		return SPEED_1000;
	case PHY_M_PS_SPEED_100:
		return SPEED_100;
	default:
		return SPEED_10;
	}
}
static void yukon_link_up(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	u16 reg;

	pr_debug("yukon_link_up\n");

	/* Enable Transmit FIFO Underrun */
	skge_write8(hw, GMAC_IRQ_MSK, GMAC_DEF_MSK);

	reg = skge_gma_read16(hw, port, GM_GP_CTRL);
	if (skge->duplex == DUPLEX_FULL || skge->autoneg == AUTONEG_ENABLE)
		reg |= GM_GPCR_DUP_FULL;

	/* enable Rx/Tx */
	reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
	skge_gma_write16(hw, port, GM_GP_CTRL, reg);

	skge_gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
	skge_link_up(skge);
}
static void yukon_link_down(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;

	pr_debug("yukon_link_down\n");
	skge_gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);
	skge_gm_phy_write(hw, port, GM_GP_CTRL,
			  skge_gm_phy_read(hw, port, GM_GP_CTRL)
			  & ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA));

	if (hw->chip_id != CHIP_ID_YUKON_FE &&
	    skge->flow_control == FLOW_MODE_REM_SEND) {
		/* restore Asymmetric Pause bit */
		skge_gm_phy_write(hw, port, PHY_MARV_AUNE_ADV,
				  skge_gm_phy_read(hw, port,
						   PHY_MARV_AUNE_ADV)
				  | PHY_M_AN_ASP);
	}

	yukon_reset(hw, port);
	skge_link_down(skge);

	yukon_init(hw, port);
}
static void yukon_phy_intr(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	const char *reason = NULL;
	u16 istatus, phystat;

	istatus = skge_gm_phy_read(hw, port, PHY_MARV_INT_STAT);
	phystat = skge_gm_phy_read(hw, port, PHY_MARV_PHY_STAT);
	pr_debug("yukon phy intr istat=%x phy_stat=%x\n", istatus, phystat);

	if (istatus & PHY_M_IS_AN_COMPL) {
		if (skge_gm_phy_read(hw, port, PHY_MARV_AUNE_LP)
		    & PHY_AN_RF) {
			reason = "remote fault";
			goto failed;
		}

		if (!(hw->chip_id == CHIP_ID_YUKON_FE || hw->chip_id == CHIP_ID_YUKON_EC)
		    && (skge_gm_phy_read(hw, port, PHY_MARV_1000T_STAT)
			& PHY_B_1000S_MSF)) {
			reason = "master/slave fault";
			goto failed;
		}

		if (!(phystat & PHY_M_PS_SPDUP_RES)) {
			reason = "speed/duplex";
			goto failed;
		}

		skge->duplex = (phystat & PHY_M_PS_FULL_DUP)
			? DUPLEX_FULL : DUPLEX_HALF;
		skge->speed = yukon_speed(hw, phystat);

		/* Tx & Rx Pause Enabled bits are at 9..8 */
		if (hw->chip_id == CHIP_ID_YUKON_XL)
			phystat >>= 6;

		/* We are using IEEE 802.3z/D5.0 Table 37-4 */
		switch (phystat & PHY_M_PS_PAUSE_MSK) {
		case PHY_M_PS_PAUSE_MSK:
			skge->flow_control = FLOW_MODE_SYMMETRIC;
			break;
		case PHY_M_PS_RX_P_EN:
			skge->flow_control = FLOW_MODE_REM_SEND;
			break;
		case PHY_M_PS_TX_P_EN:
			skge->flow_control = FLOW_MODE_LOC_SEND;
			break;
		default:
			skge->flow_control = FLOW_MODE_NONE;
		}

		if (skge->flow_control == FLOW_MODE_NONE ||
		    (skge->speed < SPEED_1000 && skge->duplex == DUPLEX_HALF))
			skge_write8(hw, SKGEMAC_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
		else
			skge_write8(hw, SKGEMAC_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
		yukon_link_up(skge);
		return;
	}

	if (istatus & PHY_M_IS_LSP_CHANGE)
		skge->speed = yukon_speed(hw, phystat);

	if (istatus & PHY_M_IS_DUP_CHANGE)
		skge->duplex = (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
	if (istatus & PHY_M_IS_LST_CHANGE) {
		if (phystat & PHY_M_PS_LINK_UP)
			yukon_link_up(skge);
		else
			yukon_link_down(skge);
	}
	return;

failed:
	printk(KERN_ERR PFX "%s: autonegotiation failed (%s)\n",
	       skge->netdev->name, reason);

	/* XXX restart autonegotiation? */
}
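/*
 * Flow-control resolution above, restated (this summarizes the switch,
 * it is not extra hardware documentation).  The PHY reports the already
 * resolved pause bits in the IEEE 802.3z/D5.0 Table 37-4 sense:
 *
 *	Rx and Tx pause resolved -> FLOW_MODE_SYMMETRIC
 *	Rx pause only            -> FLOW_MODE_REM_SEND
 *	Tx pause only            -> FLOW_MODE_LOC_SEND
 *	neither                  -> FLOW_MODE_NONE
 *
 * The MAC pause machinery (GMC_PAUSE_ON) is then left enabled unless no
 * pause mode was resolved, or the link came up sub-gigabit half duplex,
 * where pause frames are not usable.
 */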
static void skge_ramset(struct skge_hw *hw, u16 q, u32 start, size_t len)
{
	u32 end;

	end = start + len - 1;

	skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
	skge_write32(hw, RB_ADDR(q, RB_START), start);
	skge_write32(hw, RB_ADDR(q, RB_WP), start);
	skge_write32(hw, RB_ADDR(q, RB_RP), start);
	skge_write32(hw, RB_ADDR(q, RB_END), end);

	if (q == Q_R1 || q == Q_R2) {
		/* Set thresholds on receive queues */
		skge_write32(hw, RB_ADDR(q, RB_RX_UTPP),
			     start + (2*len)/3);
		skge_write32(hw, RB_ADDR(q, RB_RX_LTPP),
			     start + (len/3));
	} else {
		/* Enable store & forward on Tx queues because
		 * Tx FIFO is only 4K on Genesis and 1K on Yukon
		 */
		skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD);
	}

	skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD);
}
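/*
 * Worked example of the receive thresholds (the 2/3 and 1/3 split comes
 * from the code above; the absolute numbers are just arithmetic): for a
 * queue given start = 0 and len = 0x10000, RB_RX_UTPP is written with
 * (2*0x10000)/3 = 0xaaaa and RB_RX_LTPP with 0x10000/3 = 0x5555, which
 * suggests flow control engages around two-thirds RAM-buffer occupancy
 * and releases around one-third.
 */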
/* Setup Bus Memory Interface */
static void skge_qset(struct skge_port *skge, u16 q,
		      const struct skge_element *e)
{
	struct skge_hw *hw = skge->hw;
	u32 watermark = 0x600;
	u64 base = skge->dma + (e->desc - skge->mem);

	/* optimization to reduce window on 32bit/33mhz */
	if ((skge_read16(hw, B0_CTST) & (CS_BUS_CLOCK | CS_BUS_SLOT_SZ)) == 0)
		watermark /= 2;

	skge_write32(hw, Q_ADDR(q, Q_CSR), CSR_CLR_RESET);
	skge_write32(hw, Q_ADDR(q, Q_F), watermark);
	skge_write32(hw, Q_ADDR(q, Q_DA_H), (u32)(base >> 32));
	skge_write32(hw, Q_ADDR(q, Q_DA_L), (u32)base);
}
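/*
 * On the watermark halving above (an interpretation of the B0_CTST bits,
 * not documented here): when the chip reports neither a 64-bit slot
 * (CS_BUS_SLOT_SZ) nor a fast bus clock (CS_BUS_CLOCK), the adapter is
 * presumably on a plain 32-bit/33 MHz PCI bus, so the FIFO watermark is
 * dropped from 0x600 to 0x300 to shorten each burst window.
 */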
static int skge_up(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	u32 chunk, ram_addr;
	size_t rx_size, tx_size;
	int err = -ENOMEM;

	if (netif_msg_ifup(skge))
		printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);

	rx_size = skge->rx_ring.count * sizeof(struct skge_rx_desc);
	tx_size = skge->tx_ring.count * sizeof(struct skge_tx_desc);
	skge->mem_size = tx_size + rx_size;
	skge->mem = pci_alloc_consistent(hw->pdev, skge->mem_size, &skge->dma);
	if (!skge->mem)
		return -ENOMEM;

	memset(skge->mem, 0, skge->mem_size);

	if ((err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma)))
		goto free_pci_mem;

	if (skge_rx_fill(skge))
		goto free_rx_ring;

	if ((err = skge_ring_alloc(&skge->tx_ring, skge->mem + rx_size,
				   skge->dma + rx_size)))
		goto free_rx_ring;

	skge->tx_avail = skge->tx_ring.count - 1;

	/* Initialize MAC */
	if (hw->chip_id == CHIP_ID_GENESIS)
		genesis_mac_init(hw, port);
	else
		yukon_mac_init(hw, port);

	/* Configure RAMbuffers */
	chunk = hw->ram_size / (isdualport(hw) ? 4 : 2);
	ram_addr = hw->ram_offset + 2 * chunk * port;

	skge_ramset(hw, rxqaddr[port], ram_addr, chunk);
	skge_qset(skge, rxqaddr[port], skge->rx_ring.to_clean);

	BUG_ON(skge->tx_ring.to_use != skge->tx_ring.to_clean);
	skge_ramset(hw, txqaddr[port], ram_addr + chunk, chunk);
	skge_qset(skge, txqaddr[port], skge->tx_ring.to_use);

	/* Start receiver BMU */
	wmb();
	skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F);

	pr_debug("skge_up completed\n");
	return 0;

free_rx_ring:
	skge_rx_clean(skge);
	kfree(skge->rx_ring.start);
free_pci_mem:
	pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma);

	return err;
}
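/*
 * RAM buffer layout implied by the chunk arithmetic above, shown for a
 * hypothetical 1 MB packet RAM on a dual-port board (chunk = 256 KB):
 *
 *	port 0: rx [0x00000..0x3ffff]  tx [0x40000..0x7ffff]
 *	port 1: rx [0x80000..0xbffff]  tx [0xc0000..0xfffff]
 *
 * A single-port board instead splits the whole RAM in half, rx then tx.
 * The sizes are illustrative; only the halving/quartering comes from the
 * code.
 */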
static int skge_down(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	int port = skge->port;

	if (netif_msg_ifdown(skge))
		printk(KERN_INFO PFX "%s: disabling interface\n", dev->name);

	netif_stop_queue(dev);

	del_timer_sync(&skge->led_blink);
	del_timer_sync(&skge->link_check);

	/* Stop transmitter */
	skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP);
	skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
		     RB_RST_SET|RB_DIS_OP_MD);

	if (hw->chip_id == CHIP_ID_GENESIS)
		genesis_stop(skge);
	else
		yukon_stop(skge);

	/* Disable Force Sync bit and Enable Alloc bit */
	skge_write8(hw, SKGEMAC_REG(port, TXA_CTRL),
		    TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);

	/* Stop Interval Timer and Limit Counter of Tx Arbiter */
	skge_write32(hw, SKGEMAC_REG(port, TXA_ITI_INI), 0L);
	skge_write32(hw, SKGEMAC_REG(port, TXA_LIM_INI), 0L);

	/* Reset PCI FIFO */
	skge_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_SET_RESET);
	skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET);

	/* Reset the RAM Buffer async Tx queue */
	skge_write8(hw, RB_ADDR(port == 0 ? Q_XA1 : Q_XA2, RB_CTRL), RB_RST_SET);

	/* Stop receiver */
	skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_STOP);
	skge_write32(hw, RB_ADDR(port ? Q_R2 : Q_R1, RB_CTRL),
		     RB_RST_SET|RB_DIS_OP_MD);
	skge_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_SET_RESET);

	if (hw->chip_id == CHIP_ID_GENESIS) {
		skge_write8(hw, SKGEMAC_REG(port, TX_MFF_CTRL2), MFF_RST_SET);
		skge_write8(hw, SKGEMAC_REG(port, RX_MFF_CTRL2), MFF_RST_SET);
		skge_write8(hw, SKGEMAC_REG(port, TX_LED_CTRL), LED_STOP);
		skge_write8(hw, SKGEMAC_REG(port, RX_LED_CTRL), LED_STOP);
	} else {
		skge_write8(hw, SKGEMAC_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
		skge_write8(hw, SKGEMAC_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
	}

	/* turn off LEDs */
	skge_write16(hw, B0_LED, LED_STAT_OFF);

	skge_tx_clean(skge);
	skge_rx_clean(skge);

	kfree(skge->rx_ring.start);
	kfree(skge->tx_ring.start);
	pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma);
	return 0;
}
static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	struct skge_ring *ring = &skge->tx_ring;
	struct skge_element *e;
	struct skge_tx_desc *td;
	u32 control, len;
	u64 map;
	int i;
	unsigned long flags;

	skb = skb_padto(skb, ETH_ZLEN);
	if (!skb)
		return NETDEV_TX_OK;

	local_irq_save(flags);
	if (!spin_trylock(&skge->tx_lock)) {
		/* Collision - tell upper layer to requeue */
		local_irq_restore(flags);
		return NETDEV_TX_LOCKED;
	}

	if (unlikely(skge->tx_avail < skb_shinfo(skb)->nr_frags + 1)) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&skge->tx_lock, flags);

		printk(KERN_WARNING PFX "%s: ring full when queue awake!\n",
		       dev->name);
		return NETDEV_TX_BUSY;
	}

	e = ring->to_use;
	td = e->desc;
	e->skb = skb;
	len = skb_headlen(skb);
	map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
	pci_unmap_addr_set(e, mapaddr, map);
	pci_unmap_len_set(e, maplen, len);

	td->dma_lo = map;
	td->dma_hi = map >> 32;

	if (skb->ip_summed == CHECKSUM_HW) {
		const struct iphdr *ip
			= (const struct iphdr *) (skb->data + ETH_HLEN);
		int offset = skb->h.raw - skb->data;

		/* This seems backwards, but it is what the sk98lin
		 * driver does.  Looks like hardware is wrong?
		 */
		if (ip->protocol == IPPROTO_UDP
		    && chip_rev(hw) == 0 && hw->chip_id == CHIP_ID_YUKON)
			control = BMU_TCP_CHECK;
		else
			control = BMU_UDP_CHECK;

		td->csum_offs = 0;
		td->csum_start = offset;
		td->csum_write = offset + skb->csum;
	} else
		control = BMU_CHECK;

	if (!skb_shinfo(skb)->nr_frags) /* single buffer i.e. no fragments */
		control |= BMU_EOF | BMU_IRQ_EOF;
	else {
		struct skge_tx_desc *tf = td;

		control |= BMU_STFWD;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			map = pci_map_page(hw->pdev, frag->page, frag->page_offset,
					   frag->size, PCI_DMA_TODEVICE);

			e = e->next;
			e->skb = NULL;
			tf = e->desc;
			tf->dma_lo = map;
			tf->dma_hi = (u64) map >> 32;
			pci_unmap_addr_set(e, mapaddr, map);
			pci_unmap_len_set(e, maplen, frag->size);

			tf->control = BMU_OWN | BMU_SW | control | frag->size;
		}
		tf->control |= BMU_EOF | BMU_IRQ_EOF;
	}

	/* Make sure all the descriptors written */
	wmb();
	td->control = BMU_OWN | BMU_SW | BMU_STF | control | len;
	wmb();

	skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START);

	if (netif_msg_tx_queued(skge))
		printk(KERN_DEBUG "%s: tx queued, slot %td, len %d\n",
		       dev->name, e - ring->start, skb->len);

	ring->to_use = e->next;
	skge->tx_avail -= skb_shinfo(skb)->nr_frags + 1;
	if (skge->tx_avail <= MAX_SKB_FRAGS + 1) {
		pr_debug("%s: transmit queue full\n", dev->name);
		netif_stop_queue(dev);
	}

	dev->trans_start = jiffies;
	spin_unlock_irqrestore(&skge->tx_lock, flags);

	return NETDEV_TX_OK;
}
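/*
 * Descriptor handoff protocol used above, summarized (this restates the
 * code, not extra hardware documentation): every descriptor in the chain
 * is filled in first, then the two wmb() calls order the writes so the
 * head descriptor's BMU_OWN bit is the last thing the BMU can observe
 * before the doorbell:
 *
 *	fill fragment descriptors (each with BMU_OWN already set)
 *	wmb();			// frags visible before head ownership flips
 *	td->control = BMU_OWN | BMU_SW | BMU_STF | control | len;
 *	wmb();			// ownership visible before the doorbell
 *	skge_write8(hw, Q_ADDR(..., Q_CSR), CSR_START);
 *
 * If the BMU ever saw BMU_OWN on the head while a fragment descriptor
 * was still stale, it could DMA garbage; this ordering prevents that.
 */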
static inline void skge_tx_free(struct skge_hw *hw, struct skge_element *e)
{
	/* This ring element can hold an skb head or a fragment page */
	if (e->skb) {
		pci_unmap_single(hw->pdev,
				 pci_unmap_addr(e, mapaddr),
				 pci_unmap_len(e, maplen),
				 PCI_DMA_TODEVICE);
		dev_kfree_skb_any(e->skb);
		e->skb = NULL;
	} else
		pci_unmap_page(hw->pdev,
			       pci_unmap_addr(e, mapaddr),
			       pci_unmap_len(e, maplen),
			       PCI_DMA_TODEVICE);
}
static void skge_tx_clean(struct skge_port *skge)
{
	struct skge_ring *ring = &skge->tx_ring;
	struct skge_element *e;
	unsigned long flags;

	spin_lock_irqsave(&skge->tx_lock, flags);
	for (e = ring->to_clean; e != ring->to_use; e = e->next) {
		++skge->tx_avail;
		skge_tx_free(skge->hw, e);
	}
	ring->to_clean = e;
	spin_unlock_irqrestore(&skge->tx_lock, flags);
}
static void skge_tx_timeout(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);

	if (netif_msg_timer(skge))
		printk(KERN_DEBUG PFX "%s: tx timeout\n", dev->name);

	skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_STOP);
	skge_tx_clean(skge);
}
static int skge_change_mtu(struct net_device *dev, int new_mtu)
{
	int err = 0;

	if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
		return -EINVAL;

	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		skge_down(dev);
		err = skge_up(dev);
	}

	return err;
}
static void genesis_set_multicast(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	int i, count = dev->mc_count;
	struct dev_mc_list *list = dev->mc_list;
	u32 mode;
	u8 filter[8];

	mode = skge_xm_read32(hw, port, XM_MODE);
	mode |= XM_MD_ENA_HASH;
	if (dev->flags & IFF_PROMISC)
		mode |= XM_MD_ENA_PROM;
	else
		mode &= ~XM_MD_ENA_PROM;

	if (dev->flags & IFF_ALLMULTI)
		memset(filter, 0xff, sizeof(filter));
	else {
		memset(filter, 0, sizeof(filter));
		for (i = 0; list && i < count; i++, list = list->next) {
			u32 crc = crc32_le(~0, list->dmi_addr, ETH_ALEN);
			u8 bit = 63 - (crc & 63);

			filter[bit/8] |= 1 << (bit%8);
		}
	}

	skge_xm_outhash(hw, port, XM_HSM, filter);

	skge_xm_write32(hw, port, XM_MODE, mode);
}
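/*
 * Hash math sketch for the XMAC filter above, using a made-up CRC value
 * purely for illustration: if crc32_le() of an address came out with low
 * six bits 42, then bit = 63 - 42 = 21, which lands in filter[21/8] =
 * filter[2], bit 21%8 = 5.  Multicast frames whose hashed bit is set
 * pass the 64-bit hash filter; the rest are dropped unless promiscuous
 * mode is enabled.
 */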
static void yukon_set_multicast(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	struct dev_mc_list *list = dev->mc_list;
	u16 reg;
	u8 filter[8];

	memset(filter, 0, sizeof(filter));

	reg = skge_gma_read16(hw, port, GM_RX_CTRL);
	reg |= GM_RXCR_UCF_ENA;

	if (dev->flags & IFF_PROMISC)		/* promiscuous */
		reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
	else if (dev->flags & IFF_ALLMULTI)	/* all multicast */
		memset(filter, 0xff, sizeof(filter));
	else if (dev->mc_count == 0)		/* no multicast */
		reg &= ~GM_RXCR_MCF_ENA;
	else {
		int i;

		reg |= GM_RXCR_MCF_ENA;

		for (i = 0; list && i < dev->mc_count; i++, list = list->next) {
			u32 bit = ether_crc(ETH_ALEN, list->dmi_addr) & 0x3f;
			filter[bit/8] |= 1 << (bit%8);
		}
	}

	skge_gma_write16(hw, port, GM_MC_ADDR_H1,
			 (u16)filter[0] | ((u16)filter[1] << 8));
	skge_gma_write16(hw, port, GM_MC_ADDR_H2,
			 (u16)filter[2] | ((u16)filter[3] << 8));
	skge_gma_write16(hw, port, GM_MC_ADDR_H3,
			 (u16)filter[4] | ((u16)filter[5] << 8));
	skge_gma_write16(hw, port, GM_MC_ADDR_H4,
			 (u16)filter[6] | ((u16)filter[7] << 8));

	skge_gma_write16(hw, port, GM_RX_CTRL, reg);
}
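/*
 * Contrast with genesis_set_multicast(): the GMAC indexes its 64-bit
 * hash table directly with the low six bits of the big-endian
 * ether_crc(), and the table is loaded as four 16-bit registers
 * (GM_MC_ADDR_H1..H4) rather than through the XMAC outhash helper.
 */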
static inline int bad_phy_status(const struct skge_hw *hw, u32 status)
{
	if (hw->chip_id == CHIP_ID_GENESIS)
		return (status & (XMR_FS_ERR | XMR_FS_2L_VLAN)) != 0;
	else
		return (status & GMR_FS_ANY_ERR) ||
			(status & GMR_FS_RX_OK) == 0;
}
static void skge_rx_error(struct skge_port *skge, int slot,
			  u32 control, u32 status)
{
	if (netif_msg_rx_err(skge))
		printk(KERN_DEBUG PFX "%s: rx err, slot %d control 0x%x status 0x%x\n",
		       skge->netdev->name, slot, control, status);

	if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF)
	    || (control & BMU_BBC) > skge->netdev->mtu + VLAN_ETH_HLEN)
		skge->net_stats.rx_length_errors++;
	else if (skge->hw->chip_id == CHIP_ID_GENESIS) {
		if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR))
			skge->net_stats.rx_length_errors++;
		if (status & XMR_FS_FRA_ERR)
			skge->net_stats.rx_frame_errors++;
		if (status & XMR_FS_FCS_ERR)
			skge->net_stats.rx_crc_errors++;
	} else {
		if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE))
			skge->net_stats.rx_length_errors++;
		if (status & GMR_FS_FRAGMENT)
			skge->net_stats.rx_frame_errors++;
		if (status & GMR_FS_CRC_ERR)
			skge->net_stats.rx_crc_errors++;
	}
}
static int skge_poll(struct net_device *dev, int *budget)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	struct skge_ring *ring = &skge->rx_ring;
	struct skge_element *e;
	unsigned int to_do = min(dev->quota, *budget);
	unsigned int work_done = 0;
	int done;
	static const u32 irqmask[] = { IS_PORT_1, IS_PORT_2 };

	for (e = ring->to_clean; e != ring->to_use && work_done < to_do;
	     e = e->next) {
		struct skge_rx_desc *rd = e->desc;
		struct sk_buff *skb = e->skb;
		u32 control, len, status;

		rmb();
		control = rd->control;
		if (control & BMU_OWN)
			break;

		len = control & BMU_BBC;
		e->skb = NULL;

		pci_unmap_single(hw->pdev,
				 pci_unmap_addr(e, mapaddr),
				 pci_unmap_len(e, maplen),
				 PCI_DMA_FROMDEVICE);

		status = rd->status;
		if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF)
		    || len > dev->mtu + VLAN_ETH_HLEN
		    || bad_phy_status(hw, status)) {
			skge_rx_error(skge, e - ring->start, control, status);
			dev_kfree_skb(skb);
			continue;
		}

		if (netif_msg_rx_status(skge))
			printk(KERN_DEBUG PFX "%s: rx slot %td status 0x%x len %d\n",
			       dev->name, e - ring->start, rd->status, len);

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, dev);

		if (skge->rx_csum) {
			skb->csum = le16_to_cpu(rd->csum2);
			skb->ip_summed = CHECKSUM_HW;
		}

		dev->last_rx = jiffies;
		netif_receive_skb(skb);

		++work_done;
	}
	ring->to_clean = e;

	*budget -= work_done;
	dev->quota -= work_done;
	done = work_done < to_do;

	if (skge_rx_fill(skge))
		done = 0;

	/* restart receiver */
	wmb();
	skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR),
		    CSR_START | CSR_IRQ_CL_F);

	if (done) {
		local_irq_disable();
		hw->intr_mask |= irqmask[skge->port];
		/* Order is important since data can get interrupted */
		skge_write32(hw, B0_IMSK, hw->intr_mask);
		__netif_rx_complete(dev);
		local_irq_enable();
	}

	return !done;
}
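/*
 * Budget accounting in skge_poll(), restated: to_do is the smaller of
 * the per-device quota and the global *budget, and each delivered frame
 * decrements both.  Returning non-zero (!done) asks the NAPI core to
 * poll again; the done path re-enables this port's rx interrupt bit with
 * interrupts disabled so the unmask and __netif_rx_complete() cannot
 * race a new hardware interrupt.
 */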
static inline void skge_tx_intr(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	struct skge_ring *ring = &skge->tx_ring;
	struct skge_element *e;

	spin_lock(&skge->tx_lock);
	for (e = ring->to_clean; e != ring->to_use; e = e->next) {
		struct skge_tx_desc *td = e->desc;
		u32 control;

		rmb();
		control = td->control;
		if (control & BMU_OWN)
			break;

		if (unlikely(netif_msg_tx_done(skge)))
			printk(KERN_DEBUG PFX "%s: tx done slot %td status 0x%x\n",
			       dev->name, e - ring->start, td->status);

		skge_tx_free(hw, e);
		++skge->tx_avail;
	}
	ring->to_clean = e;
	skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);

	if (skge->tx_avail > MAX_SKB_FRAGS + 1)
		netif_wake_queue(dev);

	spin_unlock(&skge->tx_lock);
}
static void skge_mac_parity(struct skge_hw *hw, int port)
{
	printk(KERN_ERR PFX "%s: mac data parity error\n",
	       hw->dev[port] ? hw->dev[port]->name
	       : (port == 0 ? "(port A)" : "(port B)"));

	if (hw->chip_id == CHIP_ID_GENESIS)
		skge_write16(hw, SKGEMAC_REG(port, TX_MFF_CTRL1),
			     MFF_CLR_PERR);
	else
		/* HW-Bug #8: cleared by GMF_CLI_TX_FC instead of GMF_CLI_TX_PE */
		skge_write8(hw, SKGEMAC_REG(port, TX_GMF_CTRL_T),
			    (hw->chip_id == CHIP_ID_YUKON && chip_rev(hw) == 0)
			    ? GMF_CLI_TX_FC : GMF_CLI_TX_PE);
}
static void skge_pci_clear(struct skge_hw *hw)
{
	u16 status;

	status = skge_read16(hw, SKGEPCI_REG(PCI_STATUS));
	skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
	skge_write16(hw, SKGEPCI_REG(PCI_STATUS),
		     status | PCI_STATUS_ERROR_BITS);
	skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
}
static void skge_mac_intr(struct skge_hw *hw, int port)
{
	if (hw->chip_id == CHIP_ID_GENESIS)
		genesis_mac_intr(hw, port);
	else
		yukon_mac_intr(hw, port);
}
/* Handle device specific framing and timeout interrupts */
static void skge_error_irq(struct skge_hw *hw)
{
	u32 hwstatus = skge_read32(hw, B0_HWE_ISRC);

	if (hw->chip_id == CHIP_ID_GENESIS) {
		/* clear xmac errors */
		if (hwstatus & (IS_NO_STAT_M1|IS_NO_TIST_M1))
			skge_write16(hw, SKGEMAC_REG(0, RX_MFF_CTRL1), MFF_CLR_INSTAT);
		if (hwstatus & (IS_NO_STAT_M2|IS_NO_TIST_M2))
			skge_write16(hw, SKGEMAC_REG(0, RX_MFF_CTRL2), MFF_CLR_INSTAT);
	} else {
		/* Timestamp (unused) overflow */
		if (hwstatus & IS_IRQ_TIST_OV)
			skge_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);

		if (hwstatus & IS_IRQ_SENSOR) {
			/* no sensors on 32-bit Yukon */
			if (!(skge_read16(hw, B0_CTST) & CS_BUS_SLOT_SZ)) {
				printk(KERN_ERR PFX "ignoring bogus sensor interrupts\n");
				skge_write32(hw, B0_HWE_IMSK,
					     IS_ERR_MSK & ~IS_IRQ_SENSOR);
			} else
				printk(KERN_WARNING PFX "sensor interrupt\n");
		}
	}

	if (hwstatus & IS_RAM_RD_PAR) {
		printk(KERN_ERR PFX "Ram read data parity error\n");
		skge_write16(hw, B3_RI_CTRL, RI_CLR_RD_PERR);
	}

	if (hwstatus & IS_RAM_WR_PAR) {
		printk(KERN_ERR PFX "Ram write data parity error\n");
		skge_write16(hw, B3_RI_CTRL, RI_CLR_WR_PERR);
	}

	if (hwstatus & IS_M1_PAR_ERR)
		skge_mac_parity(hw, 0);

	if (hwstatus & IS_M2_PAR_ERR)
		skge_mac_parity(hw, 1);

	if (hwstatus & IS_R1_PAR_ERR)
		skge_write32(hw, B0_R1_CSR, CSR_IRQ_CL_P);

	if (hwstatus & IS_R2_PAR_ERR)
		skge_write32(hw, B0_R2_CSR, CSR_IRQ_CL_P);

	if (hwstatus & (IS_IRQ_MST_ERR|IS_IRQ_STAT)) {
		printk(KERN_ERR PFX "hardware error detected (status 0x%x)\n",
		       hwstatus);

		skge_pci_clear(hw);

		hwstatus = skge_read32(hw, B0_HWE_ISRC);
		if (hwstatus & IS_IRQ_STAT) {
			printk(KERN_WARNING PFX "IRQ status %x: still set, ignoring hardware errors\n",
			       hwstatus);
			hw->intr_mask &= ~IS_HW_ERR;
		}
	}
}
/*
 * Interrupts from PHY are handled in tasklet (soft irq)
 * because accessing phy registers requires spin wait which might
 * cause excess interrupt latency.
 */
static void skge_extirq(unsigned long data)
{
	struct skge_hw *hw = (struct skge_hw *) data;
	int port;

	spin_lock(&hw->phy_lock);
	for (port = 0; port < 2; port++) {
		struct net_device *dev = hw->dev[port];

		if (dev && netif_running(dev)) {
			struct skge_port *skge = netdev_priv(dev);

			if (hw->chip_id != CHIP_ID_GENESIS)
				yukon_phy_intr(skge);
			else if (hw->phy_type == SK_PHY_BCOM)
				genesis_bcom_intr(skge);
		}
	}
	spin_unlock(&hw->phy_lock);

	local_irq_disable();
	hw->intr_mask |= IS_EXT_REG;
	skge_write32(hw, B0_IMSK, hw->intr_mask);
	local_irq_enable();
}
static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct skge_hw *hw = dev_id;
	u32 status = skge_read32(hw, B0_SP_ISRC);

	if (status == 0 || status == ~0) /* hotplug or shared irq */
		return IRQ_NONE;

	status &= hw->intr_mask;

	if ((status & IS_R1_F) && netif_rx_schedule_prep(hw->dev[0])) {
		hw->intr_mask &= ~IS_R1_F;
		skge_write32(hw, B0_IMSK, hw->intr_mask);
		__netif_rx_schedule(hw->dev[0]);
	}

	if ((status & IS_R2_F) && netif_rx_schedule_prep(hw->dev[1])) {
		hw->intr_mask &= ~IS_R2_F;
		skge_write32(hw, B0_IMSK, hw->intr_mask);
		__netif_rx_schedule(hw->dev[1]);
	}

	if (status & IS_XA1_F)
		skge_tx_intr(hw->dev[0]);

	if (status & IS_XA2_F)
		skge_tx_intr(hw->dev[1]);

	if (status & IS_MAC1)
		skge_mac_intr(hw, 0);

	if (status & IS_MAC2)
		skge_mac_intr(hw, 1);

	if (status & IS_HW_ERR)
		skge_error_irq(hw);

	if (status & IS_EXT_REG) {
		hw->intr_mask &= ~IS_EXT_REG;
		tasklet_schedule(&hw->ext_tasklet);
	}

	skge_write32(hw, B0_IMSK, hw->intr_mask);

	return IRQ_HANDLED;
}
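/*
 * Interrupt fan-out summary (restating the handler above): receiver
 * interrupts (IS_R1_F/IS_R2_F) are masked in B0_IMSK and deferred to
 * NAPI via __netif_rx_schedule(); skge_poll() unmasks them again when
 * its quota is not exhausted.  PHY interrupts (IS_EXT_REG) are likewise
 * masked and deferred to the skge_extirq tasklet because PHY access
 * busy-waits.  Tx completion, MAC and hardware errors are cheap enough
 * to handle directly in hard-irq context.
 */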
#ifdef CONFIG_NET_POLL_CONTROLLER
static void skge_netpoll(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);

	disable_irq(dev->irq);
	skge_intr(dev->irq, skge->hw, NULL);
	enable_irq(dev->irq);
}
#endif
static int skge_set_mac_address(struct net_device *dev, void *p)
{
	struct skge_port *skge = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	skge_down(dev);
	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
	memcpy_toio(skge->hw->regs + B2_MAC_1 + skge->port*8,
		    dev->dev_addr, ETH_ALEN);
	memcpy_toio(skge->hw->regs + B2_MAC_2 + skge->port*8,
		    dev->dev_addr, ETH_ALEN);
	if (dev->flags & IFF_UP)
		err = skge_up(dev);
	return err;
}
static const struct {
	u8 id;
	const char *name;
} skge_chips[] = {
	{ CHIP_ID_GENESIS,	"Genesis" },
	{ CHIP_ID_YUKON,	"Yukon" },
	{ CHIP_ID_YUKON_LITE,	"Yukon-Lite" },
	{ CHIP_ID_YUKON_LP,	"Yukon-LP" },
	{ CHIP_ID_YUKON_XL,	"Yukon-2 XL" },
	{ CHIP_ID_YUKON_EC,	"Yukon-2 EC" },
	{ CHIP_ID_YUKON_FE,	"Yukon-2 FE" },
};
static const char *skge_board_name(const struct skge_hw *hw)
{
	int i;
	static char buf[16];

	for (i = 0; i < ARRAY_SIZE(skge_chips); i++)
		if (skge_chips[i].id == hw->chip_id)
			return skge_chips[i].name;

	snprintf(buf, sizeof buf, "chipid 0x%x", hw->chip_id);
	return buf;
}
/*
 * Setup the board data structure, but don't bring up
 * the ports (that is done later in skge_up)
 */
static int skge_reset(struct skge_hw *hw)
{
	u16 ctst;
	u8 t8;
	int i, ports;

	ctst = skge_read16(hw, B0_CTST);

	/* do a SW reset */
	skge_write8(hw, B0_CTST, CS_RST_SET);
	skge_write8(hw, B0_CTST, CS_RST_CLR);

	/* clear PCI errors, if any */
	skge_pci_clear(hw);

	skge_write8(hw, B0_CTST, CS_MRST_CLR);

	/* restore CLK_RUN bits (for Yukon-Lite) */
	skge_write16(hw, B0_CTST,
		     ctst & (CS_CLK_RUN_HOT|CS_CLK_RUN_RST|CS_CLK_RUN_ENA));

	hw->chip_id = skge_read8(hw, B2_CHIP_ID);
	hw->phy_type = skge_read8(hw, B2_E_1) & 0xf;
	hw->pmd_type = skge_read8(hw, B2_PMD_TYP);

	switch (hw->chip_id) {
	case CHIP_ID_GENESIS:
		switch (hw->phy_type) {
		case SK_PHY_XMAC:
			hw->phy_addr = PHY_ADDR_XMAC;
			break;
		case SK_PHY_BCOM:
			hw->phy_addr = PHY_ADDR_BCOM;
			break;
		default:
			printk(KERN_ERR PFX "%s: unsupported phy type 0x%x\n",
			       pci_name(hw->pdev), hw->phy_type);
			return -EOPNOTSUPP;
		}
		break;

	case CHIP_ID_YUKON:
	case CHIP_ID_YUKON_LITE:
	case CHIP_ID_YUKON_LP:
		if (hw->phy_type < SK_PHY_MARV_COPPER && hw->pmd_type != 'S')
			hw->phy_type = SK_PHY_MARV_COPPER;

		hw->phy_addr = PHY_ADDR_MARV;
		if (!iscopper(hw))
			hw->phy_type = SK_PHY_MARV_FIBER;
		break;

	default:
		printk(KERN_ERR PFX "%s: unsupported chip type 0x%x\n",
		       pci_name(hw->pdev), hw->chip_id);
		return -EOPNOTSUPP;
	}

	hw->mac_cfg = skge_read8(hw, B2_MAC_CFG);
	ports = isdualport(hw) ? 2 : 1;

	/* read the adapters RAM size */
	t8 = skge_read8(hw, B2_E_0);
	if (hw->chip_id == CHIP_ID_GENESIS) {
		if (t8 == 3) {
			/* special case: 4 x 64k x 36, offset = 0x80000 */
			hw->ram_size = 0x100000;
			hw->ram_offset = 0x80000;
		} else
			hw->ram_size = t8 * 512;
	} else if (t8 == 0)
		hw->ram_size = 0x20000;
	else
		hw->ram_size = t8 * 4096;

	if (hw->chip_id == CHIP_ID_GENESIS)
		genesis_init(hw);
	else {
		/* switch power to VCC (WA for VAUX problem) */
		skge_write8(hw, B0_POWER_CTRL,
			    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
		for (i = 0; i < ports; i++) {
			skge_write16(hw, SKGEMAC_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
			skge_write16(hw, SKGEMAC_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR);
		}
	}

	/* turn off hardware timer (unused) */
	skge_write8(hw, B2_TI_CTRL, TIM_STOP);
	skge_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ);
	skge_write8(hw, B0_LED, LED_STAT_ON);

	/* enable the Tx Arbiters */
	for (i = 0; i < ports; i++)
		skge_write8(hw, SKGEMAC_REG(i, TXA_CTRL), TXA_ENA_ARB);

	/* Initialize ram interface */
	skge_write16(hw, B3_RI_CTRL, RI_RST_CLR);

	skge_write8(hw, B3_RI_WTO_R1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_WTO_XA1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_WTO_XS1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_R1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_XA1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_XS1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_WTO_R2, SK_RI_TO_53);
	skge_write8(hw, B3_RI_WTO_XA2, SK_RI_TO_53);
	skge_write8(hw, B3_RI_WTO_XS2, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_R2, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_XA2, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_XS2, SK_RI_TO_53);

	skge_write32(hw, B0_HWE_IMSK, IS_ERR_MSK);

	/* Set interrupt moderation for Transmit only
	 * Receive interrupts avoided by NAPI
	 */
	skge_write32(hw, B2_IRQM_MSK, IS_XA1_F|IS_XA2_F);
	skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, 100));
	skge_write32(hw, B2_IRQM_CTRL, TIM_START);

	hw->intr_mask = IS_HW_ERR | IS_EXT_REG | IS_PORT_1;
	if (isdualport(hw))
		hw->intr_mask |= IS_PORT_2;
	skge_write32(hw, B0_IMSK, hw->intr_mask);

	if (hw->chip_id != CHIP_ID_GENESIS)
		skge_write8(hw, GMAC_IRQ_MSK, 0);

	spin_lock_bh(&hw->phy_lock);
	for (i = 0; i < ports; i++) {
		if (hw->chip_id == CHIP_ID_GENESIS)
			genesis_reset(hw, i);
		else
			yukon_reset(hw, i);
	}
	spin_unlock_bh(&hw->phy_lock);

	return 0;
}
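/*
 * On the interrupt moderation setup above: B2_IRQM_MSK selects only the
 * two Tx-complete sources (IS_XA1_F/IS_XA2_F) for moderation, and
 * B2_IRQM_INI is loaded via skge_usecs2clk(hw, 100), so Tx completions
 * are batched to roughly one interrupt per 100 us.  Receive sources are
 * deliberately left unmoderated since NAPI polling already rate-limits
 * them.
 */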
/* Initialize network device */
static struct net_device *skge_devinit(struct skge_hw *hw, int port)
{
	struct skge_port *skge;
	struct net_device *dev = alloc_etherdev(sizeof(*skge));

	if (!dev) {
		printk(KERN_ERR "skge etherdev alloc failed\n");
		return NULL;
	}

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &hw->pdev->dev);
	dev->open = skge_up;
	dev->stop = skge_down;
	dev->hard_start_xmit = skge_xmit_frame;
	dev->get_stats = skge_get_stats;
	if (hw->chip_id == CHIP_ID_GENESIS)
		dev->set_multicast_list = genesis_set_multicast;
	else
		dev->set_multicast_list = yukon_set_multicast;

	dev->set_mac_address = skge_set_mac_address;
	dev->change_mtu = skge_change_mtu;
	SET_ETHTOOL_OPS(dev, &skge_ethtool_ops);
	dev->tx_timeout = skge_tx_timeout;
	dev->watchdog_timeo = TX_WATCHDOG;
	dev->poll = skge_poll;
	dev->weight = NAPI_WEIGHT;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = skge_netpoll;
#endif
	dev->irq = hw->pdev->irq;
	dev->features = NETIF_F_LLTX;

	skge = netdev_priv(dev);
	skge->netdev = dev;
	skge->hw = hw;
	skge->msg_enable = netif_msg_init(debug, default_msg);
	skge->tx_ring.count = DEFAULT_TX_RING_SIZE;
	skge->rx_ring.count = DEFAULT_RX_RING_SIZE;

	/* Auto speed and flow control */
	skge->autoneg = AUTONEG_ENABLE;
	skge->flow_control = FLOW_MODE_SYMMETRIC;
	skge->duplex = -1;
	skge->speed = -1;
	skge->advertising = skge_modes(hw);

	hw->dev[port] = dev;

	skge->port = port;

	spin_lock_init(&skge->tx_lock);

	init_timer(&skge->link_check);
	skge->link_check.function = skge_link_timer;
	skge->link_check.data = (unsigned long) skge;

	init_timer(&skge->led_blink);
	skge->led_blink.function = skge_blink_timer;
	skge->led_blink.data = (unsigned long) skge;

	if (hw->chip_id != CHIP_ID_GENESIS) {
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
		skge->rx_csum = 1;
	}

	/* read the mac address */
	memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN);

	/* device is off until link detection */
	netif_carrier_off(dev);
	netif_stop_queue(dev);

	return dev;
}
static void __devinit skge_show_addr(struct net_device *dev)
{
	const struct skge_port *skge = netdev_priv(dev);

	if (netif_msg_probe(skge))
		printk(KERN_INFO PFX "%s: addr %02x:%02x:%02x:%02x:%02x:%02x\n",
		       dev->name,
		       dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
		       dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
}
static int __devinit skge_probe(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	struct net_device *dev, *dev1;
	struct skge_hw *hw;
	int err, using_dac = 0;

	if ((err = pci_enable_device(pdev))) {
		printk(KERN_ERR PFX "%s cannot enable PCI device\n",
		       pci_name(pdev));
		goto err_out;
	}

	if ((err = pci_request_regions(pdev, DRV_NAME))) {
		printk(KERN_ERR PFX "%s cannot obtain PCI resources\n",
		       pci_name(pdev));
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)))
		using_dac = 1;
	else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
		printk(KERN_ERR PFX "%s no usable DMA configuration\n",
		       pci_name(pdev));
		goto err_out_free_regions;
	}

#ifdef __BIG_ENDIAN
	/* byte swap descriptors in hardware */
	{
		u32 reg;

		pci_read_config_dword(pdev, PCI_DEV_REG2, &reg);
		reg |= PCI_REV_DESC;
		pci_write_config_dword(pdev, PCI_DEV_REG2, reg);
	}
#endif

	hw = kmalloc(sizeof(*hw), GFP_KERNEL);
	if (!hw) {
		printk(KERN_ERR PFX "%s: cannot allocate hardware struct\n",
		       pci_name(pdev));
		err = -ENOMEM;
		goto err_out_free_regions;
	}
	memset(hw, 0, sizeof(*hw));

	hw->pdev = pdev;
	spin_lock_init(&hw->phy_lock);
	tasklet_init(&hw->ext_tasklet, skge_extirq, (unsigned long) hw);

	hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
	if (!hw->regs) {
		printk(KERN_ERR PFX "%s: cannot map device registers\n",
		       pci_name(pdev));
		err = -ENOMEM;
		goto err_out_free_hw;
	}

	if ((err = request_irq(pdev->irq, skge_intr, SA_SHIRQ, DRV_NAME, hw))) {
		printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
		       pci_name(pdev), pdev->irq);
		goto err_out_iounmap;
	}
	pci_set_drvdata(pdev, hw);

	err = skge_reset(hw);
	if (err)
		goto err_out_free_irq;

	printk(KERN_INFO PFX "addr 0x%lx irq %d chip %s rev %d\n",
	       pci_resource_start(pdev, 0), pdev->irq,
	       skge_board_name(hw), chip_rev(hw));

	if ((dev = skge_devinit(hw, 0)) == NULL) {
		err = -ENOMEM;
		goto err_out_led_off;
	}

	if (using_dac)
		dev->features |= NETIF_F_HIGHDMA;

	if ((err = register_netdev(dev))) {
		printk(KERN_ERR PFX "%s: cannot register net device\n",
		       pci_name(pdev));
		goto err_out_free_netdev;
	}

	skge_show_addr(dev);

	if (isdualport(hw) && (dev1 = skge_devinit(hw, 1))) {
		if (using_dac)
			dev1->features |= NETIF_F_HIGHDMA;

		if (register_netdev(dev1) == 0)
			skge_show_addr(dev1);
		else {
			/* Failure to register second port need not be fatal */
			printk(KERN_WARNING PFX "register of second port failed\n");
			hw->dev[1] = NULL;
			free_netdev(dev1);
		}
	}

	return 0;

err_out_free_netdev:
	free_netdev(dev);
err_out_led_off:
	skge_write16(hw, B0_LED, LED_STAT_OFF);
err_out_free_irq:
	free_irq(pdev->irq, hw);
err_out_iounmap:
	iounmap(hw->regs);
err_out_free_hw:
	kfree(hw);
err_out_free_regions:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
err_out:
	return err;
}
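/*
 * The error labels above unwind strictly in reverse order of
 * acquisition (netdev -> LED -> irq -> ioremap -> hw struct -> PCI
 * regions -> PCI enable), so a failure at any probe step releases
 * exactly the resources taken so far and nothing else.
 */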
static void __devexit skge_remove(struct pci_dev *pdev)
{
	struct skge_hw *hw = pci_get_drvdata(pdev);
	struct net_device *dev0, *dev1;

	if (!hw)
		return;

	if ((dev1 = hw->dev[1]))
		unregister_netdev(dev1);
	dev0 = hw->dev[0];
	unregister_netdev(dev0);

	tasklet_kill(&hw->ext_tasklet);

	free_irq(pdev->irq, hw);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	if (dev1)
		free_netdev(dev1);
	free_netdev(dev0);
	skge_write16(hw, B0_LED, LED_STAT_OFF);
	iounmap(hw->regs);
	kfree(hw);
	pci_set_drvdata(pdev, NULL);
}
static int skge_suspend(struct pci_dev *pdev, u32 state)
{
	struct skge_hw *hw = pci_get_drvdata(pdev);
	int i, wol = 0;

	for (i = 0; i < 2; i++) {
		struct net_device *dev = hw->dev[i];

		if (dev) {
			struct skge_port *skge = netdev_priv(dev);
			if (netif_running(dev)) {
				netif_carrier_off(dev);
				skge_down(dev);
			}
			netif_device_detach(dev);
			wol |= skge->wol;
		}
	}

	pci_save_state(pdev);
	pci_enable_wake(pdev, state, wol);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
static int skge_resume(struct pci_dev *pdev)
{
	struct skge_hw *hw = pci_get_drvdata(pdev);
	int i;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_enable_wake(pdev, PCI_D0, 0);

	skge_reset(hw);

	for (i = 0; i < 2; i++) {
		struct net_device *dev = hw->dev[i];

		if (dev) {
			netif_device_attach(dev);
			if (netif_running(dev))
				skge_up(dev);
		}
	}
	return 0;
}
static struct pci_driver skge_driver = {
	.name =		DRV_NAME,
	.id_table =	skge_id_table,
	.probe =	skge_probe,
	.remove =	__devexit_p(skge_remove),
	.suspend =	skge_suspend,
	.resume =	skge_resume,
};
static int __init skge_init_module(void)
{
	return pci_module_init(&skge_driver);
}

static void __exit skge_cleanup_module(void)
{
	pci_unregister_driver(&skge_driver);
}

module_init(skge_init_module);
module_exit(skge_cleanup_module);