/*
 * ipg.c: Device Driver for the IP1000 Gigabit Ethernet Adapter
 *
 * Copyright (C) 2003, 2007 IC Plus Corp
 *
 * Sundance Technology, Inc.
 * craig_rich@sundanceti.com
 *
 * http://www.icplus.com.tw
 * sorbica@icplus.com.tw
 *
 * http://www.icplus.com.tw
 */
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/mutex.h>

#include <asm/div64.h>
#define IPG_RX_RING_BYTES	(sizeof(struct ipg_rx) * IPG_RFDLIST_LENGTH)
#define IPG_TX_RING_BYTES	(sizeof(struct ipg_tx) * IPG_TFDLIST_LENGTH)
#define IPG_RESET_MASK \
	(IPG_AC_GLOBAL_RESET | IPG_AC_RX_RESET | IPG_AC_TX_RESET | \
	 IPG_AC_DMA | IPG_AC_FIFO | IPG_AC_NETWORK | IPG_AC_HOST | \
	 IPG_AC_AUTO_INIT)
#define ipg_w32(val32, reg)	iowrite32((val32), ioaddr + (reg))
#define ipg_w16(val16, reg)	iowrite16((val16), ioaddr + (reg))
#define ipg_w8(val8, reg)	iowrite8((val8), ioaddr + (reg))

#define ipg_r32(reg)		ioread32(ioaddr + (reg))
#define ipg_r16(reg)		ioread16(ioaddr + (reg))
#define ipg_r8(reg)		ioread8(ioaddr + (reg))
#define DRV_NAME	"ipg"

MODULE_AUTHOR("IC Plus Corp. 2003");
MODULE_DESCRIPTION("IC Plus IP1000 Gigabit Ethernet Adapter Linux Driver");
MODULE_LICENSE("GPL");
#define IPG_MAX_RXFRAME_SIZE	0x0600
#define IPG_RXFRAG_SIZE		0x0600
#define IPG_RXSUPPORT_SIZE	0x0600
#define IPG_IS_JUMBO		false
/* Variable record -- index by leading revision/length
 * Revision/Length(=N*4), Address1, Data1, Address2, Data2,...,AddressN,DataN
 */
static unsigned short DefaultPhyParam[] = {
	/* 11/12/03 IP1000A v1-3 rev=0x40 */
	/*--------------------------------------------------------------------------
	(0x4000|(15*4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 22, 0x85bd, 24, 0xfff2,
	27, 0x0c10, 28, 0x0c10, 29, 0x2c10, 31, 0x0003, 23, 0x92f6,
	31, 0x0000, 23, 0x003d, 30, 0x00de, 20, 0x20e7, 9, 0x0700,
	--------------------------------------------------------------------------*/
	/* 12/17/03 IP1000A v1-4 rev=0x40 */
	(0x4000 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31, 0x0000,
	30, 0x005e, 9, 0x0700,
	/* 01/09/04 IP1000A v1-5 rev=0x41 */
	(0x4100 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31, 0x0000,
	30, 0x005e, 9, 0x0700,
	0x0000
};
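/* Illustrative reading of a record header (added note, not from the vendor
 * comment above): each record begins with (revision << 8 | byte length), so
 * (0x4100 | (07 * 4)) means revision 0x41 followed by 7 * 4 = 28 bytes of
 * data, i.e. 7 register/value pairs consumed by ipg_set_phy_default_param().
 */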
static const char *ipg_brand_name[] = {
	"IC PLUS IP1000 1000/100/10 based NIC",
	"Sundance Technology ST2021 based NIC",
	"Tamarack Microelectronics TC9020/9021 based NIC",
	"Tamarack Microelectronics TC9020/9021 based NIC",
static struct pci_device_id ipg_pci_tbl[] __devinitdata = {
	{ PCI_VDEVICE(SUNDANCE,	0x1023), 0 },
	{ PCI_VDEVICE(SUNDANCE,	0x2021), 1 },
	{ PCI_VDEVICE(SUNDANCE,	0x1021), 2 },
	{ PCI_VDEVICE(DLINK,	0x9021), 3 },
	{ PCI_VDEVICE(DLINK,	0x4000), 4 },
	{ PCI_VDEVICE(DLINK,	0x4020), 5 },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, ipg_pci_tbl);
static inline void __iomem *ipg_ioaddr(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);

	return sp->ioaddr;
}
static void ipg_dump_rfdlist(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;

	IPG_DEBUG_MSG("_dump_rfdlist\n");

	printk(KERN_INFO "rx_current = %2.2x\n", sp->rx_current);
	printk(KERN_INFO "rx_dirty = %2.2x\n", sp->rx_dirty);
	printk(KERN_INFO "RFDList start address = %16.16lx\n",
	       (unsigned long) sp->rxd_map);
	printk(KERN_INFO "RFDListPtr register = %8.8x%8.8x\n",
	       ipg_r32(IPG_RFDLISTPTR1), ipg_r32(IPG_RFDLISTPTR0));

	for (i = 0; i < IPG_RFDLIST_LENGTH; i++) {
		offset = (u32) &sp->rxd[i].next_desc - (u32) sp->rxd;
		printk(KERN_INFO "%2.2x %4.4x RFDNextPtr = %16.16lx\n", i,
		       offset, (unsigned long) sp->rxd[i].next_desc);
		offset = (u32) &sp->rxd[i].rfs - (u32) sp->rxd;
		printk(KERN_INFO "%2.2x %4.4x RFS = %16.16lx\n", i,
		       offset, (unsigned long) sp->rxd[i].rfs);
		offset = (u32) &sp->rxd[i].frag_info - (u32) sp->rxd;
		printk(KERN_INFO "%2.2x %4.4x frag_info = %16.16lx\n", i,
		       offset, (unsigned long) sp->rxd[i].frag_info);
	}
}
static void ipg_dump_tfdlist(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;

	IPG_DEBUG_MSG("_dump_tfdlist\n");

	printk(KERN_INFO "tx_current = %2.2x\n", sp->tx_current);
	printk(KERN_INFO "tx_dirty = %2.2x\n", sp->tx_dirty);
	printk(KERN_INFO "TFDList start address = %16.16lx\n",
	       (unsigned long) sp->txd_map);
	printk(KERN_INFO "TFDListPtr register = %8.8x%8.8x\n",
	       ipg_r32(IPG_TFDLISTPTR1), ipg_r32(IPG_TFDLISTPTR0));

	for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
		offset = (u32) &sp->txd[i].next_desc - (u32) sp->txd;
		printk(KERN_INFO "%2.2x %4.4x TFDNextPtr = %16.16lx\n", i,
		       offset, (unsigned long) sp->txd[i].next_desc);
		offset = (u32) &sp->txd[i].tfc - (u32) sp->txd;
		printk(KERN_INFO "%2.2x %4.4x TFC = %16.16lx\n", i,
		       offset, (unsigned long) sp->txd[i].tfc);
		offset = (u32) &sp->txd[i].frag_info - (u32) sp->txd;
		printk(KERN_INFO "%2.2x %4.4x frag_info = %16.16lx\n", i,
		       offset, (unsigned long) sp->txd[i].frag_info);
	}
}
static void ipg_write_phy_ctl(void __iomem *ioaddr, u8 data)
{
	ipg_w8(IPG_PC_RSVD_MASK & data, PHY_CTRL);
	ndelay(IPG_PC_PHYCTRLWAIT_NS);
}

static void ipg_drive_phy_ctl_low_high(void __iomem *ioaddr, u8 data)
{
	ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | data);
	ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | data);
}

static void send_three_state(void __iomem *ioaddr, u8 phyctrlpolarity)
{
	phyctrlpolarity |= (IPG_PC_MGMTDATA & 0) | IPG_PC_MGMTDIR;

	ipg_drive_phy_ctl_low_high(ioaddr, phyctrlpolarity);
}

static void send_end(void __iomem *ioaddr, u8 phyctrlpolarity)
{
	ipg_w8((IPG_PC_MGMTCLK_LO | (IPG_PC_MGMTDATA & 0) | IPG_PC_MGMTDIR |
		phyctrlpolarity) & IPG_PC_RSVD_MASK, PHY_CTRL);
}

static u16 read_phy_bit(void __iomem *ioaddr, u8 phyctrlpolarity)
{
	u16 bit_data;

	ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | phyctrlpolarity);

	bit_data = ((ipg_r8(PHY_CTRL) & IPG_PC_MGMTDATA) >> 1) & 1;

	ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | phyctrlpolarity);

	return bit_data;
}
/*
 * Read a register from the Physical Layer device located
 * on the IPG NIC, using the IPG PHYCTRL register.
 */
static int mdio_read(struct net_device *dev, int phy_id, int phy_reg)
{
	void __iomem *ioaddr = ipg_ioaddr(dev);
	/*
	 * The GMII management frame structure for a read is as follows:
	 *
	 * |Preamble|st|op|phyad|regad|ta|      data      |idle|
	 * |< 32 1s>|01|10|AAAAA|RRRRR|z0|DDDDDDDDDDDDDDDD|z   |
	 *
	 * <32 1s> = 32 consecutive logic 1 values
	 * A = bit of Physical Layer device address (MSB first)
	 * R = bit of register address (MSB first)
	 * z = High impedance state
	 * D = bit of read data (MSB first)
	 *
	 * Transmission order is 'Preamble' field first, bits transmitted
	 * left to right (first to last).
	 */
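	/* Added side note: a complete read frame is therefore
	 * 32 + 2 + 2 + 5 + 5 + 2 + 16 + 1 = 65 bit times, matching the
	 * field lengths tabulated in p[] below.
	 */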
	struct {
		u32 field;
		unsigned int len;
	} p[] = {
		{ GMII_PREAMBLE,	32 },	/* Preamble */
		{ GMII_ST,		2  },	/* ST */
		{ GMII_READ,		2  },	/* OP */
		{ phy_id,		5  },	/* PHYAD */
		{ phy_reg,		5  },	/* REGAD */
		{ 0x0000,		2  },	/* TA */
		{ 0x0000,		16 },	/* DATA */
		{ 0x0000,		1  }	/* IDLE */
	};

	polarity = ipg_r8(PHY_CTRL);
	polarity &= (IPG_PC_DUPLEX_POLARITY | IPG_PC_LINK_POLARITY);

	/* Create the Preamble, ST, OP, PHYAD, and REGAD field. */
	for (j = 0; j < 5; j++) {
		for (i = 0; i < p[j].len; i++) {
			/* For each variable length field, the MSB must be
			 * transmitted first. Rotate through the field bits,
			 * starting with the MSB, and move each bit into the
			 * 1st (2^1) bit position (this is the bit position
			 * corresponding to the MgmtData bit of the PhyCtrl
			 * register for the IPG).
			 *
			 * Example: ST = 01;
			 *
			 *	First write a '0' to bit 1 of the PhyCtrl
			 *	register, then write a '1' to bit 1 of the
			 *	PhyCtrl register.
			 *
			 * To do this, right shift the MSB of ST by the value:
			 * [field length - 1 - #ST bits already written]
			 * then left shift this result by 1.
			 */
			data = (p[j].field >> (p[j].len - 1 - i)) << 1;
			data &= IPG_PC_MGMTDATA;
			data |= polarity | IPG_PC_MGMTDIR;

			ipg_drive_phy_ctl_low_high(ioaddr, data);
		}
	}

	send_three_state(ioaddr, polarity);

	read_phy_bit(ioaddr, polarity);

	/*
	 * For a read cycle, the bits for the next two fields (TA and
	 * DATA) are driven by the PHY (the IPG reads these bits).
	 */
	for (i = 0; i < p[6].len; i++) {
		p[6].field |=
		    (read_phy_bit(ioaddr, polarity) << (p[6].len - 1 - i));
	}

	send_three_state(ioaddr, polarity);
	send_three_state(ioaddr, polarity);
	send_three_state(ioaddr, polarity);
	send_end(ioaddr, polarity);

	/* Return the value of the DATA field. */
	return p[6].field;
}
/*
 * Write to a register from the Physical Layer device located
 * on the IPG NIC, using the IPG PHYCTRL register.
 */
static void mdio_write(struct net_device *dev, int phy_id, int phy_reg, int val)
{
	void __iomem *ioaddr = ipg_ioaddr(dev);
	/*
	 * The GMII management frame structure for a write is as follows:
	 *
	 * |Preamble|st|op|phyad|regad|ta|      data      |idle|
	 * |< 32 1s>|01|10|AAAAA|RRRRR|z0|DDDDDDDDDDDDDDDD|z   |
	 *
	 * <32 1s> = 32 consecutive logic 1 values
	 * A = bit of Physical Layer device address (MSB first)
	 * R = bit of register address (MSB first)
	 * z = High impedance state
	 * D = bit of write data (MSB first)
	 *
	 * Transmission order is 'Preamble' field first, bits transmitted
	 * left to right (first to last).
	 */
	struct {
		u32 field;
		unsigned int len;
	} p[] = {
		{ GMII_PREAMBLE,	32 },	/* Preamble */
		{ GMII_ST,		2  },	/* ST */
		{ GMII_WRITE,		2  },	/* OP */
		{ phy_id,		5  },	/* PHYAD */
		{ phy_reg,		5  },	/* REGAD */
		{ 0x0002,		2  },	/* TA */
		{ val & 0xffff,		16 },	/* DATA */
		{ 0x0000,		1  }	/* IDLE */
	};

	polarity = ipg_r8(PHY_CTRL);
	polarity &= (IPG_PC_DUPLEX_POLARITY | IPG_PC_LINK_POLARITY);

	/* Create the Preamble, ST, OP, PHYAD, and REGAD field. */
	for (j = 0; j < 7; j++) {
		for (i = 0; i < p[j].len; i++) {
			/* For each variable length field, the MSB must be
			 * transmitted first. Rotate through the field bits,
			 * starting with the MSB, and move each bit into the
			 * 1st (2^1) bit position (this is the bit position
			 * corresponding to the MgmtData bit of the PhyCtrl
			 * register for the IPG).
			 *
			 * Example: ST = 01;
			 *
			 *	First write a '0' to bit 1 of the PhyCtrl
			 *	register, then write a '1' to bit 1 of the
			 *	PhyCtrl register.
			 *
			 * To do this, right shift the MSB of ST by the value:
			 * [field length - 1 - #ST bits already written]
			 * then left shift this result by 1.
			 */
			data = (p[j].field >> (p[j].len - 1 - i)) << 1;
			data &= IPG_PC_MGMTDATA;
			data |= polarity | IPG_PC_MGMTDIR;

			ipg_drive_phy_ctl_low_high(ioaddr, data);
		}
	}

	/* The last cycle is a tri-state, so read from the PHY. */
	for (j = 7; j < 8; j++) {
		for (i = 0; i < p[j].len; i++) {
			ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | polarity);

			p[j].field |= ((ipg_r8(PHY_CTRL) &
				IPG_PC_MGMTDATA) >> 1) << (p[j].len - 1 - i);

			ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | polarity);
		}
	}
}
static void ipg_set_led_mode(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;

	mode = ipg_r32(ASIC_CTRL);
	mode &= ~(IPG_AC_LED_MODE_BIT_1 | IPG_AC_LED_MODE | IPG_AC_LED_SPEED);

	if ((sp->led_mode & 0x03) > 1)
		mode |= IPG_AC_LED_MODE_BIT_1;	/* Write Asic Control Bit 29 */

	if ((sp->led_mode & 0x01) == 1)
		mode |= IPG_AC_LED_MODE;	/* Write Asic Control Bit 14 */

	if ((sp->led_mode & 0x08) == 8)
		mode |= IPG_AC_LED_SPEED;	/* Write Asic Control Bit 27 */

	ipg_w32(mode, ASIC_CTRL);
}
static void ipg_set_phy_set(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;

	physet = ipg_r8(PHY_SET);
	physet &= ~(IPG_PS_MEM_LENB9B | IPG_PS_MEM_LEN9 | IPG_PS_NON_COMPDET);
	physet |= ((sp->led_mode & 0x70) >> 4);
	ipg_w8(physet, PHY_SET);
}
static int ipg_reset(struct net_device *dev, u32 resetflags)
{
	/* Assert functional resets via the IPG AsicCtrl
	 * register as specified by the 'resetflags' input
	 * parameter.
	 */
	void __iomem *ioaddr = ipg_ioaddr(dev);
	unsigned int timeout_count = 0;

	IPG_DEBUG_MSG("_reset\n");

	ipg_w32(ipg_r32(ASIC_CTRL) | resetflags, ASIC_CTRL);

	/* Delay added to account for problem with 10Mbps reset. */
	mdelay(IPG_AC_RESETWAIT);

	while (IPG_AC_RESET_BUSY & ipg_r32(ASIC_CTRL)) {
		mdelay(IPG_AC_RESETWAIT);
		if (++timeout_count > IPG_AC_RESET_TIMEOUT)
			return -ETIME;
	}

	/* Set LED Mode in Asic Control */
	ipg_set_led_mode(dev);

	/* Set PHYSet Register Value */
	ipg_set_phy_set(dev);

	return 0;
}
/* Find the GMII PHY address. */
static int ipg_find_phyaddr(struct net_device *dev)
{
	unsigned int phyaddr, i;

	for (i = 0; i < 32; i++) {
		u32 status;

		/* Search for the correct PHY address among 32 possible. */
		phyaddr = (IPG_NIC_PHY_ADDRESS + i) % 32;

		/* 10/22/03 Grace change verify from GMII_PHY_STATUS to
		 * GMII_PHY_ID1
		 */
		status = mdio_read(dev, phyaddr, MII_BMSR);

		if ((status != 0xFFFF) && (status != 0))
			return phyaddr;
	}

	return 0x1f;
}
/*
 * Configure IPG based on result of IEEE 802.3 PHY
 * auto-negotiation.
 */
static int ipg_config_autoneg(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	unsigned int txflowcontrol;
	unsigned int rxflowcontrol;
	unsigned int fullduplex;

	IPG_DEBUG_MSG("_config_autoneg\n");

	asicctrl = ipg_r32(ASIC_CTRL);
	phyctrl = ipg_r8(PHY_CTRL);
	mac_ctrl_val = ipg_r32(MAC_CTRL);

	/* Set flags for use in resolving auto-negotiation, assuming
	 * non-1000Mbps, half duplex, no flow control.
	 */
	fullduplex = 0;
	txflowcontrol = 0;
	rxflowcontrol = 0;

	/* To accommodate a problem in 10Mbps operation,
	 * set a global flag if PHY running in 10Mbps mode.
	 */
	sp->tenmbpsmode = 0;

	printk(KERN_INFO "%s: Link speed = ", dev->name);

	/* Determine actual speed of operation. */
	switch (phyctrl & IPG_PC_LINK_SPEED) {
	case IPG_PC_LINK_SPEED_10MBPS:
		printk("10Mbps.\n");
		printk(KERN_INFO "%s: 10Mbps operational mode enabled.\n",
		       dev->name);
		sp->tenmbpsmode = 1;
		break;
	case IPG_PC_LINK_SPEED_100MBPS:
		printk("100Mbps.\n");
		break;
	case IPG_PC_LINK_SPEED_1000MBPS:
		printk("1000Mbps.\n");
		break;
	default:
		printk("undefined!\n");
		return 0;
	}

	if (phyctrl & IPG_PC_DUPLEX_STATUS) {
		fullduplex = 1;
		txflowcontrol = 1;
		rxflowcontrol = 1;
	}

	/* Configure full duplex, and flow control. */
	if (fullduplex == 1) {
		/* Configure IPG for full duplex operation. */
		printk(KERN_INFO "%s: setting full duplex, ", dev->name);

		mac_ctrl_val |= IPG_MC_DUPLEX_SELECT_FD;

		if (txflowcontrol == 1) {
			printk("TX flow control");
			mac_ctrl_val |= IPG_MC_TX_FLOW_CONTROL_ENABLE;
		} else {
			printk("no TX flow control");
			mac_ctrl_val &= ~IPG_MC_TX_FLOW_CONTROL_ENABLE;
		}

		if (rxflowcontrol == 1) {
			printk(", RX flow control.");
			mac_ctrl_val |= IPG_MC_RX_FLOW_CONTROL_ENABLE;
		} else {
			printk(", no RX flow control.");
			mac_ctrl_val &= ~IPG_MC_RX_FLOW_CONTROL_ENABLE;
		}
	} else {
		/* Configure IPG for half duplex operation. */
		printk(KERN_INFO "%s: setting half duplex, "
		       "no TX flow control, no RX flow control.\n", dev->name);

		mac_ctrl_val &= ~IPG_MC_DUPLEX_SELECT_FD &
			~IPG_MC_TX_FLOW_CONTROL_ENABLE &
			~IPG_MC_RX_FLOW_CONTROL_ENABLE;
	}
	ipg_w32(mac_ctrl_val, MAC_CTRL);
	return 0;
}
/* Determine and configure multicast operation and set
 * receive mode for IPG.
 */
static void ipg_nic_set_multicast_list(struct net_device *dev)
{
	void __iomem *ioaddr = ipg_ioaddr(dev);
	struct dev_mc_list *mc_list_ptr;
	unsigned int hashindex;

	IPG_DEBUG_MSG("_nic_set_multicast_list\n");

	receivemode = IPG_RM_RECEIVEUNICAST | IPG_RM_RECEIVEBROADCAST;

	if (dev->flags & IFF_PROMISC) {
		/* NIC to be configured in promiscuous mode. */
		receivemode = IPG_RM_RECEIVEALLFRAMES;
	} else if ((dev->flags & IFF_ALLMULTI) ||
		   ((dev->flags & IFF_MULTICAST) &&
		    (dev->mc_count > IPG_MULTICAST_HASHTABLE_SIZE))) {
		/* NIC to be configured to receive all multicast
		 * frames. */
		receivemode |= IPG_RM_RECEIVEMULTICAST;
	} else if ((dev->flags & IFF_MULTICAST) && (dev->mc_count > 0)) {
		/* NIC to be configured to receive selected
		 * multicast addresses. */
		receivemode |= IPG_RM_RECEIVEMULTICASTHASH;
	}
	/* Calculate the bits to set for the 64 bit, IPG HASHTABLE.
	 * The IPG applies a cyclic-redundancy-check (the same CRC
	 * used to calculate the frame data FCS) to the destination
	 * address of all incoming multicast frames whose destination
	 * address has the multicast bit set. The least significant
	 * 6 bits of the CRC result are used as an addressing index
	 * into the hash table. If the value of the bit addressed by
	 * this index is a 1, the frame is passed to the host system.
	 */
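	/* Worked example (added note, not from the vendor comment): if the
	 * CRC of a destination address ends in the six bits 100101b
	 * (0x25 = 37), bit 37 of the 64-bit table is set, which is bit 5 of
	 * hashtable[1] and is later written out via the HASHTABLE_1 register.
	 */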
	/* Clear hashtable. */
	hashtable[0] = 0x00000000;
	hashtable[1] = 0x00000000;

	/* Cycle through all multicast addresses to filter. */
	for (mc_list_ptr = dev->mc_list;
	     mc_list_ptr != NULL; mc_list_ptr = mc_list_ptr->next) {
		/* Calculate CRC result for each multicast address. */
		hashindex = crc32_le(0xffffffff, mc_list_ptr->dmi_addr,
				     ETH_ALEN);

		/* Use only the least significant 6 bits. */
		hashindex = hashindex & 0x3F;

		/* Within "hashtable", set bit number "hashindex". */
		set_bit(hashindex, (void *)hashtable);
	}

	/* Write the value of the hashtable, to the 4, 16 bit
	 * HASHTABLE IPG registers.
	 */
	ipg_w32(hashtable[0], HASHTABLE_0);
	ipg_w32(hashtable[1], HASHTABLE_1);

	ipg_w8(IPG_RM_RSVD_MASK & receivemode, RECEIVE_MODE);

	IPG_DEBUG_MSG("ReceiveMode = %x\n", ipg_r8(RECEIVE_MODE));
}
static int ipg_io_config(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = ipg_ioaddr(dev);

	IPG_DEBUG_MSG("_io_config\n");

	origmacctrl = ipg_r32(MAC_CTRL);

	restoremacctrl = origmacctrl | IPG_MC_STATISTICS_ENABLE;

	/* Based on compilation option, determine if FCS is to be
	 * stripped on receive frames by IPG.
	 */
	if (!IPG_STRIP_FCS_ON_RX)
		restoremacctrl |= IPG_MC_RCV_FCS;

	/* Determine if transmitter and/or receiver are
	 * enabled so we may restore MACCTRL correctly.
	 */
	if (origmacctrl & IPG_MC_TX_ENABLED)
		restoremacctrl |= IPG_MC_TX_ENABLE;

	if (origmacctrl & IPG_MC_RX_ENABLED)
		restoremacctrl |= IPG_MC_RX_ENABLE;

	/* Transmitter and receiver must be disabled before setting
	 * IFSSelect.
	 */
	ipg_w32((origmacctrl & (IPG_MC_RX_DISABLE | IPG_MC_TX_DISABLE)) &
		IPG_MC_RSVD_MASK, MAC_CTRL);

	/* Now that transmitter and receiver are disabled, write
	 * to IFSSelect.
	 */
	ipg_w32((origmacctrl & IPG_MC_IFS_96BIT) & IPG_MC_RSVD_MASK, MAC_CTRL);

	/* Set RECEIVEMODE register. */
	ipg_nic_set_multicast_list(dev);

	ipg_w16(sp->max_rxframe_size, MAX_FRAME_SIZE);

	ipg_w8(IPG_RXDMAPOLLPERIOD_VALUE, RX_DMA_POLL_PERIOD);
	ipg_w8(IPG_RXDMAURGENTTHRESH_VALUE, RX_DMA_URGENT_THRESH);
	ipg_w8(IPG_RXDMABURSTTHRESH_VALUE, RX_DMA_BURST_THRESH);
	ipg_w8(IPG_TXDMAPOLLPERIOD_VALUE, TX_DMA_POLL_PERIOD);
	ipg_w8(IPG_TXDMAURGENTTHRESH_VALUE, TX_DMA_URGENT_THRESH);
	ipg_w8(IPG_TXDMABURSTTHRESH_VALUE, TX_DMA_BURST_THRESH);
	ipg_w16((IPG_IE_HOST_ERROR | IPG_IE_TX_DMA_COMPLETE |
		 IPG_IE_TX_COMPLETE | IPG_IE_INT_REQUESTED |
		 IPG_IE_UPDATE_STATS | IPG_IE_LINK_EVENT |
		 IPG_IE_RX_DMA_COMPLETE | IPG_IE_RX_DMA_PRIORITY), INT_ENABLE);
	ipg_w16(IPG_FLOWONTHRESH_VALUE, FLOW_ON_THRESH);
	ipg_w16(IPG_FLOWOFFTHRESH_VALUE, FLOW_OFF_THRESH);

	/* IPG multi-frag frame bug workaround.
	 * Per silicon revision B3 errata.
	 */
	ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0200, DEBUG_CTRL);

	/* IPG TX poll now bug workaround.
	 * Per silicon revision B3 errata.
	 */
	ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0010, DEBUG_CTRL);

	/* IPG RX poll now bug workaround.
	 * Per silicon revision B3 errata.
	 */
	ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0020, DEBUG_CTRL);

	/* Now restore MACCTRL to original setting. */
	ipg_w32(IPG_MC_RSVD_MASK & restoremacctrl, MAC_CTRL);

	/* Disable unused RMON statistics. */
	ipg_w32(IPG_RZ_ALL, RMON_STATISTICS_MASK);

	/* Disable unused MIB statistics. */
	ipg_w32(IPG_SM_MACCONTROLFRAMESXMTD | IPG_SM_MACCONTROLFRAMESRCVD |
		IPG_SM_BCSTOCTETXMTOK_BCSTFRAMESXMTDOK | IPG_SM_TXJUMBOFRAMES |
		IPG_SM_MCSTOCTETXMTOK_MCSTFRAMESXMTDOK | IPG_SM_RXJUMBOFRAMES |
		IPG_SM_BCSTOCTETRCVDOK_BCSTFRAMESRCVDOK |
		IPG_SM_UDPCHECKSUMERRORS | IPG_SM_TCPCHECKSUMERRORS |
		IPG_SM_IPCHECKSUMERRORS, STATISTICS_MASK);

	return 0;
}
/*
 * Create a receive buffer within system memory and update
 * NIC private structure appropriately.
 */
static int ipg_get_rxbuff(struct net_device *dev, int entry)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	struct ipg_rx *rxfd = sp->rxd + entry;

	IPG_DEBUG_MSG("_get_rxbuff\n");

	skb = netdev_alloc_skb(dev, sp->rxsupport_size + NET_IP_ALIGN);
	if (!skb) {
		sp->rx_buff[entry] = NULL;
		return -ENOMEM;
	}

	/* Adjust the data start location within the buffer to
	 * align IP address field to a 16 byte boundary.
	 */
	skb_reserve(skb, NET_IP_ALIGN);

	/* Associate the receive buffer with the IPG NIC. */
	skb->dev = dev;

	/* Save the address of the sk_buff structure. */
	sp->rx_buff[entry] = skb;

	rxfd->frag_info = cpu_to_le64(pci_map_single(sp->pdev, skb->data,
		sp->rx_buf_sz, PCI_DMA_FROMDEVICE));

	/* Set the RFD fragment length. */
	rxfragsize = sp->rxfrag_size;
	rxfd->frag_info |= cpu_to_le64((rxfragsize << 48) & IPG_RFI_FRAGLEN);
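	/* Added note on the encoding above (assumption based on how the
	 * IPG_RFI_FRAGLEN mask is used throughout this driver): bits 63:48 of
	 * frag_info carry the fragment length while the low bits carry the DMA
	 * address, so a 0x0600-byte fragment becomes 0x0600ull << 48 OR'd over
	 * the mapped address.
	 */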
static int init_rfdlist(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;

	IPG_DEBUG_MSG("_init_rfdlist\n");

	for (i = 0; i < IPG_RFDLIST_LENGTH; i++) {
		struct ipg_rx *rxfd = sp->rxd + i;

		if (sp->rx_buff[i]) {
			pci_unmap_single(sp->pdev,
				le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
				sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb_irq(sp->rx_buff[i]);
			sp->rx_buff[i] = NULL;
		}

		/* Clear out the RFS field. */
		rxfd->rfs = 0x0000000000000000;

		if (ipg_get_rxbuff(dev, i) < 0) {
			/*
			 * A receive buffer was not ready, break the
			 * RFD list here.
			 */
			IPG_DEBUG_MSG("Cannot allocate Rx buffer.\n");

			/* Just in case we cannot allocate a single RFD.
			 * Should not occur.
			 */
			if (i == 0) {
				printk(KERN_ERR "%s: No memory available"
				       " for RFD list.\n", dev->name);
				return -ENOMEM;
			}
		}

		rxfd->next_desc = cpu_to_le64(sp->rxd_map +
			sizeof(struct ipg_rx)*(i + 1));
	}
	sp->rxd[i - 1].next_desc = cpu_to_le64(sp->rxd_map);

	/* Write the location of the RFDList to the IPG. */
	ipg_w32((u32) sp->rxd_map, RFD_LIST_PTR_0);
	ipg_w32(0x00000000, RFD_LIST_PTR_1);

	return 0;
}
static void init_tfdlist(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;

	IPG_DEBUG_MSG("_init_tfdlist\n");

	for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
		struct ipg_tx *txfd = sp->txd + i;

		txfd->tfc = cpu_to_le64(IPG_TFC_TFDDONE);

		if (sp->tx_buff[i]) {
			dev_kfree_skb_irq(sp->tx_buff[i]);
			sp->tx_buff[i] = NULL;
		}

		txfd->next_desc = cpu_to_le64(sp->txd_map +
			sizeof(struct ipg_tx)*(i + 1));
	}
	sp->txd[i - 1].next_desc = cpu_to_le64(sp->txd_map);

	/* Write the location of the TFDList to the IPG. */
	IPG_DDEBUG_MSG("Starting TFDListPtr = %8.8x\n",
		       (u32) sp->txd_map);
	ipg_w32((u32) sp->txd_map, TFD_LIST_PTR_0);
	ipg_w32(0x00000000, TFD_LIST_PTR_1);

	sp->reset_current_tfd = 1;
}
/*
 * Free all transmit buffers which have already been transferred
 * via DMA to the IPG.
 */
static void ipg_nic_txfree(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	unsigned int released, pending, dirty;

	IPG_DEBUG_MSG("_nic_txfree\n");

	pending = sp->tx_current - sp->tx_dirty;
	dirty = sp->tx_dirty % IPG_TFDLIST_LENGTH;
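	/* Added note: tx_current and tx_dirty are free-running counters, so
	 * the unsigned subtraction above stays correct across wrap-around;
	 * only the ring index ('dirty') is reduced modulo the list length.
	 */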
	for (released = 0; released < pending; released++) {
		struct sk_buff *skb = sp->tx_buff[dirty];
		struct ipg_tx *txfd = sp->txd + dirty;

		IPG_DEBUG_MSG("TFC = %16.16lx\n", (unsigned long) txfd->tfc);

		/* Look at each TFD's TFC field beginning
		 * at the last freed TFD up to the current TFD.
		 * If the TFDDone bit is set, free the associated
		 * buffer.
		 */
		if (!(txfd->tfc & cpu_to_le64(IPG_TFC_TFDDONE)))
			break;

		/* Free the transmit buffer. */
		if (skb) {
			pci_unmap_single(sp->pdev,
				le64_to_cpu(txfd->frag_info) & ~IPG_TFI_FRAGLEN,
				skb->len, PCI_DMA_TODEVICE);

			dev_kfree_skb_irq(skb);

			sp->tx_buff[dirty] = NULL;
		}
		dirty = (dirty + 1) % IPG_TFDLIST_LENGTH;
	}

	sp->tx_dirty += released;

	if (netif_queue_stopped(dev) &&
	    (sp->tx_current != (sp->tx_dirty + IPG_TFDLIST_LENGTH))) {
		netif_wake_queue(dev);
	}
}
static void ipg_tx_timeout(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;

	ipg_reset(dev, IPG_AC_TX_RESET | IPG_AC_DMA | IPG_AC_NETWORK |
		  IPG_AC_FIFO);

	spin_lock_irq(&sp->lock);

	/* Re-configure after DMA reset. */
	if (ipg_io_config(dev) < 0) {
		printk(KERN_INFO "%s: Error during re-configuration.\n",
		       dev->name);
	}

	spin_unlock_irq(&sp->lock);

	ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) & IPG_MC_RSVD_MASK,
		MAC_CTRL);
}
/*
 * For TxComplete interrupts, free all transmit
 * buffers which have already been transferred via DMA
 * to the IPG.
 */
static void ipg_nic_txcleanup(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;

	IPG_DEBUG_MSG("_nic_txcleanup\n");

	for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
		/* Reading the TXSTATUS register clears the
		 * TX_COMPLETE interrupt.
		 */
		u32 txstatusdword = ipg_r32(TX_STATUS);

		IPG_DEBUG_MSG("TxStatus = %8.8x\n", txstatusdword);

		/* Check for Transmit errors. Error bits only valid if
		 * TX_COMPLETE bit in the TXSTATUS register is a 1.
		 */
		if (!(txstatusdword & IPG_TS_TX_COMPLETE))
			break;

		/* If in 10Mbps mode, indicate transmit is ready. */
		if (sp->tenmbpsmode) {
			netif_wake_queue(dev);
		}

		/* Transmit error, increment stat counters. */
		if (txstatusdword & IPG_TS_TX_ERROR) {
			IPG_DEBUG_MSG("Transmit error.\n");
			sp->stats.tx_errors++;
		}

		/* Late collision, re-enable transmitter. */
		if (txstatusdword & IPG_TS_LATE_COLLISION) {
			IPG_DEBUG_MSG("Late collision on transmit.\n");
			ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) &
				IPG_MC_RSVD_MASK, MAC_CTRL);
		}

		/* Maximum collisions, re-enable transmitter. */
		if (txstatusdword & IPG_TS_TX_MAX_COLL) {
			IPG_DEBUG_MSG("Maximum collisions on transmit.\n");
			ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) &
				IPG_MC_RSVD_MASK, MAC_CTRL);
		}

		/* Transmit underrun, reset and re-enable
		 * transmitter.
		 */
		if (txstatusdword & IPG_TS_TX_UNDERRUN) {
			IPG_DEBUG_MSG("Transmitter underrun.\n");
			sp->stats.tx_fifo_errors++;
			ipg_reset(dev, IPG_AC_TX_RESET | IPG_AC_DMA |
				  IPG_AC_NETWORK | IPG_AC_FIFO);

			/* Re-configure after DMA reset. */
			if (ipg_io_config(dev) < 0) {
				printk(KERN_INFO
				       "%s: Error during re-configuration.\n",
				       dev->name);
			}

			ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) &
				IPG_MC_RSVD_MASK, MAC_CTRL);
		}
	}

	ipg_nic_txfree(dev);
}
/* Provides statistical information about the IPG NIC. */
static struct net_device_stats *ipg_nic_get_stats(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;

	IPG_DEBUG_MSG("_nic_get_stats\n");

	/* Check to see if the NIC has been initialized via nic_open,
	 * before trying to read statistic registers.
	 */
	if (!test_bit(__LINK_STATE_START, &dev->state))
		return &sp->stats;

	sp->stats.rx_packets += ipg_r32(IPG_FRAMESRCVDOK);
	sp->stats.tx_packets += ipg_r32(IPG_FRAMESXMTDOK);
	sp->stats.rx_bytes += ipg_r32(IPG_OCTETRCVOK);
	sp->stats.tx_bytes += ipg_r32(IPG_OCTETXMTOK);
	temp1 = ipg_r16(IPG_FRAMESLOSTRXERRORS);
	sp->stats.rx_errors += temp1;
	sp->stats.rx_missed_errors += temp1;
	temp1 = ipg_r32(IPG_SINGLECOLFRAMES) + ipg_r32(IPG_MULTICOLFRAMES) +
		ipg_r32(IPG_LATECOLLISIONS);
	temp2 = ipg_r16(IPG_CARRIERSENSEERRORS);
	sp->stats.collisions += temp1;
	sp->stats.tx_dropped += ipg_r16(IPG_FRAMESABORTXSCOLLS);
	sp->stats.tx_errors += ipg_r16(IPG_FRAMESWEXDEFERRAL) +
		ipg_r32(IPG_FRAMESWDEFERREDXMT) + temp1 + temp2;
	sp->stats.multicast += ipg_r32(IPG_MCSTOCTETRCVDOK);

	/* detailed tx_errors */
	sp->stats.tx_carrier_errors += temp2;

	/* detailed rx_errors */
	sp->stats.rx_length_errors += ipg_r16(IPG_INRANGELENGTHERRORS) +
		ipg_r16(IPG_FRAMETOOLONGERRRORS);
	sp->stats.rx_crc_errors += ipg_r16(IPG_FRAMECHECKSEQERRORS);

	/* Unutilized IPG statistic registers. */
	ipg_r32(IPG_MCSTFRAMESRCVDOK);

	return &sp->stats;
}
/* Restore used receive buffers. */
static int ipg_nic_rxrestore(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	const unsigned int curr = sp->rx_current;
	unsigned int dirty = sp->rx_dirty;

	IPG_DEBUG_MSG("_nic_rxrestore\n");

	for (dirty = sp->rx_dirty; curr - dirty > 0; dirty++) {
		unsigned int entry = dirty % IPG_RFDLIST_LENGTH;

		/* rx_copybreak may poke hole here and there. */
		if (sp->rx_buff[entry])
			continue;

		/* Generate a new receive buffer to replace the
		 * current buffer (which will be released by the
		 * Linux system).
		 */
		if (ipg_get_rxbuff(dev, entry) < 0) {
			IPG_DEBUG_MSG("Cannot allocate new Rx buffer.\n");

			break;
		}

		/* Reset the RFS field. */
		sp->rxd[entry].rfs = 0x0000000000000000;
	}
	sp->rx_dirty = dirty;

	return 0;
}
/* Use jumboindex and jumbosize to control jumbo frame status
 * initial status is jumboindex=-1 and jumbosize=0
 * 1. jumboindex = -1 and jumbosize=0 : previous jumbo frame has been done.
 * 2. jumboindex != -1 and jumbosize != 0 : jumbo frame is not over size and receiving
 * 3. jumboindex = -1 and jumbosize != 0 : jumbo frame is over size, already dumped
 *    the previous fragments and need to continue dumping the current one
 */
enum {
	NORMAL_PACKET,
	ERROR_PACKET
};

enum {
	FRAME_NO_START_NO_END	= 0,
	FRAME_WITH_START	= 1,
	FRAME_WITH_END		= 10,
	FRAME_WITH_START_WITH_END = 11
};
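/* Added note on the encoding above: ipg_nic_rx_check_frame_type() sums
 * FRAME_WITH_START (1) and FRAME_WITH_END (10) into a single value, so the
 * four possible frame types map to 0, 1, 10 and 11 and can be switched on
 * directly.
 */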
static void ipg_nic_rx_free_skb(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	unsigned int entry = sp->rx_current % IPG_RFDLIST_LENGTH;

	if (sp->rx_buff[entry]) {
		struct ipg_rx *rxfd = sp->rxd + entry;

		pci_unmap_single(sp->pdev,
			le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
			sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
		dev_kfree_skb_irq(sp->rx_buff[entry]);
		sp->rx_buff[entry] = NULL;
	}
}
static int ipg_nic_rx_check_frame_type(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	struct ipg_rx *rxfd = sp->rxd + (sp->rx_current % IPG_RFDLIST_LENGTH);
	int type = FRAME_NO_START_NO_END;

	if (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMESTART)
		type += FRAME_WITH_START;
	if (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMEEND)
		type += FRAME_WITH_END;
	return type;
}
static int ipg_nic_rx_check_error(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	unsigned int entry = sp->rx_current % IPG_RFDLIST_LENGTH;
	struct ipg_rx *rxfd = sp->rxd + entry;

	if (IPG_DROP_ON_RX_ETH_ERRORS && (le64_to_cpu(rxfd->rfs) &
	    (IPG_RFS_RXFIFOOVERRUN | IPG_RFS_RXRUNTFRAME |
	     IPG_RFS_RXALIGNMENTERROR | IPG_RFS_RXFCSERROR |
	     IPG_RFS_RXOVERSIZEDFRAME | IPG_RFS_RXLENGTHERROR))) {
		IPG_DEBUG_MSG("Rx error, RFS = %16.16lx\n",
			      (unsigned long) rxfd->rfs);

		/* Increment general receive error statistic. */
		sp->stats.rx_errors++;

		/* Increment detailed receive error statistics. */
		if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFIFOOVERRUN) {
			IPG_DEBUG_MSG("RX FIFO overrun occurred.\n");

			sp->stats.rx_fifo_errors++;
		}

		if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXRUNTFRAME) {
			IPG_DEBUG_MSG("RX runt occurred.\n");
			sp->stats.rx_length_errors++;
		}

		/* Do nothing for IPG_RFS_RXOVERSIZEDFRAME, the
		 * error count is handled by an IPG statistic register.
		 */

		if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXALIGNMENTERROR) {
			IPG_DEBUG_MSG("RX alignment error occurred.\n");
			sp->stats.rx_frame_errors++;
		}

		/* Do nothing for IPG_RFS_RXFCSERROR, the error count
		 * is handled by an IPG statistic register.
		 */

		/* Free the memory associated with the RX
		 * buffer since it is erroneous and we will
		 * not pass it to higher layer processes.
		 */
		if (sp->rx_buff[entry]) {
			pci_unmap_single(sp->pdev,
				le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
				sp->rx_buf_sz, PCI_DMA_FROMDEVICE);

			dev_kfree_skb_irq(sp->rx_buff[entry]);
			sp->rx_buff[entry] = NULL;
		}
		return ERROR_PACKET;
	}
	return NORMAL_PACKET;
}
static void ipg_nic_rx_with_start_and_end(struct net_device *dev,
					  struct ipg_nic_private *sp,
					  struct ipg_rx *rxfd, unsigned entry)
{
	struct ipg_jumbo *jumbo = &sp->jumbo;
	struct sk_buff *skb;

	if (jumbo->found_start) {
		dev_kfree_skb_irq(jumbo->skb);
		jumbo->found_start = 0;
		jumbo->current_size = 0;
	}

	/* 1: found error, 0 no error */
	if (ipg_nic_rx_check_error(dev) != NORMAL_PACKET)
		return;

	skb = sp->rx_buff[entry];

	/* accept this frame and send to upper layer */
	framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN;
	if (framelen > sp->rxfrag_size)
		framelen = sp->rxfrag_size;

	skb_put(skb, framelen);
	skb->protocol = eth_type_trans(skb, dev);
	skb->ip_summed = CHECKSUM_NONE;
	netif_rx(skb);
	sp->rx_buff[entry] = NULL;
}
static void ipg_nic_rx_with_start(struct net_device *dev,
				  struct ipg_nic_private *sp,
				  struct ipg_rx *rxfd, unsigned entry)
{
	struct ipg_jumbo *jumbo = &sp->jumbo;
	struct pci_dev *pdev = sp->pdev;
	struct sk_buff *skb;

	/* 1: found error, 0 no error */
	if (ipg_nic_rx_check_error(dev) != NORMAL_PACKET)
		return;

	/* accept this frame and send to upper layer */
	skb = sp->rx_buff[entry];

	if (jumbo->found_start)
		dev_kfree_skb_irq(jumbo->skb);

	pci_unmap_single(pdev, le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
			 sp->rx_buf_sz, PCI_DMA_FROMDEVICE);

	skb_put(skb, sp->rxfrag_size);

	jumbo->found_start = 1;
	jumbo->current_size = sp->rxfrag_size;
	jumbo->skb = skb;

	sp->rx_buff[entry] = NULL;
}
static void ipg_nic_rx_with_end(struct net_device *dev,
				struct ipg_nic_private *sp,
				struct ipg_rx *rxfd, unsigned entry)
{
	struct ipg_jumbo *jumbo = &sp->jumbo;

	/* 1: found error, 0 no error */
	if (ipg_nic_rx_check_error(dev) == NORMAL_PACKET) {
		struct sk_buff *skb = sp->rx_buff[entry];

		if (jumbo->found_start) {
			int framelen, endframelen;

			framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN;

			endframelen = framelen - jumbo->current_size;
			if (framelen > sp->rxsupport_size)
				dev_kfree_skb_irq(jumbo->skb);
			else {
				memcpy(skb_put(jumbo->skb, endframelen),
				       skb->data, endframelen);

				jumbo->skb->protocol =
				    eth_type_trans(jumbo->skb, dev);

				jumbo->skb->ip_summed = CHECKSUM_NONE;
				netif_rx(jumbo->skb);
			}
		}

		jumbo->found_start = 0;
		jumbo->current_size = 0;

		ipg_nic_rx_free_skb(dev);
	} else {
		dev_kfree_skb_irq(jumbo->skb);
		jumbo->found_start = 0;
		jumbo->current_size = 0;
	}
}
static void ipg_nic_rx_no_start_no_end(struct net_device *dev,
				       struct ipg_nic_private *sp,
				       struct ipg_rx *rxfd, unsigned entry)
{
	struct ipg_jumbo *jumbo = &sp->jumbo;

	/* 1: found error, 0 no error */
	if (ipg_nic_rx_check_error(dev) == NORMAL_PACKET) {
		struct sk_buff *skb = sp->rx_buff[entry];

		if (jumbo->found_start) {
			jumbo->current_size += sp->rxfrag_size;
			if (jumbo->current_size <= sp->rxsupport_size) {
				memcpy(skb_put(jumbo->skb, sp->rxfrag_size),
				       skb->data, sp->rxfrag_size);
			}
		}
		ipg_nic_rx_free_skb(dev);
	} else {
		dev_kfree_skb_irq(jumbo->skb);
		jumbo->found_start = 0;
		jumbo->current_size = 0;
	}
}
*dev
)
1337 struct ipg_nic_private
*sp
= netdev_priv(dev
);
1338 unsigned int curr
= sp
->rx_current
;
1339 void __iomem
*ioaddr
= sp
->ioaddr
;
1342 IPG_DEBUG_MSG("_nic_rx\n");
1344 for (i
= 0; i
< IPG_MAXRFDPROCESS_COUNT
; i
++, curr
++) {
1345 unsigned int entry
= curr
% IPG_RFDLIST_LENGTH
;
1346 struct ipg_rx
*rxfd
= sp
->rxd
+ entry
;
1348 if (!(rxfd
->rfs
& cpu_to_le64(IPG_RFS_RFDDONE
)))
1351 switch (ipg_nic_rx_check_frame_type(dev
)) {
1352 case FRAME_WITH_START_WITH_END
:
1353 ipg_nic_rx_with_start_and_end(dev
, sp
, rxfd
, entry
);
1355 case FRAME_WITH_START
:
1356 ipg_nic_rx_with_start(dev
, sp
, rxfd
, entry
);
1358 case FRAME_WITH_END
:
1359 ipg_nic_rx_with_end(dev
, sp
, rxfd
, entry
);
1361 case FRAME_NO_START_NO_END
:
1362 ipg_nic_rx_no_start_no_end(dev
, sp
, rxfd
, entry
);
1367 sp
->rx_current
= curr
;
1369 if (i
== IPG_MAXRFDPROCESS_COUNT
) {
1370 /* There are more RFDs to process, however the
1371 * allocated amount of RFD processing time has
1372 * expired. Assert Interrupt Requested to make
1373 * sure we come back to process the remaining RFDs.
1375 ipg_w32(ipg_r32(ASIC_CTRL
) | IPG_AC_INT_REQUEST
, ASIC_CTRL
);
1378 ipg_nic_rxrestore(dev
);
static int ipg_nic_rx(struct net_device *dev)
{
	/* Transfer received Ethernet frames to higher network layers. */
	struct ipg_nic_private *sp = netdev_priv(dev);
	unsigned int curr = sp->rx_current;
	void __iomem *ioaddr = sp->ioaddr;
	struct ipg_rx *rxfd;

	IPG_DEBUG_MSG("_nic_rx\n");

#define __RFS_MASK \
	cpu_to_le64(IPG_RFS_RFDDONE | IPG_RFS_FRAMESTART | IPG_RFS_FRAMEEND)

	for (i = 0; i < IPG_MAXRFDPROCESS_COUNT; i++, curr++) {
		unsigned int entry = curr % IPG_RFDLIST_LENGTH;
		struct sk_buff *skb = sp->rx_buff[entry];
		unsigned int framelen;

		rxfd = sp->rxd + entry;

		if (((rxfd->rfs & __RFS_MASK) != __RFS_MASK) || !skb)
			break;

		/* Get received frame length. */
		framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN;

		/* Check for jumbo frame arrival with too small
		 * RXFRAG_SIZE.
		 */
		if (framelen > sp->rxfrag_size) {
			IPG_DEBUG_MSG
			    ("RFS FrameLen > allocated fragment size.\n");

			framelen = sp->rxfrag_size;
		}

		if ((IPG_DROP_ON_RX_ETH_ERRORS && (le64_to_cpu(rxfd->rfs) &
		     (IPG_RFS_RXFIFOOVERRUN | IPG_RFS_RXRUNTFRAME |
		      IPG_RFS_RXALIGNMENTERROR | IPG_RFS_RXFCSERROR |
		      IPG_RFS_RXOVERSIZEDFRAME | IPG_RFS_RXLENGTHERROR)))) {

			IPG_DEBUG_MSG("Rx error, RFS = %16.16lx\n",
				      (unsigned long int) rxfd->rfs);

			/* Increment general receive error statistic. */
			sp->stats.rx_errors++;

			/* Increment detailed receive error statistics. */
			if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFIFOOVERRUN) {
				IPG_DEBUG_MSG("RX FIFO overrun occurred.\n");
				sp->stats.rx_fifo_errors++;
			}

			if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXRUNTFRAME) {
				IPG_DEBUG_MSG("RX runt occurred.\n");
				sp->stats.rx_length_errors++;
			}

			if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXOVERSIZEDFRAME) ;
			/* Do nothing, the error count is handled by an
			 * IPG statistic register.
			 */

			if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXALIGNMENTERROR) {
				IPG_DEBUG_MSG("RX alignment error occurred.\n");
				sp->stats.rx_frame_errors++;
			}

			if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFCSERROR) ;
			/* Do nothing, the error count is handled by an
			 * IPG statistic register.
			 */

			/* Free the memory associated with the RX
			 * buffer since it is erroneous and we will
			 * not pass it to higher layer processes.
			 */
			if (skb) {
				__le64 info = rxfd->frag_info;

				pci_unmap_single(sp->pdev,
					le64_to_cpu(info) & ~IPG_RFI_FRAGLEN,
					sp->rx_buf_sz, PCI_DMA_FROMDEVICE);

				dev_kfree_skb_irq(skb);
			}
		} else {
			/* Adjust the new buffer length to accommodate the size
			 * of the received frame.
			 */
			skb_put(skb, framelen);

			/* Set the buffer's protocol field to Ethernet. */
			skb->protocol = eth_type_trans(skb, dev);

			/* The IPG encountered an error with (or
			 * there were no) IP/TCP/UDP checksums.
			 * This may or may not indicate an invalid
			 * IP/TCP/UDP frame was received. Let the
			 * upper layer decide.
			 */
			skb->ip_summed = CHECKSUM_NONE;

			/* Hand off frame for higher layer processing.
			 * The function netif_rx() releases the sk_buff
			 * when processing completes.
			 */
			netif_rx(skb);
		}

		/* Assure RX buffer is not reused by IPG. */
		sp->rx_buff[entry] = NULL;
	}

	/*
	 * If there are more RFDs to process and the allocated amount of RFD
	 * processing time has expired, assert Interrupt Requested to make
	 * sure we come back to process the remaining RFDs.
	 */
	if (i == IPG_MAXRFDPROCESS_COUNT)
		ipg_w32(ipg_r32(ASIC_CTRL) | IPG_AC_INT_REQUEST, ASIC_CTRL);

	/* Check if the RFD list contained no receive frame data. */
	if (!i)
		sp->EmptyRFDListCount++;

	while ((le64_to_cpu(rxfd->rfs) & IPG_RFS_RFDDONE) &&
	       !((le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMESTART) &&
		 (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMEEND))) {
		unsigned int entry = curr++ % IPG_RFDLIST_LENGTH;

		rxfd = sp->rxd + entry;

		IPG_DEBUG_MSG("Frame requires multiple RFDs.\n");

		/* An unexpected event, additional code needed to handle
		 * properly. So for the time being, just disregard the
		 * frame.
		 */

		/* Free the memory associated with the RX
		 * buffer since it is erroneous and we will
		 * not pass it to higher layer processes.
		 */
		if (sp->rx_buff[entry]) {
			pci_unmap_single(sp->pdev,
				le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
				sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb_irq(sp->rx_buff[entry]);
		}

		/* Assure RX buffer is not reused by IPG. */
		sp->rx_buff[entry] = NULL;
	}

	sp->rx_current = curr;

	/* Check to see if there are a minimum number of used
	 * RFDs before restoring any (should improve performance.)
	 */
	if ((curr - sp->rx_dirty) >= IPG_MINUSEDRFDSTOFREE)
		ipg_nic_rxrestore(dev);
static void ipg_reset_after_host_error(struct work_struct *work)
{
	struct ipg_nic_private *sp =
		container_of(work, struct ipg_nic_private, task.work);
	struct net_device *dev = sp->dev;

	IPG_DDEBUG_MSG("DMACtrl = %8.8x\n", ioread32(sp->ioaddr + IPG_DMACTRL));

	/*
	 * Acknowledge HostError interrupt by resetting
	 * IPG DMA and HOST.
	 */
	ipg_reset(dev, IPG_AC_GLOBAL_RESET | IPG_AC_HOST | IPG_AC_DMA);

	if (ipg_io_config(dev) < 0) {
		printk(KERN_INFO "%s: Cannot recover from PCI error.\n",
		       dev->name);
		schedule_delayed_work(&sp->task, HZ);
	}
}
static irqreturn_t ipg_interrupt_handler(int irq, void *dev_inst)
{
	struct net_device *dev = dev_inst;
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	unsigned int handled = 0;

	IPG_DEBUG_MSG("_interrupt_handler\n");

	ipg_nic_rxrestore(dev);

	spin_lock(&sp->lock);

	/* Get interrupt source information, and acknowledge
	 * some (i.e. TxDMAComplete, RxDMAComplete, RxEarly,
	 * IntRequested, MacControlFrame, LinkEvent) interrupts
	 * if issued. Also, all IPG interrupts are disabled by
	 * reading IntStatusAck.
	 */
	status = ipg_r16(INT_STATUS_ACK);

	IPG_DEBUG_MSG("IntStatusAck = %4.4x\n", status);

	/* Shared IRQ of remove event. */
	if (!(status & IPG_IS_RSVD_MASK))
		goto out_enable;

	handled = 1;

	if (unlikely(!netif_running(dev)))
		goto out_unlock;

	/* If RFDListEnd interrupt, restore all used RFDs. */
	if (status & IPG_IS_RFD_LIST_END) {
		IPG_DEBUG_MSG("RFDListEnd Interrupt.\n");

		/* The RFD list end indicates an RFD was encountered
		 * with a 0 NextPtr, or with an RFDDone bit set to 1
		 * (indicating the RFD is not ready for use by the
		 * IPG.) Try to restore all RFDs.
		 */
		ipg_nic_rxrestore(dev);

		/* Increment the RFDlistendCount counter. */
		sp->RFDlistendCount++;
	}

	/* If RFDListEnd, RxDMAPriority, RxDMAComplete, or
	 * IntRequested interrupt, process received frames. */
	if ((status & IPG_IS_RX_DMA_PRIORITY) ||
	    (status & IPG_IS_RFD_LIST_END) ||
	    (status & IPG_IS_RX_DMA_COMPLETE) ||
	    (status & IPG_IS_INT_REQUESTED)) {
		/* Increment the RFD list checked counter if interrupted
		 * only to check the RFD list. */
		if (status & (~(IPG_IS_RX_DMA_PRIORITY | IPG_IS_RFD_LIST_END |
		     IPG_IS_RX_DMA_COMPLETE | IPG_IS_INT_REQUESTED) &
		       (IPG_IS_HOST_ERROR | IPG_IS_TX_DMA_COMPLETE |
			IPG_IS_LINK_EVENT | IPG_IS_TX_COMPLETE |
			IPG_IS_UPDATE_STATS)))
			sp->RFDListCheckedCount++;

		ipg_nic_rx_jumbo(dev);
	}

	/* If TxDMAComplete interrupt, free used TFDs. */
	if (status & IPG_IS_TX_DMA_COMPLETE)
		ipg_nic_txfree(dev);

	/* TxComplete interrupts indicate one of numerous actions.
	 * Determine what action to take based on TXSTATUS register.
	 */
	if (status & IPG_IS_TX_COMPLETE)
		ipg_nic_txcleanup(dev);

	/* If UpdateStats interrupt, update Linux Ethernet statistics */
	if (status & IPG_IS_UPDATE_STATS)
		ipg_nic_get_stats(dev);

	/* If HostError interrupt, reset IPG. */
	if (status & IPG_IS_HOST_ERROR) {
		IPG_DDEBUG_MSG("HostError Interrupt\n");

		schedule_delayed_work(&sp->task, 0);
	}

	/* If LinkEvent interrupt, resolve autonegotiation. */
	if (status & IPG_IS_LINK_EVENT) {
		if (ipg_config_autoneg(dev) < 0)
			printk(KERN_INFO "%s: Auto-negotiation error.\n",
			       dev->name);
	}

	/* If MACCtrlFrame interrupt, do nothing. */
	if (status & IPG_IS_MAC_CTRL_FRAME)
		IPG_DEBUG_MSG("MACCtrlFrame interrupt.\n");

	/* If RxComplete interrupt, do nothing. */
	if (status & IPG_IS_RX_COMPLETE)
		IPG_DEBUG_MSG("RxComplete interrupt.\n");

	/* If RxEarly interrupt, do nothing. */
	if (status & IPG_IS_RX_EARLY)
		IPG_DEBUG_MSG("RxEarly interrupt.\n");

out_enable:
	/* Re-enable IPG interrupts. */
	ipg_w16(IPG_IE_TX_DMA_COMPLETE | IPG_IE_RX_DMA_COMPLETE |
		IPG_IE_HOST_ERROR | IPG_IE_INT_REQUESTED | IPG_IE_TX_COMPLETE |
		IPG_IE_LINK_EVENT | IPG_IE_UPDATE_STATS, INT_ENABLE);
out_unlock:
	spin_unlock(&sp->lock);

	return IRQ_RETVAL(handled);
}
static void ipg_rx_clear(struct ipg_nic_private *sp)
{
	unsigned int i;

	for (i = 0; i < IPG_RFDLIST_LENGTH; i++) {
		if (sp->rx_buff[i]) {
			struct ipg_rx *rxfd = sp->rxd + i;

			dev_kfree_skb_irq(sp->rx_buff[i]);
			sp->rx_buff[i] = NULL;
			pci_unmap_single(sp->pdev,
				le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
				sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
		}
	}
}
static void ipg_tx_clear(struct ipg_nic_private *sp)
{
	unsigned int i;

	for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
		if (sp->tx_buff[i]) {
			struct ipg_tx *txfd = sp->txd + i;

			pci_unmap_single(sp->pdev,
				le64_to_cpu(txfd->frag_info) & ~IPG_TFI_FRAGLEN,
				sp->tx_buff[i]->len, PCI_DMA_TODEVICE);

			dev_kfree_skb_irq(sp->tx_buff[i]);

			sp->tx_buff[i] = NULL;
		}
	}
}
static int ipg_nic_open(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	struct pci_dev *pdev = sp->pdev;
	int rc;

	IPG_DEBUG_MSG("_nic_open\n");

	sp->rx_buf_sz = sp->rxsupport_size;

	/* Check for interrupt line conflicts, and request interrupt
	 * line for IPG.
	 *
	 * IMPORTANT: Disable IPG interrupts prior to registering
	 *            IRQ.
	 */
	ipg_w16(0x0000, INT_ENABLE);

	/* Register the interrupt line to be used by the IPG within
	 * the Linux system.
	 */
	rc = request_irq(pdev->irq, &ipg_interrupt_handler, IRQF_SHARED,
			 dev->name, dev);
	if (rc < 0) {
		printk(KERN_INFO "%s: Error when requesting interrupt.\n",
		       dev->name);
		goto out;
	}

	dev->irq = pdev->irq;

	rc = -ENOMEM;

	sp->rxd = dma_alloc_coherent(&pdev->dev, IPG_RX_RING_BYTES,
				     &sp->rxd_map, GFP_KERNEL);
	if (!sp->rxd)
		goto err_free_irq_0;

	sp->txd = dma_alloc_coherent(&pdev->dev, IPG_TX_RING_BYTES,
				     &sp->txd_map, GFP_KERNEL);
	if (!sp->txd)
		goto err_free_rx_1;

	rc = init_rfdlist(dev);
	if (rc < 0) {
		printk(KERN_INFO "%s: Error during configuration.\n",
		       dev->name);
		goto err_free_tx_2;
	}

	init_tfdlist(dev);

	rc = ipg_io_config(dev);
	if (rc < 0) {
		printk(KERN_INFO "%s: Error during configuration.\n",
		       dev->name);
		goto err_release_tfdlist_3;
	}

	/* Resolve autonegotiation. */
	if (ipg_config_autoneg(dev) < 0)
		printk(KERN_INFO "%s: Auto-negotiation error.\n", dev->name);

	/* initialize JUMBO Frame control variable */
	sp->jumbo.found_start = 0;
	sp->jumbo.current_size = 0;
	sp->jumbo.skb = NULL;

	/* Enable transmit and receive operation of the IPG. */
	ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_RX_ENABLE | IPG_MC_TX_ENABLE) &
		IPG_MC_RSVD_MASK, MAC_CTRL);

	netif_start_queue(dev);
out:
	return rc;

err_release_tfdlist_3:
	ipg_tx_clear(sp);
	ipg_rx_clear(sp);
err_free_tx_2:
	dma_free_coherent(&pdev->dev, IPG_TX_RING_BYTES, sp->txd, sp->txd_map);
err_free_rx_1:
	dma_free_coherent(&pdev->dev, IPG_RX_RING_BYTES, sp->rxd, sp->rxd_map);
err_free_irq_0:
	free_irq(pdev->irq, dev);
	goto out;
}
static int ipg_nic_stop(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	struct pci_dev *pdev = sp->pdev;

	IPG_DEBUG_MSG("_nic_stop\n");

	netif_stop_queue(dev);

	IPG_DDEBUG_MSG("RFDlistendCount = %i\n", sp->RFDlistendCount);
	IPG_DDEBUG_MSG("RFDListCheckedCount = %i\n", sp->rxdCheckedCount);
	IPG_DDEBUG_MSG("EmptyRFDListCount = %i\n", sp->EmptyRFDListCount);
	IPG_DUMPTFDLIST(dev);

	do {
		(void) ipg_r16(INT_STATUS_ACK);

		ipg_reset(dev, IPG_AC_GLOBAL_RESET | IPG_AC_HOST | IPG_AC_DMA);

		synchronize_irq(pdev->irq);
	} while (ipg_r16(INT_ENABLE) & IPG_IE_RSVD_MASK);

	pci_free_consistent(pdev, IPG_RX_RING_BYTES, sp->rxd, sp->rxd_map);
	pci_free_consistent(pdev, IPG_TX_RING_BYTES, sp->txd, sp->txd_map);

	free_irq(pdev->irq, dev);

	return 0;
}
static int ipg_nic_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	unsigned int entry = sp->tx_current % IPG_TFDLIST_LENGTH;
	unsigned long flags;
	struct ipg_tx *txfd;

	IPG_DDEBUG_MSG("_nic_hard_start_xmit\n");

	/* If in 10Mbps mode, stop the transmit queue so
	 * no more transmit frames are accepted.
	 */
	if (sp->tenmbpsmode)
		netif_stop_queue(dev);

	if (sp->reset_current_tfd) {
		sp->reset_current_tfd = 0;
		entry = 0;
	}

	txfd = sp->txd + entry;

	sp->tx_buff[entry] = skb;

	/* Clear all TFC fields, except TFDDONE. */
	txfd->tfc = cpu_to_le64(IPG_TFC_TFDDONE);

	/* Specify the TFC field within the TFD. */
	txfd->tfc |= cpu_to_le64(IPG_TFC_WORDALIGNDISABLED |
		(IPG_TFC_FRAMEID & sp->tx_current) |
		(IPG_TFC_FRAGCOUNT & (1 << 24)));
	/*
	 * 16--17 (WordAlign) <- 3 (disable),
	 * 0--15 (FrameId) <- sp->tx_current,
	 * 24--27 (FragCount) <- 1
	 */
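	/* Added worked example (assumption based on the field comment above):
	 * with tx_current == 5 the OR above yields FrameId = 5 in bits 0-15,
	 * WordAlign disabled in bits 16-17, and FragCount = 1 in bits 24-27,
	 * i.e. a single-fragment descriptor.
	 */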
	/* Request TxComplete interrupts at an interval defined
	 * by the constant IPG_FRAMESBETWEENTXCOMPLETES.
	 * Request TxComplete interrupt for every frame
	 * if in 10Mbps mode to accommodate problem with 10Mbps
	 * processing.
	 */
	if (sp->tenmbpsmode)
		txfd->tfc |= cpu_to_le64(IPG_TFC_TXINDICATE);
	txfd->tfc |= cpu_to_le64(IPG_TFC_TXDMAINDICATE);
	/* Based on compilation option, determine if FCS is to be
	 * appended to transmit frame by IPG.
	 */
	if (!(IPG_APPEND_FCS_ON_TX))
		txfd->tfc |= cpu_to_le64(IPG_TFC_FCSAPPENDDISABLE);

	/* Based on compilation option, determine if IP, TCP and/or
	 * UDP checksums are to be added to transmit frame by IPG.
	 */
	if (IPG_ADD_IPCHECKSUM_ON_TX)
		txfd->tfc |= cpu_to_le64(IPG_TFC_IPCHECKSUMENABLE);

	if (IPG_ADD_TCPCHECKSUM_ON_TX)
		txfd->tfc |= cpu_to_le64(IPG_TFC_TCPCHECKSUMENABLE);

	if (IPG_ADD_UDPCHECKSUM_ON_TX)
		txfd->tfc |= cpu_to_le64(IPG_TFC_UDPCHECKSUMENABLE);

	/* Based on compilation option, determine if VLAN tag info is to be
	 * inserted into transmit frame by IPG.
	 */
	if (IPG_INSERT_MANUAL_VLAN_TAG) {
		txfd->tfc |= cpu_to_le64(IPG_TFC_VLANTAGINSERT |
			((u64) IPG_MANUAL_VLAN_VID << 32) |
			((u64) IPG_MANUAL_VLAN_CFI << 44) |
			((u64) IPG_MANUAL_VLAN_USERPRIORITY << 45));
	}

	/* The fragment start location within system memory is defined
	 * by the sk_buff structure's data field. The bus address of
	 * this location is determined using the IPG_HOST2BUS_MAP
	 * function.
	 */
	txfd->frag_info = cpu_to_le64(pci_map_single(sp->pdev, skb->data,
		skb->len, PCI_DMA_TODEVICE));

	/* The length of the fragment within system memory is defined by
	 * the sk_buff structure's len field.
	 */
	txfd->frag_info |= cpu_to_le64(IPG_TFI_FRAGLEN &
		((u64) (skb->len & 0xffff) << 48));

	/* Clear the TFDDone bit last to indicate the TFD is ready
	 * for transfer to the IPG.
	 */
	txfd->tfc &= cpu_to_le64(~IPG_TFC_TFDDONE);

	spin_lock_irqsave(&sp->lock, flags);

	ipg_w32(IPG_DC_TX_DMA_POLL_NOW, DMA_CTRL);

	if (sp->tx_current == (sp->tx_dirty + IPG_TFDLIST_LENGTH))
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&sp->lock, flags);

	return NETDEV_TX_OK;
}
static void ipg_set_phy_default_param(unsigned char rev,
				      struct net_device *dev, int phy_address)
{
	unsigned short length;
	unsigned char revision;
	unsigned short *phy_param;
	unsigned short address, value;

	phy_param = &DefaultPhyParam[0];
	length = *phy_param & 0x00FF;
	revision = (unsigned char)((*phy_param) >> 8);
	phy_param++;
	while (length != 0) {
		if (rev == revision) {
			while (length > 1) {
				address = *phy_param;
				value = *(phy_param + 1);
				phy_param += 2;
				mdio_write(dev, phy_address, address, value);
				length -= 4;
			}
			break;
		} else {
			phy_param += length / 2;
			length = *phy_param & 0x00FF;
			revision = (unsigned char)((*phy_param) >> 8);
			phy_param++;
		}
	}
}
*dev
, int eep_addr
)
2004 void __iomem
*ioaddr
= ipg_ioaddr(dev
);
2009 value
= IPG_EC_EEPROM_READOPCODE
| (eep_addr
& 0xff);
2010 ipg_w16(value
, EEPROM_CTRL
);
2012 for (i
= 0; i
< 1000; i
++) {
2016 data
= ipg_r16(EEPROM_CTRL
);
2017 if (!(data
& IPG_EC_EEPROM_BUSY
)) {
2018 ret
= ipg_r16(EEPROM_DATA
);
2025 static void ipg_init_mii(struct net_device
*dev
)
2027 struct ipg_nic_private
*sp
= netdev_priv(dev
);
2028 struct mii_if_info
*mii_if
= &sp
->mii_if
;
2032 mii_if
->mdio_read
= mdio_read
;
2033 mii_if
->mdio_write
= mdio_write
;
2034 mii_if
->phy_id_mask
= 0x1f;
2035 mii_if
->reg_num_mask
= 0x1f;
2037 mii_if
->phy_id
= phyaddr
= ipg_find_phyaddr(dev
);
2039 if (phyaddr
!= 0x1f) {
2040 u16 mii_phyctrl
, mii_1000cr
;
2043 mii_1000cr
= mdio_read(dev
, phyaddr
, MII_CTRL1000
);
2044 mii_1000cr
|= ADVERTISE_1000FULL
| ADVERTISE_1000HALF
|
2045 GMII_PHY_1000BASETCONTROL_PreferMaster
;
2046 mdio_write(dev
, phyaddr
, MII_CTRL1000
, mii_1000cr
);
2048 mii_phyctrl
= mdio_read(dev
, phyaddr
, MII_BMCR
);
2050 /* Set default phyparam */
2051 pci_read_config_byte(sp
->pdev
, PCI_REVISION_ID
, &revisionid
);
2052 ipg_set_phy_default_param(revisionid
, dev
, phyaddr
);
2055 mii_phyctrl
|= BMCR_RESET
| BMCR_ANRESTART
;
2056 mdio_write(dev
, phyaddr
, MII_BMCR
, mii_phyctrl
);
static int ipg_hw_init(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;

	/* Read/Write and Reset EEPROM Value */
	/* Read LED Mode Configuration from EEPROM */
	sp->led_mode = read_eeprom(dev, 6);

	/* Reset all functions within the IPG. Do not assert
	 * RST_OUT as it is not compatible with some PHYs.
	 */
	rc = ipg_reset(dev, IPG_RESET_MASK);

	/* Read MAC Address from EEPROM */
	for (i = 0; i < 3; i++)
		sp->station_addr[i] = read_eeprom(dev, 16 + i);

	for (i = 0; i < 3; i++)
		ipg_w16(sp->station_addr[i], STATION_ADDRESS_0 + 2*i);

	/* Set station address in ethernet_device structure. */
	dev->dev_addr[0] = ipg_r16(STATION_ADDRESS_0) & 0x00ff;
	dev->dev_addr[1] = (ipg_r16(STATION_ADDRESS_0) & 0xff00) >> 8;
	dev->dev_addr[2] = ipg_r16(STATION_ADDRESS_1) & 0x00ff;
	dev->dev_addr[3] = (ipg_r16(STATION_ADDRESS_1) & 0xff00) >> 8;
	dev->dev_addr[4] = ipg_r16(STATION_ADDRESS_2) & 0x00ff;
	dev->dev_addr[5] = (ipg_r16(STATION_ADDRESS_2) & 0xff00) >> 8;
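	/* Added note: each 16-bit STATION_ADDRESS_x register supplies two
	 * bytes of the MAC address, low byte first, which is why each byte
	 * pair above is extracted with a mask and a shift.
	 */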
static int ipg_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	int rc;

	mutex_lock(&sp->mii_mutex);
	rc = generic_mii_ioctl(&sp->mii_if, if_mii(ifr), cmd, NULL);
	mutex_unlock(&sp->mii_mutex);

	return rc;
}
static int ipg_nic_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	int err;

	/* Function to accommodate changes to Maximum Transfer Unit
	 * (or MTU) of IPG NIC. Cannot use default function since
	 * the default will not allow for MTU > 1500 bytes.
	 */
	IPG_DEBUG_MSG("_nic_change_mtu\n");

	/*
	 * Check that the new MTU value is between 68 (the minimum MTU
	 * required by IPv4) and 10 KB, which is the largest supported MTU.
	 */
	if (new_mtu < 68 || new_mtu > 10240)
		return -EINVAL;

	err = ipg_nic_stop(dev);
	if (err)
		return err;

	dev->mtu = new_mtu;

	sp->max_rxframe_size = new_mtu;

	sp->rxfrag_size = new_mtu;
	if (sp->rxfrag_size > 4088)
		sp->rxfrag_size = 4088;

	sp->rxsupport_size = sp->max_rxframe_size;

	if (new_mtu > 0x0600)
		sp->is_jumbo = true;
	else
		sp->is_jumbo = false;

	return ipg_nic_open(dev);
}
static int ipg_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	int rc;

	mutex_lock(&sp->mii_mutex);
	rc = mii_ethtool_gset(&sp->mii_if, cmd);
	mutex_unlock(&sp->mii_mutex);

	return rc;
}
static int ipg_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	int rc;

	mutex_lock(&sp->mii_mutex);
	rc = mii_ethtool_sset(&sp->mii_if, cmd);
	mutex_unlock(&sp->mii_mutex);

	return rc;
}
static int ipg_nway_reset(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	int rc;

	mutex_lock(&sp->mii_mutex);
	rc = mii_nway_restart(&sp->mii_if);
	mutex_unlock(&sp->mii_mutex);

	return rc;
}
static struct ethtool_ops ipg_ethtool_ops = {
	.get_settings	= ipg_get_settings,
	.set_settings	= ipg_set_settings,
	.nway_reset	= ipg_nway_reset,
};
static void __devexit ipg_remove(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct ipg_nic_private *sp = netdev_priv(dev);

	IPG_DEBUG_MSG("_remove\n");

	/* Un-register Ethernet device. */
	unregister_netdev(dev);

	pci_iounmap(pdev, sp->ioaddr);

	pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
= {
2214 .ndo_open
= ipg_nic_open
,
2215 .ndo_stop
= ipg_nic_stop
,
2216 .ndo_start_xmit
= ipg_nic_hard_start_xmit
,
2217 .ndo_get_stats
= ipg_nic_get_stats
,
2218 .ndo_set_multicast_list
= ipg_nic_set_multicast_list
,
2219 .ndo_do_ioctl
= ipg_ioctl
,
2220 .ndo_tx_timeout
= ipg_tx_timeout
,
2221 .ndo_change_mtu
= ipg_nic_change_mtu
,
2222 .ndo_set_mac_address
= eth_mac_addr
,
2223 .ndo_validate_addr
= eth_validate_addr
,
static int __devinit ipg_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	unsigned int i = id->driver_data;
	struct ipg_nic_private *sp;
	struct net_device *dev;
	void __iomem *ioaddr;
	int rc;

	rc = pci_enable_device(pdev);
	if (rc < 0)
		goto out;

	printk(KERN_INFO "%s: %s\n", pci_name(pdev), ipg_brand_name[i]);

	pci_set_master(pdev);

	rc = pci_set_dma_mask(pdev, DMA_40BIT_MASK);
	if (rc < 0) {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc < 0) {
			printk(KERN_ERR "%s: DMA config failed.\n",
			       pci_name(pdev));
			goto err_disable_0;
		}
	}

	/*
	 * Initialize net device.
	 */
	dev = alloc_etherdev(sizeof(struct ipg_nic_private));
	if (!dev) {
		printk(KERN_ERR "%s: alloc_etherdev failed\n", pci_name(pdev));
		rc = -ENOMEM;
		goto err_disable_0;
	}

	sp = netdev_priv(dev);
	spin_lock_init(&sp->lock);
	mutex_init(&sp->mii_mutex);

	sp->is_jumbo = IPG_IS_JUMBO;
	sp->rxfrag_size = IPG_RXFRAG_SIZE;
	sp->rxsupport_size = IPG_RXSUPPORT_SIZE;
	sp->max_rxframe_size = IPG_MAX_RXFRAME_SIZE;

	/* Declare IPG NIC functions for Ethernet device methods.
	 */
	dev->netdev_ops = &ipg_netdev_ops;
	SET_NETDEV_DEV(dev, &pdev->dev);
	SET_ETHTOOL_OPS(dev, &ipg_ethtool_ops);

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_free_dev_1;

	ioaddr = pci_iomap(pdev, 1, pci_resource_len(pdev, 1));
	if (!ioaddr) {
		printk(KERN_ERR "%s cannot map MMIO\n", pci_name(pdev));
		rc = -EIO;
		goto err_release_regions_2;
	}

	/* Save the pointer to the PCI device information. */
	sp->ioaddr = ioaddr;
	sp->pdev = pdev;
	sp->dev = dev;

	INIT_DELAYED_WORK(&sp->task, ipg_reset_after_host_error);

	pci_set_drvdata(pdev, dev);

	rc = ipg_hw_init(dev);
	if (rc < 0)
		goto err_unmap_3;

	rc = register_netdev(dev);
	if (rc < 0)
		goto err_unmap_3;

	printk(KERN_INFO "Ethernet device registered as: %s\n", dev->name);
out:
	return rc;

err_unmap_3:
	pci_iounmap(pdev, ioaddr);
err_release_regions_2:
	pci_release_regions(pdev);
err_free_dev_1:
	free_netdev(dev);
err_disable_0:
	pci_disable_device(pdev);
	goto out;
}
static struct pci_driver ipg_pci_driver = {
	.name		= IPG_DRIVER_NAME,
	.id_table	= ipg_pci_tbl,
	.probe		= ipg_probe,
	.remove		= __devexit_p(ipg_remove),
};
static int __init ipg_init_module(void)
{
	return pci_register_driver(&ipg_pci_driver);
}

static void __exit ipg_exit_module(void)
{
	pci_unregister_driver(&ipg_pci_driver);
}

module_init(ipg_init_module);
module_exit(ipg_exit_module);