/*
 * ipg.c: Device Driver for the IP1000 Gigabit Ethernet Adapter
 *
 * Copyright (C) 2003, 2007 IC Plus Corp
 *
 * Sundance Technology, Inc.
 * craig_rich@sundanceti.com
 *
 * http://www.icplus.com.tw
 * sorbica@icplus.com.tw
 *
 * http://www.icplus.com.tw
 */
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/gfp.h>
#include <linux/mii.h>
#include <linux/mutex.h>

#include <asm/div64.h>

#define IPG_RX_RING_BYTES	(sizeof(struct ipg_rx) * IPG_RFDLIST_LENGTH)
#define IPG_TX_RING_BYTES	(sizeof(struct ipg_tx) * IPG_TFDLIST_LENGTH)
#define IPG_RESET_MASK \
	(IPG_AC_GLOBAL_RESET | IPG_AC_RX_RESET | IPG_AC_TX_RESET | \
	 IPG_AC_DMA | IPG_AC_FIFO | IPG_AC_NETWORK | IPG_AC_HOST | \
	 IPG_AC_AUTO_INIT)

#define ipg_w32(val32, reg)	iowrite32((val32), ioaddr + (reg))
#define ipg_w16(val16, reg)	iowrite16((val16), ioaddr + (reg))
#define ipg_w8(val8, reg)	iowrite8((val8), ioaddr + (reg))

#define ipg_r32(reg)		ioread32(ioaddr + (reg))
#define ipg_r16(reg)		ioread16(ioaddr + (reg))
#define ipg_r8(reg)		ioread8(ioaddr + (reg))
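/*
 * Illustrative use of the accessor macros above (a sketch, not part of
 * the original driver): each caller first derives a local 'ioaddr',
 * which the macros capture implicitly, e.g.
 *
 *	void __iomem *ioaddr = ipg_ioaddr(dev);
 *	u32 mode = ipg_r32(ASIC_CTRL);
 *	ipg_w32(mode | IPG_AC_INT_REQUEST, ASIC_CTRL);
 */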
#include "ipg.h"

#define DRV_NAME	"ipg"

MODULE_AUTHOR("IC Plus Corp. 2003");
MODULE_DESCRIPTION("IC Plus IP1000 Gigabit Ethernet Adapter Linux Driver");
MODULE_LICENSE("GPL");
#define IPG_MAX_RXFRAME_SIZE	0x0600
#define IPG_RXFRAG_SIZE		0x0600
#define IPG_RXSUPPORT_SIZE	0x0600
#define IPG_IS_JUMBO		false
/* Variable record -- index by leading revision/length
 * Revision/Length(=N*4), Address1, Data1, Address2, Data2,...,AddressN,DataN
 */
static unsigned short DefaultPhyParam[] = {
	/* 11/12/03 IP1000A v1-3 rev=0x40 */
	/*--------------------------------------------------------------------------
	(0x4000|(15*4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 22, 0x85bd, 24, 0xfff2,
	27, 0x0c10, 28, 0x0c10, 29, 0x2c10, 31, 0x0003, 23, 0x92f6,
	31, 0x0000, 23, 0x003d, 30, 0x00de, 20, 0x20e7, 9, 0x0700,
	--------------------------------------------------------------------------*/
	/* 12/17/03 IP1000A v1-4 rev=0x40 */
	(0x4000 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31,
	0x0000,
	30, 0x005e, 9, 0x0700,
	/* 01/09/04 IP1000A v1-5 rev=0x41 */
	(0x4100 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31,
	0x0000,
	30, 0x005e, 9, 0x0700,
	0x0000
};
static const char *ipg_brand_name[] = {
	"IC PLUS IP1000 1000/100/10 based NIC",
	"Sundance Technology ST2021 based NIC",
	"Tamarack Microelectronics TC9020/9021 based NIC",
	"Tamarack Microelectronics TC9020/9021 based NIC",
	"D-Link NIC IP1000A"
};
static DEFINE_PCI_DEVICE_TABLE(ipg_pci_tbl) = {
	{ PCI_VDEVICE(SUNDANCE,	0x1023), 0 },
	{ PCI_VDEVICE(SUNDANCE,	0x2021), 1 },
	{ PCI_VDEVICE(SUNDANCE,	0x1021), 2 },
	{ PCI_VDEVICE(DLINK,	0x9021), 3 },
	{ PCI_VDEVICE(DLINK,	0x4020), 4 },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, ipg_pci_tbl);
static inline void __iomem *ipg_ioaddr(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	return sp->ioaddr;
}
#ifdef IPG_DEBUG
static void ipg_dump_rfdlist(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	unsigned int i;
	u32 offset;

	IPG_DEBUG_MSG("_dump_rfdlist\n");

	printk(KERN_INFO "rx_current = %2.2x\n", sp->rx_current);
	printk(KERN_INFO "rx_dirty   = %2.2x\n", sp->rx_dirty);
	printk(KERN_INFO "RFDList start address = %16.16lx\n",
	       (unsigned long) sp->rxd_map);
	printk(KERN_INFO "RFDListPtr register   = %8.8x%8.8x\n",
	       ipg_r32(IPG_RFDLISTPTR1), ipg_r32(IPG_RFDLISTPTR0));

	for (i = 0; i < IPG_RFDLIST_LENGTH; i++) {
		offset = (u32) &sp->rxd[i].next_desc - (u32) sp->rxd;
		printk(KERN_INFO "%2.2x %4.4x RFDNextPtr = %16.16lx\n", i,
		       offset, (unsigned long) sp->rxd[i].next_desc);
		offset = (u32) &sp->rxd[i].rfs - (u32) sp->rxd;
		printk(KERN_INFO "%2.2x %4.4x RFS        = %16.16lx\n", i,
		       offset, (unsigned long) sp->rxd[i].rfs);
		offset = (u32) &sp->rxd[i].frag_info - (u32) sp->rxd;
		printk(KERN_INFO "%2.2x %4.4x frag_info  = %16.16lx\n", i,
		       offset, (unsigned long) sp->rxd[i].frag_info);
	}
}

static void ipg_dump_tfdlist(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	unsigned int i;
	u32 offset;

	IPG_DEBUG_MSG("_dump_tfdlist\n");

	printk(KERN_INFO "tx_current = %2.2x\n", sp->tx_current);
	printk(KERN_INFO "tx_dirty   = %2.2x\n", sp->tx_dirty);
	printk(KERN_INFO "TFDList start address = %16.16lx\n",
	       (unsigned long) sp->txd_map);
	printk(KERN_INFO "TFDListPtr register   = %8.8x%8.8x\n",
	       ipg_r32(IPG_TFDLISTPTR1), ipg_r32(IPG_TFDLISTPTR0));

	for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
		offset = (u32) &sp->txd[i].next_desc - (u32) sp->txd;
		printk(KERN_INFO "%2.2x %4.4x TFDNextPtr = %16.16lx\n", i,
		       offset, (unsigned long) sp->txd[i].next_desc);
		offset = (u32) &sp->txd[i].tfc - (u32) sp->txd;
		printk(KERN_INFO "%2.2x %4.4x TFC        = %16.16lx\n", i,
		       offset, (unsigned long) sp->txd[i].tfc);
		offset = (u32) &sp->txd[i].frag_info - (u32) sp->txd;
		printk(KERN_INFO "%2.2x %4.4x frag_info  = %16.16lx\n", i,
		       offset, (unsigned long) sp->txd[i].frag_info);
	}
}
#endif
static void ipg_write_phy_ctl(void __iomem *ioaddr, u8 data)
{
	ipg_w8(IPG_PC_RSVD_MASK & data, PHY_CTRL);
	ndelay(IPG_PC_PHYCTRLWAIT_NS);
}

static void ipg_drive_phy_ctl_low_high(void __iomem *ioaddr, u8 data)
{
	ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | data);
	ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | data);
}

static void send_three_state(void __iomem *ioaddr, u8 phyctrlpolarity)
{
	phyctrlpolarity |= (IPG_PC_MGMTDATA & 0) | IPG_PC_MGMTDIR;

	ipg_drive_phy_ctl_low_high(ioaddr, phyctrlpolarity);
}

static void send_end(void __iomem *ioaddr, u8 phyctrlpolarity)
{
	ipg_w8((IPG_PC_MGMTCLK_LO | (IPG_PC_MGMTDATA & 0) | IPG_PC_MGMTDIR |
		phyctrlpolarity) & IPG_PC_RSVD_MASK, PHY_CTRL);
}

static u16 read_phy_bit(void __iomem *ioaddr, u8 phyctrlpolarity)
{
	u16 bit_data;

	ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | phyctrlpolarity);

	bit_data = ((ipg_r8(PHY_CTRL) & IPG_PC_MGMTDATA) >> 1) & 1;

	ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | phyctrlpolarity);

	return bit_data;
}
/*
 * Read a register from the Physical Layer device located
 * on the IPG NIC, using the IPG PHYCTRL register.
 */
static int mdio_read(struct net_device *dev, int phy_id, int phy_reg)
{
	void __iomem *ioaddr = ipg_ioaddr(dev);
	/*
	 * The GMII management frame structure for a read is as follows:
	 *
	 * |Preamble|st|op|phyad|regad|ta|      data      |idle|
	 * |< 32 1s>|01|10|AAAAA|RRRRR|z0|DDDDDDDDDDDDDDDD|z   |
	 *
	 * <32 1s> = 32 consecutive logic 1 values
	 * A = bit of Physical Layer device address (MSB first)
	 * R = bit of register address (MSB first)
	 * z = High impedance state
	 * D = bit of read data (MSB first)
	 *
	 * Transmission order is 'Preamble' field first, bits transmitted
	 * left to right (first to last).
	 */
	struct {
		u32 field;
		unsigned int len;
	} p[] = {
		{ GMII_PREAMBLE,	32 },	/* Preamble */
		{ GMII_ST,		2  },	/* ST */
		{ GMII_READ,		2  },	/* OP */
		{ phy_id,		5  },	/* PHYAD */
		{ phy_reg,		5  },	/* REGAD */
		{ 0x0000,		2  },	/* TA */
		{ 0x0000,		16 },	/* DATA */
		{ 0x0000,		1  }	/* IDLE */
	};
	unsigned int i, j;
	u8 polarity, data;

	polarity  = ipg_r8(PHY_CTRL);
	polarity &= (IPG_PC_DUPLEX_POLARITY | IPG_PC_LINK_POLARITY);

	/* Create the Preamble, ST, OP, PHYAD, and REGAD field. */
	for (j = 0; j < 5; j++) {
		for (i = 0; i < p[j].len; i++) {
			/* For each variable length field, the MSB must be
			 * transmitted first. Rotate through the field bits,
			 * starting with the MSB, and move each bit into the
			 * 1st (2^1) bit position (this is the bit position
			 * corresponding to the MgmtData bit of the PhyCtrl
			 * register for the IPG).
			 *
			 * Example: ST = 01;
			 *
			 * First write a '0' to bit 1 of the PhyCtrl
			 * register, then write a '1' to bit 1 of the
			 * PhyCtrl register.
			 *
			 * To do this, right shift the MSB of ST by the value:
			 * [field length - 1 - #ST bits already written]
			 * then left shift this result by 1.
			 */
			data  = (p[j].field >> (p[j].len - 1 - i)) << 1;
			data &= IPG_PC_MGMTDATA;
			data |= polarity | IPG_PC_MGMTDIR;

			ipg_drive_phy_ctl_low_high(ioaddr, data);
		}
	}

	send_three_state(ioaddr, polarity);

	read_phy_bit(ioaddr, polarity);

	/*
	 * For a read cycle, the bits for the next two fields (TA and
	 * DATA) are driven by the PHY (the IPG reads these bits).
	 */
	for (i = 0; i < p[6].len; i++) {
		p[6].field |=
		    (read_phy_bit(ioaddr, polarity) << (p[6].len - 1 - i));
	}

	send_three_state(ioaddr, polarity);
	send_three_state(ioaddr, polarity);
	send_three_state(ioaddr, polarity);
	send_end(ioaddr, polarity);

	/* Return the value of the DATA field. */
	return p[6].field;
}
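/*
 * Worked example of the MSB-first serialization above (illustrative
 * values, not part of the original driver): for the 5-bit PHYAD field
 * with phy_id = 1 (binary 00001), iterations i = 0..4 evaluate
 *
 *	(p[3].field >> (5 - 1 - i)) << 1
 *
 * yielding 0, 0, 0, 0, 1 in bit position 1 -- the address is clocked
 * out most significant bit first, one bit per low/high clock pair.
 */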
/*
 * Write to a register on the Physical Layer device located
 * on the IPG NIC, using the IPG PHYCTRL register.
 */
static void mdio_write(struct net_device *dev, int phy_id, int phy_reg, int val)
{
	void __iomem *ioaddr = ipg_ioaddr(dev);
	/*
	 * The GMII management frame structure for a write is as follows:
	 *
	 * |Preamble|st|op|phyad|regad|ta|      data      |idle|
	 * |< 32 1s>|01|10|AAAAA|RRRRR|z0|DDDDDDDDDDDDDDDD|z   |
	 *
	 * <32 1s> = 32 consecutive logic 1 values
	 * A = bit of Physical Layer device address (MSB first)
	 * R = bit of register address (MSB first)
	 * z = High impedance state
	 * D = bit of write data (MSB first)
	 *
	 * Transmission order is 'Preamble' field first, bits transmitted
	 * left to right (first to last).
	 */
	struct {
		u32 field;
		unsigned int len;
	} p[] = {
		{ GMII_PREAMBLE,	32 },	/* Preamble */
		{ GMII_ST,		2  },	/* ST */
		{ GMII_WRITE,		2  },	/* OP */
		{ phy_id,		5  },	/* PHYAD */
		{ phy_reg,		5  },	/* REGAD */
		{ 0x0002,		2  },	/* TA */
		{ val & 0xffff,		16 },	/* DATA */
		{ 0x0000,		1  }	/* IDLE */
	};
	unsigned int i, j;
	u8 polarity, data;

	polarity  = ipg_r8(PHY_CTRL);
	polarity &= (IPG_PC_DUPLEX_POLARITY | IPG_PC_LINK_POLARITY);

	/* Create the Preamble, ST, OP, PHYAD, and REGAD field. */
	for (j = 0; j < 7; j++) {
		for (i = 0; i < p[j].len; i++) {
			/* For each variable length field, the MSB must be
			 * transmitted first. Rotate through the field bits,
			 * starting with the MSB, and move each bit into the
			 * 1st (2^1) bit position (this is the bit position
			 * corresponding to the MgmtData bit of the PhyCtrl
			 * register for the IPG).
			 *
			 * Example: ST = 01;
			 *
			 * First write a '0' to bit 1 of the PhyCtrl
			 * register, then write a '1' to bit 1 of the
			 * PhyCtrl register.
			 *
			 * To do this, right shift the MSB of ST by the value:
			 * [field length - 1 - #ST bits already written]
			 * then left shift this result by 1.
			 */
			data  = (p[j].field >> (p[j].len - 1 - i)) << 1;
			data &= IPG_PC_MGMTDATA;
			data |= polarity | IPG_PC_MGMTDIR;

			ipg_drive_phy_ctl_low_high(ioaddr, data);
		}
	}

	/* The last cycle is a tri-state, so read from the PHY. */
	for (j = 7; j < 8; j++) {
		for (i = 0; i < p[j].len; i++) {
			ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | polarity);

			p[j].field |= ((ipg_r8(PHY_CTRL) &
				IPG_PC_MGMTDATA) >> 1) << (p[j].len - 1 - i);

			ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | polarity);
		}
	}
}
static void ipg_set_led_mode(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	u32 mode;

	mode = ipg_r32(ASIC_CTRL);
	mode &= ~(IPG_AC_LED_MODE_BIT_1 | IPG_AC_LED_MODE | IPG_AC_LED_SPEED);

	if ((sp->led_mode & 0x03) > 1)
		mode |= IPG_AC_LED_MODE_BIT_1;	/* Write Asic Control Bit 29 */

	if ((sp->led_mode & 0x01) == 1)
		mode |= IPG_AC_LED_MODE;	/* Write Asic Control Bit 14 */

	if ((sp->led_mode & 0x08) == 8)
		mode |= IPG_AC_LED_SPEED;	/* Write Asic Control Bit 27 */

	ipg_w32(mode, ASIC_CTRL);
}
static void ipg_set_phy_set(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	int physet;

	physet = ipg_r8(PHY_SET);
	physet &= ~(IPG_PS_MEM_LENB9B | IPG_PS_MEM_LEN9 | IPG_PS_NON_COMPDET);
	physet |= ((sp->led_mode & 0x70) >> 4);
	ipg_w8(physet, PHY_SET);
}
static int ipg_reset(struct net_device *dev, u32 resetflags)
{
	/* Assert functional resets via the IPG AsicCtrl
	 * register as specified by the 'resetflags' input
	 * parameter.
	 */
	void __iomem *ioaddr = ipg_ioaddr(dev);
	unsigned int timeout_count = 0;

	IPG_DEBUG_MSG("_reset\n");

	ipg_w32(ipg_r32(ASIC_CTRL) | resetflags, ASIC_CTRL);

	/* Delay added to account for problem with 10Mbps reset. */
	mdelay(IPG_AC_RESETWAIT);

	while (IPG_AC_RESET_BUSY & ipg_r32(ASIC_CTRL)) {
		mdelay(IPG_AC_RESETWAIT);
		if (++timeout_count > IPG_AC_RESET_TIMEOUT)
			return -ETIME;
	}
	/* Set LED Mode in Asic Control */
	ipg_set_led_mode(dev);

	/* Set PHYSet Register Value */
	ipg_set_phy_set(dev);
	return 0;
}
/* Find the GMII PHY address. */
static int ipg_find_phyaddr(struct net_device *dev)
{
	unsigned int phyaddr, i;

	for (i = 0; i < 32; i++) {
		u32 status;

		/* Search for the correct PHY address among 32 possible. */
		phyaddr = (IPG_NIC_PHY_ADDRESS + i) % 32;

		/* 10/22/03 Grace change verify from GMII_PHY_STATUS to
		 * GMII_PHY_ID1
		 */

		status = mdio_read(dev, phyaddr, MII_BMSR);

		if ((status != 0xFFFF) && (status != 0))
			return phyaddr;
	}

	return 0x1f;
}
/*
 * Configure IPG based on result of IEEE 802.3 PHY
 * auto-negotiation.
 */
static int ipg_config_autoneg(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	unsigned int txflowcontrol;
	unsigned int rxflowcontrol;
	unsigned int fullduplex;
	u32 mac_ctrl_val;
	u32 asicctrl;
	u8 phyctrl;

	IPG_DEBUG_MSG("_config_autoneg\n");

	asicctrl = ipg_r32(ASIC_CTRL);
	phyctrl = ipg_r8(PHY_CTRL);
	mac_ctrl_val = ipg_r32(MAC_CTRL);

	/* Set flags for use in resolving auto-negotiation, assuming
	 * non-1000Mbps, half duplex, no flow control.
	 */
	fullduplex = 0;
	txflowcontrol = 0;
	rxflowcontrol = 0;

	/* To accommodate a problem in 10Mbps operation,
	 * set a global flag if PHY running in 10Mbps mode.
	 */
	sp->tenmbpsmode = 0;

	printk(KERN_INFO "%s: Link speed = ", dev->name);

	/* Determine actual speed of operation. */
	switch (phyctrl & IPG_PC_LINK_SPEED) {
	case IPG_PC_LINK_SPEED_10MBPS:
		printk("10Mbps.\n");
		printk(KERN_INFO "%s: 10Mbps operational mode enabled.\n",
		       dev->name);
		sp->tenmbpsmode = 1;
		break;
	case IPG_PC_LINK_SPEED_100MBPS:
		printk("100Mbps.\n");
		break;
	case IPG_PC_LINK_SPEED_1000MBPS:
		printk("1000Mbps.\n");
		break;
	default:
		printk("undefined!\n");
		return 0;
	}

	if (phyctrl & IPG_PC_DUPLEX_STATUS) {
		fullduplex = 1;
		txflowcontrol = 1;
		rxflowcontrol = 1;
	}

	/* Configure full duplex, and flow control. */
	if (fullduplex == 1) {
		/* Configure IPG for full duplex operation. */
		printk(KERN_INFO "%s: setting full duplex, ", dev->name);

		mac_ctrl_val |= IPG_MC_DUPLEX_SELECT_FD;

		if (txflowcontrol == 1) {
			printk("TX flow control");
			mac_ctrl_val |= IPG_MC_TX_FLOW_CONTROL_ENABLE;
		} else {
			printk("no TX flow control");
			mac_ctrl_val &= ~IPG_MC_TX_FLOW_CONTROL_ENABLE;
		}

		if (rxflowcontrol == 1) {
			printk(", RX flow control.");
			mac_ctrl_val |= IPG_MC_RX_FLOW_CONTROL_ENABLE;
		} else {
			printk(", no RX flow control.");
			mac_ctrl_val &= ~IPG_MC_RX_FLOW_CONTROL_ENABLE;
		}

		printk("\n");
	} else {
		/* Configure IPG for half duplex operation. */
		printk(KERN_INFO "%s: setting half duplex, "
		       "no TX flow control, no RX flow control.\n", dev->name);

		mac_ctrl_val &= ~IPG_MC_DUPLEX_SELECT_FD &
			~IPG_MC_TX_FLOW_CONTROL_ENABLE &
			~IPG_MC_RX_FLOW_CONTROL_ENABLE;
	}
	ipg_w32(mac_ctrl_val, MAC_CTRL);
	return 0;
}
/* Determine and configure multicast operation and set
 * receive mode for IPG.
 */
static void ipg_nic_set_multicast_list(struct net_device *dev)
{
	void __iomem *ioaddr = ipg_ioaddr(dev);
	struct netdev_hw_addr *ha;
	unsigned int hashindex;
	u32 hashtable[2];
	u8 receivemode;

	IPG_DEBUG_MSG("_nic_set_multicast_list\n");

	receivemode = IPG_RM_RECEIVEUNICAST | IPG_RM_RECEIVEBROADCAST;

	if (dev->flags & IFF_PROMISC) {
		/* NIC to be configured in promiscuous mode. */
		receivemode = IPG_RM_RECEIVEALLFRAMES;
	} else if ((dev->flags & IFF_ALLMULTI) ||
		   ((dev->flags & IFF_MULTICAST) &&
		    (netdev_mc_count(dev) > IPG_MULTICAST_HASHTABLE_SIZE))) {
		/* NIC to be configured to receive all multicast
		 * frames. */
		receivemode |= IPG_RM_RECEIVEMULTICAST;
	} else if ((dev->flags & IFF_MULTICAST) && !netdev_mc_empty(dev)) {
		/* NIC to be configured to receive selected
		 * multicast addresses. */
		receivemode |= IPG_RM_RECEIVEMULTICASTHASH;
	}

	/* Calculate the bits to set for the 64 bit, IPG HASHTABLE.
	 * The IPG applies a cyclic-redundancy-check (the same CRC
	 * used to calculate the frame data FCS) to the destination
	 * address of all incoming multicast frames whose destination
	 * address has the multicast bit set. The least significant
	 * 6 bits of the CRC result are used as an addressing index
	 * into the hash table. If the value of the bit addressed by
	 * this index is a 1, the frame is passed to the host system.
	 */

	/* Clear hashtable. */
	hashtable[0] = 0x00000000;
	hashtable[1] = 0x00000000;

	/* Cycle through all multicast addresses to filter. */
	netdev_for_each_mc_addr(ha, dev) {
		/* Calculate CRC result for each multicast address. */
		hashindex = crc32_le(0xffffffff, ha->addr,
				     ETH_ALEN);

		/* Use only the least significant 6 bits. */
		hashindex = hashindex & 0x3F;

		/* Within "hashtable", set bit number "hashindex"
		 * (zero-based).
		 */
		set_bit(hashindex, (void *)hashtable);
	}

	/* Write the value of the hashtable, to the 4, 16 bit
	 * HASHTABLE IPG registers.
	 */
	ipg_w32(hashtable[0], HASHTABLE_0);
	ipg_w32(hashtable[1], HASHTABLE_1);

	ipg_w8(IPG_RM_RSVD_MASK & receivemode, RECEIVE_MODE);

	IPG_DEBUG_MSG("ReceiveMode = %x\n", ipg_r8(RECEIVE_MODE));
}
static int ipg_io_config(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = ipg_ioaddr(dev);
	u32 origmacctrl;
	u32 restoremacctrl;

	IPG_DEBUG_MSG("_io_config\n");

	origmacctrl = ipg_r32(MAC_CTRL);

	restoremacctrl = origmacctrl | IPG_MC_STATISTICS_ENABLE;

	/* Based on compilation option, determine if FCS is to be
	 * stripped on receive frames by IPG.
	 */
	if (!IPG_STRIP_FCS_ON_RX)
		restoremacctrl |= IPG_MC_RCV_FCS;

	/* Determine if transmitter and/or receiver are
	 * enabled so we may restore MACCTRL correctly.
	 */
	if (origmacctrl & IPG_MC_TX_ENABLED)
		restoremacctrl |= IPG_MC_TX_ENABLE;

	if (origmacctrl & IPG_MC_RX_ENABLED)
		restoremacctrl |= IPG_MC_RX_ENABLE;

	/* Transmitter and receiver must be disabled before setting
	 * IFSSelect.
	 */
	ipg_w32((origmacctrl & (IPG_MC_RX_DISABLE | IPG_MC_TX_DISABLE)) &
		IPG_MC_RSVD_MASK, MAC_CTRL);

	/* Now that transmitter and receiver are disabled, write
	 * to IFSSelect.
	 */
	ipg_w32((origmacctrl & IPG_MC_IFS_96BIT) & IPG_MC_RSVD_MASK, MAC_CTRL);

	/* Set RECEIVEMODE register. */
	ipg_nic_set_multicast_list(dev);

	ipg_w16(sp->max_rxframe_size, MAX_FRAME_SIZE);

	ipg_w8(IPG_RXDMAPOLLPERIOD_VALUE, RX_DMA_POLL_PERIOD);
	ipg_w8(IPG_RXDMAURGENTTHRESH_VALUE, RX_DMA_URGENT_THRESH);
	ipg_w8(IPG_RXDMABURSTTHRESH_VALUE, RX_DMA_BURST_THRESH);
	ipg_w8(IPG_TXDMAPOLLPERIOD_VALUE, TX_DMA_POLL_PERIOD);
	ipg_w8(IPG_TXDMAURGENTTHRESH_VALUE, TX_DMA_URGENT_THRESH);
	ipg_w8(IPG_TXDMABURSTTHRESH_VALUE, TX_DMA_BURST_THRESH);
	ipg_w16((IPG_IE_HOST_ERROR | IPG_IE_TX_DMA_COMPLETE |
		 IPG_IE_TX_COMPLETE | IPG_IE_INT_REQUESTED |
		 IPG_IE_UPDATE_STATS | IPG_IE_LINK_EVENT |
		 IPG_IE_RX_DMA_COMPLETE | IPG_IE_RX_DMA_PRIORITY), INT_ENABLE);
	ipg_w16(IPG_FLOWONTHRESH_VALUE, FLOW_ON_THRESH);
	ipg_w16(IPG_FLOWOFFTHRESH_VALUE, FLOW_OFF_THRESH);

	/* Silicon bug workarounds: set DEBUG_CTRL bits per vendor errata. */
	ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0200, DEBUG_CTRL);

	ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0010, DEBUG_CTRL);

	ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0020, DEBUG_CTRL);

	/* Now restore MACCTRL to original setting. */
	ipg_w32(IPG_MC_RSVD_MASK & restoremacctrl, MAC_CTRL);

	/* Disable unused RMON statistics. */
	ipg_w32(IPG_RZ_ALL, RMON_STATISTICS_MASK);

	/* Disable unused MIB statistics. */
	ipg_w32(IPG_SM_MACCONTROLFRAMESXMTD | IPG_SM_MACCONTROLFRAMESRCVD |
		IPG_SM_BCSTOCTETXMTOK_BCSTFRAMESXMTDOK | IPG_SM_TXJUMBOFRAMES |
		IPG_SM_MCSTOCTETXMTOK_MCSTFRAMESXMTDOK | IPG_SM_RXJUMBOFRAMES |
		IPG_SM_BCSTOCTETRCVDOK_BCSTFRAMESRCVDOK |
		IPG_SM_UDPCHECKSUMERRORS | IPG_SM_TCPCHECKSUMERRORS |
		IPG_SM_IPCHECKSUMERRORS, STATISTICS_MASK);

	return 0;
}
/*
 * Create a receive buffer within system memory and update
 * NIC private structure appropriately.
 */
static int ipg_get_rxbuff(struct net_device *dev, int entry)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	struct ipg_rx *rxfd = sp->rxd + entry;
	struct sk_buff *skb;
	u64 rxfragsize;

	IPG_DEBUG_MSG("_get_rxbuff\n");

	skb = netdev_alloc_skb_ip_align(dev, sp->rxsupport_size);
	if (!skb) {
		sp->rx_buff[entry] = NULL;
		return -ENOMEM;
	}

	/* Associate the receive buffer with the IPG NIC. */
	skb->dev = dev;

	/* Save the address of the sk_buff structure. */
	sp->rx_buff[entry] = skb;

	rxfd->frag_info = cpu_to_le64(pci_map_single(sp->pdev, skb->data,
		sp->rx_buf_sz, PCI_DMA_FROMDEVICE));

	/* Set the RFD fragment length. */
	rxfragsize = sp->rxfrag_size;
	rxfd->frag_info |= cpu_to_le64((rxfragsize << 48) & IPG_RFI_FRAGLEN);

	return 0;
}
static int init_rfdlist(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	unsigned int i;

	IPG_DEBUG_MSG("_init_rfdlist\n");

	for (i = 0; i < IPG_RFDLIST_LENGTH; i++) {
		struct ipg_rx *rxfd = sp->rxd + i;

		if (sp->rx_buff[i]) {
			pci_unmap_single(sp->pdev,
				le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
				sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb_irq(sp->rx_buff[i]);
			sp->rx_buff[i] = NULL;
		}

		/* Clear out the RFS field. */
		rxfd->rfs = 0x0000000000000000;

		if (ipg_get_rxbuff(dev, i) < 0) {
			/*
			 * A receive buffer was not ready, break the
			 * RFD list here.
			 */
			IPG_DEBUG_MSG("Cannot allocate Rx buffer.\n");

			/* Just in case we cannot allocate a single RFD.
			 * Should not occur.
			 */
			if (i == 0) {
				printk(KERN_ERR "%s: No memory available"
					" for RFD list.\n", dev->name);
				return -ENOMEM;
			}
		}

		rxfd->next_desc = cpu_to_le64(sp->rxd_map +
			sizeof(struct ipg_rx)*(i + 1));
	}
	sp->rxd[i - 1].next_desc = cpu_to_le64(sp->rxd_map);

	sp->rx_current = 0;
	sp->rx_dirty = 0;

	/* Write the location of the RFDList to the IPG. */
	ipg_w32((u32) sp->rxd_map, RFD_LIST_PTR_0);
	ipg_w32(0x00000000, RFD_LIST_PTR_1);

	return 0;
}
static void init_tfdlist(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	unsigned int i;

	IPG_DEBUG_MSG("_init_tfdlist\n");

	for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
		struct ipg_tx *txfd = sp->txd + i;

		txfd->tfc = cpu_to_le64(IPG_TFC_TFDDONE);

		if (sp->tx_buff[i]) {
			dev_kfree_skb_irq(sp->tx_buff[i]);
			sp->tx_buff[i] = NULL;
		}

		txfd->next_desc = cpu_to_le64(sp->txd_map +
			sizeof(struct ipg_tx)*(i + 1));
	}
	sp->txd[i - 1].next_desc = cpu_to_le64(sp->txd_map);

	sp->tx_current = 0;
	sp->tx_dirty = 0;

	/* Write the location of the TFDList to the IPG. */
	IPG_DDEBUG_MSG("Starting TFDListPtr = %8.8x\n",
		       (u32) sp->txd_map);
	ipg_w32((u32) sp->txd_map, TFD_LIST_PTR_0);
	ipg_w32(0x00000000, TFD_LIST_PTR_1);

	sp->reset_current_tfd = 1;
}
/*
 * Free all transmit buffers which have already been transferred
 * via DMA to the IPG.
 */
static void ipg_nic_txfree(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	unsigned int released, pending, dirty;

	IPG_DEBUG_MSG("_nic_txfree\n");

	pending = sp->tx_current - sp->tx_dirty;
	dirty = sp->tx_dirty % IPG_TFDLIST_LENGTH;

	for (released = 0; released < pending; released++) {
		struct sk_buff *skb = sp->tx_buff[dirty];
		struct ipg_tx *txfd = sp->txd + dirty;

		IPG_DEBUG_MSG("TFC = %16.16lx\n", (unsigned long) txfd->tfc);

		/* Look at each TFD's TFC field beginning
		 * at the last freed TFD up to the current TFD.
		 * If the TFDDone bit is set, free the associated
		 * buffer.
		 */
		if (!(txfd->tfc & cpu_to_le64(IPG_TFC_TFDDONE)))
			break;

		/* Free the transmit buffer. */
		if (skb) {
			pci_unmap_single(sp->pdev,
				le64_to_cpu(txfd->frag_info) & ~IPG_TFI_FRAGLEN,
				skb->len, PCI_DMA_TODEVICE);

			dev_kfree_skb_irq(skb);

			sp->tx_buff[dirty] = NULL;
		}
		dirty = (dirty + 1) % IPG_TFDLIST_LENGTH;
	}

	sp->tx_dirty += released;

	if (netif_queue_stopped(dev) &&
	    (sp->tx_current != (sp->tx_dirty + IPG_TFDLIST_LENGTH))) {
		netif_wake_queue(dev);
	}
}
static void ipg_tx_timeout(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;

	ipg_reset(dev, IPG_AC_TX_RESET | IPG_AC_DMA | IPG_AC_NETWORK |
		  IPG_AC_FIFO);

	spin_lock_irq(&sp->lock);

	/* Re-configure after DMA reset. */
	if (ipg_io_config(dev) < 0) {
		printk(KERN_INFO "%s: Error during re-configuration.\n",
		       dev->name);
	}

	init_tfdlist(dev);

	spin_unlock_irq(&sp->lock);

	ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) & IPG_MC_RSVD_MASK,
		MAC_CTRL);
}
/*
 * For TxComplete interrupts, free all transmit
 * buffers which have already been transferred via DMA
 * to the IPG.
 */
static void ipg_nic_txcleanup(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	unsigned int i;

	IPG_DEBUG_MSG("_nic_txcleanup\n");

	for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
		/* Reading the TXSTATUS register clears the
		 * TX_COMPLETE interrupt.
		 */
		u32 txstatusdword = ipg_r32(TX_STATUS);

		IPG_DEBUG_MSG("TxStatus = %8.8x\n", txstatusdword);

		/* Check for Transmit errors. Error bits only valid if
		 * TX_COMPLETE bit in the TXSTATUS register is a 1.
		 */
		if (!(txstatusdword & IPG_TS_TX_COMPLETE))
			break;

		/* If in 10Mbps mode, indicate transmit is ready. */
		if (sp->tenmbpsmode) {
			netif_wake_queue(dev);
		}

		/* Transmit error, increment stat counters. */
		if (txstatusdword & IPG_TS_TX_ERROR) {
			IPG_DEBUG_MSG("Transmit error.\n");
			sp->stats.tx_errors++;
		}

		/* Late collision, re-enable transmitter. */
		if (txstatusdword & IPG_TS_LATE_COLLISION) {
			IPG_DEBUG_MSG("Late collision on transmit.\n");
			ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) &
				IPG_MC_RSVD_MASK, MAC_CTRL);
		}

		/* Maximum collisions, re-enable transmitter. */
		if (txstatusdword & IPG_TS_TX_MAX_COLL) {
			IPG_DEBUG_MSG("Maximum collisions on transmit.\n");
			ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) &
				IPG_MC_RSVD_MASK, MAC_CTRL);
		}

		/* Transmit underrun, reset and re-enable
		 * transmitter.
		 */
		if (txstatusdword & IPG_TS_TX_UNDERRUN) {
			IPG_DEBUG_MSG("Transmitter underrun.\n");
			sp->stats.tx_fifo_errors++;
			ipg_reset(dev, IPG_AC_TX_RESET | IPG_AC_DMA |
				  IPG_AC_NETWORK | IPG_AC_FIFO);

			/* Re-configure after DMA reset. */
			if (ipg_io_config(dev) < 0) {
				printk(KERN_INFO
				       "%s: Error during re-configuration.\n",
				       dev->name);
			}
			init_tfdlist(dev);

			ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) &
				IPG_MC_RSVD_MASK, MAC_CTRL);
		}
	}

	ipg_nic_txfree(dev);
}
/* Provides statistical information about the IPG NIC. */
static struct net_device_stats *ipg_nic_get_stats(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	u16 temp1;
	u16 temp2;

	IPG_DEBUG_MSG("_nic_get_stats\n");

	/* Check to see if the NIC has been initialized via nic_open,
	 * before trying to read statistic registers.
	 */
	if (!test_bit(__LINK_STATE_START, &dev->state))
		return &sp->stats;

	sp->stats.rx_packets += ipg_r32(IPG_FRAMESRCVDOK);
	sp->stats.tx_packets += ipg_r32(IPG_FRAMESXMTDOK);
	sp->stats.rx_bytes += ipg_r32(IPG_OCTETRCVOK);
	sp->stats.tx_bytes += ipg_r32(IPG_OCTETXMTOK);
	temp1 = ipg_r16(IPG_FRAMESLOSTRXERRORS);
	sp->stats.rx_errors += temp1;
	sp->stats.rx_missed_errors += temp1;
	temp1 = ipg_r32(IPG_SINGLECOLFRAMES) + ipg_r32(IPG_MULTICOLFRAMES) +
		ipg_r32(IPG_LATECOLLISIONS);
	temp2 = ipg_r16(IPG_CARRIERSENSEERRORS);
	sp->stats.collisions += temp1;
	sp->stats.tx_dropped += ipg_r16(IPG_FRAMESABORTXSCOLLS);
	sp->stats.tx_errors += ipg_r16(IPG_FRAMESWEXDEFERRAL) +
		ipg_r32(IPG_FRAMESWDEFERREDXMT) + temp1 + temp2;
	sp->stats.multicast += ipg_r32(IPG_MCSTOCTETRCVDOK);

	/* detailed tx_errors */
	sp->stats.tx_carrier_errors += temp2;

	/* detailed rx_errors */
	sp->stats.rx_length_errors += ipg_r16(IPG_INRANGELENGTHERRORS) +
		ipg_r16(IPG_FRAMETOOLONGERRRORS);
	sp->stats.rx_crc_errors += ipg_r16(IPG_FRAMECHECKSEQERRORS);

	/* Unutilized IPG statistic registers. */
	ipg_r32(IPG_MCSTFRAMESRCVDOK);

	return &sp->stats;
}
1038 static int ipg_nic_rxrestore(struct net_device
*dev
)
1040 struct ipg_nic_private
*sp
= netdev_priv(dev
);
1041 const unsigned int curr
= sp
->rx_current
;
1042 unsigned int dirty
= sp
->rx_dirty
;
1044 IPG_DEBUG_MSG("_nic_rxrestore\n");
1046 for (dirty
= sp
->rx_dirty
; curr
- dirty
> 0; dirty
++) {
1047 unsigned int entry
= dirty
% IPG_RFDLIST_LENGTH
;
1049 /* rx_copybreak may poke hole here and there. */
1050 if (sp
->rx_buff
[entry
])
1053 /* Generate a new receive buffer to replace the
1054 * current buffer (which will be released by the
1057 if (ipg_get_rxbuff(dev
, entry
) < 0) {
1058 IPG_DEBUG_MSG("Cannot allocate new Rx buffer.\n");
1063 /* Reset the RFS field. */
1064 sp
->rxd
[entry
].rfs
= 0x0000000000000000;
1066 sp
->rx_dirty
= dirty
;
/* use jumboindex and jumbosize to control jumbo frame status
 * initial status is jumboindex=-1 and jumbosize=0
 * 1. jumboindex = -1 and jumbosize=0 : previous jumbo frame has been done.
 * 2. jumboindex != -1 and jumbosize != 0 : jumbo frame is not over size and receiving
 * 3. jumboindex = -1 and jumbosize != 0 : jumbo frame is over size, already dump
 *    previous receiving and need to continue dumping the current one
 */
enum {
	NORMAL_PACKET,
	ERROR_PACKET
};

enum {
	FRAME_NO_START_NO_END	= 0,
	FRAME_WITH_START	= 1,
	FRAME_WITH_END		= 10,
	FRAME_WITH_START_WITH_END = 11
};
static void ipg_nic_rx_free_skb(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	unsigned int entry = sp->rx_current % IPG_RFDLIST_LENGTH;

	if (sp->rx_buff[entry]) {
		struct ipg_rx *rxfd = sp->rxd + entry;

		pci_unmap_single(sp->pdev,
			le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
			sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
		dev_kfree_skb_irq(sp->rx_buff[entry]);
		sp->rx_buff[entry] = NULL;
	}
}
static int ipg_nic_rx_check_frame_type(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	struct ipg_rx *rxfd = sp->rxd + (sp->rx_current % IPG_RFDLIST_LENGTH);
	int type = FRAME_NO_START_NO_END;

	if (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMESTART)
		type += FRAME_WITH_START;
	if (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMEEND)
		type += FRAME_WITH_END;
	return type;
}
static int ipg_nic_rx_check_error(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	unsigned int entry = sp->rx_current % IPG_RFDLIST_LENGTH;
	struct ipg_rx *rxfd = sp->rxd + entry;

	if (IPG_DROP_ON_RX_ETH_ERRORS && (le64_to_cpu(rxfd->rfs) &
	     (IPG_RFS_RXFIFOOVERRUN | IPG_RFS_RXRUNTFRAME |
	      IPG_RFS_RXALIGNMENTERROR | IPG_RFS_RXFCSERROR |
	      IPG_RFS_RXOVERSIZEDFRAME | IPG_RFS_RXLENGTHERROR))) {
		IPG_DEBUG_MSG("Rx error, RFS = %16.16lx\n",
			      (unsigned long) rxfd->rfs);

		/* Increment general receive error statistic. */
		sp->stats.rx_errors++;

		/* Increment detailed receive error statistics. */
		if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFIFOOVERRUN) {
			IPG_DEBUG_MSG("RX FIFO overrun occurred.\n");

			sp->stats.rx_fifo_errors++;
		}

		if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXRUNTFRAME) {
			IPG_DEBUG_MSG("RX runt occurred.\n");
			sp->stats.rx_length_errors++;
		}

		/* Do nothing for IPG_RFS_RXOVERSIZEDFRAME,
		 * error count handled by an IPG statistic register.
		 */

		if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXALIGNMENTERROR) {
			IPG_DEBUG_MSG("RX alignment error occurred.\n");
			sp->stats.rx_frame_errors++;
		}

		/* Do nothing for IPG_RFS_RXFCSERROR, error count
		 * handled by an IPG statistic register.
		 */

		/* Free the memory associated with the RX
		 * buffer since it is erroneous and we will
		 * not pass it to higher layer processes.
		 */
		if (sp->rx_buff[entry]) {
			pci_unmap_single(sp->pdev,
				le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
				sp->rx_buf_sz, PCI_DMA_FROMDEVICE);

			dev_kfree_skb_irq(sp->rx_buff[entry]);
			sp->rx_buff[entry] = NULL;
		}
		return ERROR_PACKET;
	}
	return NORMAL_PACKET;
}
static void ipg_nic_rx_with_start_and_end(struct net_device *dev,
					  struct ipg_nic_private *sp,
					  struct ipg_rx *rxfd, unsigned entry)
{
	struct ipg_jumbo *jumbo = &sp->jumbo;
	struct sk_buff *skb;
	int framelen;

	if (jumbo->found_start) {
		dev_kfree_skb_irq(jumbo->skb);
		jumbo->found_start = 0;
		jumbo->current_size = 0;
		jumbo->skb = NULL;
	}

	/* 1: found error, 0 no error */
	if (ipg_nic_rx_check_error(dev) != NORMAL_PACKET)
		return;

	skb = sp->rx_buff[entry];
	if (!skb)
		return;

	/* accept this frame and send to upper layer */
	framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN;
	if (framelen > sp->rxfrag_size)
		framelen = sp->rxfrag_size;

	skb_put(skb, framelen);
	skb->protocol = eth_type_trans(skb, dev);
	skb->ip_summed = CHECKSUM_NONE;
	netif_rx(skb);
	sp->rx_buff[entry] = NULL;
}
static void ipg_nic_rx_with_start(struct net_device *dev,
				  struct ipg_nic_private *sp,
				  struct ipg_rx *rxfd, unsigned entry)
{
	struct ipg_jumbo *jumbo = &sp->jumbo;
	struct pci_dev *pdev = sp->pdev;
	struct sk_buff *skb;

	/* 1: found error, 0 no error */
	if (ipg_nic_rx_check_error(dev) != NORMAL_PACKET)
		return;

	/* accept this frame and send to upper layer */
	skb = sp->rx_buff[entry];
	if (!skb)
		return;

	if (jumbo->found_start)
		dev_kfree_skb_irq(jumbo->skb);

	pci_unmap_single(pdev, le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
			 sp->rx_buf_sz, PCI_DMA_FROMDEVICE);

	skb_put(skb, sp->rxfrag_size);

	jumbo->found_start = 1;
	jumbo->current_size = sp->rxfrag_size;
	jumbo->skb = skb;

	sp->rx_buff[entry] = NULL;
}
static void ipg_nic_rx_with_end(struct net_device *dev,
				struct ipg_nic_private *sp,
				struct ipg_rx *rxfd, unsigned entry)
{
	struct ipg_jumbo *jumbo = &sp->jumbo;

	/* 1: found error, 0 no error */
	if (ipg_nic_rx_check_error(dev) == NORMAL_PACKET) {
		struct sk_buff *skb = sp->rx_buff[entry];

		if (!skb)
			return;

		if (jumbo->found_start) {
			int framelen, endframelen;

			framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN;

			endframelen = framelen - jumbo->current_size;
			if (framelen > sp->rxsupport_size)
				dev_kfree_skb_irq(jumbo->skb);
			else {
				memcpy(skb_put(jumbo->skb, endframelen),
				       skb->data, endframelen);

				jumbo->skb->protocol =
				    eth_type_trans(jumbo->skb, dev);

				jumbo->skb->ip_summed = CHECKSUM_NONE;
				netif_rx(jumbo->skb);
			}
		}

		jumbo->found_start = 0;
		jumbo->current_size = 0;
		jumbo->skb = NULL;

		ipg_nic_rx_free_skb(dev);
	} else {
		dev_kfree_skb_irq(jumbo->skb);
		jumbo->found_start = 0;
		jumbo->current_size = 0;
		jumbo->skb = NULL;
	}
}
static void ipg_nic_rx_no_start_no_end(struct net_device *dev,
				       struct ipg_nic_private *sp,
				       struct ipg_rx *rxfd, unsigned entry)
{
	struct ipg_jumbo *jumbo = &sp->jumbo;

	/* 1: found error, 0 no error */
	if (ipg_nic_rx_check_error(dev) == NORMAL_PACKET) {
		struct sk_buff *skb = sp->rx_buff[entry];

		if (skb) {
			if (jumbo->found_start) {
				jumbo->current_size += sp->rxfrag_size;
				if (jumbo->current_size <= sp->rxsupport_size) {
					memcpy(skb_put(jumbo->skb,
						       sp->rxfrag_size),
					       skb->data, sp->rxfrag_size);
				}
			}
			ipg_nic_rx_free_skb(dev);
		}
	} else {
		dev_kfree_skb_irq(jumbo->skb);
		jumbo->found_start = 0;
		jumbo->current_size = 0;
		jumbo->skb = NULL;
	}
}
static int ipg_nic_rx_jumbo(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	unsigned int curr = sp->rx_current;
	void __iomem *ioaddr = sp->ioaddr;
	unsigned int i;

	IPG_DEBUG_MSG("_nic_rx\n");

	for (i = 0; i < IPG_MAXRFDPROCESS_COUNT; i++, curr++) {
		unsigned int entry = curr % IPG_RFDLIST_LENGTH;
		struct ipg_rx *rxfd = sp->rxd + entry;

		if (!(rxfd->rfs & cpu_to_le64(IPG_RFS_RFDDONE)))
			break;

		switch (ipg_nic_rx_check_frame_type(dev)) {
		case FRAME_WITH_START_WITH_END:
			ipg_nic_rx_with_start_and_end(dev, sp, rxfd, entry);
			break;
		case FRAME_WITH_START:
			ipg_nic_rx_with_start(dev, sp, rxfd, entry);
			break;
		case FRAME_WITH_END:
			ipg_nic_rx_with_end(dev, sp, rxfd, entry);
			break;
		case FRAME_NO_START_NO_END:
			ipg_nic_rx_no_start_no_end(dev, sp, rxfd, entry);
			break;
		}
	}

	sp->rx_current = curr;

	if (i == IPG_MAXRFDPROCESS_COUNT) {
		/* There are more RFDs to process, however the
		 * allocated amount of RFD processing time has
		 * expired. Assert Interrupt Requested to make
		 * sure we come back to process the remaining RFDs.
		 */
		ipg_w32(ipg_r32(ASIC_CTRL) | IPG_AC_INT_REQUEST, ASIC_CTRL);
	}

	ipg_nic_rxrestore(dev);

	return 0;
}
static int ipg_nic_rx(struct net_device *dev)
{
	/* Transfer received Ethernet frames to higher network layers. */
	struct ipg_nic_private *sp = netdev_priv(dev);
	unsigned int curr = sp->rx_current;
	void __iomem *ioaddr = sp->ioaddr;
	struct ipg_rx *rxfd;
	unsigned int i;

	IPG_DEBUG_MSG("_nic_rx\n");

#define __RFS_MASK \
	cpu_to_le64(IPG_RFS_RFDDONE | IPG_RFS_FRAMESTART | IPG_RFS_FRAMEEND)

	for (i = 0; i < IPG_MAXRFDPROCESS_COUNT; i++, curr++) {
		unsigned int entry = curr % IPG_RFDLIST_LENGTH;
		struct sk_buff *skb = sp->rx_buff[entry];
		unsigned int framelen;

		rxfd = sp->rxd + entry;

		if (((rxfd->rfs & __RFS_MASK) != __RFS_MASK) || !skb)
			break;

		/* Get received frame length. */
		framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN;

		/* Check for jumbo frame arrival with too small
		 * RXFRAG_SIZE.
		 */
		if (framelen > sp->rxfrag_size) {
			IPG_DEBUG_MSG
			    ("RFS FrameLen > allocated fragment size.\n");

			framelen = sp->rxfrag_size;
		}

		if ((IPG_DROP_ON_RX_ETH_ERRORS && (le64_to_cpu(rxfd->rfs) &
		       (IPG_RFS_RXFIFOOVERRUN | IPG_RFS_RXRUNTFRAME |
			IPG_RFS_RXALIGNMENTERROR | IPG_RFS_RXFCSERROR |
			IPG_RFS_RXOVERSIZEDFRAME | IPG_RFS_RXLENGTHERROR)))) {

			IPG_DEBUG_MSG("Rx error, RFS = %16.16lx\n",
				      (unsigned long int) rxfd->rfs);

			/* Increment general receive error statistic. */
			sp->stats.rx_errors++;

			/* Increment detailed receive error statistics. */
			if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFIFOOVERRUN) {
				IPG_DEBUG_MSG("RX FIFO overrun occurred.\n");
				sp->stats.rx_fifo_errors++;
			}

			if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXRUNTFRAME) {
				IPG_DEBUG_MSG("RX runt occurred.\n");
				sp->stats.rx_length_errors++;
			}

			/* Do nothing for IPG_RFS_RXOVERSIZEDFRAME,
			 * error count handled by an IPG
			 * statistic register.
			 */

			if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXALIGNMENTERROR) {
				IPG_DEBUG_MSG("RX alignment error occurred.\n");
				sp->stats.rx_frame_errors++;
			}

			/* Do nothing for IPG_RFS_RXFCSERROR, error count
			 * handled by an IPG statistic register.
			 */

			/* Free the memory associated with the RX
			 * buffer since it is erroneous and we will
			 * not pass it to higher layer processes.
			 */
			if (skb) {
				__le64 info = rxfd->frag_info;

				pci_unmap_single(sp->pdev,
					le64_to_cpu(info) & ~IPG_RFI_FRAGLEN,
					sp->rx_buf_sz, PCI_DMA_FROMDEVICE);

				dev_kfree_skb_irq(skb);
			}
		} else {

			/* Adjust the new buffer length to accommodate the size
			 * of the received frame.
			 */
			skb_put(skb, framelen);

			/* Set the buffer's protocol field to Ethernet. */
			skb->protocol = eth_type_trans(skb, dev);

			/* The IPG encountered an error with (or
			 * there were no) IP/TCP/UDP checksums.
			 * This may or may not indicate an invalid
			 * IP/TCP/UDP frame was received. Let the
			 * upper layer decide.
			 */
			skb->ip_summed = CHECKSUM_NONE;

			/* Hand off frame for higher layer processing.
			 * The function netif_rx() releases the sk_buff
			 * when processing completes.
			 */
			netif_rx(skb);
		}

		/* Assure RX buffer is not reused by IPG. */
		sp->rx_buff[entry] = NULL;
	}

	/*
	 * If there are more RFDs to process and the allocated amount of RFD
	 * processing time has expired, assert Interrupt Requested to make
	 * sure we come back to process the remaining RFDs.
	 */
	if (i == IPG_MAXRFDPROCESS_COUNT)
		ipg_w32(ipg_r32(ASIC_CTRL) | IPG_AC_INT_REQUEST, ASIC_CTRL);

#ifdef IPG_DEBUG
	/* Check if the RFD list contained no receive frame data. */
	if (!i)
		sp->EmptyRFDListCount++;
#endif
	while ((le64_to_cpu(rxfd->rfs) & IPG_RFS_RFDDONE) &&
	       !((le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMESTART) &&
		 (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMEEND))) {
		unsigned int entry = curr++ % IPG_RFDLIST_LENGTH;

		rxfd = sp->rxd + entry;

		IPG_DEBUG_MSG("Frame requires multiple RFDs.\n");

		/* An unexpected event, additional code needed to handle
		 * properly. So for the time being, just disregard the
		 * frame.
		 */

		/* Free the memory associated with the RX
		 * buffer since it is erroneous and we will
		 * not pass it to higher layer processes.
		 */
		if (sp->rx_buff[entry]) {
			pci_unmap_single(sp->pdev,
				le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
				sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb_irq(sp->rx_buff[entry]);
		}

		/* Assure RX buffer is not reused by IPG. */
		sp->rx_buff[entry] = NULL;
	}

	sp->rx_current = curr;

	/* Check to see if there are a minimum number of used
	 * RFDs before restoring any (should improve performance.)
	 */
	if ((curr - sp->rx_dirty) >= IPG_MINUSEDRFDSTOFREE)
		ipg_nic_rxrestore(dev);

	return 0;
}
static void ipg_reset_after_host_error(struct work_struct *work)
{
	struct ipg_nic_private *sp =
		container_of(work, struct ipg_nic_private, task.work);
	struct net_device *dev = sp->dev;

	/*
	 * Acknowledge HostError interrupt by resetting
	 * IPG DMA and HOST.
	 */
	ipg_reset(dev, IPG_AC_GLOBAL_RESET | IPG_AC_HOST | IPG_AC_DMA);

	init_rfdlist(dev);
	init_tfdlist(dev);

	if (ipg_io_config(dev) < 0) {
		printk(KERN_INFO "%s: Cannot recover from PCI error.\n",
		       dev->name);
		schedule_delayed_work(&sp->task, HZ);
	}
}
static irqreturn_t ipg_interrupt_handler(int irq, void *dev_inst)
{
	struct net_device *dev = dev_inst;
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	unsigned int handled = 0;
	u16 status;

	IPG_DEBUG_MSG("_interrupt_handler\n");

	if (sp->is_jumbo)
		ipg_nic_rxrestore(dev);

	spin_lock(&sp->lock);

	/* Get interrupt source information, and acknowledge
	 * some (i.e. TxDMAComplete, RxDMAComplete, RxEarly,
	 * IntRequested, MacControlFrame, LinkEvent) interrupts
	 * if issued. Also, all IPG interrupts are disabled by
	 * reading IntStatusAck.
	 */
	status = ipg_r16(INT_STATUS_ACK);

	IPG_DEBUG_MSG("IntStatusAck = %4.4x\n", status);

	/* Shared IRQ of remove event. */
	if (!(status & IPG_IS_RSVD_MASK))
		goto out_enable;

	handled = 1;

	if (unlikely(!netif_running(dev)))
		goto out_unlock;

	/* If RFDListEnd interrupt, restore all used RFDs. */
	if (status & IPG_IS_RFD_LIST_END) {
		IPG_DEBUG_MSG("RFDListEnd Interrupt.\n");

		/* The RFD list end indicates an RFD was encountered
		 * with a 0 NextPtr, or with an RFDDone bit set to 1
		 * (indicating the RFD is not read for use by the
		 * IPG.) Try to restore all RFDs.
		 */
		ipg_nic_rxrestore(dev);

#ifdef IPG_DEBUG
		/* Increment the RFDlistendCount counter. */
		sp->RFDlistendCount++;
#endif
	}

	/* If RFDListEnd, RxDMAPriority, RxDMAComplete, or
	 * IntRequested interrupt, process received frames. */
	if ((status & IPG_IS_RX_DMA_PRIORITY) ||
	    (status & IPG_IS_RFD_LIST_END) ||
	    (status & IPG_IS_RX_DMA_COMPLETE) ||
	    (status & IPG_IS_INT_REQUESTED)) {
#ifdef IPG_DEBUG
		/* Increment the RFD list checked counter if interrupted
		 * only to check the RFD list. */
		if (status & (~(IPG_IS_RX_DMA_PRIORITY | IPG_IS_RFD_LIST_END |
				IPG_IS_RX_DMA_COMPLETE | IPG_IS_INT_REQUESTED) &
			      (IPG_IS_HOST_ERROR | IPG_IS_TX_DMA_COMPLETE |
			       IPG_IS_LINK_EVENT | IPG_IS_TX_COMPLETE |
			       IPG_IS_UPDATE_STATS)))
			sp->RFDListCheckedCount++;
#endif
		if (sp->is_jumbo)
			ipg_nic_rx_jumbo(dev);
		else
			ipg_nic_rx(dev);
	}

	/* If TxDMAComplete interrupt, free used TFDs. */
	if (status & IPG_IS_TX_DMA_COMPLETE)
		ipg_nic_txfree(dev);

	/* TxComplete interrupts indicate one of numerous actions.
	 * Determine what action to take based on TXSTATUS register.
	 */
	if (status & IPG_IS_TX_COMPLETE)
		ipg_nic_txcleanup(dev);

	/* If UpdateStats interrupt, update Linux Ethernet statistics */
	if (status & IPG_IS_UPDATE_STATS)
		ipg_nic_get_stats(dev);

	/* If HostError interrupt, reset IPG. */
	if (status & IPG_IS_HOST_ERROR) {
		IPG_DDEBUG_MSG("HostError Interrupt\n");

		schedule_delayed_work(&sp->task, 0);
	}

	/* If LinkEvent interrupt, resolve autonegotiation. */
	if (status & IPG_IS_LINK_EVENT) {
		if (ipg_config_autoneg(dev) < 0)
			printk(KERN_INFO "%s: Auto-negotiation error.\n",
			       dev->name);
	}

	/* If MACCtrlFrame interrupt, do nothing. */
	if (status & IPG_IS_MAC_CTRL_FRAME)
		IPG_DEBUG_MSG("MACCtrlFrame interrupt.\n");

	/* If RxComplete interrupt, do nothing. */
	if (status & IPG_IS_RX_COMPLETE)
		IPG_DEBUG_MSG("RxComplete interrupt.\n");

	/* If RxEarly interrupt, do nothing. */
	if (status & IPG_IS_RX_EARLY)
		IPG_DEBUG_MSG("RxEarly interrupt.\n");

out_enable:
	/* Re-enable IPG interrupts. */
	ipg_w16(IPG_IE_TX_DMA_COMPLETE | IPG_IE_RX_DMA_COMPLETE |
		IPG_IE_HOST_ERROR | IPG_IE_INT_REQUESTED | IPG_IE_TX_COMPLETE |
		IPG_IE_LINK_EVENT | IPG_IE_UPDATE_STATS, INT_ENABLE);
out_unlock:
	spin_unlock(&sp->lock);

	return IRQ_RETVAL(handled);
}
static void ipg_rx_clear(struct ipg_nic_private *sp)
{
	unsigned int i;

	for (i = 0; i < IPG_RFDLIST_LENGTH; i++) {
		if (sp->rx_buff[i]) {
			struct ipg_rx *rxfd = sp->rxd + i;

			dev_kfree_skb_irq(sp->rx_buff[i]);
			sp->rx_buff[i] = NULL;
			pci_unmap_single(sp->pdev,
				le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
				sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
		}
	}
}
static void ipg_tx_clear(struct ipg_nic_private *sp)
{
	unsigned int i;

	for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
		if (sp->tx_buff[i]) {
			struct ipg_tx *txfd = sp->txd + i;

			pci_unmap_single(sp->pdev,
				le64_to_cpu(txfd->frag_info) & ~IPG_TFI_FRAGLEN,
				sp->tx_buff[i]->len, PCI_DMA_TODEVICE);

			dev_kfree_skb_irq(sp->tx_buff[i]);

			sp->tx_buff[i] = NULL;
		}
	}
}
static int ipg_nic_open(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	struct pci_dev *pdev = sp->pdev;
	int rc;

	IPG_DEBUG_MSG("_nic_open\n");

	sp->rx_buf_sz = sp->rxsupport_size;

	/* Check for interrupt line conflicts, and request interrupt
	 * line for IPG.
	 *
	 * IMPORTANT: Disable IPG interrupts prior to registering
	 *            IRQ.
	 */
	ipg_w16(0x0000, INT_ENABLE);

	/* Register the interrupt line to be used by the IPG within
	 * the Linux system.
	 */
	rc = request_irq(pdev->irq, ipg_interrupt_handler, IRQF_SHARED,
			 dev->name, dev);
	if (rc < 0) {
		printk(KERN_INFO "%s: Error when requesting interrupt.\n",
		       dev->name);
		goto out;
	}

	dev->irq = pdev->irq;

	rc = -ENOMEM;

	sp->rxd = dma_alloc_coherent(&pdev->dev, IPG_RX_RING_BYTES,
				     &sp->rxd_map, GFP_KERNEL);
	if (!sp->rxd)
		goto err_free_irq_0;

	sp->txd = dma_alloc_coherent(&pdev->dev, IPG_TX_RING_BYTES,
				     &sp->txd_map, GFP_KERNEL);
	if (!sp->txd)
		goto err_free_rx_1;

	rc = init_rfdlist(dev);
	if (rc < 0) {
		printk(KERN_INFO "%s: Error during configuration.\n",
		       dev->name);
		goto err_free_tx_2;
	}

	init_tfdlist(dev);

	rc = ipg_io_config(dev);
	if (rc < 0) {
		printk(KERN_INFO "%s: Error during configuration.\n",
		       dev->name);
		goto err_release_tfdlist_3;
	}

	/* Resolve autonegotiation. */
	if (ipg_config_autoneg(dev) < 0)
		printk(KERN_INFO "%s: Auto-negotiation error.\n", dev->name);

	/* initialize JUMBO Frame control variable */
	sp->jumbo.found_start = 0;
	sp->jumbo.current_size = 0;
	sp->jumbo.skb = NULL;

	/* Enable transmit and receive operation of the IPG. */
	ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_RX_ENABLE | IPG_MC_TX_ENABLE) &
		IPG_MC_RSVD_MASK, MAC_CTRL);

	netif_start_queue(dev);
out:
	return rc;

err_release_tfdlist_3:
	ipg_tx_clear(sp);
	ipg_rx_clear(sp);
err_free_tx_2:
	dma_free_coherent(&pdev->dev, IPG_TX_RING_BYTES, sp->txd, sp->txd_map);
err_free_rx_1:
	dma_free_coherent(&pdev->dev, IPG_RX_RING_BYTES, sp->rxd, sp->rxd_map);
err_free_irq_0:
	free_irq(pdev->irq, dev);
	goto out;
}
static int ipg_nic_stop(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	struct pci_dev *pdev = sp->pdev;

	IPG_DEBUG_MSG("_nic_stop\n");

	netif_stop_queue(dev);

	IPG_DUMPTFDLIST(dev);

	do {
		(void) ipg_r16(INT_STATUS_ACK);

		ipg_reset(dev, IPG_AC_GLOBAL_RESET | IPG_AC_HOST | IPG_AC_DMA);

		synchronize_irq(pdev->irq);
	} while (ipg_r16(INT_ENABLE) & IPG_IE_RSVD_MASK);

	ipg_rx_clear(sp);

	ipg_tx_clear(sp);

	pci_free_consistent(pdev, IPG_RX_RING_BYTES, sp->rxd, sp->rxd_map);
	pci_free_consistent(pdev, IPG_TX_RING_BYTES, sp->txd, sp->txd_map);

	free_irq(pdev->irq, dev);

	return 0;
}
static netdev_tx_t ipg_nic_hard_start_xmit(struct sk_buff *skb,
					   struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	unsigned int entry = sp->tx_current % IPG_TFDLIST_LENGTH;
	unsigned long flags;
	struct ipg_tx *txfd;

	IPG_DDEBUG_MSG("_nic_hard_start_xmit\n");

	/* If in 10Mbps mode, stop the transmit queue so
	 * no more transmit frames are accepted.
	 */
	if (sp->tenmbpsmode)
		netif_stop_queue(dev);

	if (sp->reset_current_tfd) {
		sp->reset_current_tfd = 0;
		entry = 0;
	}

	txfd = sp->txd + entry;

	sp->tx_buff[entry] = skb;

	/* Clear all TFC fields, except TFDDONE. */
	txfd->tfc = cpu_to_le64(IPG_TFC_TFDDONE);

	/* Specify the TFC field within the TFD. */
	txfd->tfc |= cpu_to_le64(IPG_TFC_WORDALIGNDISABLED |
		(IPG_TFC_FRAMEID & sp->tx_current) |
		(IPG_TFC_FRAGCOUNT & (1 << 24)));
	/*
	 * 16--17 (WordAlign) <- 3 (disable),
	 * 0--15 (FrameId) <- sp->tx_current,
	 * 24--27 (FragCount) <- 1
	 */

	/* Request TxComplete interrupts at an interval defined
	 * by the constant IPG_FRAMESBETWEENTXCOMPLETES.
	 * Request TxComplete interrupt for every frame
	 * if in 10Mbps mode to accommodate problem with 10Mbps
	 * processing.
	 */
	if (sp->tenmbpsmode)
		txfd->tfc |= cpu_to_le64(IPG_TFC_TXINDICATE);
	txfd->tfc |= cpu_to_le64(IPG_TFC_TXDMAINDICATE);
	/* Based on compilation option, determine if FCS is to be
	 * appended to transmit frame by IPG.
	 */
	if (!(IPG_APPEND_FCS_ON_TX))
		txfd->tfc |= cpu_to_le64(IPG_TFC_FCSAPPENDDISABLE);

	/* Based on compilation option, determine if IP, TCP and/or
	 * UDP checksums are to be added to transmit frame by IPG.
	 */
	if (IPG_ADD_IPCHECKSUM_ON_TX)
		txfd->tfc |= cpu_to_le64(IPG_TFC_IPCHECKSUMENABLE);

	if (IPG_ADD_TCPCHECKSUM_ON_TX)
		txfd->tfc |= cpu_to_le64(IPG_TFC_TCPCHECKSUMENABLE);

	if (IPG_ADD_UDPCHECKSUM_ON_TX)
		txfd->tfc |= cpu_to_le64(IPG_TFC_UDPCHECKSUMENABLE);

	/* Based on compilation option, determine if VLAN tag info is to be
	 * inserted into transmit frame by IPG.
	 */
	if (IPG_INSERT_MANUAL_VLAN_TAG) {
		txfd->tfc |= cpu_to_le64(IPG_TFC_VLANTAGINSERT |
			((u64) IPG_MANUAL_VLAN_VID << 32) |
			((u64) IPG_MANUAL_VLAN_CFI << 44) |
			((u64) IPG_MANUAL_VLAN_USERPRIORITY << 45));
	}

	/* The fragment start location within system memory is defined
	 * by the sk_buff structure's data field. The physical address
	 * of this location within the system's virtual memory space
	 * is determined using the IPG_HOST2BUS_MAP function.
	 */
	txfd->frag_info = cpu_to_le64(pci_map_single(sp->pdev, skb->data,
		skb->len, PCI_DMA_TODEVICE));

	/* The length of the fragment within system memory is defined by
	 * the sk_buff structure's len field.
	 */
	txfd->frag_info |= cpu_to_le64(IPG_TFI_FRAGLEN &
		((u64) (skb->len & 0xffff) << 48));

	/* Clear the TFDDone bit last to indicate the TFD is ready
	 * for transfer to the IPG.
	 */
	txfd->tfc &= cpu_to_le64(~IPG_TFC_TFDDONE);

	spin_lock_irqsave(&sp->lock, flags);

	sp->tx_current++;

	mmiowb();

	ipg_w32(IPG_DC_TX_DMA_POLL_NOW, DMA_CTRL);

	if (sp->tx_current == (sp->tx_dirty + IPG_TFDLIST_LENGTH))
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&sp->lock, flags);

	return NETDEV_TX_OK;
}
static void ipg_set_phy_default_param(unsigned char rev,
				      struct net_device *dev, int phy_address)
{
	unsigned short length;
	unsigned char revision;
	unsigned short *phy_param;
	unsigned short address, value;

	phy_param = &DefaultPhyParam[0];
	length = *phy_param & 0x00FF;
	revision = (unsigned char)((*phy_param) >> 8);
	phy_param++;
	while (length != 0) {
		if (rev == revision) {
			while (length > 1) {
				address = *phy_param;
				value = *(phy_param + 1);
				phy_param += 2;
				mdio_write(dev, phy_address, address, value);
				length -= 4;
			}
			break;
		} else {
			phy_param += length / 2;
			length = *phy_param & 0x00FF;
			revision = (unsigned char)((*phy_param) >> 8);
			phy_param++;
		}
	}
}
static int read_eeprom(struct net_device *dev, int eep_addr)
{
	void __iomem *ioaddr = ipg_ioaddr(dev);
	unsigned int i;
	int ret = 0;
	u16 value;

	value = IPG_EC_EEPROM_READOPCODE | (eep_addr & 0xff);
	ipg_w16(value, EEPROM_CTRL);

	for (i = 0; i < 1000; i++) {
		u16 data;

		mdelay(10);
		data = ipg_r16(EEPROM_CTRL);
		if (!(data & IPG_EC_EEPROM_BUSY)) {
			ret = ipg_r16(EEPROM_DATA);
			break;
		}
	}
	return ret;
}
static void ipg_init_mii(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	struct mii_if_info *mii_if = &sp->mii_if;
	int phyaddr;

	mii_if->dev          = dev;
	mii_if->mdio_read    = mdio_read;
	mii_if->mdio_write   = mdio_write;
	mii_if->phy_id_mask  = 0x1f;
	mii_if->reg_num_mask = 0x1f;

	mii_if->phy_id = phyaddr = ipg_find_phyaddr(dev);

	if (phyaddr != 0x1f) {
		u16 mii_phyctrl, mii_1000cr;
		u8 revisionid = 0;

		mii_1000cr  = mdio_read(dev, phyaddr, MII_CTRL1000);
		mii_1000cr |= ADVERTISE_1000FULL | ADVERTISE_1000HALF |
			GMII_PHY_1000BASETCONTROL_PreferMaster;
		mdio_write(dev, phyaddr, MII_CTRL1000, mii_1000cr);

		mii_phyctrl = mdio_read(dev, phyaddr, MII_BMCR);

		/* Set default phyparam */
		pci_read_config_byte(sp->pdev, PCI_REVISION_ID, &revisionid);
		ipg_set_phy_default_param(revisionid, dev, phyaddr);

		/* Reset PHY */
		mii_phyctrl |= BMCR_RESET | BMCR_ANRESTART;
		mdio_write(dev, phyaddr, MII_BMCR, mii_phyctrl);
	}
}
static int ipg_hw_init(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	unsigned int i;
	int rc;

	/* Read/Write and Reset EEPROM Value */
	/* Read LED Mode Configuration from EEPROM */
	sp->led_mode = read_eeprom(dev, 6);

	/* Reset all functions within the IPG. Do not assert
	 * RST_OUT as not compatible with some PHYs.
	 */
	rc = ipg_reset(dev, IPG_RESET_MASK);
	if (rc < 0)
		goto out;

	ipg_init_mii(dev);

	/* Read MAC Address from EEPROM */
	for (i = 0; i < 3; i++)
		sp->station_addr[i] = read_eeprom(dev, 16 + i);

	for (i = 0; i < 3; i++)
		ipg_w16(sp->station_addr[i], STATION_ADDRESS_0 + 2*i);

	/* Set station address in ethernet_device structure. */
	dev->dev_addr[0] =  ipg_r16(STATION_ADDRESS_0) & 0x00ff;
	dev->dev_addr[1] = (ipg_r16(STATION_ADDRESS_0) & 0xff00) >> 8;
	dev->dev_addr[2] =  ipg_r16(STATION_ADDRESS_1) & 0x00ff;
	dev->dev_addr[3] = (ipg_r16(STATION_ADDRESS_1) & 0xff00) >> 8;
	dev->dev_addr[4] =  ipg_r16(STATION_ADDRESS_2) & 0x00ff;
	dev->dev_addr[5] = (ipg_r16(STATION_ADDRESS_2) & 0xff00) >> 8;
out:
	return rc;
}
static int ipg_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	int rc;

	mutex_lock(&sp->mii_mutex);
	rc = generic_mii_ioctl(&sp->mii_if, if_mii(ifr), cmd, NULL);
	mutex_unlock(&sp->mii_mutex);

	return rc;
}
static int ipg_nic_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	int err;

	/* Function to accommodate changes to Maximum Transfer Unit
	 * (or MTU) of IPG NIC. Cannot use default function since
	 * the default will not allow for MTU > 1500 bytes.
	 */
	IPG_DEBUG_MSG("_nic_change_mtu\n");

	/*
	 * Check that the new MTU value is between 68 (14 byte header, 46 byte
	 * payload, 4 byte FCS) and 10 KB, which is the largest supported MTU.
	 */
	if (new_mtu < 68 || new_mtu > 10240)
		return -EINVAL;

	err = ipg_nic_stop(dev);
	if (err)
		return err;

	dev->mtu = new_mtu;

	sp->max_rxframe_size = new_mtu;

	sp->rxfrag_size = new_mtu;
	if (sp->rxfrag_size > 4088)
		sp->rxfrag_size = 4088;

	sp->rxsupport_size = sp->max_rxframe_size;

	if (new_mtu > 0x0600)
		sp->is_jumbo = true;
	else
		sp->is_jumbo = false;

	return ipg_nic_open(dev);
}
static int ipg_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	int rc;

	mutex_lock(&sp->mii_mutex);
	rc = mii_ethtool_gset(&sp->mii_if, cmd);
	mutex_unlock(&sp->mii_mutex);

	return rc;
}

static int ipg_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	int rc;

	mutex_lock(&sp->mii_mutex);
	rc = mii_ethtool_sset(&sp->mii_if, cmd);
	mutex_unlock(&sp->mii_mutex);

	return rc;
}

static int ipg_nway_reset(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	int rc;

	mutex_lock(&sp->mii_mutex);
	rc = mii_nway_restart(&sp->mii_if);
	mutex_unlock(&sp->mii_mutex);

	return rc;
}

static const struct ethtool_ops ipg_ethtool_ops = {
	.get_settings = ipg_get_settings,
	.set_settings = ipg_set_settings,
	.nway_reset   = ipg_nway_reset,
};
static void __devexit ipg_remove(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct ipg_nic_private *sp = netdev_priv(dev);

	IPG_DEBUG_MSG("_remove\n");

	/* Un-register Ethernet device. */
	unregister_netdev(dev);

	pci_iounmap(pdev, sp->ioaddr);

	pci_release_regions(pdev);

	free_netdev(dev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
static const struct net_device_ops ipg_netdev_ops = {
	.ndo_open		= ipg_nic_open,
	.ndo_stop		= ipg_nic_stop,
	.ndo_start_xmit		= ipg_nic_hard_start_xmit,
	.ndo_get_stats		= ipg_nic_get_stats,
	.ndo_set_multicast_list	= ipg_nic_set_multicast_list,
	.ndo_do_ioctl		= ipg_ioctl,
	.ndo_tx_timeout		= ipg_tx_timeout,
	.ndo_change_mtu		= ipg_nic_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
static int __devinit ipg_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	unsigned int i = id->driver_data;
	struct ipg_nic_private *sp;
	struct net_device *dev;
	void __iomem *ioaddr;
	int rc;

	rc = pci_enable_device(pdev);
	if (rc < 0)
		goto out;

	printk(KERN_INFO "%s: %s\n", pci_name(pdev), ipg_brand_name[i]);

	pci_set_master(pdev);

	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
	if (rc < 0) {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc < 0) {
			printk(KERN_ERR "%s: DMA config failed.\n",
			       pci_name(pdev));
			goto err_disable_0;
		}
	}

	/*
	 * Initialize net device.
	 */
	dev = alloc_etherdev(sizeof(struct ipg_nic_private));
	if (!dev) {
		printk(KERN_ERR "%s: alloc_etherdev failed\n", pci_name(pdev));
		rc = -ENOMEM;
		goto err_disable_0;
	}

	sp = netdev_priv(dev);
	spin_lock_init(&sp->lock);
	mutex_init(&sp->mii_mutex);

	sp->is_jumbo = IPG_IS_JUMBO;
	sp->rxfrag_size = IPG_RXFRAG_SIZE;
	sp->rxsupport_size = IPG_RXSUPPORT_SIZE;
	sp->max_rxframe_size = IPG_MAX_RXFRAME_SIZE;

	/* Declare IPG NIC functions for Ethernet device methods.
	 */
	dev->netdev_ops = &ipg_netdev_ops;
	SET_NETDEV_DEV(dev, &pdev->dev);
	SET_ETHTOOL_OPS(dev, &ipg_ethtool_ops);

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_free_dev_1;

	ioaddr = pci_iomap(pdev, 1, pci_resource_len(pdev, 1));
	if (!ioaddr) {
		printk(KERN_ERR "%s cannot map MMIO\n", pci_name(pdev));
		rc = -EIO;
		goto err_release_regions_2;
	}

	/* Save the pointer to the PCI device information. */
	sp->ioaddr = ioaddr;
	sp->pdev = pdev;
	sp->dev = dev;

	INIT_DELAYED_WORK(&sp->task, ipg_reset_after_host_error);

	pci_set_drvdata(pdev, dev);

	rc = ipg_hw_init(dev);
	if (rc < 0)
		goto err_unmap_3;

	rc = register_netdev(dev);
	if (rc < 0)
		goto err_unmap_3;

	printk(KERN_INFO "Ethernet device registered as: %s\n", dev->name);
out:
	return rc;

err_unmap_3:
	pci_iounmap(pdev, ioaddr);
err_release_regions_2:
	pci_release_regions(pdev);
err_free_dev_1:
	free_netdev(dev);
err_disable_0:
	pci_disable_device(pdev);
	goto out;
}
static struct pci_driver ipg_pci_driver = {
	.name		= IPG_DRIVER_NAME,
	.id_table	= ipg_pci_tbl,
	.probe		= ipg_probe,
	.remove		= __devexit_p(ipg_remove),
};

static int __init ipg_init_module(void)
{
	return pci_register_driver(&ipg_pci_driver);
}

static void __exit ipg_exit_module(void)
{
	pci_unregister_driver(&ipg_pci_driver);
}

module_init(ipg_init_module);
module_exit(ipg_exit_module);