[PATCH] chelsio: statistics improvement
drivers/net/chelsio/cxgb2.c

/*****************************************************************************
 * File: cxgb2.c
 * $Revision: 1.25 $
 * $Date: 2005/06/22 00:43:25 $
 * Description:
 *  Chelsio 10Gb Ethernet Driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * http://www.chelsio.com
 *
 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.
 * All rights reserved.
 *
 * Maintainers: maintainers@chelsio.com
 *
 * Authors: Dimitrios Michailidis <dm@chelsio.com>
 *          Tina Yang             <tainay@chelsio.com>
 *          Felix Marti           <felix@chelsio.com>
 *          Scott Bardone         <sbardone@chelsio.com>
 *          Kurt Ottaway          <kottaway@chelsio.com>
 *          Frank DiMambro        <frank@chelsio.com>
 *
 * History:
 *
 ****************************************************************************/

#include "common.h"
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <linux/dma-mapping.h>
#include <asm/uaccess.h>

#include "cpl5_cmd.h"
#include "regs.h"
#include "gmac.h"
#include "cphy.h"
#include "sge.h"
#include "tp.h"
#include "espi.h"
#include "elmer0.h"

#include <linux/workqueue.h>

static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
{
    schedule_delayed_work(&ap->stats_update_task, secs * HZ);
}

static inline void cancel_mac_stats_update(struct adapter *ap)
{
    cancel_delayed_work(&ap->stats_update_task);
}
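/* Size limits for the SGE rings and TX buffering (entries). */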
#define MAX_CMDQ_ENTRIES      16384
#define MAX_CMDQ1_ENTRIES     1024
#define MAX_RX_BUFFERS        16384
#define MAX_RX_JUMBO_BUFFERS  16384
#define MAX_TX_BUFFERS_HIGH   16384U
#define MAX_TX_BUFFERS_LOW    1536U
#define MAX_TX_BUFFERS        1460U
#define MIN_FL_ENTRIES        32

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

/*
 * The EEPROM is actually bigger but only the first few bytes are used so we
 * only report those.
 */
#define EEPROM_SIZE 32

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("GPL");

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 default message enable bitmap");

#define HCLOCK 0x0
#define LCLOCK 0x1

/* T1 cards powersave mode */
static int t1_clock(struct adapter *adapter, int mode);
static int t1powersave = 1;    /* HW default is powersave mode. */

module_param(t1powersave, int, 0);
MODULE_PARM_DESC(t1powersave, "Enable/Disable T1 powersaving mode");

static int disable_msi = 0;
module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

static const char pci_speed[][4] = {
    "33", "66", "100", "133"
};

/*
 * Setup MAC to receive the types of packets we want.
 */
static void t1_set_rxmode(struct net_device *dev)
{
    struct adapter *adapter = dev->priv;
    struct cmac *mac = adapter->port[dev->if_port].mac;
    struct t1_rx_mode rm;

    rm.dev = dev;
    rm.idx = 0;
    rm.list = dev->mc_list;
    mac->ops->set_rx_mode(mac, &rm);
}

static void link_report(struct port_info *p)
{
    if (!netif_carrier_ok(p->dev))
        printk(KERN_INFO "%s: link down\n", p->dev->name);
    else {
        const char *s = "10Mbps";

        switch (p->link_config.speed) {
        case SPEED_10000: s = "10Gbps"; break;
        case SPEED_1000:  s = "1000Mbps"; break;
        case SPEED_100:   s = "100Mbps"; break;
        }

        printk(KERN_INFO "%s: link up, %s, %s-duplex\n",
               p->dev->name, s,
               p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
    }
}

void t1_link_negotiated(struct adapter *adapter, int port_id, int link_stat,
                        int speed, int duplex, int pause)
{
    struct port_info *p = &adapter->port[port_id];

    if (link_stat != netif_carrier_ok(p->dev)) {
        if (link_stat)
            netif_carrier_on(p->dev);
        else
            netif_carrier_off(p->dev);
        link_report(p);

        /* multi-ports: inform toe */
        if ((speed > 0) && (adapter->params.nports > 1)) {
            unsigned int sched_speed = 10;
            switch (speed) {
            case SPEED_1000:
                sched_speed = 1000;
                break;
            case SPEED_100:
                sched_speed = 100;
                break;
            case SPEED_10:
                sched_speed = 10;
                break;
            }
            t1_sched_update_parms(adapter->sge, port_id, 0, sched_speed);
        }
    }
}

static void link_start(struct port_info *p)
{
    struct cmac *mac = p->mac;

    mac->ops->reset(mac);
    if (mac->ops->macaddress_set)
        mac->ops->macaddress_set(mac, p->dev->dev_addr);
    t1_set_rxmode(p->dev);
    t1_link_start(p->phy, mac, &p->link_config);
    mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}
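/* Enable the checksum offload engines this adapter supports. */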
static void enable_hw_csum(struct adapter *adapter)
{
    if (adapter->flags & TSO_CAPABLE)
        t1_tp_set_ip_checksum_offload(adapter->tp, 1);    /* for TSO only */
    if (adapter->flags & UDP_CSUM_CAPABLE)
        t1_tp_set_udp_checksum_offload(adapter->tp, 1);
    t1_tp_set_tcp_checksum_offload(adapter->tp, 1);
}

/*
 * Things to do upon first use of a card.
 * This must run with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adapter)
{
    int err = 0;

    if (!(adapter->flags & FULL_INIT_DONE)) {
        err = t1_init_hw_modules(adapter);
        if (err)
            goto out_err;

        enable_hw_csum(adapter);
        adapter->flags |= FULL_INIT_DONE;
    }

    t1_interrupts_clear(adapter);

    adapter->params.has_msi = !disable_msi && pci_enable_msi(adapter->pdev) == 0;
    err = request_irq(adapter->pdev->irq,
                      t1_select_intr_handler(adapter),
                      adapter->params.has_msi ? 0 : IRQF_SHARED,
                      adapter->name, adapter);
    if (err) {
        if (adapter->params.has_msi)
            pci_disable_msi(adapter->pdev);

        goto out_err;
    }

    t1_sge_start(adapter->sge);
    t1_interrupts_enable(adapter);
out_err:
    return err;
}

/*
 * Release resources when all the ports have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
    t1_sge_stop(adapter->sge);
    t1_interrupts_disable(adapter);
    free_irq(adapter->pdev->irq, adapter);
    if (adapter->params.has_msi)
        pci_disable_msi(adapter->pdev);
}

static int cxgb_open(struct net_device *dev)
{
    int err;
    struct adapter *adapter = dev->priv;
    int other_ports = adapter->open_device_map & PORT_MASK;

    if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
        return err;

    __set_bit(dev->if_port, &adapter->open_device_map);
    link_start(&adapter->port[dev->if_port]);
    netif_start_queue(dev);
    if (!other_ports && adapter->params.stats_update_period)
        schedule_mac_stats_update(adapter,
                                  adapter->params.stats_update_period);
    return 0;
}

static int cxgb_close(struct net_device *dev)
{
    struct adapter *adapter = dev->priv;
    struct port_info *p = &adapter->port[dev->if_port];
    struct cmac *mac = p->mac;

    netif_stop_queue(dev);
    mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
    netif_carrier_off(dev);

    clear_bit(dev->if_port, &adapter->open_device_map);
    if (adapter->params.stats_update_period &&
        !(adapter->open_device_map & PORT_MASK)) {
        /* Stop statistics accumulation. */
        smp_mb__after_clear_bit();
        spin_lock(&adapter->work_lock);    /* sync with update task */
        spin_unlock(&adapter->work_lock);
        cancel_mac_stats_update(adapter);
    }

    if (!adapter->open_device_map)
        cxgb_down(adapter);
    return 0;
}

static struct net_device_stats *t1_get_stats(struct net_device *dev)
{
    struct adapter *adapter = dev->priv;
    struct port_info *p = &adapter->port[dev->if_port];
    struct net_device_stats *ns = &p->netstats;
    const struct cmac_statistics *pstats;

    /* Do a full update of the MAC stats */
    pstats = p->mac->ops->statistics_update(p->mac,
                                            MAC_STATS_UPDATE_FULL);

    ns->tx_packets = pstats->TxUnicastFramesOK +
        pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK;

    ns->rx_packets = pstats->RxUnicastFramesOK +
        pstats->RxMulticastFramesOK + pstats->RxBroadcastFramesOK;

    ns->tx_bytes = pstats->TxOctetsOK;
    ns->rx_bytes = pstats->RxOctetsOK;

    ns->tx_errors = pstats->TxLateCollisions + pstats->TxLengthErrors +
        pstats->TxUnderrun + pstats->TxFramesAbortedDueToXSCollisions;
    ns->rx_errors = pstats->RxDataErrors + pstats->RxJabberErrors +
        pstats->RxFCSErrors + pstats->RxAlignErrors +
        pstats->RxSequenceErrors + pstats->RxFrameTooLongErrors +
        pstats->RxSymbolErrors + pstats->RxRuntErrors;

    ns->multicast = pstats->RxMulticastFramesOK;
    ns->collisions = pstats->TxTotalCollisions;

    /* detailed rx_errors */
    ns->rx_length_errors = pstats->RxFrameTooLongErrors +
        pstats->RxJabberErrors;
    ns->rx_over_errors = 0;
    ns->rx_crc_errors = pstats->RxFCSErrors;
    ns->rx_frame_errors = pstats->RxAlignErrors;
    ns->rx_fifo_errors = 0;
    ns->rx_missed_errors = 0;

    /* detailed tx_errors */
    ns->tx_aborted_errors = pstats->TxFramesAbortedDueToXSCollisions;
    ns->tx_carrier_errors = 0;
    ns->tx_fifo_errors = pstats->TxUnderrun;
    ns->tx_heartbeat_errors = 0;
    ns->tx_window_errors = pstats->TxLateCollisions;
    return ns;
}

static u32 get_msglevel(struct net_device *dev)
{
    struct adapter *adapter = dev->priv;

    return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
    struct adapter *adapter = dev->priv;

    adapter->msg_enable = val;
}
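/* ethtool statistics labels; they must stay in the order in which get_stats() fills in the values. */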
static char stats_strings[][ETH_GSTRING_LEN] = {
    "TxOctetsOK",
    "TxOctetsBad",
    "TxUnicastFramesOK",
    "TxMulticastFramesOK",
    "TxBroadcastFramesOK",
    "TxPauseFrames",
    "TxFramesWithDeferredXmissions",
    "TxLateCollisions",
    "TxTotalCollisions",
    "TxFramesAbortedDueToXSCollisions",
    "TxUnderrun",
    "TxLengthErrors",
    "TxInternalMACXmitError",
    "TxFramesWithExcessiveDeferral",
    "TxFCSErrors",

    "RxOctetsOK",
    "RxOctetsBad",
    "RxUnicastFramesOK",
    "RxMulticastFramesOK",
    "RxBroadcastFramesOK",
    "RxPauseFrames",
    "RxFCSErrors",
    "RxAlignErrors",
    "RxSymbolErrors",
    "RxDataErrors",
    "RxSequenceErrors",
    "RxRuntErrors",
    "RxJabberErrors",
    "RxInternalMACRcvError",
    "RxInRangeLengthErrors",
    "RxOutOfRangeLengthField",
    "RxFrameTooLongErrors",

    /* Port stats */
    "RxPackets",
    "RxCsumGood",
    "TxPackets",
    "TxCsumOffload",
    "TxTso",
    "RxVlan",
    "TxVlan",

    /* Interrupt stats */
    "rx drops",
    "pure_rsps",
    "unhandled irqs",
    "respQ_empty",
    "respQ_overflow",
    "freelistQ_empty",
    "pkt_too_big",
    "pkt_mismatch",
    "cmdQ_full0",
    "cmdQ_full1",

    "espi_DIP2ParityErr",
    "espi_DIP4Err",
    "espi_RxDrops",
    "espi_TxDrops",
    "espi_RxOvfl",
    "espi_ParityErr"
};

#define T2_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
    return T2_REGMAP_SIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
    struct adapter *adapter = dev->priv;

    strcpy(info->driver, DRV_NAME);
    strcpy(info->version, DRV_VERSION);
    strcpy(info->fw_version, "N/A");
    strcpy(info->bus_info, pci_name(adapter->pdev));
}

static int get_stats_count(struct net_device *dev)
{
    return ARRAY_SIZE(stats_strings);
}

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
    if (stringset == ETH_SS_STATS)
        memcpy(data, stats_strings, sizeof(stats_strings));
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
                      u64 *data)
{
    struct adapter *adapter = dev->priv;
    struct cmac *mac = adapter->port[dev->if_port].mac;
    const struct cmac_statistics *s;
    const struct sge_intr_counts *t;
    struct sge_port_stats ss;

    s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);

    *data++ = s->TxOctetsOK;
    *data++ = s->TxOctetsBad;
    *data++ = s->TxUnicastFramesOK;
    *data++ = s->TxMulticastFramesOK;
    *data++ = s->TxBroadcastFramesOK;
    *data++ = s->TxPauseFrames;
    *data++ = s->TxFramesWithDeferredXmissions;
    *data++ = s->TxLateCollisions;
    *data++ = s->TxTotalCollisions;
    *data++ = s->TxFramesAbortedDueToXSCollisions;
    *data++ = s->TxUnderrun;
    *data++ = s->TxLengthErrors;
    *data++ = s->TxInternalMACXmitError;
    *data++ = s->TxFramesWithExcessiveDeferral;
    *data++ = s->TxFCSErrors;

    *data++ = s->RxOctetsOK;
    *data++ = s->RxOctetsBad;
    *data++ = s->RxUnicastFramesOK;
    *data++ = s->RxMulticastFramesOK;
    *data++ = s->RxBroadcastFramesOK;
    *data++ = s->RxPauseFrames;
    *data++ = s->RxFCSErrors;
    *data++ = s->RxAlignErrors;
    *data++ = s->RxSymbolErrors;
    *data++ = s->RxDataErrors;
    *data++ = s->RxSequenceErrors;
    *data++ = s->RxRuntErrors;
    *data++ = s->RxJabberErrors;
    *data++ = s->RxInternalMACRcvError;
    *data++ = s->RxInRangeLengthErrors;
    *data++ = s->RxOutOfRangeLengthField;
    *data++ = s->RxFrameTooLongErrors;

    t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss);
    *data++ = ss.rx_packets;
    *data++ = ss.rx_cso_good;
    *data++ = ss.tx_packets;
    *data++ = ss.tx_cso;
    *data++ = ss.tx_tso;
    *data++ = ss.vlan_xtract;
    *data++ = ss.vlan_insert;

    t = t1_sge_get_intr_counts(adapter->sge);
    *data++ = t->rx_drops;
    *data++ = t->pure_rsps;
    *data++ = t->unhandled_irqs;
    *data++ = t->respQ_empty;
    *data++ = t->respQ_overflow;
    *data++ = t->freelistQ_empty;
    *data++ = t->pkt_too_big;
    *data++ = t->pkt_mismatch;
    *data++ = t->cmdQ_full[0];
    *data++ = t->cmdQ_full[1];

    if (adapter->espi) {
        const struct espi_intr_counts *e;

        e = t1_espi_get_intr_counts(adapter->espi);
        *data++ = e->DIP2_parity_err;
        *data++ = e->DIP4_err;
        *data++ = e->rx_drops;
        *data++ = e->tx_drops;
        *data++ = e->rx_ovflw;
        *data++ = e->parity_err;
    }
}
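/* Copy the registers in the range [start, end] into the dump buffer at their register offsets. */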
static inline void reg_block_dump(struct adapter *ap, void *buf,
                                  unsigned int start, unsigned int end)
{
    u32 *p = buf + start;

    for ( ; start <= end; start += sizeof(u32))
        *p++ = readl(ap->regs + start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
                     void *buf)
{
    struct adapter *ap = dev->priv;

    /*
     * Version scheme: bits 0..9: chip version, bits 10..15: chip revision
     */
    regs->version = 2;

    memset(buf, 0, T2_REGMAP_SIZE);
    reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER);
    reg_block_dump(ap, buf, A_MC3_CFG, A_MC4_INT_CAUSE);
    reg_block_dump(ap, buf, A_TPI_ADDR, A_TPI_PAR);
    reg_block_dump(ap, buf, A_TP_IN_CONFIG, A_TP_TX_DROP_COUNT);
    reg_block_dump(ap, buf, A_RAT_ROUTE_CONTROL, A_RAT_INTR_CAUSE);
    reg_block_dump(ap, buf, A_CSPI_RX_AE_WM, A_CSPI_INTR_ENABLE);
    reg_block_dump(ap, buf, A_ESPI_SCH_TOKEN0, A_ESPI_GOSTAT);
    reg_block_dump(ap, buf, A_ULP_ULIMIT, A_ULP_PIO_CTRL);
    reg_block_dump(ap, buf, A_PL_ENABLE, A_PL_CAUSE);
    reg_block_dump(ap, buf, A_MC5_CONFIG, A_MC5_MASK_WRITE_CMD);
}

static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
    struct adapter *adapter = dev->priv;
    struct port_info *p = &adapter->port[dev->if_port];

    cmd->supported = p->link_config.supported;
    cmd->advertising = p->link_config.advertising;

    if (netif_carrier_ok(dev)) {
        cmd->speed = p->link_config.speed;
        cmd->duplex = p->link_config.duplex;
    } else {
        cmd->speed = -1;
        cmd->duplex = -1;
    }

    cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
    cmd->phy_address = p->phy->addr;
    cmd->transceiver = XCVR_EXTERNAL;
    cmd->autoneg = p->link_config.autoneg;
    cmd->maxtxpkt = 0;
    cmd->maxrxpkt = 0;
    return 0;
}

static int speed_duplex_to_caps(int speed, int duplex)
{
    int cap = 0;

    switch (speed) {
    case SPEED_10:
        if (duplex == DUPLEX_FULL)
            cap = SUPPORTED_10baseT_Full;
        else
            cap = SUPPORTED_10baseT_Half;
        break;
    case SPEED_100:
        if (duplex == DUPLEX_FULL)
            cap = SUPPORTED_100baseT_Full;
        else
            cap = SUPPORTED_100baseT_Half;
        break;
    case SPEED_1000:
        if (duplex == DUPLEX_FULL)
            cap = SUPPORTED_1000baseT_Full;
        else
            cap = SUPPORTED_1000baseT_Half;
        break;
    case SPEED_10000:
        if (duplex == DUPLEX_FULL)
            cap = SUPPORTED_10000baseT_Full;
    }
    return cap;
}
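/* Link modes the driver will accept in an ethtool autonegotiation advertisement. */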
#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
                         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
                         ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
                         ADVERTISED_10000baseT_Full)

static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
    struct adapter *adapter = dev->priv;
    struct port_info *p = &adapter->port[dev->if_port];
    struct link_config *lc = &p->link_config;

    if (!(lc->supported & SUPPORTED_Autoneg))
        return -EOPNOTSUPP;    /* can't change speed/duplex */

    if (cmd->autoneg == AUTONEG_DISABLE) {
        int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

        if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
            return -EINVAL;
        lc->requested_speed = cmd->speed;
        lc->requested_duplex = cmd->duplex;
        lc->advertising = 0;
    } else {
        cmd->advertising &= ADVERTISED_MASK;
        if (cmd->advertising & (cmd->advertising - 1))
            cmd->advertising = lc->supported;
        cmd->advertising &= lc->supported;
        if (!cmd->advertising)
            return -EINVAL;
        lc->requested_speed = SPEED_INVALID;
        lc->requested_duplex = DUPLEX_INVALID;
        lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
    }
    lc->autoneg = cmd->autoneg;
    if (netif_running(dev))
        t1_link_start(p->phy, p->mac, lc);
    return 0;
}

static void get_pauseparam(struct net_device *dev,
                           struct ethtool_pauseparam *epause)
{
    struct adapter *adapter = dev->priv;
    struct port_info *p = &adapter->port[dev->if_port];

    epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
    epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
    epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
                          struct ethtool_pauseparam *epause)
{
    struct adapter *adapter = dev->priv;
    struct port_info *p = &adapter->port[dev->if_port];
    struct link_config *lc = &p->link_config;

    if (epause->autoneg == AUTONEG_DISABLE)
        lc->requested_fc = 0;
    else if (lc->supported & SUPPORTED_Autoneg)
        lc->requested_fc = PAUSE_AUTONEG;
    else
        return -EINVAL;

    if (epause->rx_pause)
        lc->requested_fc |= PAUSE_RX;
    if (epause->tx_pause)
        lc->requested_fc |= PAUSE_TX;
    if (lc->autoneg == AUTONEG_ENABLE) {
        if (netif_running(dev))
            t1_link_start(p->phy, p->mac, lc);
    } else {
        lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
        if (netif_running(dev))
            p->mac->ops->set_speed_duplex_fc(p->mac, -1, -1,
                                             lc->fc);
    }
    return 0;
}

static u32 get_rx_csum(struct net_device *dev)
{
    struct adapter *adapter = dev->priv;

    return (adapter->flags & RX_CSUM_ENABLED) != 0;
}

static int set_rx_csum(struct net_device *dev, u32 data)
{
    struct adapter *adapter = dev->priv;

    if (data)
        adapter->flags |= RX_CSUM_ENABLED;
    else
        adapter->flags &= ~RX_CSUM_ENABLED;
    return 0;
}

static int set_tso(struct net_device *dev, u32 value)
{
    struct adapter *adapter = dev->priv;

    if (!(adapter->flags & TSO_CAPABLE))
        return value ? -EOPNOTSUPP : 0;
    return ethtool_op_set_tso(dev, value);
}
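/* ethtool ring parameter handlers: report and resize the SGE free lists and command queues. */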
static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
    struct adapter *adapter = dev->priv;
    int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;

    e->rx_max_pending = MAX_RX_BUFFERS;
    e->rx_mini_max_pending = 0;
    e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
    e->tx_max_pending = MAX_CMDQ_ENTRIES;

    e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl];
    e->rx_mini_pending = 0;
    e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl];
    e->tx_pending = adapter->params.sge.cmdQ_size[0];
}

static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
    struct adapter *adapter = dev->priv;
    int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;

    if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending ||
        e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
        e->tx_pending > MAX_CMDQ_ENTRIES ||
        e->rx_pending < MIN_FL_ENTRIES ||
        e->rx_jumbo_pending < MIN_FL_ENTRIES ||
        e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1))
        return -EINVAL;

    if (adapter->flags & FULL_INIT_DONE)
        return -EBUSY;

    adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
    adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
    adapter->params.sge.cmdQ_size[0] = e->tx_pending;
    adapter->params.sge.cmdQ_size[1] = e->tx_pending > MAX_CMDQ1_ENTRIES ?
        MAX_CMDQ1_ENTRIES : e->tx_pending;
    return 0;
}

static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
    struct adapter *adapter = dev->priv;

    /*
     * If RX coalescing is requested we use NAPI, otherwise interrupts.
     * This choice can be made only when all ports and the TOE are off.
     */
    if (adapter->open_device_map == 0)
        adapter->params.sge.polling = c->use_adaptive_rx_coalesce;

    if (adapter->params.sge.polling) {
        adapter->params.sge.rx_coalesce_usecs = 0;
    } else {
        adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
    }
    adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
    adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
    t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
    return 0;
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
    struct adapter *adapter = dev->priv;

    c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs;
    c->rate_sample_interval = adapter->params.sge.sample_interval_usecs;
    c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable;
    return 0;
}

static int get_eeprom_len(struct net_device *dev)
{
    struct adapter *adapter = dev->priv;

    return t1_is_asic(adapter) ? EEPROM_SIZE : 0;
}

#define EEPROM_MAGIC(ap) \
    (PCI_VENDOR_ID_CHELSIO | ((ap)->params.chip_version << 16))

static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
                      u8 *data)
{
    int i;
    u8 buf[EEPROM_SIZE] __attribute__((aligned(4)));
    struct adapter *adapter = dev->priv;

    e->magic = EEPROM_MAGIC(adapter);
    for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32))
        t1_seeprom_read(adapter, i, (u32 *)&buf[i]);
    memcpy(data, buf + e->offset, e->len);
    return 0;
}

static const struct ethtool_ops t1_ethtool_ops = {
    .get_settings      = get_settings,
    .set_settings      = set_settings,
    .get_drvinfo       = get_drvinfo,
    .get_msglevel      = get_msglevel,
    .set_msglevel      = set_msglevel,
    .get_ringparam     = get_sge_param,
    .set_ringparam     = set_sge_param,
    .get_coalesce      = get_coalesce,
    .set_coalesce      = set_coalesce,
    .get_eeprom_len    = get_eeprom_len,
    .get_eeprom        = get_eeprom,
    .get_pauseparam    = get_pauseparam,
    .set_pauseparam    = set_pauseparam,
    .get_rx_csum       = get_rx_csum,
    .set_rx_csum       = set_rx_csum,
    .get_tx_csum       = ethtool_op_get_tx_csum,
    .set_tx_csum       = ethtool_op_set_tx_csum,
    .get_sg            = ethtool_op_get_sg,
    .set_sg            = ethtool_op_set_sg,
    .get_link          = ethtool_op_get_link,
    .get_strings       = get_strings,
    .get_stats_count   = get_stats_count,
    .get_ethtool_stats = get_stats,
    .get_regs_len      = get_regs_len,
    .get_regs          = get_regs,
    .get_tso           = ethtool_op_get_tso,
    .set_tso           = set_tso,
};

static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
    struct adapter *adapter = dev->priv;
    struct mii_ioctl_data *data = if_mii(req);

    switch (cmd) {
    case SIOCGMIIPHY:
        data->phy_id = adapter->port[dev->if_port].phy->addr;
        /* FALLTHRU */
    case SIOCGMIIREG: {
        struct cphy *phy = adapter->port[dev->if_port].phy;
        u32 val;

        if (!phy->mdio_read)
            return -EOPNOTSUPP;
        phy->mdio_read(adapter, data->phy_id, 0, data->reg_num & 0x1f,
                       &val);
        data->val_out = val;
        break;
    }
    case SIOCSMIIREG: {
        struct cphy *phy = adapter->port[dev->if_port].phy;

        if (!capable(CAP_NET_ADMIN))
            return -EPERM;
        if (!phy->mdio_write)
            return -EOPNOTSUPP;
        phy->mdio_write(adapter, data->phy_id, 0, data->reg_num & 0x1f,
                        data->val_in);
        break;
    }

    default:
        return -EOPNOTSUPP;
    }
    return 0;
}

static int t1_change_mtu(struct net_device *dev, int new_mtu)
{
    int ret;
    struct adapter *adapter = dev->priv;
    struct cmac *mac = adapter->port[dev->if_port].mac;

    if (!mac->ops->set_mtu)
        return -EOPNOTSUPP;
    if (new_mtu < 68)
        return -EINVAL;
    if ((ret = mac->ops->set_mtu(mac, new_mtu)))
        return ret;
    dev->mtu = new_mtu;
    return 0;
}

static int t1_set_mac_addr(struct net_device *dev, void *p)
{
    struct adapter *adapter = dev->priv;
    struct cmac *mac = adapter->port[dev->if_port].mac;
    struct sockaddr *addr = p;

    if (!mac->ops->macaddress_set)
        return -EOPNOTSUPP;

    memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
    mac->ops->macaddress_set(mac, dev->dev_addr);
    return 0;
}

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
static void vlan_rx_register(struct net_device *dev,
                             struct vlan_group *grp)
{
    struct adapter *adapter = dev->priv;

    spin_lock_irq(&adapter->async_lock);
    adapter->vlan_grp = grp;
    t1_set_vlan_accel(adapter, grp != NULL);
    spin_unlock_irq(&adapter->async_lock);
}

static void vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
    struct adapter *adapter = dev->priv;

    spin_lock_irq(&adapter->async_lock);
    if (adapter->vlan_grp)
        adapter->vlan_grp->vlan_devices[vid] = NULL;
    spin_unlock_irq(&adapter->async_lock);
}
#endif

#ifdef CONFIG_NET_POLL_CONTROLLER
static void t1_netpoll(struct net_device *dev)
{
    unsigned long flags;
    struct adapter *adapter = dev->priv;

    local_irq_save(flags);
    t1_select_intr_handler(adapter)(adapter->pdev->irq, adapter);
    local_irq_restore(flags);
}
#endif

/*
 * Periodic accumulation of MAC statistics. This is used only if the MAC
 * does not have any other way to prevent stats counter overflow.
 */
static void mac_stats_task(void *data)
{
    int i;
    struct adapter *adapter = data;

    for_each_port(adapter, i) {
        struct port_info *p = &adapter->port[i];

        if (netif_running(p->dev))
            p->mac->ops->statistics_update(p->mac,
                                           MAC_STATS_UPDATE_FAST);
    }

    /* Schedule the next statistics update if any port is active. */
    spin_lock(&adapter->work_lock);
    if (adapter->open_device_map & PORT_MASK)
        schedule_mac_stats_update(adapter,
                                  adapter->params.stats_update_period);
    spin_unlock(&adapter->work_lock);
}

/*
 * Processes elmer0 external interrupts in process context.
 */
static void ext_intr_task(void *data)
{
    struct adapter *adapter = data;

    t1_elmer0_ext_intr_handler(adapter);

    /* Now reenable external interrupts */
    spin_lock_irq(&adapter->async_lock);
    adapter->slow_intr_mask |= F_PL_INTR_EXT;
    writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
    writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
           adapter->regs + A_PL_ENABLE);
    spin_unlock_irq(&adapter->async_lock);
}

/*
 * Interrupt-context handler for elmer0 external interrupts.
 */
void t1_elmer0_ext_intr(struct adapter *adapter)
{
    /*
     * Schedule a task to handle external interrupts as we require
     * a process context. We disable EXT interrupts in the interim
     * and let the task reenable them when it's done.
     */
    adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
    writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
           adapter->regs + A_PL_ENABLE);
    schedule_work(&adapter->ext_intr_handler_task);
}

void t1_fatal_err(struct adapter *adapter)
{
    if (adapter->flags & FULL_INIT_DONE) {
        t1_sge_stop(adapter->sge);
        t1_interrupts_disable(adapter);
    }
    CH_ALERT("%s: encountered fatal error, operation suspended\n",
             adapter->name);
}

static int __devinit init_one(struct pci_dev *pdev,
                              const struct pci_device_id *ent)
{
    static int version_printed;

    int i, err, pci_using_dac = 0;
    unsigned long mmio_start, mmio_len;
    const struct board_info *bi;
    struct adapter *adapter = NULL;
    struct port_info *pi;

    if (!version_printed) {
        printk(KERN_INFO "%s - version %s\n", DRV_DESCRIPTION,
               DRV_VERSION);
        ++version_printed;
    }

    err = pci_enable_device(pdev);
    if (err)
        return err;

    if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
        CH_ERR("%s: cannot find PCI device memory base address\n",
               pci_name(pdev));
        err = -ENODEV;
        goto out_disable_pdev;
    }

    if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
        pci_using_dac = 1;

        if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
            CH_ERR("%s: unable to obtain 64-bit DMA for "
                   "consistent allocations\n", pci_name(pdev));
            err = -ENODEV;
            goto out_disable_pdev;
        }

    } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
        CH_ERR("%s: no usable DMA configuration\n", pci_name(pdev));
        goto out_disable_pdev;
    }

    err = pci_request_regions(pdev, DRV_NAME);
    if (err) {
        CH_ERR("%s: cannot obtain PCI resources\n", pci_name(pdev));
        goto out_disable_pdev;
    }

    pci_set_master(pdev);

    mmio_start = pci_resource_start(pdev, 0);
    mmio_len = pci_resource_len(pdev, 0);
    bi = t1_get_board_info(ent->driver_data);

    for (i = 0; i < bi->port_number; ++i) {
        struct net_device *netdev;

        netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter));
        if (!netdev) {
            err = -ENOMEM;
            goto out_free_dev;
        }

        SET_MODULE_OWNER(netdev);
        SET_NETDEV_DEV(netdev, &pdev->dev);

        if (!adapter) {
            adapter = netdev->priv;
            adapter->pdev = pdev;
            adapter->port[0].dev = netdev;    /* so we don't leak it */

            adapter->regs = ioremap(mmio_start, mmio_len);
            if (!adapter->regs) {
                CH_ERR("%s: cannot map device registers\n",
                       pci_name(pdev));
                err = -ENOMEM;
                goto out_free_dev;
            }

            if (t1_get_board_rev(adapter, bi, &adapter->params)) {
                err = -ENODEV;    /* Can't handle this chip rev */
                goto out_free_dev;
            }

            adapter->name = pci_name(pdev);
            adapter->msg_enable = dflt_msg_enable;
            adapter->mmio_len = mmio_len;

            spin_lock_init(&adapter->tpi_lock);
            spin_lock_init(&adapter->work_lock);
            spin_lock_init(&adapter->async_lock);
            spin_lock_init(&adapter->mac_lock);

            INIT_WORK(&adapter->ext_intr_handler_task,
                      ext_intr_task, adapter);
            INIT_WORK(&adapter->stats_update_task, mac_stats_task,
                      adapter);

            pci_set_drvdata(pdev, netdev);
        }

        pi = &adapter->port[i];
        pi->dev = netdev;
        netif_carrier_off(netdev);
        netdev->irq = pdev->irq;
        netdev->if_port = i;
        netdev->mem_start = mmio_start;
        netdev->mem_end = mmio_start + mmio_len - 1;
        netdev->priv = adapter;
        netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
        netdev->features |= NETIF_F_LLTX;

        adapter->flags |= RX_CSUM_ENABLED | TCP_CSUM_CAPABLE;
        if (pci_using_dac)
            netdev->features |= NETIF_F_HIGHDMA;
        if (vlan_tso_capable(adapter)) {
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
            adapter->flags |= VLAN_ACCEL_CAPABLE;
            netdev->features |=
                NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
            netdev->vlan_rx_register = vlan_rx_register;
            netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;
#endif

            /* T204: disable TSO */
            if (!(is_T2(adapter)) || bi->port_number != 4) {
                adapter->flags |= TSO_CAPABLE;
                netdev->features |= NETIF_F_TSO;
            }
        }

        netdev->open = cxgb_open;
        netdev->stop = cxgb_close;
        netdev->hard_start_xmit = t1_start_xmit;
        netdev->hard_header_len += (adapter->flags & TSO_CAPABLE) ?
            sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt);
        netdev->get_stats = t1_get_stats;
        netdev->set_multicast_list = t1_set_rxmode;
        netdev->do_ioctl = t1_ioctl;
        netdev->change_mtu = t1_change_mtu;
        netdev->set_mac_address = t1_set_mac_addr;
#ifdef CONFIG_NET_POLL_CONTROLLER
        netdev->poll_controller = t1_netpoll;
#endif
        netdev->weight = 64;

        SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
    }

    if (t1_init_sw_modules(adapter, bi) < 0) {
        err = -ENODEV;
        goto out_free_dev;
    }

    /*
     * The card is now ready to go. If any errors occur during device
     * registration we do not fail the whole card but rather proceed only
     * with the ports we manage to register successfully. However we must
     * register at least one net device.
     */
    for (i = 0; i < bi->port_number; ++i) {
        err = register_netdev(adapter->port[i].dev);
        if (err)
            CH_WARN("%s: cannot register net device %s, skipping\n",
                    pci_name(pdev), adapter->port[i].dev->name);
        else {
            /*
             * Change the name we use for messages to the name of
             * the first successfully registered interface.
             */
            if (!adapter->registered_device_map)
                adapter->name = adapter->port[i].dev->name;

            __set_bit(i, &adapter->registered_device_map);
        }
    }
    if (!adapter->registered_device_map) {
        CH_ERR("%s: could not register any net devices\n",
               pci_name(pdev));
        goto out_release_adapter_res;
    }

    printk(KERN_INFO "%s: %s (rev %d), %s %dMHz/%d-bit\n", adapter->name,
           bi->desc, adapter->params.chip_revision,
           adapter->params.pci.is_pcix ? "PCIX" : "PCI",
           adapter->params.pci.speed, adapter->params.pci.width);

    /*
     * Set the T1B ASIC and memory clocks.
     */
    if (t1powersave)
        adapter->t1powersave = LCLOCK;    /* HW default is powersave mode. */
    else
        adapter->t1powersave = HCLOCK;
    if (t1_is_T1B(adapter))
        t1_clock(adapter, t1powersave);

    return 0;

out_release_adapter_res:
    t1_free_sw_modules(adapter);
out_free_dev:
    if (adapter) {
        if (adapter->regs)
            iounmap(adapter->regs);
        for (i = bi->port_number - 1; i >= 0; --i)
            if (adapter->port[i].dev)
                free_netdev(adapter->port[i].dev);
    }
    pci_release_regions(pdev);
out_disable_pdev:
    pci_disable_device(pdev);
    pci_set_drvdata(pdev, NULL);
    return err;
}
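/* Shift 'bitdata' out to the clock synthesizer one bit at a time, MSB first, on the ELMER0 GPO data line, pulsing the serial clock for each bit. */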
static void bit_bang(struct adapter *adapter, int bitdata, int nbits)
{
    int data;
    int i;
    u32 val;

    enum {
        S_CLOCK = 1 << 3,
        S_DATA = 1 << 4
    };

    for (i = (nbits - 1); i > -1; i--) {

        udelay(50);

        data = ((bitdata >> i) & 0x1);
        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);

        if (data)
            val |= S_DATA;
        else
            val &= ~S_DATA;

        udelay(50);

        /* Set SCLOCK low */
        val &= ~S_CLOCK;
        __t1_tpi_write(adapter, A_ELMER0_GPO, val);

        udelay(50);

        /* Write SCLOCK high */
        val |= S_CLOCK;
        __t1_tpi_write(adapter, A_ELMER0_GPO, val);
    }
}

static int t1_clock(struct adapter *adapter, int mode)
{
    u32 val;
    int M_CORE_VAL;
    int M_MEM_VAL;

    enum {
        M_CORE_BITS = 9,
        T_CORE_VAL  = 0,
        T_CORE_BITS = 2,
        N_CORE_VAL  = 0,
        N_CORE_BITS = 2,
        M_MEM_BITS  = 9,
        T_MEM_VAL   = 0,
        T_MEM_BITS  = 2,
        N_MEM_VAL   = 0,
        N_MEM_BITS  = 2,
        NP_LOAD     = 1 << 17,
        S_LOAD_MEM  = 1 << 5,
        S_LOAD_CORE = 1 << 6,
        S_CLOCK     = 1 << 3
    };

    if (!t1_is_T1B(adapter))
        return -ENODEV;    /* Can't re-clock this chip. */

    if (mode & 2) {
        return 0;    /* show current mode. */
    }

    if ((adapter->t1powersave & 1) == (mode & 1))
        return -EALREADY;    /* ASIC already running in mode. */

    if ((mode & 1) == HCLOCK) {
        M_CORE_VAL = 0x14;
        M_MEM_VAL = 0x18;
        adapter->t1powersave = HCLOCK;    /* overclock */
    } else {
        M_CORE_VAL = 0xe;
        M_MEM_VAL = 0x10;
        adapter->t1powersave = LCLOCK;    /* underclock */
    }

    /* Don't interrupt this serial stream! */
    spin_lock(&adapter->tpi_lock);

    /* Initialize for ASIC core */
    __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
    val |= NP_LOAD;
    udelay(50);
    __t1_tpi_write(adapter, A_ELMER0_GPO, val);
    udelay(50);
    __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
    val &= ~S_LOAD_CORE;
    val &= ~S_CLOCK;
    __t1_tpi_write(adapter, A_ELMER0_GPO, val);
    udelay(50);

    /* Serial program the ASIC clock synthesizer */
    bit_bang(adapter, T_CORE_VAL, T_CORE_BITS);
    bit_bang(adapter, N_CORE_VAL, N_CORE_BITS);
    bit_bang(adapter, M_CORE_VAL, M_CORE_BITS);
    udelay(50);

    /* Finish ASIC core */
    __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
    val |= S_LOAD_CORE;
    udelay(50);
    __t1_tpi_write(adapter, A_ELMER0_GPO, val);
    udelay(50);
    __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
    val &= ~S_LOAD_CORE;
    udelay(50);
    __t1_tpi_write(adapter, A_ELMER0_GPO, val);
    udelay(50);

    /* Initialize for memory */
    __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
    val |= NP_LOAD;
    udelay(50);
    __t1_tpi_write(adapter, A_ELMER0_GPO, val);
    udelay(50);
    __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
    val &= ~S_LOAD_MEM;
    val &= ~S_CLOCK;
    udelay(50);
    __t1_tpi_write(adapter, A_ELMER0_GPO, val);
    udelay(50);

    /* Serial program the memory clock synthesizer */
    bit_bang(adapter, T_MEM_VAL, T_MEM_BITS);
    bit_bang(adapter, N_MEM_VAL, N_MEM_BITS);
    bit_bang(adapter, M_MEM_VAL, M_MEM_BITS);
    udelay(50);

    /* Finish memory */
    __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
    val |= S_LOAD_MEM;
    udelay(50);
    __t1_tpi_write(adapter, A_ELMER0_GPO, val);
    udelay(50);
    __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
    val &= ~S_LOAD_MEM;
    udelay(50);
    __t1_tpi_write(adapter, A_ELMER0_GPO, val);

    spin_unlock(&adapter->tpi_lock);

    return 0;
}
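/* Reset the chip by putting it in D3hot and then back to D0 via the PCI power-management control register. */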
static inline void t1_sw_reset(struct pci_dev *pdev)
{
    pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3);
    pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0);
}

static void __devexit remove_one(struct pci_dev *pdev)
{
    struct net_device *dev = pci_get_drvdata(pdev);

    if (dev) {
        int i;
        struct adapter *adapter = dev->priv;

        for_each_port(adapter, i)
            if (test_bit(i, &adapter->registered_device_map))
                unregister_netdev(adapter->port[i].dev);

        t1_free_sw_modules(adapter);
        iounmap(adapter->regs);
        while (--i >= 0)
            if (adapter->port[i].dev)
                free_netdev(adapter->port[i].dev);

        pci_release_regions(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
        t1_sw_reset(pdev);
    }
}

static struct pci_driver driver = {
    .name     = DRV_NAME,
    .id_table = t1_pci_tbl,
    .probe    = init_one,
    .remove   = __devexit_p(remove_one),
};

static int __init t1_init_module(void)
{
    return pci_register_driver(&driver);
}

static void __exit t1_cleanup_module(void)
{
    pci_unregister_driver(&driver);
}

module_init(t1_init_module);
module_exit(t1_cleanup_module);