[PATCH] chelsio: add support for other 10G boards
[linux-2.6/x86.git] / drivers / net / chelsio / cxgb2.c
bloba8c873b0af54153b3f6118fb77772adf489db221
1 /*****************************************************************************
2 * *
3 * File: cxgb2.c *
4 * $Revision: 1.25 $ *
5 * $Date: 2005/06/22 00:43:25 $ *
6 * Description: *
7 * Chelsio 10Gb Ethernet Driver. *
8 * *
9 * This program is free software; you can redistribute it and/or modify *
10 * it under the terms of the GNU General Public License, version 2, as *
11 * published by the Free Software Foundation. *
12 * *
13 * You should have received a copy of the GNU General Public License along *
14 * with this program; if not, write to the Free Software Foundation, Inc., *
15 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
16 * *
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
20 * *
21 * http://www.chelsio.com *
22 * *
23 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
24 * All rights reserved. *
25 * *
26 * Maintainers: maintainers@chelsio.com *
27 * *
28 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
29 * Tina Yang <tainay@chelsio.com> *
30 * Felix Marti <felix@chelsio.com> *
31 * Scott Bardone <sbardone@chelsio.com> *
32 * Kurt Ottaway <kottaway@chelsio.com> *
33 * Frank DiMambro <frank@chelsio.com> *
34 * *
35 * History: *
36 * *
37 ****************************************************************************/
39 #include "common.h"
40 #include <linux/module.h>
41 #include <linux/init.h>
42 #include <linux/pci.h>
43 #include <linux/netdevice.h>
44 #include <linux/etherdevice.h>
45 #include <linux/if_vlan.h>
46 #include <linux/mii.h>
47 #include <linux/sockios.h>
48 #include <linux/dma-mapping.h>
49 #include <asm/uaccess.h>
51 #include "cpl5_cmd.h"
52 #include "regs.h"
53 #include "gmac.h"
54 #include "cphy.h"
55 #include "sge.h"
56 #include "tp.h"
57 #include "espi.h"
58 #include "elmer0.h"
60 #include <linux/workqueue.h>
62 static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
64 schedule_delayed_work(&ap->stats_update_task, secs * HZ);
67 static inline void cancel_mac_stats_update(struct adapter *ap)
69 cancel_delayed_work(&ap->stats_update_task);
/* Sizing limits for the SGE command queues and free lists. */
#define MAX_CMDQ_ENTRIES	16384
#define MAX_CMDQ1_ENTRIES	1024
#define MAX_RX_BUFFERS		16384
#define MAX_RX_JUMBO_BUFFERS	16384
#define MAX_TX_BUFFERS_HIGH	16384U
#define MAX_TX_BUFFERS_LOW	1536U
#define MAX_TX_BUFFERS		1460U
#define MIN_FL_ENTRIES		32

/* Default netif message-enable mask for newly probed adapters. */
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

/*
 * The EEPROM is actually bigger but only the first few bytes are used so we
 * only report those.
 */
#define EEPROM_SIZE 32
91 MODULE_DESCRIPTION(DRV_DESCRIPTION);
92 MODULE_AUTHOR("Chelsio Communications");
93 MODULE_LICENSE("GPL");
95 static int dflt_msg_enable = DFLT_MSG_ENABLE;
97 module_param(dflt_msg_enable, int, 0);
98 MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 default message enable bitmap");
100 #define HCLOCK 0x0
101 #define LCLOCK 0x1
103 /* T1 cards powersave mode */
104 static int t1_clock(struct adapter *adapter, int mode);
105 static int t1powersave = 1; /* HW default is powersave mode. */
107 module_param(t1powersave, int, 0);
108 MODULE_PARM_DESC(t1powersave, "Enable/Disable T1 powersaving mode");
/* Human-readable PCI bus speed strings, in MHz. */
static const char pci_speed[][4] = {
	"33", "66", "100", "133"
};
115 * Setup MAC to receive the types of packets we want.
117 static void t1_set_rxmode(struct net_device *dev)
119 struct adapter *adapter = dev->priv;
120 struct cmac *mac = adapter->port[dev->if_port].mac;
121 struct t1_rx_mode rm;
123 rm.dev = dev;
124 rm.idx = 0;
125 rm.list = dev->mc_list;
126 mac->ops->set_rx_mode(mac, &rm);
129 static void link_report(struct port_info *p)
131 if (!netif_carrier_ok(p->dev))
132 printk(KERN_INFO "%s: link down\n", p->dev->name);
133 else {
134 const char *s = "10Mbps";
136 switch (p->link_config.speed) {
137 case SPEED_10000: s = "10Gbps"; break;
138 case SPEED_1000: s = "1000Mbps"; break;
139 case SPEED_100: s = "100Mbps"; break;
142 printk(KERN_INFO "%s: link up, %s, %s-duplex\n",
143 p->dev->name, s,
144 p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
148 void t1_link_negotiated(struct adapter *adapter, int port_id, int link_stat,
149 int speed, int duplex, int pause)
151 struct port_info *p = &adapter->port[port_id];
153 if (link_stat != netif_carrier_ok(p->dev)) {
154 if (link_stat)
155 netif_carrier_on(p->dev);
156 else
157 netif_carrier_off(p->dev);
158 link_report(p);
160 /* multi-ports: inform toe */
161 if ((speed > 0) && (adapter->params.nports > 1)) {
162 unsigned int sched_speed = 10;
163 switch (speed) {
164 case SPEED_1000:
165 sched_speed = 1000;
166 break;
167 case SPEED_100:
168 sched_speed = 100;
169 break;
170 case SPEED_10:
171 sched_speed = 10;
172 break;
174 t1_sched_update_parms(adapter->sge, port_id, 0, sched_speed);
179 static void link_start(struct port_info *p)
181 struct cmac *mac = p->mac;
183 mac->ops->reset(mac);
184 if (mac->ops->macaddress_set)
185 mac->ops->macaddress_set(mac, p->dev->dev_addr);
186 t1_set_rxmode(p->dev);
187 t1_link_start(p->phy, mac, &p->link_config);
188 mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
191 static void enable_hw_csum(struct adapter *adapter)
193 if (adapter->flags & TSO_CAPABLE)
194 t1_tp_set_ip_checksum_offload(adapter->tp, 1); /* for TSO only */
195 if (adapter->flags & UDP_CSUM_CAPABLE)
196 t1_tp_set_udp_checksum_offload(adapter->tp, 1);
197 t1_tp_set_tcp_checksum_offload(adapter->tp, 1);
201 * Things to do upon first use of a card.
202 * This must run with the rtnl lock held.
204 static int cxgb_up(struct adapter *adapter)
206 int err = 0;
208 if (!(adapter->flags & FULL_INIT_DONE)) {
209 err = t1_init_hw_modules(adapter);
210 if (err)
211 goto out_err;
213 enable_hw_csum(adapter);
214 adapter->flags |= FULL_INIT_DONE;
217 t1_interrupts_clear(adapter);
218 if ((err = request_irq(adapter->pdev->irq,
219 t1_select_intr_handler(adapter), IRQF_SHARED,
220 adapter->name, adapter))) {
221 goto out_err;
223 t1_sge_start(adapter->sge);
224 t1_interrupts_enable(adapter);
225 out_err:
226 return err;
230 * Release resources when all the ports have been stopped.
232 static void cxgb_down(struct adapter *adapter)
234 t1_sge_stop(adapter->sge);
235 t1_interrupts_disable(adapter);
236 free_irq(adapter->pdev->irq, adapter);
239 static int cxgb_open(struct net_device *dev)
241 int err;
242 struct adapter *adapter = dev->priv;
243 int other_ports = adapter->open_device_map & PORT_MASK;
245 if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
246 return err;
248 __set_bit(dev->if_port, &adapter->open_device_map);
249 link_start(&adapter->port[dev->if_port]);
250 netif_start_queue(dev);
251 if (!other_ports && adapter->params.stats_update_period)
252 schedule_mac_stats_update(adapter,
253 adapter->params.stats_update_period);
254 return 0;
257 static int cxgb_close(struct net_device *dev)
259 struct adapter *adapter = dev->priv;
260 struct port_info *p = &adapter->port[dev->if_port];
261 struct cmac *mac = p->mac;
263 netif_stop_queue(dev);
264 mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
265 netif_carrier_off(dev);
267 clear_bit(dev->if_port, &adapter->open_device_map);
268 if (adapter->params.stats_update_period &&
269 !(adapter->open_device_map & PORT_MASK)) {
270 /* Stop statistics accumulation. */
271 smp_mb__after_clear_bit();
272 spin_lock(&adapter->work_lock); /* sync with update task */
273 spin_unlock(&adapter->work_lock);
274 cancel_mac_stats_update(adapter);
277 if (!adapter->open_device_map)
278 cxgb_down(adapter);
279 return 0;
282 static struct net_device_stats *t1_get_stats(struct net_device *dev)
284 struct adapter *adapter = dev->priv;
285 struct port_info *p = &adapter->port[dev->if_port];
286 struct net_device_stats *ns = &p->netstats;
287 const struct cmac_statistics *pstats;
289 /* Do a full update of the MAC stats */
290 pstats = p->mac->ops->statistics_update(p->mac,
291 MAC_STATS_UPDATE_FULL);
293 ns->tx_packets = pstats->TxUnicastFramesOK +
294 pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK;
296 ns->rx_packets = pstats->RxUnicastFramesOK +
297 pstats->RxMulticastFramesOK + pstats->RxBroadcastFramesOK;
299 ns->tx_bytes = pstats->TxOctetsOK;
300 ns->rx_bytes = pstats->RxOctetsOK;
302 ns->tx_errors = pstats->TxLateCollisions + pstats->TxLengthErrors +
303 pstats->TxUnderrun + pstats->TxFramesAbortedDueToXSCollisions;
304 ns->rx_errors = pstats->RxDataErrors + pstats->RxJabberErrors +
305 pstats->RxFCSErrors + pstats->RxAlignErrors +
306 pstats->RxSequenceErrors + pstats->RxFrameTooLongErrors +
307 pstats->RxSymbolErrors + pstats->RxRuntErrors;
309 ns->multicast = pstats->RxMulticastFramesOK;
310 ns->collisions = pstats->TxTotalCollisions;
312 /* detailed rx_errors */
313 ns->rx_length_errors = pstats->RxFrameTooLongErrors +
314 pstats->RxJabberErrors;
315 ns->rx_over_errors = 0;
316 ns->rx_crc_errors = pstats->RxFCSErrors;
317 ns->rx_frame_errors = pstats->RxAlignErrors;
318 ns->rx_fifo_errors = 0;
319 ns->rx_missed_errors = 0;
321 /* detailed tx_errors */
322 ns->tx_aborted_errors = pstats->TxFramesAbortedDueToXSCollisions;
323 ns->tx_carrier_errors = 0;
324 ns->tx_fifo_errors = pstats->TxUnderrun;
325 ns->tx_heartbeat_errors = 0;
326 ns->tx_window_errors = pstats->TxLateCollisions;
327 return ns;
330 static u32 get_msglevel(struct net_device *dev)
332 struct adapter *adapter = dev->priv;
334 return adapter->msg_enable;
337 static void set_msglevel(struct net_device *dev, u32 val)
339 struct adapter *adapter = dev->priv;
341 adapter->msg_enable = val;
344 static char stats_strings[][ETH_GSTRING_LEN] = {
345 "TxOctetsOK",
346 "TxOctetsBad",
347 "TxUnicastFramesOK",
348 "TxMulticastFramesOK",
349 "TxBroadcastFramesOK",
350 "TxPauseFrames",
351 "TxFramesWithDeferredXmissions",
352 "TxLateCollisions",
353 "TxTotalCollisions",
354 "TxFramesAbortedDueToXSCollisions",
355 "TxUnderrun",
356 "TxLengthErrors",
357 "TxInternalMACXmitError",
358 "TxFramesWithExcessiveDeferral",
359 "TxFCSErrors",
361 "RxOctetsOK",
362 "RxOctetsBad",
363 "RxUnicastFramesOK",
364 "RxMulticastFramesOK",
365 "RxBroadcastFramesOK",
366 "RxPauseFrames",
367 "RxFCSErrors",
368 "RxAlignErrors",
369 "RxSymbolErrors",
370 "RxDataErrors",
371 "RxSequenceErrors",
372 "RxRuntErrors",
373 "RxJabberErrors",
374 "RxInternalMACRcvError",
375 "RxInRangeLengthErrors",
376 "RxOutOfRangeLengthField",
377 "RxFrameTooLongErrors",
379 "TSO",
380 "VLANextractions",
381 "VLANinsertions",
382 "RxCsumGood",
383 "TxCsumOffload",
384 "RxDrops"
386 "respQ_empty",
387 "respQ_overflow",
388 "freelistQ_empty",
389 "pkt_too_big",
390 "pkt_mismatch",
391 "cmdQ_full0",
392 "cmdQ_full1",
393 "tx_ipfrags",
394 "tx_reg_pkts",
395 "tx_lso_pkts",
396 "tx_do_cksum",
398 "espi_DIP2ParityErr",
399 "espi_DIP4Err",
400 "espi_RxDrops",
401 "espi_TxDrops",
402 "espi_RxOvfl",
403 "espi_ParityErr"
/* Size in bytes of the register dump produced by get_regs(). */
#define T2_REGMAP_SIZE (3 * 1024)

/* ethtool: report the register-dump buffer size. */
static int get_regs_len(struct net_device *dev)
{
	return T2_REGMAP_SIZE;
}
413 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
415 struct adapter *adapter = dev->priv;
417 strcpy(info->driver, DRV_NAME);
418 strcpy(info->version, DRV_VERSION);
419 strcpy(info->fw_version, "N/A");
420 strcpy(info->bus_info, pci_name(adapter->pdev));
423 static int get_stats_count(struct net_device *dev)
425 return ARRAY_SIZE(stats_strings);
428 static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
430 if (stringset == ETH_SS_STATS)
431 memcpy(data, stats_strings, sizeof(stats_strings));
434 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
435 u64 *data)
437 struct adapter *adapter = dev->priv;
438 struct cmac *mac = adapter->port[dev->if_port].mac;
439 const struct cmac_statistics *s;
440 const struct sge_port_stats *ss;
441 const struct sge_intr_counts *t;
443 s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
444 ss = t1_sge_get_port_stats(adapter->sge, dev->if_port);
445 t = t1_sge_get_intr_counts(adapter->sge);
447 *data++ = s->TxOctetsOK;
448 *data++ = s->TxOctetsBad;
449 *data++ = s->TxUnicastFramesOK;
450 *data++ = s->TxMulticastFramesOK;
451 *data++ = s->TxBroadcastFramesOK;
452 *data++ = s->TxPauseFrames;
453 *data++ = s->TxFramesWithDeferredXmissions;
454 *data++ = s->TxLateCollisions;
455 *data++ = s->TxTotalCollisions;
456 *data++ = s->TxFramesAbortedDueToXSCollisions;
457 *data++ = s->TxUnderrun;
458 *data++ = s->TxLengthErrors;
459 *data++ = s->TxInternalMACXmitError;
460 *data++ = s->TxFramesWithExcessiveDeferral;
461 *data++ = s->TxFCSErrors;
463 *data++ = s->RxOctetsOK;
464 *data++ = s->RxOctetsBad;
465 *data++ = s->RxUnicastFramesOK;
466 *data++ = s->RxMulticastFramesOK;
467 *data++ = s->RxBroadcastFramesOK;
468 *data++ = s->RxPauseFrames;
469 *data++ = s->RxFCSErrors;
470 *data++ = s->RxAlignErrors;
471 *data++ = s->RxSymbolErrors;
472 *data++ = s->RxDataErrors;
473 *data++ = s->RxSequenceErrors;
474 *data++ = s->RxRuntErrors;
475 *data++ = s->RxJabberErrors;
476 *data++ = s->RxInternalMACRcvError;
477 *data++ = s->RxInRangeLengthErrors;
478 *data++ = s->RxOutOfRangeLengthField;
479 *data++ = s->RxFrameTooLongErrors;
481 *data++ = ss->tso;
482 *data++ = ss->vlan_xtract;
483 *data++ = ss->vlan_insert;
484 *data++ = ss->rx_cso_good;
485 *data++ = ss->tx_cso;
486 *data++ = ss->rx_drops;
488 *data++ = (u64)t->respQ_empty;
489 *data++ = (u64)t->respQ_overflow;
490 *data++ = (u64)t->freelistQ_empty;
491 *data++ = (u64)t->pkt_too_big;
492 *data++ = (u64)t->pkt_mismatch;
493 *data++ = (u64)t->cmdQ_full[0];
494 *data++ = (u64)t->cmdQ_full[1];
495 *data++ = (u64)t->tx_ipfrags;
496 *data++ = (u64)t->tx_reg_pkts;
497 *data++ = (u64)t->tx_lso_pkts;
498 *data++ = (u64)t->tx_do_cksum;
500 if (adapter->espi) {
501 const struct espi_intr_counts *e;
503 e = t1_espi_get_intr_counts(adapter->espi);
504 *data++ = (u64) e->DIP2_parity_err;
505 *data++ = (u64) e->DIP4_err;
506 *data++ = (u64) e->rx_drops;
507 *data++ = (u64) e->tx_drops;
508 *data++ = (u64) e->rx_ovflw;
509 *data++ = (u64) e->parity_err;
513 static inline void reg_block_dump(struct adapter *ap, void *buf,
514 unsigned int start, unsigned int end)
516 u32 *p = buf + start;
518 for ( ; start <= end; start += sizeof(u32))
519 *p++ = readl(ap->regs + start);
522 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
523 void *buf)
525 struct adapter *ap = dev->priv;
528 * Version scheme: bits 0..9: chip version, bits 10..15: chip revision
530 regs->version = 2;
532 memset(buf, 0, T2_REGMAP_SIZE);
533 reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER);
534 reg_block_dump(ap, buf, A_MC3_CFG, A_MC4_INT_CAUSE);
535 reg_block_dump(ap, buf, A_TPI_ADDR, A_TPI_PAR);
536 reg_block_dump(ap, buf, A_TP_IN_CONFIG, A_TP_TX_DROP_COUNT);
537 reg_block_dump(ap, buf, A_RAT_ROUTE_CONTROL, A_RAT_INTR_CAUSE);
538 reg_block_dump(ap, buf, A_CSPI_RX_AE_WM, A_CSPI_INTR_ENABLE);
539 reg_block_dump(ap, buf, A_ESPI_SCH_TOKEN0, A_ESPI_GOSTAT);
540 reg_block_dump(ap, buf, A_ULP_ULIMIT, A_ULP_PIO_CTRL);
541 reg_block_dump(ap, buf, A_PL_ENABLE, A_PL_CAUSE);
542 reg_block_dump(ap, buf, A_MC5_CONFIG, A_MC5_MASK_WRITE_CMD);
545 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
547 struct adapter *adapter = dev->priv;
548 struct port_info *p = &adapter->port[dev->if_port];
550 cmd->supported = p->link_config.supported;
551 cmd->advertising = p->link_config.advertising;
553 if (netif_carrier_ok(dev)) {
554 cmd->speed = p->link_config.speed;
555 cmd->duplex = p->link_config.duplex;
556 } else {
557 cmd->speed = -1;
558 cmd->duplex = -1;
561 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
562 cmd->phy_address = p->phy->addr;
563 cmd->transceiver = XCVR_EXTERNAL;
564 cmd->autoneg = p->link_config.autoneg;
565 cmd->maxtxpkt = 0;
566 cmd->maxrxpkt = 0;
567 return 0;
570 static int speed_duplex_to_caps(int speed, int duplex)
572 int cap = 0;
574 switch (speed) {
575 case SPEED_10:
576 if (duplex == DUPLEX_FULL)
577 cap = SUPPORTED_10baseT_Full;
578 else
579 cap = SUPPORTED_10baseT_Half;
580 break;
581 case SPEED_100:
582 if (duplex == DUPLEX_FULL)
583 cap = SUPPORTED_100baseT_Full;
584 else
585 cap = SUPPORTED_100baseT_Half;
586 break;
587 case SPEED_1000:
588 if (duplex == DUPLEX_FULL)
589 cap = SUPPORTED_1000baseT_Full;
590 else
591 cap = SUPPORTED_1000baseT_Half;
592 break;
593 case SPEED_10000:
594 if (duplex == DUPLEX_FULL)
595 cap = SUPPORTED_10000baseT_Full;
597 return cap;
/* All advertising bits this driver understands. */
#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
			 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
			 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
			 ADVERTISED_10000baseT_Full)
605 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
607 struct adapter *adapter = dev->priv;
608 struct port_info *p = &adapter->port[dev->if_port];
609 struct link_config *lc = &p->link_config;
611 if (!(lc->supported & SUPPORTED_Autoneg))
612 return -EOPNOTSUPP; /* can't change speed/duplex */
614 if (cmd->autoneg == AUTONEG_DISABLE) {
615 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
617 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
618 return -EINVAL;
619 lc->requested_speed = cmd->speed;
620 lc->requested_duplex = cmd->duplex;
621 lc->advertising = 0;
622 } else {
623 cmd->advertising &= ADVERTISED_MASK;
624 if (cmd->advertising & (cmd->advertising - 1))
625 cmd->advertising = lc->supported;
626 cmd->advertising &= lc->supported;
627 if (!cmd->advertising)
628 return -EINVAL;
629 lc->requested_speed = SPEED_INVALID;
630 lc->requested_duplex = DUPLEX_INVALID;
631 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
633 lc->autoneg = cmd->autoneg;
634 if (netif_running(dev))
635 t1_link_start(p->phy, p->mac, lc);
636 return 0;
639 static void get_pauseparam(struct net_device *dev,
640 struct ethtool_pauseparam *epause)
642 struct adapter *adapter = dev->priv;
643 struct port_info *p = &adapter->port[dev->if_port];
645 epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
646 epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
647 epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
650 static int set_pauseparam(struct net_device *dev,
651 struct ethtool_pauseparam *epause)
653 struct adapter *adapter = dev->priv;
654 struct port_info *p = &adapter->port[dev->if_port];
655 struct link_config *lc = &p->link_config;
657 if (epause->autoneg == AUTONEG_DISABLE)
658 lc->requested_fc = 0;
659 else if (lc->supported & SUPPORTED_Autoneg)
660 lc->requested_fc = PAUSE_AUTONEG;
661 else
662 return -EINVAL;
664 if (epause->rx_pause)
665 lc->requested_fc |= PAUSE_RX;
666 if (epause->tx_pause)
667 lc->requested_fc |= PAUSE_TX;
668 if (lc->autoneg == AUTONEG_ENABLE) {
669 if (netif_running(dev))
670 t1_link_start(p->phy, p->mac, lc);
671 } else {
672 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
673 if (netif_running(dev))
674 p->mac->ops->set_speed_duplex_fc(p->mac, -1, -1,
675 lc->fc);
677 return 0;
680 static u32 get_rx_csum(struct net_device *dev)
682 struct adapter *adapter = dev->priv;
684 return (adapter->flags & RX_CSUM_ENABLED) != 0;
687 static int set_rx_csum(struct net_device *dev, u32 data)
689 struct adapter *adapter = dev->priv;
691 if (data)
692 adapter->flags |= RX_CSUM_ENABLED;
693 else
694 adapter->flags &= ~RX_CSUM_ENABLED;
695 return 0;
698 static int set_tso(struct net_device *dev, u32 value)
700 struct adapter *adapter = dev->priv;
702 if (!(adapter->flags & TSO_CAPABLE))
703 return value ? -EOPNOTSUPP : 0;
704 return ethtool_op_set_tso(dev, value);
707 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
709 struct adapter *adapter = dev->priv;
710 int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
712 e->rx_max_pending = MAX_RX_BUFFERS;
713 e->rx_mini_max_pending = 0;
714 e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
715 e->tx_max_pending = MAX_CMDQ_ENTRIES;
717 e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl];
718 e->rx_mini_pending = 0;
719 e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl];
720 e->tx_pending = adapter->params.sge.cmdQ_size[0];
723 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
725 struct adapter *adapter = dev->priv;
726 int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
728 if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending ||
729 e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
730 e->tx_pending > MAX_CMDQ_ENTRIES ||
731 e->rx_pending < MIN_FL_ENTRIES ||
732 e->rx_jumbo_pending < MIN_FL_ENTRIES ||
733 e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1))
734 return -EINVAL;
736 if (adapter->flags & FULL_INIT_DONE)
737 return -EBUSY;
739 adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
740 adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
741 adapter->params.sge.cmdQ_size[0] = e->tx_pending;
742 adapter->params.sge.cmdQ_size[1] = e->tx_pending > MAX_CMDQ1_ENTRIES ?
743 MAX_CMDQ1_ENTRIES : e->tx_pending;
744 return 0;
747 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
749 struct adapter *adapter = dev->priv;
752 * If RX coalescing is requested we use NAPI, otherwise interrupts.
753 * This choice can be made only when all ports and the TOE are off.
755 if (adapter->open_device_map == 0)
756 adapter->params.sge.polling = c->use_adaptive_rx_coalesce;
758 if (adapter->params.sge.polling) {
759 adapter->params.sge.rx_coalesce_usecs = 0;
760 } else {
761 adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
763 adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
764 adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
765 t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
766 return 0;
769 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
771 struct adapter *adapter = dev->priv;
773 c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs;
774 c->rate_sample_interval = adapter->params.sge.sample_interval_usecs;
775 c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable;
776 return 0;
779 static int get_eeprom_len(struct net_device *dev)
781 struct adapter *adapter = dev->priv;
783 return t1_is_asic(adapter) ? EEPROM_SIZE : 0;
786 #define EEPROM_MAGIC(ap) \
787 (PCI_VENDOR_ID_CHELSIO | ((ap)->params.chip_version << 16))
789 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
790 u8 *data)
792 int i;
793 u8 buf[EEPROM_SIZE] __attribute__((aligned(4)));
794 struct adapter *adapter = dev->priv;
796 e->magic = EEPROM_MAGIC(adapter);
797 for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32))
798 t1_seeprom_read(adapter, i, (u32 *)&buf[i]);
799 memcpy(data, buf + e->offset, e->len);
800 return 0;
803 static const struct ethtool_ops t1_ethtool_ops = {
804 .get_settings = get_settings,
805 .set_settings = set_settings,
806 .get_drvinfo = get_drvinfo,
807 .get_msglevel = get_msglevel,
808 .set_msglevel = set_msglevel,
809 .get_ringparam = get_sge_param,
810 .set_ringparam = set_sge_param,
811 .get_coalesce = get_coalesce,
812 .set_coalesce = set_coalesce,
813 .get_eeprom_len = get_eeprom_len,
814 .get_eeprom = get_eeprom,
815 .get_pauseparam = get_pauseparam,
816 .set_pauseparam = set_pauseparam,
817 .get_rx_csum = get_rx_csum,
818 .set_rx_csum = set_rx_csum,
819 .get_tx_csum = ethtool_op_get_tx_csum,
820 .set_tx_csum = ethtool_op_set_tx_csum,
821 .get_sg = ethtool_op_get_sg,
822 .set_sg = ethtool_op_set_sg,
823 .get_link = ethtool_op_get_link,
824 .get_strings = get_strings,
825 .get_stats_count = get_stats_count,
826 .get_ethtool_stats = get_stats,
827 .get_regs_len = get_regs_len,
828 .get_regs = get_regs,
829 .get_tso = ethtool_op_get_tso,
830 .set_tso = set_tso,
833 static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
835 struct adapter *adapter = dev->priv;
836 struct mii_ioctl_data *data = if_mii(req);
838 switch (cmd) {
839 case SIOCGMIIPHY:
840 data->phy_id = adapter->port[dev->if_port].phy->addr;
841 /* FALLTHRU */
842 case SIOCGMIIREG: {
843 struct cphy *phy = adapter->port[dev->if_port].phy;
844 u32 val;
846 if (!phy->mdio_read)
847 return -EOPNOTSUPP;
848 phy->mdio_read(adapter, data->phy_id, 0, data->reg_num & 0x1f,
849 &val);
850 data->val_out = val;
851 break;
853 case SIOCSMIIREG: {
854 struct cphy *phy = adapter->port[dev->if_port].phy;
856 if (!capable(CAP_NET_ADMIN))
857 return -EPERM;
858 if (!phy->mdio_write)
859 return -EOPNOTSUPP;
860 phy->mdio_write(adapter, data->phy_id, 0, data->reg_num & 0x1f,
861 data->val_in);
862 break;
865 default:
866 return -EOPNOTSUPP;
868 return 0;
871 static int t1_change_mtu(struct net_device *dev, int new_mtu)
873 int ret;
874 struct adapter *adapter = dev->priv;
875 struct cmac *mac = adapter->port[dev->if_port].mac;
877 if (!mac->ops->set_mtu)
878 return -EOPNOTSUPP;
879 if (new_mtu < 68)
880 return -EINVAL;
881 if ((ret = mac->ops->set_mtu(mac, new_mtu)))
882 return ret;
883 dev->mtu = new_mtu;
884 return 0;
887 static int t1_set_mac_addr(struct net_device *dev, void *p)
889 struct adapter *adapter = dev->priv;
890 struct cmac *mac = adapter->port[dev->if_port].mac;
891 struct sockaddr *addr = p;
893 if (!mac->ops->macaddress_set)
894 return -EOPNOTSUPP;
896 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
897 mac->ops->macaddress_set(mac, dev->dev_addr);
898 return 0;
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
/* Attach/detach the VLAN group; async_lock syncs with the interrupt path. */
static void vlan_rx_register(struct net_device *dev,
			     struct vlan_group *grp)
{
	struct adapter *adapter = dev->priv;

	spin_lock_irq(&adapter->async_lock);
	adapter->vlan_grp = grp;
	t1_set_vlan_accel(adapter, grp != NULL);
	spin_unlock_irq(&adapter->async_lock);
}

/* Remove one VLAN id from the group under the same lock. */
static void vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct adapter *adapter = dev->priv;

	spin_lock_irq(&adapter->async_lock);
	if (adapter->vlan_grp)
		adapter->vlan_grp->vlan_devices[vid] = NULL;
	spin_unlock_irq(&adapter->async_lock);
}
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll entry: run the interrupt handler with local IRQs masked. */
static void t1_netpoll(struct net_device *dev)
{
	struct adapter *adapter = dev->priv;
	unsigned long flags;

	local_irq_save(flags);
	t1_select_intr_handler(adapter)(adapter->pdev->irq, adapter);
	local_irq_restore(flags);
}
#endif
937 * Periodic accumulation of MAC statistics. This is used only if the MAC
938 * does not have any other way to prevent stats counter overflow.
940 static void mac_stats_task(void *data)
942 int i;
943 struct adapter *adapter = data;
945 for_each_port(adapter, i) {
946 struct port_info *p = &adapter->port[i];
948 if (netif_running(p->dev))
949 p->mac->ops->statistics_update(p->mac,
950 MAC_STATS_UPDATE_FAST);
953 /* Schedule the next statistics update if any port is active. */
954 spin_lock(&adapter->work_lock);
955 if (adapter->open_device_map & PORT_MASK)
956 schedule_mac_stats_update(adapter,
957 adapter->params.stats_update_period);
958 spin_unlock(&adapter->work_lock);
962 * Processes elmer0 external interrupts in process context.
964 static void ext_intr_task(void *data)
966 struct adapter *adapter = data;
968 t1_elmer0_ext_intr_handler(adapter);
970 /* Now reenable external interrupts */
971 spin_lock_irq(&adapter->async_lock);
972 adapter->slow_intr_mask |= F_PL_INTR_EXT;
973 writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
974 writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
975 adapter->regs + A_PL_ENABLE);
976 spin_unlock_irq(&adapter->async_lock);
980 * Interrupt-context handler for elmer0 external interrupts.
982 void t1_elmer0_ext_intr(struct adapter *adapter)
985 * Schedule a task to handle external interrupts as we require
986 * a process context. We disable EXT interrupts in the interim
987 * and let the task reenable them when it's done.
989 adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
990 writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
991 adapter->regs + A_PL_ENABLE);
992 schedule_work(&adapter->ext_intr_handler_task);
995 void t1_fatal_err(struct adapter *adapter)
997 if (adapter->flags & FULL_INIT_DONE) {
998 t1_sge_stop(adapter->sge);
999 t1_interrupts_disable(adapter);
1001 CH_ALERT("%s: encountered fatal error, operation suspended\n",
1002 adapter->name);
1005 static int __devinit init_one(struct pci_dev *pdev,
1006 const struct pci_device_id *ent)
1008 static int version_printed;
1010 int i, err, pci_using_dac = 0;
1011 unsigned long mmio_start, mmio_len;
1012 const struct board_info *bi;
1013 struct adapter *adapter = NULL;
1014 struct port_info *pi;
1016 if (!version_printed) {
1017 printk(KERN_INFO "%s - version %s\n", DRV_DESCRIPTION,
1018 DRV_VERSION);
1019 ++version_printed;
1022 err = pci_enable_device(pdev);
1023 if (err)
1024 return err;
1026 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1027 CH_ERR("%s: cannot find PCI device memory base address\n",
1028 pci_name(pdev));
1029 err = -ENODEV;
1030 goto out_disable_pdev;
1033 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
1034 pci_using_dac = 1;
1036 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
1037 CH_ERR("%s: unable to obtain 64-bit DMA for"
1038 "consistent allocations\n", pci_name(pdev));
1039 err = -ENODEV;
1040 goto out_disable_pdev;
1043 } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
1044 CH_ERR("%s: no usable DMA configuration\n", pci_name(pdev));
1045 goto out_disable_pdev;
1048 err = pci_request_regions(pdev, DRV_NAME);
1049 if (err) {
1050 CH_ERR("%s: cannot obtain PCI resources\n", pci_name(pdev));
1051 goto out_disable_pdev;
1054 pci_set_master(pdev);
1056 mmio_start = pci_resource_start(pdev, 0);
1057 mmio_len = pci_resource_len(pdev, 0);
1058 bi = t1_get_board_info(ent->driver_data);
1060 for (i = 0; i < bi->port_number; ++i) {
1061 struct net_device *netdev;
1063 netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter));
1064 if (!netdev) {
1065 err = -ENOMEM;
1066 goto out_free_dev;
1069 SET_MODULE_OWNER(netdev);
1070 SET_NETDEV_DEV(netdev, &pdev->dev);
1072 if (!adapter) {
1073 adapter = netdev->priv;
1074 adapter->pdev = pdev;
1075 adapter->port[0].dev = netdev; /* so we don't leak it */
1077 adapter->regs = ioremap(mmio_start, mmio_len);
1078 if (!adapter->regs) {
1079 CH_ERR("%s: cannot map device registers\n",
1080 pci_name(pdev));
1081 err = -ENOMEM;
1082 goto out_free_dev;
1085 if (t1_get_board_rev(adapter, bi, &adapter->params)) {
1086 err = -ENODEV; /* Can't handle this chip rev */
1087 goto out_free_dev;
1090 adapter->name = pci_name(pdev);
1091 adapter->msg_enable = dflt_msg_enable;
1092 adapter->mmio_len = mmio_len;
1094 spin_lock_init(&adapter->tpi_lock);
1095 spin_lock_init(&adapter->work_lock);
1096 spin_lock_init(&adapter->async_lock);
1098 INIT_WORK(&adapter->ext_intr_handler_task,
1099 ext_intr_task, adapter);
1100 INIT_WORK(&adapter->stats_update_task, mac_stats_task,
1101 adapter);
1103 pci_set_drvdata(pdev, netdev);
1106 pi = &adapter->port[i];
1107 pi->dev = netdev;
1108 netif_carrier_off(netdev);
1109 netdev->irq = pdev->irq;
1110 netdev->if_port = i;
1111 netdev->mem_start = mmio_start;
1112 netdev->mem_end = mmio_start + mmio_len - 1;
1113 netdev->priv = adapter;
1114 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
1115 netdev->features |= NETIF_F_LLTX;
1117 adapter->flags |= RX_CSUM_ENABLED | TCP_CSUM_CAPABLE;
1118 if (pci_using_dac)
1119 netdev->features |= NETIF_F_HIGHDMA;
1120 if (vlan_tso_capable(adapter)) {
1121 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
1122 adapter->flags |= VLAN_ACCEL_CAPABLE;
1123 netdev->features |=
1124 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1125 netdev->vlan_rx_register = vlan_rx_register;
1126 netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;
1127 #endif
1129 /* T204: disable TSO */
1130 if (!(is_T2(adapter)) || bi->port_number != 4) {
1131 adapter->flags |= TSO_CAPABLE;
1132 netdev->features |= NETIF_F_TSO;
1136 netdev->open = cxgb_open;
1137 netdev->stop = cxgb_close;
1138 netdev->hard_start_xmit = t1_start_xmit;
1139 netdev->hard_header_len += (adapter->flags & TSO_CAPABLE) ?
1140 sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt);
1141 netdev->get_stats = t1_get_stats;
1142 netdev->set_multicast_list = t1_set_rxmode;
1143 netdev->do_ioctl = t1_ioctl;
1144 netdev->change_mtu = t1_change_mtu;
1145 netdev->set_mac_address = t1_set_mac_addr;
1146 #ifdef CONFIG_NET_POLL_CONTROLLER
1147 netdev->poll_controller = t1_netpoll;
1148 #endif
1149 netdev->weight = 64;
1151 SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
1154 if (t1_init_sw_modules(adapter, bi) < 0) {
1155 err = -ENODEV;
1156 goto out_free_dev;
1160 * The card is now ready to go. If any errors occur during device
1161 * registration we do not fail the whole card but rather proceed only
1162 * with the ports we manage to register successfully. However we must
1163 * register at least one net device.
1165 for (i = 0; i < bi->port_number; ++i) {
1166 err = register_netdev(adapter->port[i].dev);
1167 if (err)
1168 CH_WARN("%s: cannot register net device %s, skipping\n",
1169 pci_name(pdev), adapter->port[i].dev->name);
1170 else {
1172 * Change the name we use for messages to the name of
1173 * the first successfully registered interface.
1175 if (!adapter->registered_device_map)
1176 adapter->name = adapter->port[i].dev->name;
1178 __set_bit(i, &adapter->registered_device_map);
1181 if (!adapter->registered_device_map) {
1182 CH_ERR("%s: could not register any net devices\n",
1183 pci_name(pdev));
1184 goto out_release_adapter_res;
1187 printk(KERN_INFO "%s: %s (rev %d), %s %dMHz/%d-bit\n", adapter->name,
1188 bi->desc, adapter->params.chip_revision,
1189 adapter->params.pci.is_pcix ? "PCIX" : "PCI",
1190 adapter->params.pci.speed, adapter->params.pci.width);
1193 * Set the T1B ASIC and memory clocks.
1195 if (t1powersave)
1196 adapter->t1powersave = LCLOCK; /* HW default is powersave mode. */
1197 else
1198 adapter->t1powersave = HCLOCK;
1199 if (t1_is_T1B(adapter))
1200 t1_clock(adapter, t1powersave);
1202 return 0;
1204 out_release_adapter_res:
1205 t1_free_sw_modules(adapter);
1206 out_free_dev:
1207 if (adapter) {
1208 if (adapter->regs)
1209 iounmap(adapter->regs);
1210 for (i = bi->port_number - 1; i >= 0; --i)
1211 if (adapter->port[i].dev)
1212 free_netdev(adapter->port[i].dev);
1214 pci_release_regions(pdev);
1215 out_disable_pdev:
1216 pci_disable_device(pdev);
1217 pci_set_drvdata(pdev, NULL);
1218 return err;
1221 static void bit_bang(struct adapter *adapter, int bitdata, int nbits)
1223 int data;
1224 int i;
1225 u32 val;
1227 enum {
1228 S_CLOCK = 1 << 3,
1229 S_DATA = 1 << 4
1232 for (i = (nbits - 1); i > -1; i--) {
1234 udelay(50);
1236 data = ((bitdata >> i) & 0x1);
1237 __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1239 if (data)
1240 val |= S_DATA;
1241 else
1242 val &= ~S_DATA;
1244 udelay(50);
1246 /* Set SCLOCK low */
1247 val &= ~S_CLOCK;
1248 __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1250 udelay(50);
1252 /* Write SCLOCK high */
1253 val |= S_CLOCK;
1254 __t1_tpi_write(adapter, A_ELMER0_GPO, val);
/*
 * Re-clock a T1B ASIC by serially programming its core and memory clock
 * synthesizers through ELMER0 GPO (see bit_bang() for the wire protocol).
 *
 * @mode: bit 1 set means "query only" — return 0 without touching hardware;
 *        otherwise bit 0 selects HCLOCK (fast/overclock) or LCLOCK
 *        (slow/powersave) divider values.
 *
 * Returns 0 on success, -ENODEV if the chip is not a T1B (only T1B can be
 * re-clocked), or -EALREADY if the ASIC already runs in the requested mode.
 */
static int t1_clock(struct adapter *adapter, int mode)
{
	u32 val;
	int M_CORE_VAL;		/* M divider for the ASIC core synthesizer */
	int M_MEM_VAL;		/* M divider for the memory synthesizer */

	/* Synthesizer field widths/values and the GPO control bits used to
	 * load them.  T/N values are fixed; only M differs between modes. */
	enum {
		M_CORE_BITS = 9,
		T_CORE_VAL = 0,
		T_CORE_BITS = 2,
		N_CORE_VAL = 0,
		N_CORE_BITS = 2,
		M_MEM_BITS = 9,
		T_MEM_VAL = 0,
		T_MEM_BITS = 2,
		N_MEM_VAL = 0,
		N_MEM_BITS = 2,
		NP_LOAD = 1 << 17,
		S_LOAD_MEM = 1 << 5,
		S_LOAD_CORE = 1 << 6,
		S_CLOCK = 1 << 3
	};

	if (!t1_is_T1B(adapter))
		return -ENODEV;	/* Can't re-clock this chip. */

	if (mode & 2) {
		return 0;	/* show current mode. */
	}

	if ((adapter->t1powersave & 1) == (mode & 1))
		return -EALREADY;	/* ASIC already running in mode. */

	if ((mode & 1) == HCLOCK) {
		M_CORE_VAL = 0x14;
		M_MEM_VAL = 0x18;
		adapter->t1powersave = HCLOCK;	/* overclock */
	} else {
		M_CORE_VAL = 0xe;
		M_MEM_VAL = 0x10;
		adapter->t1powersave = LCLOCK;	/* underclock */
	}

	/* Don't interrupt this serial stream! */
	spin_lock(&adapter->tpi_lock);

	/* Initialize for ASIC core: pulse NP_LOAD, then drop the core load
	 * strobe and clock before shifting in the new divider values. */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= NP_LOAD;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_CORE;
	val &= ~S_CLOCK;
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Serial program the ASIC clock synthesizer */
	bit_bang(adapter, T_CORE_VAL, T_CORE_BITS);
	bit_bang(adapter, N_CORE_VAL, N_CORE_BITS);
	bit_bang(adapter, M_CORE_VAL, M_CORE_BITS);
	udelay(50);

	/* Finish ASIC core: pulse S_LOAD_CORE to latch the shifted values. */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= S_LOAD_CORE;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_CORE;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Initialize for memory: same NP_LOAD pulse, then drop the memory
	 * load strobe and clock. */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= NP_LOAD;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_MEM;
	val &= ~S_CLOCK;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Serial program the memory clock synthesizer */
	bit_bang(adapter, T_MEM_VAL, T_MEM_BITS);
	bit_bang(adapter, N_MEM_VAL, N_MEM_BITS);
	bit_bang(adapter, M_MEM_VAL, M_MEM_BITS);
	udelay(50);

	/* Finish memory: pulse S_LOAD_MEM to latch the shifted values. */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= S_LOAD_MEM;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_MEM;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);

	spin_unlock(&adapter->tpi_lock);

	return 0;
}
1370 static inline void t1_sw_reset(struct pci_dev *pdev)
1372 pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3);
1373 pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0);
/*
 * PCI remove callback: tear down one board.
 *
 * Unregisters only the net devices that probe actually registered
 * (tracked in adapter->registered_device_map), releases the software
 * modules and MMIO mapping, frees every allocated net device, then
 * quiesces the PCI device and soft-resets the chip.  A NULL drvdata
 * means probe failed before setting it, so there is nothing to undo.
 */
static void __devexit remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		int i;
		struct adapter *adapter = dev->priv;

		for_each_port(adapter, i)
			if (test_bit(i, &adapter->registered_device_map))
				unregister_netdev(adapter->port[i].dev);

		t1_free_sw_modules(adapter);
		iounmap(adapter->regs);
		/* NOTE(review): assumes for_each_port leaves i equal to the
		 * port count so --i walks all ports backwards — confirm the
		 * macro's post-loop value. */
		while (--i >= 0)
			if (adapter->port[i].dev)
				free_netdev(adapter->port[i].dev);

		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		t1_sw_reset(pdev);
	}
}
/* PCI driver glue: supported-device table plus probe/remove hooks. */
static struct pci_driver driver = {
	.name = DRV_NAME,
	.id_table = t1_pci_tbl,
	.probe = init_one,
	.remove = __devexit_p(remove_one),
};
1408 static int __init t1_init_module(void)
1410 return pci_register_driver(&driver);
1413 static void __exit t1_cleanup_module(void)
1415 pci_unregister_driver(&driver);
/* Register the module entry/exit points with the kernel. */
module_init(t1_init_module);
module_exit(t1_cleanup_module);