chelsio: useless test in cxgb2::remove_one
/*****************************************************************************
 *
 * File: cxgb2.c
 * $Revision: 1.25 $
 * $Date: 2005/06/22 00:43:25 $
 * Description:
 *  Chelsio 10Gb Ethernet Driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * http://www.chelsio.com
 *
 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.
 * All rights reserved.
 *
 * Maintainers: maintainers@chelsio.com
 *
 * Authors: Dimitrios Michailidis <dm@chelsio.com>
 *          Tina Yang <tainay@chelsio.com>
 *          Felix Marti <felix@chelsio.com>
 *          Scott Bardone <sbardone@chelsio.com>
 *          Kurt Ottaway <kottaway@chelsio.com>
 *          Frank DiMambro <frank@chelsio.com>
 *
 * History:
 *
 ****************************************************************************/

#include "common.h"
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <linux/dma-mapping.h>
#include <asm/uaccess.h>

#include "cpl5_cmd.h"
#include "regs.h"
#include "gmac.h"
#include "cphy.h"
#include "sge.h"
#include "tp.h"
#include "espi.h"
#include "elmer0.h"

#include <linux/workqueue.h>

static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
{
	schedule_delayed_work(&ap->stats_update_task, secs * HZ);
}

static inline void cancel_mac_stats_update(struct adapter *ap)
{
	cancel_delayed_work(&ap->stats_update_task);
}

#define MAX_CMDQ_ENTRIES 16384
#define MAX_CMDQ1_ENTRIES 1024
#define MAX_RX_BUFFERS 16384
#define MAX_RX_JUMBO_BUFFERS 16384
#define MAX_TX_BUFFERS_HIGH 16384U
#define MAX_TX_BUFFERS_LOW 1536U
#define MAX_TX_BUFFERS 1460U
#define MIN_FL_ENTRIES 32

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

/*
 * The EEPROM is actually bigger but only the first few bytes are used so we
 * only report those.
 */
#define EEPROM_SIZE 32

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("GPL");

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 default message enable bitmap");

#define HCLOCK 0x0
#define LCLOCK 0x1

/* T1 cards powersave mode */
static int t1_clock(struct adapter *adapter, int mode);
static int t1powersave = 1;	/* HW default is powersave mode. */

module_param(t1powersave, int, 0);
MODULE_PARM_DESC(t1powersave, "Enable/Disable T1 powersaving mode");

static int disable_msi = 0;
module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

static const char pci_speed[][4] = {
	"33", "66", "100", "133"
};

/*
 * Setup MAC to receive the types of packets we want.
 */
static void t1_set_rxmode(struct net_device *dev)
{
	struct adapter *adapter = dev->priv;
	struct cmac *mac = adapter->port[dev->if_port].mac;
	struct t1_rx_mode rm;

	rm.dev = dev;
	rm.idx = 0;
	rm.list = dev->mc_list;
	mac->ops->set_rx_mode(mac, &rm);
}

static void link_report(struct port_info *p)
{
	if (!netif_carrier_ok(p->dev))
		printk(KERN_INFO "%s: link down\n", p->dev->name);
	else {
		const char *s = "10Mbps";

		switch (p->link_config.speed) {
		case SPEED_10000: s = "10Gbps"; break;
		case SPEED_1000:  s = "1000Mbps"; break;
		case SPEED_100:   s = "100Mbps"; break;
		}

		printk(KERN_INFO "%s: link up, %s, %s-duplex\n",
		       p->dev->name, s,
		       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
	}
}

void t1_link_negotiated(struct adapter *adapter, int port_id, int link_stat,
			int speed, int duplex, int pause)
{
	struct port_info *p = &adapter->port[port_id];

	if (link_stat != netif_carrier_ok(p->dev)) {
		if (link_stat)
			netif_carrier_on(p->dev);
		else
			netif_carrier_off(p->dev);
		link_report(p);

		/* multi-ports: inform toe */
		if ((speed > 0) && (adapter->params.nports > 1)) {
			unsigned int sched_speed = 10;
			switch (speed) {
			case SPEED_1000:
				sched_speed = 1000;
				break;
			case SPEED_100:
				sched_speed = 100;
				break;
			case SPEED_10:
				sched_speed = 10;
				break;
			}
			t1_sched_update_parms(adapter->sge, port_id, 0, sched_speed);
		}
	}
}

static void link_start(struct port_info *p)
{
	struct cmac *mac = p->mac;

	mac->ops->reset(mac);
	if (mac->ops->macaddress_set)
		mac->ops->macaddress_set(mac, p->dev->dev_addr);
	t1_set_rxmode(p->dev);
	t1_link_start(p->phy, mac, &p->link_config);
	mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

static void enable_hw_csum(struct adapter *adapter)
{
	if (adapter->flags & TSO_CAPABLE)
		t1_tp_set_ip_checksum_offload(adapter->tp, 1);	/* for TSO only */
	if (adapter->flags & UDP_CSUM_CAPABLE)
		t1_tp_set_udp_checksum_offload(adapter->tp, 1);
	t1_tp_set_tcp_checksum_offload(adapter->tp, 1);
}

/*
 * Things to do upon first use of a card.
 * This must run with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adapter)
{
	int err = 0;

	if (!(adapter->flags & FULL_INIT_DONE)) {
		err = t1_init_hw_modules(adapter);
		if (err)
			goto out_err;

		enable_hw_csum(adapter);
		adapter->flags |= FULL_INIT_DONE;
	}

	t1_interrupts_clear(adapter);

	adapter->params.has_msi = !disable_msi && !pci_enable_msi(adapter->pdev);
	err = request_irq(adapter->pdev->irq, t1_interrupt,
			  adapter->params.has_msi ? 0 : IRQF_SHARED,
			  adapter->name, adapter);
	if (err) {
		if (adapter->params.has_msi)
			pci_disable_msi(adapter->pdev);

		goto out_err;
	}

	t1_sge_start(adapter->sge);
	t1_interrupts_enable(adapter);
out_err:
	return err;
}

/*
 * Release resources when all the ports have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
	t1_sge_stop(adapter->sge);
	t1_interrupts_disable(adapter);
	free_irq(adapter->pdev->irq, adapter);
	if (adapter->params.has_msi)
		pci_disable_msi(adapter->pdev);
}

static int cxgb_open(struct net_device *dev)
{
	int err;
	struct adapter *adapter = dev->priv;
	int other_ports = adapter->open_device_map & PORT_MASK;

	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
		return err;

	__set_bit(dev->if_port, &adapter->open_device_map);
	link_start(&adapter->port[dev->if_port]);
	netif_start_queue(dev);
	if (!other_ports && adapter->params.stats_update_period)
		schedule_mac_stats_update(adapter,
					  adapter->params.stats_update_period);
	return 0;
}

static int cxgb_close(struct net_device *dev)
{
	struct adapter *adapter = dev->priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct cmac *mac = p->mac;

	netif_stop_queue(dev);
	mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
	netif_carrier_off(dev);

	clear_bit(dev->if_port, &adapter->open_device_map);
	if (adapter->params.stats_update_period &&
	    !(adapter->open_device_map & PORT_MASK)) {
		/* Stop statistics accumulation. */
		smp_mb__after_clear_bit();
		spin_lock(&adapter->work_lock);	/* sync with update task */
		spin_unlock(&adapter->work_lock);
		cancel_mac_stats_update(adapter);
	}

	if (!adapter->open_device_map)
		cxgb_down(adapter);
	return 0;
}

static struct net_device_stats *t1_get_stats(struct net_device *dev)
{
	struct adapter *adapter = dev->priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct net_device_stats *ns = &p->netstats;
	const struct cmac_statistics *pstats;

	/* Do a full update of the MAC stats */
	pstats = p->mac->ops->statistics_update(p->mac,
						MAC_STATS_UPDATE_FULL);

	ns->tx_packets = pstats->TxUnicastFramesOK +
		pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK;

	ns->rx_packets = pstats->RxUnicastFramesOK +
		pstats->RxMulticastFramesOK + pstats->RxBroadcastFramesOK;

	ns->tx_bytes = pstats->TxOctetsOK;
	ns->rx_bytes = pstats->RxOctetsOK;

	ns->tx_errors = pstats->TxLateCollisions + pstats->TxLengthErrors +
		pstats->TxUnderrun + pstats->TxFramesAbortedDueToXSCollisions;
	ns->rx_errors = pstats->RxDataErrors + pstats->RxJabberErrors +
		pstats->RxFCSErrors + pstats->RxAlignErrors +
		pstats->RxSequenceErrors + pstats->RxFrameTooLongErrors +
		pstats->RxSymbolErrors + pstats->RxRuntErrors;

	ns->multicast = pstats->RxMulticastFramesOK;
	ns->collisions = pstats->TxTotalCollisions;

	/* detailed rx_errors */
	ns->rx_length_errors = pstats->RxFrameTooLongErrors +
		pstats->RxJabberErrors;
	ns->rx_over_errors = 0;
	ns->rx_crc_errors = pstats->RxFCSErrors;
	ns->rx_frame_errors = pstats->RxAlignErrors;
	ns->rx_fifo_errors = 0;
	ns->rx_missed_errors = 0;

	/* detailed tx_errors */
	ns->tx_aborted_errors = pstats->TxFramesAbortedDueToXSCollisions;
	ns->tx_carrier_errors = 0;
	ns->tx_fifo_errors = pstats->TxUnderrun;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors = pstats->TxLateCollisions;
	return ns;
}

static u32 get_msglevel(struct net_device *dev)
{
	struct adapter *adapter = dev->priv;

	return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	struct adapter *adapter = dev->priv;

	adapter->msg_enable = val;
}

static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK",
	"TxOctetsBad",
	"TxUnicastFramesOK",
	"TxMulticastFramesOK",
	"TxBroadcastFramesOK",
	"TxPauseFrames",
	"TxFramesWithDeferredXmissions",
	"TxLateCollisions",
	"TxTotalCollisions",
	"TxFramesAbortedDueToXSCollisions",
	"TxUnderrun",
	"TxLengthErrors",
	"TxInternalMACXmitError",
	"TxFramesWithExcessiveDeferral",
	"TxFCSErrors",

	"RxOctetsOK",
	"RxOctetsBad",
	"RxUnicastFramesOK",
	"RxMulticastFramesOK",
	"RxBroadcastFramesOK",
	"RxPauseFrames",
	"RxFCSErrors",
	"RxAlignErrors",
	"RxSymbolErrors",
	"RxDataErrors",
	"RxSequenceErrors",
	"RxRuntErrors",
	"RxJabberErrors",
	"RxInternalMACRcvError",
	"RxInRangeLengthErrors",
	"RxOutOfRangeLengthField",
	"RxFrameTooLongErrors",

	/* Port stats */
	"RxPackets",
	"RxCsumGood",
	"TxPackets",
	"TxCsumOffload",
	"TxTso",
	"RxVlan",
	"TxVlan",

	/* Interrupt stats */
	"rx drops",
	"pure_rsps",
	"unhandled irqs",
	"respQ_empty",
	"respQ_overflow",
	"freelistQ_empty",
	"pkt_too_big",
	"pkt_mismatch",
	"cmdQ_full0",
	"cmdQ_full1",

	"espi_DIP2ParityErr",
	"espi_DIP4Err",
	"espi_RxDrops",
	"espi_TxDrops",
	"espi_RxOvfl",
	"espi_ParityErr"
};

#define T2_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
	return T2_REGMAP_SIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct adapter *adapter = dev->priv;

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->fw_version, "N/A");
	strcpy(info->bus_info, pci_name(adapter->pdev));
}

static int get_stats_count(struct net_device *dev)
{
	return ARRAY_SIZE(stats_strings);
}

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct adapter *adapter = dev->priv;
	struct cmac *mac = adapter->port[dev->if_port].mac;
	const struct cmac_statistics *s;
	const struct sge_intr_counts *t;
	struct sge_port_stats ss;

	s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);

	*data++ = s->TxOctetsOK;
	*data++ = s->TxOctetsBad;
	*data++ = s->TxUnicastFramesOK;
	*data++ = s->TxMulticastFramesOK;
	*data++ = s->TxBroadcastFramesOK;
	*data++ = s->TxPauseFrames;
	*data++ = s->TxFramesWithDeferredXmissions;
	*data++ = s->TxLateCollisions;
	*data++ = s->TxTotalCollisions;
	*data++ = s->TxFramesAbortedDueToXSCollisions;
	*data++ = s->TxUnderrun;
	*data++ = s->TxLengthErrors;
	*data++ = s->TxInternalMACXmitError;
	*data++ = s->TxFramesWithExcessiveDeferral;
	*data++ = s->TxFCSErrors;

	*data++ = s->RxOctetsOK;
	*data++ = s->RxOctetsBad;
	*data++ = s->RxUnicastFramesOK;
	*data++ = s->RxMulticastFramesOK;
	*data++ = s->RxBroadcastFramesOK;
	*data++ = s->RxPauseFrames;
	*data++ = s->RxFCSErrors;
	*data++ = s->RxAlignErrors;
	*data++ = s->RxSymbolErrors;
	*data++ = s->RxDataErrors;
	*data++ = s->RxSequenceErrors;
	*data++ = s->RxRuntErrors;
	*data++ = s->RxJabberErrors;
	*data++ = s->RxInternalMACRcvError;
	*data++ = s->RxInRangeLengthErrors;
	*data++ = s->RxOutOfRangeLengthField;
	*data++ = s->RxFrameTooLongErrors;

	t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss);
	*data++ = ss.rx_packets;
	*data++ = ss.rx_cso_good;
	*data++ = ss.tx_packets;
	*data++ = ss.tx_cso;
	*data++ = ss.tx_tso;
	*data++ = ss.vlan_xtract;
	*data++ = ss.vlan_insert;

	t = t1_sge_get_intr_counts(adapter->sge);
	*data++ = t->rx_drops;
	*data++ = t->pure_rsps;
	*data++ = t->unhandled_irqs;
	*data++ = t->respQ_empty;
	*data++ = t->respQ_overflow;
	*data++ = t->freelistQ_empty;
	*data++ = t->pkt_too_big;
	*data++ = t->pkt_mismatch;
	*data++ = t->cmdQ_full[0];
	*data++ = t->cmdQ_full[1];

	if (adapter->espi) {
		const struct espi_intr_counts *e;

		e = t1_espi_get_intr_counts(adapter->espi);
		*data++ = e->DIP2_parity_err;
		*data++ = e->DIP4_err;
		*data++ = e->rx_drops;
		*data++ = e->tx_drops;
		*data++ = e->rx_ovflw;
		*data++ = e->parity_err;
	}
}

static inline void reg_block_dump(struct adapter *ap, void *buf,
				  unsigned int start, unsigned int end)
{
	u32 *p = buf + start;

	for ( ; start <= end; start += sizeof(u32))
		*p++ = readl(ap->regs + start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	struct adapter *ap = dev->priv;

	/*
	 * Version scheme: bits 0..9: chip version, bits 10..15: chip revision
	 */
	regs->version = 2;

	memset(buf, 0, T2_REGMAP_SIZE);
	reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER);
	reg_block_dump(ap, buf, A_MC3_CFG, A_MC4_INT_CAUSE);
	reg_block_dump(ap, buf, A_TPI_ADDR, A_TPI_PAR);
	reg_block_dump(ap, buf, A_TP_IN_CONFIG, A_TP_TX_DROP_COUNT);
	reg_block_dump(ap, buf, A_RAT_ROUTE_CONTROL, A_RAT_INTR_CAUSE);
	reg_block_dump(ap, buf, A_CSPI_RX_AE_WM, A_CSPI_INTR_ENABLE);
	reg_block_dump(ap, buf, A_ESPI_SCH_TOKEN0, A_ESPI_GOSTAT);
	reg_block_dump(ap, buf, A_ULP_ULIMIT, A_ULP_PIO_CTRL);
	reg_block_dump(ap, buf, A_PL_ENABLE, A_PL_CAUSE);
	reg_block_dump(ap, buf, A_MC5_CONFIG, A_MC5_MASK_WRITE_CMD);
}

static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct adapter *adapter = dev->priv;
	struct port_info *p = &adapter->port[dev->if_port];

	cmd->supported = p->link_config.supported;
	cmd->advertising = p->link_config.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = p->link_config.speed;
		cmd->duplex = p->link_config.duplex;
	} else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}

	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
	cmd->phy_address = p->phy->addr;
	cmd->transceiver = XCVR_EXTERNAL;
	cmd->autoneg = p->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}

static int speed_duplex_to_caps(int speed, int duplex)
{
	int cap = 0;

	switch (speed) {
	case SPEED_10:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10baseT_Full;
		else
			cap = SUPPORTED_10baseT_Half;
		break;
	case SPEED_100:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_100baseT_Full;
		else
			cap = SUPPORTED_100baseT_Half;
		break;
	case SPEED_1000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_1000baseT_Full;
		else
			cap = SUPPORTED_1000baseT_Half;
		break;
	case SPEED_10000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10000baseT_Full;
	}
	return cap;
}

#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
			 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
			 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
			 ADVERTISED_10000baseT_Full)

static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct adapter *adapter = dev->priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct link_config *lc = &p->link_config;

	if (!(lc->supported & SUPPORTED_Autoneg))
		return -EOPNOTSUPP;	/* can't change speed/duplex */

	if (cmd->autoneg == AUTONEG_DISABLE) {
		int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

		if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
			return -EINVAL;
		lc->requested_speed = cmd->speed;
		lc->requested_duplex = cmd->duplex;
		lc->advertising = 0;
	} else {
		cmd->advertising &= ADVERTISED_MASK;
		if (cmd->advertising & (cmd->advertising - 1))
			cmd->advertising = lc->supported;
		cmd->advertising &= lc->supported;
		if (!cmd->advertising)
			return -EINVAL;
		lc->requested_speed = SPEED_INVALID;
		lc->requested_duplex = DUPLEX_INVALID;
		lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
	}
	lc->autoneg = cmd->autoneg;
	if (netif_running(dev))
		t1_link_start(p->phy, p->mac, lc);
	return 0;
}

static void get_pauseparam(struct net_device *dev,
			   struct ethtool_pauseparam *epause)
{
	struct adapter *adapter = dev->priv;
	struct port_info *p = &adapter->port[dev->if_port];

	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct adapter *adapter = dev->priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct link_config *lc = &p->link_config;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & SUPPORTED_Autoneg)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (lc->autoneg == AUTONEG_ENABLE) {
		if (netif_running(dev))
			t1_link_start(p->phy, p->mac, lc);
	} else {
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
		if (netif_running(dev))
			p->mac->ops->set_speed_duplex_fc(p->mac, -1, -1,
							 lc->fc);
	}
	return 0;
}

static u32 get_rx_csum(struct net_device *dev)
{
	struct adapter *adapter = dev->priv;

	return (adapter->flags & RX_CSUM_ENABLED) != 0;
}

static int set_rx_csum(struct net_device *dev, u32 data)
{
	struct adapter *adapter = dev->priv;

	if (data)
		adapter->flags |= RX_CSUM_ENABLED;
	else
		adapter->flags &= ~RX_CSUM_ENABLED;
	return 0;
}

static int set_tso(struct net_device *dev, u32 value)
{
	struct adapter *adapter = dev->priv;

	if (!(adapter->flags & TSO_CAPABLE))
		return value ? -EOPNOTSUPP : 0;
	return ethtool_op_set_tso(dev, value);
}

static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct adapter *adapter = dev->priv;
	int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_mini_max_pending = 0;
	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
	e->tx_max_pending = MAX_CMDQ_ENTRIES;

	e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl];
	e->rx_mini_pending = 0;
	e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl];
	e->tx_pending = adapter->params.sge.cmdQ_size[0];
}

static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct adapter *adapter = dev->priv;
	int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;

	if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending ||
	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
	    e->tx_pending > MAX_CMDQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES ||
	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
	    e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1))
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
	adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
	adapter->params.sge.cmdQ_size[0] = e->tx_pending;
	adapter->params.sge.cmdQ_size[1] = e->tx_pending > MAX_CMDQ1_ENTRIES ?
		MAX_CMDQ1_ENTRIES : e->tx_pending;
	return 0;
}

static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct adapter *adapter = dev->priv;

	adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
	adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
	adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
	t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
	return 0;
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct adapter *adapter = dev->priv;

	c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs;
	c->rate_sample_interval = adapter->params.sge.sample_interval_usecs;
	c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable;
	return 0;
}

static int get_eeprom_len(struct net_device *dev)
{
	struct adapter *adapter = dev->priv;

	return t1_is_asic(adapter) ? EEPROM_SIZE : 0;
}

#define EEPROM_MAGIC(ap) \
	(PCI_VENDOR_ID_CHELSIO | ((ap)->params.chip_version << 16))

static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 *data)
{
	int i;
	u8 buf[EEPROM_SIZE] __attribute__((aligned(4)));
	struct adapter *adapter = dev->priv;

	e->magic = EEPROM_MAGIC(adapter);
	for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32))
		t1_seeprom_read(adapter, i, (u32 *)&buf[i]);
	memcpy(data, buf + e->offset, e->len);
	return 0;
}

static const struct ethtool_ops t1_ethtool_ops = {
	.get_settings = get_settings,
	.set_settings = set_settings,
	.get_drvinfo = get_drvinfo,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_ringparam = get_sge_param,
	.set_ringparam = set_sge_param,
	.get_coalesce = get_coalesce,
	.set_coalesce = set_coalesce,
	.get_eeprom_len = get_eeprom_len,
	.get_eeprom = get_eeprom,
	.get_pauseparam = get_pauseparam,
	.set_pauseparam = set_pauseparam,
	.get_rx_csum = get_rx_csum,
	.set_rx_csum = set_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = ethtool_op_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_link = ethtool_op_get_link,
	.get_strings = get_strings,
	.get_stats_count = get_stats_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len = get_regs_len,
	.get_regs = get_regs,
	.get_tso = ethtool_op_get_tso,
	.set_tso = set_tso,
};

static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct adapter *adapter = dev->priv;
	struct mii_ioctl_data *data = if_mii(req);

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = adapter->port[dev->if_port].phy->addr;
		/* FALLTHRU */
	case SIOCGMIIREG: {
		struct cphy *phy = adapter->port[dev->if_port].phy;
		u32 val;

		if (!phy->mdio_read)
			return -EOPNOTSUPP;
		phy->mdio_read(adapter, data->phy_id, 0, data->reg_num & 0x1f,
			       &val);
		data->val_out = val;
		break;
	}
	case SIOCSMIIREG: {
		struct cphy *phy = adapter->port[dev->if_port].phy;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (!phy->mdio_write)
			return -EOPNOTSUPP;
		phy->mdio_write(adapter, data->phy_id, 0, data->reg_num & 0x1f,
				data->val_in);
		break;
	}

	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static int t1_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;
	struct adapter *adapter = dev->priv;
	struct cmac *mac = adapter->port[dev->if_port].mac;

	if (!mac->ops->set_mtu)
		return -EOPNOTSUPP;
	if (new_mtu < 68)
		return -EINVAL;
	if ((ret = mac->ops->set_mtu(mac, new_mtu)))
		return ret;
	dev->mtu = new_mtu;
	return 0;
}

static int t1_set_mac_addr(struct net_device *dev, void *p)
{
	struct adapter *adapter = dev->priv;
	struct cmac *mac = adapter->port[dev->if_port].mac;
	struct sockaddr *addr = p;

	if (!mac->ops->macaddress_set)
		return -EOPNOTSUPP;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	mac->ops->macaddress_set(mac, dev->dev_addr);
	return 0;
}

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
static void vlan_rx_register(struct net_device *dev,
			     struct vlan_group *grp)
{
	struct adapter *adapter = dev->priv;

	spin_lock_irq(&adapter->async_lock);
	adapter->vlan_grp = grp;
	t1_set_vlan_accel(adapter, grp != NULL);
	spin_unlock_irq(&adapter->async_lock);
}

static void vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct adapter *adapter = dev->priv;

	spin_lock_irq(&adapter->async_lock);
	if (adapter->vlan_grp)
		adapter->vlan_grp->vlan_devices[vid] = NULL;
	spin_unlock_irq(&adapter->async_lock);
}
#endif

#ifdef CONFIG_NET_POLL_CONTROLLER
static void t1_netpoll(struct net_device *dev)
{
	unsigned long flags;
	struct adapter *adapter = dev->priv;

	local_irq_save(flags);
	t1_interrupt(adapter->pdev->irq, adapter);
	local_irq_restore(flags);
}
#endif

/*
 * Periodic accumulation of MAC statistics. This is used only if the MAC
 * does not have any other way to prevent stats counter overflow.
 */
static void mac_stats_task(struct work_struct *work)
{
	int i;
	struct adapter *adapter =
		container_of(work, struct adapter, stats_update_task.work);

	for_each_port(adapter, i) {
		struct port_info *p = &adapter->port[i];

		if (netif_running(p->dev))
			p->mac->ops->statistics_update(p->mac,
						       MAC_STATS_UPDATE_FAST);
	}

	/* Schedule the next statistics update if any port is active. */
	spin_lock(&adapter->work_lock);
	if (adapter->open_device_map & PORT_MASK)
		schedule_mac_stats_update(adapter,
					  adapter->params.stats_update_period);
	spin_unlock(&adapter->work_lock);
}

/*
 * Processes elmer0 external interrupts in process context.
 */
static void ext_intr_task(struct work_struct *work)
{
	struct adapter *adapter =
		container_of(work, struct adapter, ext_intr_handler_task);

	t1_elmer0_ext_intr_handler(adapter);

	/* Now reenable external interrupts */
	spin_lock_irq(&adapter->async_lock);
	adapter->slow_intr_mask |= F_PL_INTR_EXT;
	writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
	       adapter->regs + A_PL_ENABLE);
	spin_unlock_irq(&adapter->async_lock);
}

/*
 * Interrupt-context handler for elmer0 external interrupts.
 */
void t1_elmer0_ext_intr(struct adapter *adapter)
{
	/*
	 * Schedule a task to handle external interrupts as we require
	 * a process context. We disable EXT interrupts in the interim
	 * and let the task reenable them when it's done.
	 */
	adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
	       adapter->regs + A_PL_ENABLE);
	schedule_work(&adapter->ext_intr_handler_task);
}

void t1_fatal_err(struct adapter *adapter)
{
	if (adapter->flags & FULL_INIT_DONE) {
		t1_sge_stop(adapter->sge);
		t1_interrupts_disable(adapter);
	}
	CH_ALERT("%s: encountered fatal error, operation suspended\n",
		 adapter->name);
}

static int __devinit init_one(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	static int version_printed;

	int i, err, pci_using_dac = 0;
	unsigned long mmio_start, mmio_len;
	const struct board_info *bi;
	struct adapter *adapter = NULL;
	struct port_info *pi;

	if (!version_printed) {
		printk(KERN_INFO "%s - version %s\n", DRV_DESCRIPTION,
		       DRV_VERSION);
		++version_printed;
	}

	err = pci_enable_device(pdev);
	if (err)
		return err;

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		CH_ERR("%s: cannot find PCI device memory base address\n",
		       pci_name(pdev));
		err = -ENODEV;
		goto out_disable_pdev;
	}

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		pci_using_dac = 1;

		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
			CH_ERR("%s: unable to obtain 64-bit DMA for"
			       "consistent allocations\n", pci_name(pdev));
			err = -ENODEV;
			goto out_disable_pdev;
		}

	} else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
		CH_ERR("%s: no usable DMA configuration\n", pci_name(pdev));
		goto out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		CH_ERR("%s: cannot obtain PCI resources\n", pci_name(pdev));
		goto out_disable_pdev;
	}

	pci_set_master(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	bi = t1_get_board_info(ent->driver_data);

	for (i = 0; i < bi->port_number; ++i) {
		struct net_device *netdev;

		netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter));
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_MODULE_OWNER(netdev);
		SET_NETDEV_DEV(netdev, &pdev->dev);

		if (!adapter) {
			adapter = netdev->priv;
			adapter->pdev = pdev;
			adapter->port[0].dev = netdev;	/* so we don't leak it */

			adapter->regs = ioremap(mmio_start, mmio_len);
			if (!adapter->regs) {
				CH_ERR("%s: cannot map device registers\n",
				       pci_name(pdev));
				err = -ENOMEM;
				goto out_free_dev;
			}

			if (t1_get_board_rev(adapter, bi, &adapter->params)) {
				err = -ENODEV;	/* Can't handle this chip rev */
				goto out_free_dev;
			}

			adapter->name = pci_name(pdev);
			adapter->msg_enable = dflt_msg_enable;
			adapter->mmio_len = mmio_len;

			spin_lock_init(&adapter->tpi_lock);
			spin_lock_init(&adapter->work_lock);
			spin_lock_init(&adapter->async_lock);
			spin_lock_init(&adapter->mac_lock);

			INIT_WORK(&adapter->ext_intr_handler_task,
				  ext_intr_task);
			INIT_DELAYED_WORK(&adapter->stats_update_task,
					  mac_stats_task);
			pci_set_drvdata(pdev, netdev);
		}

		pi = &adapter->port[i];
		pi->dev = netdev;
		netif_carrier_off(netdev);
		netdev->irq = pdev->irq;
		netdev->if_port = i;
		netdev->mem_start = mmio_start;
		netdev->mem_end = mmio_start + mmio_len - 1;
		netdev->priv = adapter;
		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
		netdev->features |= NETIF_F_LLTX;

		adapter->flags |= RX_CSUM_ENABLED | TCP_CSUM_CAPABLE;
		if (pci_using_dac)
			netdev->features |= NETIF_F_HIGHDMA;
		if (vlan_tso_capable(adapter)) {
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
			adapter->flags |= VLAN_ACCEL_CAPABLE;
			netdev->features |=
				NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
			netdev->vlan_rx_register = vlan_rx_register;
			netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;
#endif

			/* T204: disable TSO */
			if (!(is_T2(adapter)) || bi->port_number != 4) {
				adapter->flags |= TSO_CAPABLE;
				netdev->features |= NETIF_F_TSO;
			}
		}

		netdev->open = cxgb_open;
		netdev->stop = cxgb_close;
		netdev->hard_start_xmit = t1_start_xmit;
		netdev->hard_header_len += (adapter->flags & TSO_CAPABLE) ?
			sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt);
		netdev->get_stats = t1_get_stats;
		netdev->set_multicast_list = t1_set_rxmode;
		netdev->do_ioctl = t1_ioctl;
		netdev->change_mtu = t1_change_mtu;
		netdev->set_mac_address = t1_set_mac_addr;
#ifdef CONFIG_NET_POLL_CONTROLLER
		netdev->poll_controller = t1_netpoll;
#endif
#ifdef CONFIG_CHELSIO_T1_NAPI
		netdev->weight = 64;
		netdev->poll = t1_poll;
#endif
		SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
	}

	if (t1_init_sw_modules(adapter, bi) < 0) {
		err = -ENODEV;
		goto out_free_dev;
	}

	/*
	 * The card is now ready to go. If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully. However we must
	 * register at least one net device.
	 */
	for (i = 0; i < bi->port_number; ++i) {
		err = register_netdev(adapter->port[i].dev);
		if (err)
			CH_WARN("%s: cannot register net device %s, skipping\n",
				pci_name(pdev), adapter->port[i].dev->name);
		else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i].dev->name;

			__set_bit(i, &adapter->registered_device_map);
		}
	}
	if (!adapter->registered_device_map) {
		CH_ERR("%s: could not register any net devices\n",
		       pci_name(pdev));
		goto out_release_adapter_res;
	}

	printk(KERN_INFO "%s: %s (rev %d), %s %dMHz/%d-bit\n", adapter->name,
	       bi->desc, adapter->params.chip_revision,
	       adapter->params.pci.is_pcix ? "PCIX" : "PCI",
	       adapter->params.pci.speed, adapter->params.pci.width);

	/*
	 * Set the T1B ASIC and memory clocks.
	 */
	if (t1powersave)
		adapter->t1powersave = LCLOCK;	/* HW default is powersave mode. */
	else
		adapter->t1powersave = HCLOCK;
	if (t1_is_T1B(adapter))
		t1_clock(adapter, t1powersave);

	return 0;

out_release_adapter_res:
	t1_free_sw_modules(adapter);
out_free_dev:
	if (adapter) {
		if (adapter->regs)
			iounmap(adapter->regs);
		for (i = bi->port_number - 1; i >= 0; --i)
			if (adapter->port[i].dev)
				free_netdev(adapter->port[i].dev);
	}
	pci_release_regions(pdev);
out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

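/*
 * Descriptive note on the helper below: bit_bang() shifts 'nbits' bits of
 * 'bitdata' out to the clock synthesizer, most-significant bit first, through
 * the ELMER0 GPO register. For each bit it sets or clears the serial data
 * line (S_DATA) and then pulses the serial clock (S_CLOCK) low, then high.
 * It is called from t1_clock() with adapter->tpi_lock held.
 */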
static void bit_bang(struct adapter *adapter, int bitdata, int nbits)
{
	int data;
	int i;
	u32 val;

	enum {
		S_CLOCK = 1 << 3,
		S_DATA = 1 << 4
	};

	for (i = (nbits - 1); i > -1; i--) {

		udelay(50);

		data = ((bitdata >> i) & 0x1);
		__t1_tpi_read(adapter, A_ELMER0_GPO, &val);

		if (data)
			val |= S_DATA;
		else
			val &= ~S_DATA;

		udelay(50);

		/* Set SCLOCK low */
		val &= ~S_CLOCK;
		__t1_tpi_write(adapter, A_ELMER0_GPO, val);

		udelay(50);

		/* Write SCLOCK high */
		val |= S_CLOCK;
		__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	}
}

static int t1_clock(struct adapter *adapter, int mode)
{
	u32 val;
	int M_CORE_VAL;
	int M_MEM_VAL;

	enum {
		M_CORE_BITS = 9,
		T_CORE_VAL = 0,
		T_CORE_BITS = 2,
		N_CORE_VAL = 0,
		N_CORE_BITS = 2,
		M_MEM_BITS = 9,
		T_MEM_VAL = 0,
		T_MEM_BITS = 2,
		N_MEM_VAL = 0,
		N_MEM_BITS = 2,
		NP_LOAD = 1 << 17,
		S_LOAD_MEM = 1 << 5,
		S_LOAD_CORE = 1 << 6,
		S_CLOCK = 1 << 3
	};

	if (!t1_is_T1B(adapter))
		return -ENODEV;	/* Can't re-clock this chip. */

	if (mode & 2)
		return 0;	/* show current mode. */

	if ((adapter->t1powersave & 1) == (mode & 1))
		return -EALREADY;	/* ASIC already running in mode. */

	if ((mode & 1) == HCLOCK) {
		M_CORE_VAL = 0x14;
		M_MEM_VAL = 0x18;
		adapter->t1powersave = HCLOCK;	/* overclock */
	} else {
		M_CORE_VAL = 0xe;
		M_MEM_VAL = 0x10;
		adapter->t1powersave = LCLOCK;	/* underclock */
	}

	/* Don't interrupt this serial stream! */
	spin_lock(&adapter->tpi_lock);

	/* Initialize for ASIC core */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= NP_LOAD;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_CORE;
	val &= ~S_CLOCK;
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Serial program the ASIC clock synthesizer */
	bit_bang(adapter, T_CORE_VAL, T_CORE_BITS);
	bit_bang(adapter, N_CORE_VAL, N_CORE_BITS);
	bit_bang(adapter, M_CORE_VAL, M_CORE_BITS);
	udelay(50);

	/* Finish ASIC core */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= S_LOAD_CORE;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_CORE;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Initialize for memory */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= NP_LOAD;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_MEM;
	val &= ~S_CLOCK;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Serial program the memory clock synthesizer */
	bit_bang(adapter, T_MEM_VAL, T_MEM_BITS);
	bit_bang(adapter, N_MEM_VAL, N_MEM_BITS);
	bit_bang(adapter, M_MEM_VAL, M_MEM_BITS);
	udelay(50);

	/* Finish memory */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= S_LOAD_MEM;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_MEM;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);

	spin_unlock(&adapter->tpi_lock);

	return 0;
}

static inline void t1_sw_reset(struct pci_dev *pdev)
{
	pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3);
	pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0);
}

static void __devexit remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct adapter *adapter = dev->priv;
	int i;

	for_each_port(adapter, i) {
		if (test_bit(i, &adapter->registered_device_map))
			unregister_netdev(adapter->port[i].dev);
	}

	t1_free_sw_modules(adapter);
	iounmap(adapter->regs);

	while (--i >= 0) {
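		/* The commit subject ("useless test in cxgb2::remove_one")
		 * refers to the NULL check below: init_one() either allocates
		 * a net_device for every port or fails the probe, so
		 * port[i].dev can never be NULL at this point. */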
		if (adapter->port[i].dev)
			free_netdev(adapter->port[i].dev);
	}

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	t1_sw_reset(pdev);
}

static struct pci_driver driver = {
	.name = DRV_NAME,
	.id_table = t1_pci_tbl,
	.probe = init_one,
	.remove = __devexit_p(remove_one),
};

static int __init t1_init_module(void)
{
	return pci_register_driver(&driver);
}

static void __exit t1_cleanup_module(void)
{
	pci_unregister_driver(&driver);
}

module_init(t1_init_module);
module_exit(t1_cleanup_module);