Fix dnotify/close race
[linux-2.6.22.y-op.git] / drivers / net / chelsio / cxgb2.c
bloba82a1fabaedd11d2215b90ca9be8afed51ab3526
1 /*****************************************************************************
2 * *
3 * File: cxgb2.c *
4 * $Revision: 1.25 $ *
5 * $Date: 2005/06/22 00:43:25 $ *
6 * Description: *
7 * Chelsio 10Gb Ethernet Driver. *
8 * *
9 * This program is free software; you can redistribute it and/or modify *
10 * it under the terms of the GNU General Public License, version 2, as *
11 * published by the Free Software Foundation. *
12 * *
13 * You should have received a copy of the GNU General Public License along *
14 * with this program; if not, write to the Free Software Foundation, Inc., *
15 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
16 * *
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
20 * *
21 * http://www.chelsio.com *
22 * *
23 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
24 * All rights reserved. *
25 * *
26 * Maintainers: maintainers@chelsio.com *
27 * *
28 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
29 * Tina Yang <tainay@chelsio.com> *
30 * Felix Marti <felix@chelsio.com> *
31 * Scott Bardone <sbardone@chelsio.com> *
32 * Kurt Ottaway <kottaway@chelsio.com> *
33 * Frank DiMambro <frank@chelsio.com> *
34 * *
35 * History: *
36 * *
37 ****************************************************************************/
39 #include "common.h"
40 #include <linux/module.h>
41 #include <linux/init.h>
42 #include <linux/pci.h>
43 #include <linux/netdevice.h>
44 #include <linux/etherdevice.h>
45 #include <linux/if_vlan.h>
46 #include <linux/mii.h>
47 #include <linux/sockios.h>
48 #include <linux/dma-mapping.h>
49 #include <asm/uaccess.h>
51 #include "cpl5_cmd.h"
52 #include "regs.h"
53 #include "gmac.h"
54 #include "cphy.h"
55 #include "sge.h"
56 #include "tp.h"
57 #include "espi.h"
58 #include "elmer0.h"
60 #include <linux/workqueue.h>
62 static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
64 schedule_delayed_work(&ap->stats_update_task, secs * HZ);
67 static inline void cancel_mac_stats_update(struct adapter *ap)
69 cancel_delayed_work(&ap->stats_update_task);
72 #define MAX_CMDQ_ENTRIES 16384
73 #define MAX_CMDQ1_ENTRIES 1024
74 #define MAX_RX_BUFFERS 16384
75 #define MAX_RX_JUMBO_BUFFERS 16384
76 #define MAX_TX_BUFFERS_HIGH 16384U
77 #define MAX_TX_BUFFERS_LOW 1536U
78 #define MAX_TX_BUFFERS 1460U
79 #define MIN_FL_ENTRIES 32
81 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
82 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
83 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
86 * The EEPROM is actually bigger but only the first few bytes are used so we
87 * only report those.
89 #define EEPROM_SIZE 32
91 MODULE_DESCRIPTION(DRV_DESCRIPTION);
92 MODULE_AUTHOR("Chelsio Communications");
93 MODULE_LICENSE("GPL");
95 static int dflt_msg_enable = DFLT_MSG_ENABLE;
97 module_param(dflt_msg_enable, int, 0);
98 MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 default message enable bitmap");
100 #define HCLOCK 0x0
101 #define LCLOCK 0x1
103 /* T1 cards powersave mode */
104 static int t1_clock(struct adapter *adapter, int mode);
105 static int t1powersave = 1; /* HW default is powersave mode. */
107 module_param(t1powersave, int, 0);
108 MODULE_PARM_DESC(t1powersave, "Enable/Disable T1 powersaving mode");
110 static int disable_msi = 0;
111 module_param(disable_msi, int, 0);
112 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
114 static const char pci_speed[][4] = {
115 "33", "66", "100", "133"
119 * Setup MAC to receive the types of packets we want.
121 static void t1_set_rxmode(struct net_device *dev)
123 struct adapter *adapter = dev->priv;
124 struct cmac *mac = adapter->port[dev->if_port].mac;
125 struct t1_rx_mode rm;
127 rm.dev = dev;
128 rm.idx = 0;
129 rm.list = dev->mc_list;
130 mac->ops->set_rx_mode(mac, &rm);
133 static void link_report(struct port_info *p)
135 if (!netif_carrier_ok(p->dev))
136 printk(KERN_INFO "%s: link down\n", p->dev->name);
137 else {
138 const char *s = "10Mbps";
140 switch (p->link_config.speed) {
141 case SPEED_10000: s = "10Gbps"; break;
142 case SPEED_1000: s = "1000Mbps"; break;
143 case SPEED_100: s = "100Mbps"; break;
146 printk(KERN_INFO "%s: link up, %s, %s-duplex\n",
147 p->dev->name, s,
148 p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
152 void t1_link_negotiated(struct adapter *adapter, int port_id, int link_stat,
153 int speed, int duplex, int pause)
155 struct port_info *p = &adapter->port[port_id];
157 if (link_stat != netif_carrier_ok(p->dev)) {
158 if (link_stat)
159 netif_carrier_on(p->dev);
160 else
161 netif_carrier_off(p->dev);
162 link_report(p);
164 /* multi-ports: inform toe */
165 if ((speed > 0) && (adapter->params.nports > 1)) {
166 unsigned int sched_speed = 10;
167 switch (speed) {
168 case SPEED_1000:
169 sched_speed = 1000;
170 break;
171 case SPEED_100:
172 sched_speed = 100;
173 break;
174 case SPEED_10:
175 sched_speed = 10;
176 break;
178 t1_sched_update_parms(adapter->sge, port_id, 0, sched_speed);
183 static void link_start(struct port_info *p)
185 struct cmac *mac = p->mac;
187 mac->ops->reset(mac);
188 if (mac->ops->macaddress_set)
189 mac->ops->macaddress_set(mac, p->dev->dev_addr);
190 t1_set_rxmode(p->dev);
191 t1_link_start(p->phy, mac, &p->link_config);
192 mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
195 static void enable_hw_csum(struct adapter *adapter)
197 if (adapter->flags & TSO_CAPABLE)
198 t1_tp_set_ip_checksum_offload(adapter->tp, 1); /* for TSO only */
199 if (adapter->flags & UDP_CSUM_CAPABLE)
200 t1_tp_set_udp_checksum_offload(adapter->tp, 1);
201 t1_tp_set_tcp_checksum_offload(adapter->tp, 1);
205 * Things to do upon first use of a card.
206 * This must run with the rtnl lock held.
208 static int cxgb_up(struct adapter *adapter)
210 int err = 0;
212 if (!(adapter->flags & FULL_INIT_DONE)) {
213 err = t1_init_hw_modules(adapter);
214 if (err)
215 goto out_err;
217 enable_hw_csum(adapter);
218 adapter->flags |= FULL_INIT_DONE;
221 t1_interrupts_clear(adapter);
223 adapter->params.has_msi = !disable_msi && !pci_enable_msi(adapter->pdev);
224 err = request_irq(adapter->pdev->irq, t1_interrupt,
225 adapter->params.has_msi ? 0 : IRQF_SHARED,
226 adapter->name, adapter);
227 if (err) {
228 if (adapter->params.has_msi)
229 pci_disable_msi(adapter->pdev);
231 goto out_err;
234 t1_sge_start(adapter->sge);
235 t1_interrupts_enable(adapter);
236 out_err:
237 return err;
241 * Release resources when all the ports have been stopped.
243 static void cxgb_down(struct adapter *adapter)
245 t1_sge_stop(adapter->sge);
246 t1_interrupts_disable(adapter);
247 free_irq(adapter->pdev->irq, adapter);
248 if (adapter->params.has_msi)
249 pci_disable_msi(adapter->pdev);
252 static int cxgb_open(struct net_device *dev)
254 int err;
255 struct adapter *adapter = dev->priv;
256 int other_ports = adapter->open_device_map & PORT_MASK;
258 if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
259 return err;
261 __set_bit(dev->if_port, &adapter->open_device_map);
262 link_start(&adapter->port[dev->if_port]);
263 netif_start_queue(dev);
264 if (!other_ports && adapter->params.stats_update_period)
265 schedule_mac_stats_update(adapter,
266 adapter->params.stats_update_period);
267 return 0;
270 static int cxgb_close(struct net_device *dev)
272 struct adapter *adapter = dev->priv;
273 struct port_info *p = &adapter->port[dev->if_port];
274 struct cmac *mac = p->mac;
276 netif_stop_queue(dev);
277 mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
278 netif_carrier_off(dev);
280 clear_bit(dev->if_port, &adapter->open_device_map);
281 if (adapter->params.stats_update_period &&
282 !(adapter->open_device_map & PORT_MASK)) {
283 /* Stop statistics accumulation. */
284 smp_mb__after_clear_bit();
285 spin_lock(&adapter->work_lock); /* sync with update task */
286 spin_unlock(&adapter->work_lock);
287 cancel_mac_stats_update(adapter);
290 if (!adapter->open_device_map)
291 cxgb_down(adapter);
292 return 0;
295 static struct net_device_stats *t1_get_stats(struct net_device *dev)
297 struct adapter *adapter = dev->priv;
298 struct port_info *p = &adapter->port[dev->if_port];
299 struct net_device_stats *ns = &p->netstats;
300 const struct cmac_statistics *pstats;
302 /* Do a full update of the MAC stats */
303 pstats = p->mac->ops->statistics_update(p->mac,
304 MAC_STATS_UPDATE_FULL);
306 ns->tx_packets = pstats->TxUnicastFramesOK +
307 pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK;
309 ns->rx_packets = pstats->RxUnicastFramesOK +
310 pstats->RxMulticastFramesOK + pstats->RxBroadcastFramesOK;
312 ns->tx_bytes = pstats->TxOctetsOK;
313 ns->rx_bytes = pstats->RxOctetsOK;
315 ns->tx_errors = pstats->TxLateCollisions + pstats->TxLengthErrors +
316 pstats->TxUnderrun + pstats->TxFramesAbortedDueToXSCollisions;
317 ns->rx_errors = pstats->RxDataErrors + pstats->RxJabberErrors +
318 pstats->RxFCSErrors + pstats->RxAlignErrors +
319 pstats->RxSequenceErrors + pstats->RxFrameTooLongErrors +
320 pstats->RxSymbolErrors + pstats->RxRuntErrors;
322 ns->multicast = pstats->RxMulticastFramesOK;
323 ns->collisions = pstats->TxTotalCollisions;
325 /* detailed rx_errors */
326 ns->rx_length_errors = pstats->RxFrameTooLongErrors +
327 pstats->RxJabberErrors;
328 ns->rx_over_errors = 0;
329 ns->rx_crc_errors = pstats->RxFCSErrors;
330 ns->rx_frame_errors = pstats->RxAlignErrors;
331 ns->rx_fifo_errors = 0;
332 ns->rx_missed_errors = 0;
334 /* detailed tx_errors */
335 ns->tx_aborted_errors = pstats->TxFramesAbortedDueToXSCollisions;
336 ns->tx_carrier_errors = 0;
337 ns->tx_fifo_errors = pstats->TxUnderrun;
338 ns->tx_heartbeat_errors = 0;
339 ns->tx_window_errors = pstats->TxLateCollisions;
340 return ns;
343 static u32 get_msglevel(struct net_device *dev)
345 struct adapter *adapter = dev->priv;
347 return adapter->msg_enable;
350 static void set_msglevel(struct net_device *dev, u32 val)
352 struct adapter *adapter = dev->priv;
354 adapter->msg_enable = val;
357 static char stats_strings[][ETH_GSTRING_LEN] = {
358 "TxOctetsOK",
359 "TxOctetsBad",
360 "TxUnicastFramesOK",
361 "TxMulticastFramesOK",
362 "TxBroadcastFramesOK",
363 "TxPauseFrames",
364 "TxFramesWithDeferredXmissions",
365 "TxLateCollisions",
366 "TxTotalCollisions",
367 "TxFramesAbortedDueToXSCollisions",
368 "TxUnderrun",
369 "TxLengthErrors",
370 "TxInternalMACXmitError",
371 "TxFramesWithExcessiveDeferral",
372 "TxFCSErrors",
373 "TxJumboFramesOk",
374 "TxJumboOctetsOk",
376 "RxOctetsOK",
377 "RxOctetsBad",
378 "RxUnicastFramesOK",
379 "RxMulticastFramesOK",
380 "RxBroadcastFramesOK",
381 "RxPauseFrames",
382 "RxFCSErrors",
383 "RxAlignErrors",
384 "RxSymbolErrors",
385 "RxDataErrors",
386 "RxSequenceErrors",
387 "RxRuntErrors",
388 "RxJabberErrors",
389 "RxInternalMACRcvError",
390 "RxInRangeLengthErrors",
391 "RxOutOfRangeLengthField",
392 "RxFrameTooLongErrors",
393 "RxJumboFramesOk",
394 "RxJumboOctetsOk",
396 /* Port stats */
397 "RxCsumGood",
398 "TxCsumOffload",
399 "TxTso",
400 "RxVlan",
401 "TxVlan",
402 "TxNeedHeadroom",
404 /* Interrupt stats */
405 "rx drops",
406 "pure_rsps",
407 "unhandled irqs",
408 "respQ_empty",
409 "respQ_overflow",
410 "freelistQ_empty",
411 "pkt_too_big",
412 "pkt_mismatch",
413 "cmdQ_full0",
414 "cmdQ_full1",
416 "espi_DIP2ParityErr",
417 "espi_DIP4Err",
418 "espi_RxDrops",
419 "espi_TxDrops",
420 "espi_RxOvfl",
421 "espi_ParityErr"
#define T2_REGMAP_SIZE (3 * 1024)

/* ethtool: size of the register dump produced by get_regs(). */
static int get_regs_len(struct net_device *dev)
{
	return T2_REGMAP_SIZE;
}
431 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
433 struct adapter *adapter = dev->priv;
435 strcpy(info->driver, DRV_NAME);
436 strcpy(info->version, DRV_VERSION);
437 strcpy(info->fw_version, "N/A");
438 strcpy(info->bus_info, pci_name(adapter->pdev));
441 static int get_stats_count(struct net_device *dev)
443 return ARRAY_SIZE(stats_strings);
446 static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
448 if (stringset == ETH_SS_STATS)
449 memcpy(data, stats_strings, sizeof(stats_strings));
452 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
453 u64 *data)
455 struct adapter *adapter = dev->priv;
456 struct cmac *mac = adapter->port[dev->if_port].mac;
457 const struct cmac_statistics *s;
458 const struct sge_intr_counts *t;
459 struct sge_port_stats ss;
461 s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
462 t = t1_sge_get_intr_counts(adapter->sge);
463 t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss);
465 *data++ = s->TxOctetsOK;
466 *data++ = s->TxOctetsBad;
467 *data++ = s->TxUnicastFramesOK;
468 *data++ = s->TxMulticastFramesOK;
469 *data++ = s->TxBroadcastFramesOK;
470 *data++ = s->TxPauseFrames;
471 *data++ = s->TxFramesWithDeferredXmissions;
472 *data++ = s->TxLateCollisions;
473 *data++ = s->TxTotalCollisions;
474 *data++ = s->TxFramesAbortedDueToXSCollisions;
475 *data++ = s->TxUnderrun;
476 *data++ = s->TxLengthErrors;
477 *data++ = s->TxInternalMACXmitError;
478 *data++ = s->TxFramesWithExcessiveDeferral;
479 *data++ = s->TxFCSErrors;
480 *data++ = s->TxJumboFramesOK;
481 *data++ = s->TxJumboOctetsOK;
483 *data++ = s->RxOctetsOK;
484 *data++ = s->RxOctetsBad;
485 *data++ = s->RxUnicastFramesOK;
486 *data++ = s->RxMulticastFramesOK;
487 *data++ = s->RxBroadcastFramesOK;
488 *data++ = s->RxPauseFrames;
489 *data++ = s->RxFCSErrors;
490 *data++ = s->RxAlignErrors;
491 *data++ = s->RxSymbolErrors;
492 *data++ = s->RxDataErrors;
493 *data++ = s->RxSequenceErrors;
494 *data++ = s->RxRuntErrors;
495 *data++ = s->RxJabberErrors;
496 *data++ = s->RxInternalMACRcvError;
497 *data++ = s->RxInRangeLengthErrors;
498 *data++ = s->RxOutOfRangeLengthField;
499 *data++ = s->RxFrameTooLongErrors;
500 *data++ = s->RxJumboFramesOK;
501 *data++ = s->RxJumboOctetsOK;
503 *data++ = ss.rx_cso_good;
504 *data++ = ss.tx_cso;
505 *data++ = ss.tx_tso;
506 *data++ = ss.vlan_xtract;
507 *data++ = ss.vlan_insert;
508 *data++ = ss.tx_need_hdrroom;
510 *data++ = t->rx_drops;
511 *data++ = t->pure_rsps;
512 *data++ = t->unhandled_irqs;
513 *data++ = t->respQ_empty;
514 *data++ = t->respQ_overflow;
515 *data++ = t->freelistQ_empty;
516 *data++ = t->pkt_too_big;
517 *data++ = t->pkt_mismatch;
518 *data++ = t->cmdQ_full[0];
519 *data++ = t->cmdQ_full[1];
521 if (adapter->espi) {
522 const struct espi_intr_counts *e;
524 e = t1_espi_get_intr_counts(adapter->espi);
525 *data++ = e->DIP2_parity_err;
526 *data++ = e->DIP4_err;
527 *data++ = e->rx_drops;
528 *data++ = e->tx_drops;
529 *data++ = e->rx_ovflw;
530 *data++ = e->parity_err;
534 static inline void reg_block_dump(struct adapter *ap, void *buf,
535 unsigned int start, unsigned int end)
537 u32 *p = buf + start;
539 for ( ; start <= end; start += sizeof(u32))
540 *p++ = readl(ap->regs + start);
543 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
544 void *buf)
546 struct adapter *ap = dev->priv;
549 * Version scheme: bits 0..9: chip version, bits 10..15: chip revision
551 regs->version = 2;
553 memset(buf, 0, T2_REGMAP_SIZE);
554 reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER);
555 reg_block_dump(ap, buf, A_MC3_CFG, A_MC4_INT_CAUSE);
556 reg_block_dump(ap, buf, A_TPI_ADDR, A_TPI_PAR);
557 reg_block_dump(ap, buf, A_TP_IN_CONFIG, A_TP_TX_DROP_COUNT);
558 reg_block_dump(ap, buf, A_RAT_ROUTE_CONTROL, A_RAT_INTR_CAUSE);
559 reg_block_dump(ap, buf, A_CSPI_RX_AE_WM, A_CSPI_INTR_ENABLE);
560 reg_block_dump(ap, buf, A_ESPI_SCH_TOKEN0, A_ESPI_GOSTAT);
561 reg_block_dump(ap, buf, A_ULP_ULIMIT, A_ULP_PIO_CTRL);
562 reg_block_dump(ap, buf, A_PL_ENABLE, A_PL_CAUSE);
563 reg_block_dump(ap, buf, A_MC5_CONFIG, A_MC5_MASK_WRITE_CMD);
566 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
568 struct adapter *adapter = dev->priv;
569 struct port_info *p = &adapter->port[dev->if_port];
571 cmd->supported = p->link_config.supported;
572 cmd->advertising = p->link_config.advertising;
574 if (netif_carrier_ok(dev)) {
575 cmd->speed = p->link_config.speed;
576 cmd->duplex = p->link_config.duplex;
577 } else {
578 cmd->speed = -1;
579 cmd->duplex = -1;
582 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
583 cmd->phy_address = p->phy->addr;
584 cmd->transceiver = XCVR_EXTERNAL;
585 cmd->autoneg = p->link_config.autoneg;
586 cmd->maxtxpkt = 0;
587 cmd->maxrxpkt = 0;
588 return 0;
591 static int speed_duplex_to_caps(int speed, int duplex)
593 int cap = 0;
595 switch (speed) {
596 case SPEED_10:
597 if (duplex == DUPLEX_FULL)
598 cap = SUPPORTED_10baseT_Full;
599 else
600 cap = SUPPORTED_10baseT_Half;
601 break;
602 case SPEED_100:
603 if (duplex == DUPLEX_FULL)
604 cap = SUPPORTED_100baseT_Full;
605 else
606 cap = SUPPORTED_100baseT_Half;
607 break;
608 case SPEED_1000:
609 if (duplex == DUPLEX_FULL)
610 cap = SUPPORTED_1000baseT_Full;
611 else
612 cap = SUPPORTED_1000baseT_Half;
613 break;
614 case SPEED_10000:
615 if (duplex == DUPLEX_FULL)
616 cap = SUPPORTED_10000baseT_Full;
618 return cap;
621 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
622 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
623 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
624 ADVERTISED_10000baseT_Full)
626 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
628 struct adapter *adapter = dev->priv;
629 struct port_info *p = &adapter->port[dev->if_port];
630 struct link_config *lc = &p->link_config;
632 if (!(lc->supported & SUPPORTED_Autoneg))
633 return -EOPNOTSUPP; /* can't change speed/duplex */
635 if (cmd->autoneg == AUTONEG_DISABLE) {
636 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
638 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
639 return -EINVAL;
640 lc->requested_speed = cmd->speed;
641 lc->requested_duplex = cmd->duplex;
642 lc->advertising = 0;
643 } else {
644 cmd->advertising &= ADVERTISED_MASK;
645 if (cmd->advertising & (cmd->advertising - 1))
646 cmd->advertising = lc->supported;
647 cmd->advertising &= lc->supported;
648 if (!cmd->advertising)
649 return -EINVAL;
650 lc->requested_speed = SPEED_INVALID;
651 lc->requested_duplex = DUPLEX_INVALID;
652 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
654 lc->autoneg = cmd->autoneg;
655 if (netif_running(dev))
656 t1_link_start(p->phy, p->mac, lc);
657 return 0;
660 static void get_pauseparam(struct net_device *dev,
661 struct ethtool_pauseparam *epause)
663 struct adapter *adapter = dev->priv;
664 struct port_info *p = &adapter->port[dev->if_port];
666 epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
667 epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
668 epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
671 static int set_pauseparam(struct net_device *dev,
672 struct ethtool_pauseparam *epause)
674 struct adapter *adapter = dev->priv;
675 struct port_info *p = &adapter->port[dev->if_port];
676 struct link_config *lc = &p->link_config;
678 if (epause->autoneg == AUTONEG_DISABLE)
679 lc->requested_fc = 0;
680 else if (lc->supported & SUPPORTED_Autoneg)
681 lc->requested_fc = PAUSE_AUTONEG;
682 else
683 return -EINVAL;
685 if (epause->rx_pause)
686 lc->requested_fc |= PAUSE_RX;
687 if (epause->tx_pause)
688 lc->requested_fc |= PAUSE_TX;
689 if (lc->autoneg == AUTONEG_ENABLE) {
690 if (netif_running(dev))
691 t1_link_start(p->phy, p->mac, lc);
692 } else {
693 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
694 if (netif_running(dev))
695 p->mac->ops->set_speed_duplex_fc(p->mac, -1, -1,
696 lc->fc);
698 return 0;
701 static u32 get_rx_csum(struct net_device *dev)
703 struct adapter *adapter = dev->priv;
705 return (adapter->flags & RX_CSUM_ENABLED) != 0;
708 static int set_rx_csum(struct net_device *dev, u32 data)
710 struct adapter *adapter = dev->priv;
712 if (data)
713 adapter->flags |= RX_CSUM_ENABLED;
714 else
715 adapter->flags &= ~RX_CSUM_ENABLED;
716 return 0;
719 static int set_tso(struct net_device *dev, u32 value)
721 struct adapter *adapter = dev->priv;
723 if (!(adapter->flags & TSO_CAPABLE))
724 return value ? -EOPNOTSUPP : 0;
725 return ethtool_op_set_tso(dev, value);
728 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
730 struct adapter *adapter = dev->priv;
731 int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
733 e->rx_max_pending = MAX_RX_BUFFERS;
734 e->rx_mini_max_pending = 0;
735 e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
736 e->tx_max_pending = MAX_CMDQ_ENTRIES;
738 e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl];
739 e->rx_mini_pending = 0;
740 e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl];
741 e->tx_pending = adapter->params.sge.cmdQ_size[0];
744 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
746 struct adapter *adapter = dev->priv;
747 int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
749 if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending ||
750 e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
751 e->tx_pending > MAX_CMDQ_ENTRIES ||
752 e->rx_pending < MIN_FL_ENTRIES ||
753 e->rx_jumbo_pending < MIN_FL_ENTRIES ||
754 e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1))
755 return -EINVAL;
757 if (adapter->flags & FULL_INIT_DONE)
758 return -EBUSY;
760 adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
761 adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
762 adapter->params.sge.cmdQ_size[0] = e->tx_pending;
763 adapter->params.sge.cmdQ_size[1] = e->tx_pending > MAX_CMDQ1_ENTRIES ?
764 MAX_CMDQ1_ENTRIES : e->tx_pending;
765 return 0;
768 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
770 struct adapter *adapter = dev->priv;
772 adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
773 adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
774 adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
775 t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
776 return 0;
779 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
781 struct adapter *adapter = dev->priv;
783 c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs;
784 c->rate_sample_interval = adapter->params.sge.sample_interval_usecs;
785 c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable;
786 return 0;
789 static int get_eeprom_len(struct net_device *dev)
791 struct adapter *adapter = dev->priv;
793 return t1_is_asic(adapter) ? EEPROM_SIZE : 0;
796 #define EEPROM_MAGIC(ap) \
797 (PCI_VENDOR_ID_CHELSIO | ((ap)->params.chip_version << 16))
799 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
800 u8 *data)
802 int i;
803 u8 buf[EEPROM_SIZE] __attribute__((aligned(4)));
804 struct adapter *adapter = dev->priv;
806 e->magic = EEPROM_MAGIC(adapter);
807 for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32))
808 t1_seeprom_read(adapter, i, (u32 *)&buf[i]);
809 memcpy(data, buf + e->offset, e->len);
810 return 0;
813 static const struct ethtool_ops t1_ethtool_ops = {
814 .get_settings = get_settings,
815 .set_settings = set_settings,
816 .get_drvinfo = get_drvinfo,
817 .get_msglevel = get_msglevel,
818 .set_msglevel = set_msglevel,
819 .get_ringparam = get_sge_param,
820 .set_ringparam = set_sge_param,
821 .get_coalesce = get_coalesce,
822 .set_coalesce = set_coalesce,
823 .get_eeprom_len = get_eeprom_len,
824 .get_eeprom = get_eeprom,
825 .get_pauseparam = get_pauseparam,
826 .set_pauseparam = set_pauseparam,
827 .get_rx_csum = get_rx_csum,
828 .set_rx_csum = set_rx_csum,
829 .get_tx_csum = ethtool_op_get_tx_csum,
830 .set_tx_csum = ethtool_op_set_tx_csum,
831 .get_sg = ethtool_op_get_sg,
832 .set_sg = ethtool_op_set_sg,
833 .get_link = ethtool_op_get_link,
834 .get_strings = get_strings,
835 .get_stats_count = get_stats_count,
836 .get_ethtool_stats = get_stats,
837 .get_regs_len = get_regs_len,
838 .get_regs = get_regs,
839 .get_tso = ethtool_op_get_tso,
840 .set_tso = set_tso,
843 static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
845 struct adapter *adapter = dev->priv;
846 struct mii_ioctl_data *data = if_mii(req);
848 switch (cmd) {
849 case SIOCGMIIPHY:
850 data->phy_id = adapter->port[dev->if_port].phy->addr;
851 /* FALLTHRU */
852 case SIOCGMIIREG: {
853 struct cphy *phy = adapter->port[dev->if_port].phy;
854 u32 val;
856 if (!phy->mdio_read)
857 return -EOPNOTSUPP;
858 phy->mdio_read(adapter, data->phy_id, 0, data->reg_num & 0x1f,
859 &val);
860 data->val_out = val;
861 break;
863 case SIOCSMIIREG: {
864 struct cphy *phy = adapter->port[dev->if_port].phy;
866 if (!capable(CAP_NET_ADMIN))
867 return -EPERM;
868 if (!phy->mdio_write)
869 return -EOPNOTSUPP;
870 phy->mdio_write(adapter, data->phy_id, 0, data->reg_num & 0x1f,
871 data->val_in);
872 break;
875 default:
876 return -EOPNOTSUPP;
878 return 0;
881 static int t1_change_mtu(struct net_device *dev, int new_mtu)
883 int ret;
884 struct adapter *adapter = dev->priv;
885 struct cmac *mac = adapter->port[dev->if_port].mac;
887 if (!mac->ops->set_mtu)
888 return -EOPNOTSUPP;
889 if (new_mtu < 68)
890 return -EINVAL;
891 if ((ret = mac->ops->set_mtu(mac, new_mtu)))
892 return ret;
893 dev->mtu = new_mtu;
894 return 0;
897 static int t1_set_mac_addr(struct net_device *dev, void *p)
899 struct adapter *adapter = dev->priv;
900 struct cmac *mac = adapter->port[dev->if_port].mac;
901 struct sockaddr *addr = p;
903 if (!mac->ops->macaddress_set)
904 return -EOPNOTSUPP;
906 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
907 mac->ops->macaddress_set(mac, dev->dev_addr);
908 return 0;
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
/*
 * VLAN group registration: record the group and toggle hardware VLAN
 * acceleration accordingly, under async_lock to serialize with the IRQ
 * path.
 */
static void vlan_rx_register(struct net_device *dev,
			     struct vlan_group *grp)
{
	struct adapter *adapter = dev->priv;

	spin_lock_irq(&adapter->async_lock);
	adapter->vlan_grp = grp;
	t1_set_vlan_accel(adapter, grp != NULL);
	spin_unlock_irq(&adapter->async_lock);
}
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: invoke the interrupt handler with local IRQs disabled. */
static void t1_netpoll(struct net_device *dev)
{
	unsigned long flags;
	struct adapter *adapter = dev->priv;

	local_irq_save(flags);
	t1_interrupt(adapter->pdev->irq, adapter);
	local_irq_restore(flags);
}
#endif
937 * Periodic accumulation of MAC statistics. This is used only if the MAC
938 * does not have any other way to prevent stats counter overflow.
940 static void mac_stats_task(struct work_struct *work)
942 int i;
943 struct adapter *adapter =
944 container_of(work, struct adapter, stats_update_task.work);
946 for_each_port(adapter, i) {
947 struct port_info *p = &adapter->port[i];
949 if (netif_running(p->dev))
950 p->mac->ops->statistics_update(p->mac,
951 MAC_STATS_UPDATE_FAST);
954 /* Schedule the next statistics update if any port is active. */
955 spin_lock(&adapter->work_lock);
956 if (adapter->open_device_map & PORT_MASK)
957 schedule_mac_stats_update(adapter,
958 adapter->params.stats_update_period);
959 spin_unlock(&adapter->work_lock);
963 * Processes elmer0 external interrupts in process context.
965 static void ext_intr_task(struct work_struct *work)
967 struct adapter *adapter =
968 container_of(work, struct adapter, ext_intr_handler_task);
970 t1_elmer0_ext_intr_handler(adapter);
972 /* Now reenable external interrupts */
973 spin_lock_irq(&adapter->async_lock);
974 adapter->slow_intr_mask |= F_PL_INTR_EXT;
975 writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
976 writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
977 adapter->regs + A_PL_ENABLE);
978 spin_unlock_irq(&adapter->async_lock);
982 * Interrupt-context handler for elmer0 external interrupts.
984 void t1_elmer0_ext_intr(struct adapter *adapter)
987 * Schedule a task to handle external interrupts as we require
988 * a process context. We disable EXT interrupts in the interim
989 * and let the task reenable them when it's done.
991 adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
992 writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
993 adapter->regs + A_PL_ENABLE);
994 schedule_work(&adapter->ext_intr_handler_task);
997 void t1_fatal_err(struct adapter *adapter)
999 if (adapter->flags & FULL_INIT_DONE) {
1000 t1_sge_stop(adapter->sge);
1001 t1_interrupts_disable(adapter);
1003 CH_ALERT("%s: encountered fatal error, operation suspended\n",
1004 adapter->name);
1007 static int __devinit init_one(struct pci_dev *pdev,
1008 const struct pci_device_id *ent)
1010 static int version_printed;
1012 int i, err, pci_using_dac = 0;
1013 unsigned long mmio_start, mmio_len;
1014 const struct board_info *bi;
1015 struct adapter *adapter = NULL;
1016 struct port_info *pi;
1018 if (!version_printed) {
1019 printk(KERN_INFO "%s - version %s\n", DRV_DESCRIPTION,
1020 DRV_VERSION);
1021 ++version_printed;
1024 err = pci_enable_device(pdev);
1025 if (err)
1026 return err;
1028 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1029 CH_ERR("%s: cannot find PCI device memory base address\n",
1030 pci_name(pdev));
1031 err = -ENODEV;
1032 goto out_disable_pdev;
1035 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
1036 pci_using_dac = 1;
1038 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
1039 CH_ERR("%s: unable to obtain 64-bit DMA for"
1040 "consistent allocations\n", pci_name(pdev));
1041 err = -ENODEV;
1042 goto out_disable_pdev;
1045 } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
1046 CH_ERR("%s: no usable DMA configuration\n", pci_name(pdev));
1047 goto out_disable_pdev;
1050 err = pci_request_regions(pdev, DRV_NAME);
1051 if (err) {
1052 CH_ERR("%s: cannot obtain PCI resources\n", pci_name(pdev));
1053 goto out_disable_pdev;
1056 pci_set_master(pdev);
1058 mmio_start = pci_resource_start(pdev, 0);
1059 mmio_len = pci_resource_len(pdev, 0);
1060 bi = t1_get_board_info(ent->driver_data);
1062 for (i = 0; i < bi->port_number; ++i) {
1063 struct net_device *netdev;
1065 netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter));
1066 if (!netdev) {
1067 err = -ENOMEM;
1068 goto out_free_dev;
1071 SET_MODULE_OWNER(netdev);
1072 SET_NETDEV_DEV(netdev, &pdev->dev);
1074 if (!adapter) {
1075 adapter = netdev->priv;
1076 adapter->pdev = pdev;
1077 adapter->port[0].dev = netdev; /* so we don't leak it */
1079 adapter->regs = ioremap(mmio_start, mmio_len);
1080 if (!adapter->regs) {
1081 CH_ERR("%s: cannot map device registers\n",
1082 pci_name(pdev));
1083 err = -ENOMEM;
1084 goto out_free_dev;
1087 if (t1_get_board_rev(adapter, bi, &adapter->params)) {
1088 err = -ENODEV; /* Can't handle this chip rev */
1089 goto out_free_dev;
1092 adapter->name = pci_name(pdev);
1093 adapter->msg_enable = dflt_msg_enable;
1094 adapter->mmio_len = mmio_len;
1096 spin_lock_init(&adapter->tpi_lock);
1097 spin_lock_init(&adapter->work_lock);
1098 spin_lock_init(&adapter->async_lock);
1099 spin_lock_init(&adapter->mac_lock);
1101 INIT_WORK(&adapter->ext_intr_handler_task,
1102 ext_intr_task);
1103 INIT_DELAYED_WORK(&adapter->stats_update_task,
1104 mac_stats_task);
1106 pci_set_drvdata(pdev, netdev);
1109 pi = &adapter->port[i];
1110 pi->dev = netdev;
1111 netif_carrier_off(netdev);
1112 netdev->irq = pdev->irq;
1113 netdev->if_port = i;
1114 netdev->mem_start = mmio_start;
1115 netdev->mem_end = mmio_start + mmio_len - 1;
1116 netdev->priv = adapter;
1117 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
1118 netdev->features |= NETIF_F_LLTX;
1120 adapter->flags |= RX_CSUM_ENABLED | TCP_CSUM_CAPABLE;
1121 if (pci_using_dac)
1122 netdev->features |= NETIF_F_HIGHDMA;
1123 if (vlan_tso_capable(adapter)) {
1124 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
1125 adapter->flags |= VLAN_ACCEL_CAPABLE;
1126 netdev->features |=
1127 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1128 netdev->vlan_rx_register = vlan_rx_register;
1129 #endif
1131 /* T204: disable TSO */
1132 if (!(is_T2(adapter)) || bi->port_number != 4) {
1133 adapter->flags |= TSO_CAPABLE;
1134 netdev->features |= NETIF_F_TSO;
1138 netdev->open = cxgb_open;
1139 netdev->stop = cxgb_close;
1140 netdev->hard_start_xmit = t1_start_xmit;
1141 netdev->hard_header_len += (adapter->flags & TSO_CAPABLE) ?
1142 sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt);
1143 netdev->get_stats = t1_get_stats;
1144 netdev->set_multicast_list = t1_set_rxmode;
1145 netdev->do_ioctl = t1_ioctl;
1146 netdev->change_mtu = t1_change_mtu;
1147 netdev->set_mac_address = t1_set_mac_addr;
1148 #ifdef CONFIG_NET_POLL_CONTROLLER
1149 netdev->poll_controller = t1_netpoll;
1150 #endif
1151 #ifdef CONFIG_CHELSIO_T1_NAPI
1152 netdev->weight = 64;
1153 netdev->poll = t1_poll;
1154 #endif
1156 SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
1159 if (t1_init_sw_modules(adapter, bi) < 0) {
1160 err = -ENODEV;
1161 goto out_free_dev;
1165 * The card is now ready to go. If any errors occur during device
1166 * registration we do not fail the whole card but rather proceed only
1167 * with the ports we manage to register successfully. However we must
1168 * register at least one net device.
1170 for (i = 0; i < bi->port_number; ++i) {
1171 err = register_netdev(adapter->port[i].dev);
1172 if (err)
1173 CH_WARN("%s: cannot register net device %s, skipping\n",
1174 pci_name(pdev), adapter->port[i].dev->name);
1175 else {
1177 * Change the name we use for messages to the name of
1178 * the first successfully registered interface.
1180 if (!adapter->registered_device_map)
1181 adapter->name = adapter->port[i].dev->name;
1183 __set_bit(i, &adapter->registered_device_map);
1186 if (!adapter->registered_device_map) {
1187 CH_ERR("%s: could not register any net devices\n",
1188 pci_name(pdev));
1189 goto out_release_adapter_res;
1192 printk(KERN_INFO "%s: %s (rev %d), %s %dMHz/%d-bit\n", adapter->name,
1193 bi->desc, adapter->params.chip_revision,
1194 adapter->params.pci.is_pcix ? "PCIX" : "PCI",
1195 adapter->params.pci.speed, adapter->params.pci.width);
1198 * Set the T1B ASIC and memory clocks.
1200 if (t1powersave)
1201 adapter->t1powersave = LCLOCK; /* HW default is powersave mode. */
1202 else
1203 adapter->t1powersave = HCLOCK;
1204 if (t1_is_T1B(adapter))
1205 t1_clock(adapter, t1powersave);
1207 return 0;
1209 out_release_adapter_res:
1210 t1_free_sw_modules(adapter);
1211 out_free_dev:
1212 if (adapter) {
1213 if (adapter->regs)
1214 iounmap(adapter->regs);
1215 for (i = bi->port_number - 1; i >= 0; --i)
1216 if (adapter->port[i].dev)
1217 free_netdev(adapter->port[i].dev);
1219 pci_release_regions(pdev);
1220 out_disable_pdev:
1221 pci_disable_device(pdev);
1222 pci_set_drvdata(pdev, NULL);
1223 return err;
1226 static void bit_bang(struct adapter *adapter, int bitdata, int nbits)
1228 int data;
1229 int i;
1230 u32 val;
1232 enum {
1233 S_CLOCK = 1 << 3,
1234 S_DATA = 1 << 4
1237 for (i = (nbits - 1); i > -1; i--) {
1239 udelay(50);
1241 data = ((bitdata >> i) & 0x1);
1242 __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1244 if (data)
1245 val |= S_DATA;
1246 else
1247 val &= ~S_DATA;
1249 udelay(50);
1251 /* Set SCLOCK low */
1252 val &= ~S_CLOCK;
1253 __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1255 udelay(50);
1257 /* Write SCLOCK high */
1258 val |= S_CLOCK;
1259 __t1_tpi_write(adapter, A_ELMER0_GPO, val);
/*
 * Re-clock the T1B ASIC between full-speed and powersave mode by
 * serially reprogramming the core and memory clock synthesizers
 * through the ELMER0 GPO pins.
 *
 * @adapter: adapter whose clocks are to be reprogrammed
 * @mode:    bit 0 selects the target mode (HCLOCK/LCLOCK); bit 1 set
 *           means "query only" — return without touching the hardware
 *
 * Returns 0 on success, -ENODEV for chips other than T1B (which cannot
 * be re-clocked) and -EALREADY if the ASIC already runs in the
 * requested mode.
 */
static int t1_clock(struct adapter *adapter, int mode)
{
	u32 val;
	int M_CORE_VAL;
	int M_MEM_VAL;

	/*
	 * Synthesizer programming parameters: M/N/T word values and their
	 * bit widths, plus the ELMER0 GPO control bits used to drive the
	 * serial interface.  (Values presumably follow the synthesizer
	 * datasheet — TODO confirm against the Chelsio/ELMER0 docs.)
	 */
	enum {
		M_CORE_BITS = 9,
		T_CORE_VAL = 0,
		T_CORE_BITS = 2,
		N_CORE_VAL = 0,
		N_CORE_BITS = 2,
		M_MEM_BITS = 9,
		T_MEM_VAL = 0,
		T_MEM_BITS = 2,
		N_MEM_VAL = 0,
		N_MEM_BITS = 2,
		NP_LOAD = 1 << 17,
		S_LOAD_MEM = 1 << 5,
		S_LOAD_CORE = 1 << 6,
		S_CLOCK = 1 << 3
	};

	if (!t1_is_T1B(adapter))
		return -ENODEV;	/* Can't re-clock this chip. */

	if (mode & 2)
		return 0;	/* show current mode. */

	if ((adapter->t1powersave & 1) == (mode & 1))
		return -EALREADY;	/* ASIC already running in mode. */

	/* Only the M divider differs between the two modes. */
	if ((mode & 1) == HCLOCK) {
		M_CORE_VAL = 0x14;
		M_MEM_VAL = 0x18;
		adapter->t1powersave = HCLOCK;	/* overclock */
	} else {
		M_CORE_VAL = 0xe;
		M_MEM_VAL = 0x10;
		adapter->t1powersave = LCLOCK;	/* underclock */
	}

	/* Don't interrupt this serial stream! */
	spin_lock(&adapter->tpi_lock);

	/* Initialize for ASIC core */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= NP_LOAD;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_CORE;
	val &= ~S_CLOCK;
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Serial program the ASIC clock synthesizer */
	bit_bang(adapter, T_CORE_VAL, T_CORE_BITS);
	bit_bang(adapter, N_CORE_VAL, N_CORE_BITS);
	bit_bang(adapter, M_CORE_VAL, M_CORE_BITS);
	udelay(50);

	/* Finish ASIC core: pulse S_LOAD_CORE to latch the new values. */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= S_LOAD_CORE;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_CORE;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Initialize for memory */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= NP_LOAD;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_MEM;
	val &= ~S_CLOCK;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Serial program the memory clock synthesizer */
	bit_bang(adapter, T_MEM_VAL, T_MEM_BITS);
	bit_bang(adapter, N_MEM_VAL, N_MEM_BITS);
	bit_bang(adapter, M_MEM_VAL, M_MEM_BITS);
	udelay(50);

	/* Finish memory: pulse S_LOAD_MEM to latch the new values. */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= S_LOAD_MEM;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_MEM;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);

	spin_unlock(&adapter->tpi_lock);

	return 0;
}
/*
 * Software-reset the chip via its PCI power-management CSR: write power
 * state 3 then 0.  NOTE(review): presumably this cycles the device
 * through D3hot back to D0, which resets the ASIC — confirm against the
 * T1 hardware documentation.
 */
static inline void t1_sw_reset(struct pci_dev *pdev)
{
	pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3);
	pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0);
}
/*
 * PCI remove callback: tear down everything init_one set up, in the
 * reverse order.  Only ports that made it into registered_device_map
 * during probe are unregistered; all allocated netdevs are freed.
 */
static void __devexit remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct adapter *adapter = dev->priv;
	int i;

	for_each_port(adapter, i) {
		if (test_bit(i, &adapter->registered_device_map))
			unregister_netdev(adapter->port[i].dev);
	}

	t1_free_sw_modules(adapter);
	iounmap(adapter->regs);

	/*
	 * for_each_port left i == number of ports, so this walks all
	 * ports backwards.  The last netdev freed also frees the
	 * adapter itself (allocated via alloc_etherdev in init_one),
	 * hence no further adapter accesses below.
	 */
	while (--i >= 0) {
		if (adapter->port[i].dev)
			free_netdev(adapter->port[i].dev);
	}

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	t1_sw_reset(pdev);
}
/* PCI driver glue: binds the IDs in t1_pci_tbl to probe/remove. */
static struct pci_driver driver = {
	.name = DRV_NAME,
	.id_table = t1_pci_tbl,
	.probe = init_one,
	.remove = __devexit_p(remove_one),
};
/* Module entry point: register the PCI driver. */
static int __init t1_init_module(void)
{
	return pci_register_driver(&driver);
}
/* Module exit point: unregister the PCI driver (runs remove_one). */
static void __exit t1_cleanup_module(void)
{
	pci_unregister_driver(&driver);
}

module_init(t1_init_module);
module_exit(t1_cleanup_module);