[PATCH] chelsio: remove unused mutex
[linux-2.6/kmemtrace.git] / drivers / net / chelsio / cxgb2.c
blob42ad9cfd670a3adaf589002daf67bd18a3044098
1 /*****************************************************************************
2 * *
3 * File: cxgb2.c *
4 * $Revision: 1.25 $ *
5 * $Date: 2005/06/22 00:43:25 $ *
6 * Description: *
7 * Chelsio 10Gb Ethernet Driver. *
8 * *
9 * This program is free software; you can redistribute it and/or modify *
10 * it under the terms of the GNU General Public License, version 2, as *
11 * published by the Free Software Foundation. *
12 * *
13 * You should have received a copy of the GNU General Public License along *
14 * with this program; if not, write to the Free Software Foundation, Inc., *
15 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
16 * *
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
20 * *
21 * http://www.chelsio.com *
22 * *
23 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
24 * All rights reserved. *
25 * *
26 * Maintainers: maintainers@chelsio.com *
27 * *
28 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
29 * Tina Yang <tainay@chelsio.com> *
30 * Felix Marti <felix@chelsio.com> *
31 * Scott Bardone <sbardone@chelsio.com> *
32 * Kurt Ottaway <kottaway@chelsio.com> *
33 * Frank DiMambro <frank@chelsio.com> *
34 * *
35 * History: *
36 * *
37 ****************************************************************************/
39 #include "common.h"
40 #include <linux/module.h>
41 #include <linux/init.h>
42 #include <linux/pci.h>
43 #include <linux/netdevice.h>
44 #include <linux/etherdevice.h>
45 #include <linux/if_vlan.h>
46 #include <linux/mii.h>
47 #include <linux/sockios.h>
48 #include <linux/dma-mapping.h>
49 #include <asm/uaccess.h>
51 #include "cpl5_cmd.h"
52 #include "regs.h"
53 #include "gmac.h"
54 #include "cphy.h"
55 #include "sge.h"
56 #include "espi.h"
58 #include <linux/workqueue.h>
60 static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
62 schedule_delayed_work(&ap->stats_update_task, secs * HZ);
65 static inline void cancel_mac_stats_update(struct adapter *ap)
67 cancel_delayed_work(&ap->stats_update_task);
/* Upper/lower bounds on the SGE ring sizes a user may configure via ethtool. */
#define MAX_CMDQ_ENTRIES 16384
#define MAX_CMDQ1_ENTRIES 1024
#define MAX_RX_BUFFERS 16384
#define MAX_RX_JUMBO_BUFFERS 16384
#define MAX_TX_BUFFERS_HIGH	16384U
#define MAX_TX_BUFFERS_LOW	1536U
#define MIN_FL_ENTRIES 32

/* Bit mask covering one bit per possible port in open_device_map. */
#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

/*
 * The EEPROM is actually bigger but only the first few bytes are used so we
 * only report those.
 */
#define EEPROM_SIZE 32
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("GPL");

/* Default netif message-enable bitmap; overridable at module load time. */
static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 message enable bitmap");

/*
 * PCI bus speed strings indexed by detected speed.
 * NOTE(review): not referenced anywhere in this file — the probe printk
 * formats the numeric speed directly; looks like leftover from an older
 * message format.  Confirm before removing.
 */
static const char pci_speed[][4] = {
	"33", "66", "100", "133"
};
105 * Setup MAC to receive the types of packets we want.
107 static void t1_set_rxmode(struct net_device *dev)
109 struct adapter *adapter = dev->priv;
110 struct cmac *mac = adapter->port[dev->if_port].mac;
111 struct t1_rx_mode rm;
113 rm.dev = dev;
114 rm.idx = 0;
115 rm.list = dev->mc_list;
116 mac->ops->set_rx_mode(mac, &rm);
119 static void link_report(struct port_info *p)
121 if (!netif_carrier_ok(p->dev))
122 printk(KERN_INFO "%s: link down\n", p->dev->name);
123 else {
124 const char *s = "10Mbps";
126 switch (p->link_config.speed) {
127 case SPEED_10000: s = "10Gbps"; break;
128 case SPEED_1000: s = "1000Mbps"; break;
129 case SPEED_100: s = "100Mbps"; break;
132 printk(KERN_INFO "%s: link up, %s, %s-duplex\n",
133 p->dev->name, s,
134 p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
138 void t1_link_changed(struct adapter *adapter, int port_id, int link_stat,
139 int speed, int duplex, int pause)
141 struct port_info *p = &adapter->port[port_id];
143 if (link_stat != netif_carrier_ok(p->dev)) {
144 if (link_stat)
145 netif_carrier_on(p->dev);
146 else
147 netif_carrier_off(p->dev);
148 link_report(p);
153 static void link_start(struct port_info *p)
155 struct cmac *mac = p->mac;
157 mac->ops->reset(mac);
158 if (mac->ops->macaddress_set)
159 mac->ops->macaddress_set(mac, p->dev->dev_addr);
160 t1_set_rxmode(p->dev);
161 t1_link_start(p->phy, mac, &p->link_config);
162 mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
165 static void enable_hw_csum(struct adapter *adapter)
167 if (adapter->flags & TSO_CAPABLE)
168 t1_tp_set_ip_checksum_offload(adapter, 1); /* for TSO only */
169 t1_tp_set_tcp_checksum_offload(adapter, 1);
173 * Things to do upon first use of a card.
174 * This must run with the rtnl lock held.
176 static int cxgb_up(struct adapter *adapter)
178 int err = 0;
180 if (!(adapter->flags & FULL_INIT_DONE)) {
181 err = t1_init_hw_modules(adapter);
182 if (err)
183 goto out_err;
185 enable_hw_csum(adapter);
186 adapter->flags |= FULL_INIT_DONE;
189 t1_interrupts_clear(adapter);
190 if ((err = request_irq(adapter->pdev->irq,
191 t1_select_intr_handler(adapter), IRQF_SHARED,
192 adapter->name, adapter))) {
193 goto out_err;
195 t1_sge_start(adapter->sge);
196 t1_interrupts_enable(adapter);
197 out_err:
198 return err;
202 * Release resources when all the ports have been stopped.
204 static void cxgb_down(struct adapter *adapter)
206 t1_sge_stop(adapter->sge);
207 t1_interrupts_disable(adapter);
208 free_irq(adapter->pdev->irq, adapter);
211 static int cxgb_open(struct net_device *dev)
213 int err;
214 struct adapter *adapter = dev->priv;
215 int other_ports = adapter->open_device_map & PORT_MASK;
217 if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
218 return err;
220 __set_bit(dev->if_port, &adapter->open_device_map);
221 link_start(&adapter->port[dev->if_port]);
222 netif_start_queue(dev);
223 if (!other_ports && adapter->params.stats_update_period)
224 schedule_mac_stats_update(adapter,
225 adapter->params.stats_update_period);
226 return 0;
229 static int cxgb_close(struct net_device *dev)
231 struct adapter *adapter = dev->priv;
232 struct port_info *p = &adapter->port[dev->if_port];
233 struct cmac *mac = p->mac;
235 netif_stop_queue(dev);
236 mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
237 netif_carrier_off(dev);
239 clear_bit(dev->if_port, &adapter->open_device_map);
240 if (adapter->params.stats_update_period &&
241 !(adapter->open_device_map & PORT_MASK)) {
242 /* Stop statistics accumulation. */
243 smp_mb__after_clear_bit();
244 spin_lock(&adapter->work_lock); /* sync with update task */
245 spin_unlock(&adapter->work_lock);
246 cancel_mac_stats_update(adapter);
249 if (!adapter->open_device_map)
250 cxgb_down(adapter);
251 return 0;
254 static struct net_device_stats *t1_get_stats(struct net_device *dev)
256 struct adapter *adapter = dev->priv;
257 struct port_info *p = &adapter->port[dev->if_port];
258 struct net_device_stats *ns = &p->netstats;
259 const struct cmac_statistics *pstats;
261 /* Do a full update of the MAC stats */
262 pstats = p->mac->ops->statistics_update(p->mac,
263 MAC_STATS_UPDATE_FULL);
265 ns->tx_packets = pstats->TxUnicastFramesOK +
266 pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK;
268 ns->rx_packets = pstats->RxUnicastFramesOK +
269 pstats->RxMulticastFramesOK + pstats->RxBroadcastFramesOK;
271 ns->tx_bytes = pstats->TxOctetsOK;
272 ns->rx_bytes = pstats->RxOctetsOK;
274 ns->tx_errors = pstats->TxLateCollisions + pstats->TxLengthErrors +
275 pstats->TxUnderrun + pstats->TxFramesAbortedDueToXSCollisions;
276 ns->rx_errors = pstats->RxDataErrors + pstats->RxJabberErrors +
277 pstats->RxFCSErrors + pstats->RxAlignErrors +
278 pstats->RxSequenceErrors + pstats->RxFrameTooLongErrors +
279 pstats->RxSymbolErrors + pstats->RxRuntErrors;
281 ns->multicast = pstats->RxMulticastFramesOK;
282 ns->collisions = pstats->TxTotalCollisions;
284 /* detailed rx_errors */
285 ns->rx_length_errors = pstats->RxFrameTooLongErrors +
286 pstats->RxJabberErrors;
287 ns->rx_over_errors = 0;
288 ns->rx_crc_errors = pstats->RxFCSErrors;
289 ns->rx_frame_errors = pstats->RxAlignErrors;
290 ns->rx_fifo_errors = 0;
291 ns->rx_missed_errors = 0;
293 /* detailed tx_errors */
294 ns->tx_aborted_errors = pstats->TxFramesAbortedDueToXSCollisions;
295 ns->tx_carrier_errors = 0;
296 ns->tx_fifo_errors = pstats->TxUnderrun;
297 ns->tx_heartbeat_errors = 0;
298 ns->tx_window_errors = pstats->TxLateCollisions;
299 return ns;
302 static u32 get_msglevel(struct net_device *dev)
304 struct adapter *adapter = dev->priv;
306 return adapter->msg_enable;
309 static void set_msglevel(struct net_device *dev, u32 val)
311 struct adapter *adapter = dev->priv;
313 adapter->msg_enable = val;
316 static char stats_strings[][ETH_GSTRING_LEN] = {
317 "TxOctetsOK",
318 "TxOctetsBad",
319 "TxUnicastFramesOK",
320 "TxMulticastFramesOK",
321 "TxBroadcastFramesOK",
322 "TxPauseFrames",
323 "TxFramesWithDeferredXmissions",
324 "TxLateCollisions",
325 "TxTotalCollisions",
326 "TxFramesAbortedDueToXSCollisions",
327 "TxUnderrun",
328 "TxLengthErrors",
329 "TxInternalMACXmitError",
330 "TxFramesWithExcessiveDeferral",
331 "TxFCSErrors",
333 "RxOctetsOK",
334 "RxOctetsBad",
335 "RxUnicastFramesOK",
336 "RxMulticastFramesOK",
337 "RxBroadcastFramesOK",
338 "RxPauseFrames",
339 "RxFCSErrors",
340 "RxAlignErrors",
341 "RxSymbolErrors",
342 "RxDataErrors",
343 "RxSequenceErrors",
344 "RxRuntErrors",
345 "RxJabberErrors",
346 "RxInternalMACRcvError",
347 "RxInRangeLengthErrors",
348 "RxOutOfRangeLengthField",
349 "RxFrameTooLongErrors",
351 "TSO",
352 "VLANextractions",
353 "VLANinsertions",
354 "RxCsumGood",
355 "TxCsumOffload",
356 "RxDrops"
358 "respQ_empty",
359 "respQ_overflow",
360 "freelistQ_empty",
361 "pkt_too_big",
362 "pkt_mismatch",
363 "cmdQ_full0",
364 "cmdQ_full1",
365 "tx_ipfrags",
366 "tx_reg_pkts",
367 "tx_lso_pkts",
368 "tx_do_cksum",
370 "espi_DIP2ParityErr",
371 "espi_DIP4Err",
372 "espi_RxDrops",
373 "espi_TxDrops",
374 "espi_RxOvfl",
375 "espi_ParityErr"
/* Size in bytes of the register snapshot returned by get_regs(). */
#define T2_REGMAP_SIZE (3 * 1024)

/* ethtool: report the register-dump buffer size. */
static int get_regs_len(struct net_device *dev)
{
	return T2_REGMAP_SIZE;
}
385 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
387 struct adapter *adapter = dev->priv;
389 strcpy(info->driver, DRV_NAME);
390 strcpy(info->version, DRV_VERSION);
391 strcpy(info->fw_version, "N/A");
392 strcpy(info->bus_info, pci_name(adapter->pdev));
395 static int get_stats_count(struct net_device *dev)
397 return ARRAY_SIZE(stats_strings);
400 static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
402 if (stringset == ETH_SS_STATS)
403 memcpy(data, stats_strings, sizeof(stats_strings));
406 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
407 u64 *data)
409 struct adapter *adapter = dev->priv;
410 struct cmac *mac = adapter->port[dev->if_port].mac;
411 const struct cmac_statistics *s;
412 const struct sge_port_stats *ss;
413 const struct sge_intr_counts *t;
415 s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
416 ss = t1_sge_get_port_stats(adapter->sge, dev->if_port);
417 t = t1_sge_get_intr_counts(adapter->sge);
419 *data++ = s->TxOctetsOK;
420 *data++ = s->TxOctetsBad;
421 *data++ = s->TxUnicastFramesOK;
422 *data++ = s->TxMulticastFramesOK;
423 *data++ = s->TxBroadcastFramesOK;
424 *data++ = s->TxPauseFrames;
425 *data++ = s->TxFramesWithDeferredXmissions;
426 *data++ = s->TxLateCollisions;
427 *data++ = s->TxTotalCollisions;
428 *data++ = s->TxFramesAbortedDueToXSCollisions;
429 *data++ = s->TxUnderrun;
430 *data++ = s->TxLengthErrors;
431 *data++ = s->TxInternalMACXmitError;
432 *data++ = s->TxFramesWithExcessiveDeferral;
433 *data++ = s->TxFCSErrors;
435 *data++ = s->RxOctetsOK;
436 *data++ = s->RxOctetsBad;
437 *data++ = s->RxUnicastFramesOK;
438 *data++ = s->RxMulticastFramesOK;
439 *data++ = s->RxBroadcastFramesOK;
440 *data++ = s->RxPauseFrames;
441 *data++ = s->RxFCSErrors;
442 *data++ = s->RxAlignErrors;
443 *data++ = s->RxSymbolErrors;
444 *data++ = s->RxDataErrors;
445 *data++ = s->RxSequenceErrors;
446 *data++ = s->RxRuntErrors;
447 *data++ = s->RxJabberErrors;
448 *data++ = s->RxInternalMACRcvError;
449 *data++ = s->RxInRangeLengthErrors;
450 *data++ = s->RxOutOfRangeLengthField;
451 *data++ = s->RxFrameTooLongErrors;
453 *data++ = ss->tso;
454 *data++ = ss->vlan_xtract;
455 *data++ = ss->vlan_insert;
456 *data++ = ss->rx_cso_good;
457 *data++ = ss->tx_cso;
458 *data++ = ss->rx_drops;
460 *data++ = (u64)t->respQ_empty;
461 *data++ = (u64)t->respQ_overflow;
462 *data++ = (u64)t->freelistQ_empty;
463 *data++ = (u64)t->pkt_too_big;
464 *data++ = (u64)t->pkt_mismatch;
465 *data++ = (u64)t->cmdQ_full[0];
466 *data++ = (u64)t->cmdQ_full[1];
467 *data++ = (u64)t->tx_ipfrags;
468 *data++ = (u64)t->tx_reg_pkts;
469 *data++ = (u64)t->tx_lso_pkts;
470 *data++ = (u64)t->tx_do_cksum;
473 static inline void reg_block_dump(struct adapter *ap, void *buf,
474 unsigned int start, unsigned int end)
476 u32 *p = buf + start;
478 for ( ; start <= end; start += sizeof(u32))
479 *p++ = readl(ap->regs + start);
482 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
483 void *buf)
485 struct adapter *ap = dev->priv;
488 * Version scheme: bits 0..9: chip version, bits 10..15: chip revision
490 regs->version = 2;
492 memset(buf, 0, T2_REGMAP_SIZE);
493 reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER);
496 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
498 struct adapter *adapter = dev->priv;
499 struct port_info *p = &adapter->port[dev->if_port];
501 cmd->supported = p->link_config.supported;
502 cmd->advertising = p->link_config.advertising;
504 if (netif_carrier_ok(dev)) {
505 cmd->speed = p->link_config.speed;
506 cmd->duplex = p->link_config.duplex;
507 } else {
508 cmd->speed = -1;
509 cmd->duplex = -1;
512 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
513 cmd->phy_address = p->phy->addr;
514 cmd->transceiver = XCVR_EXTERNAL;
515 cmd->autoneg = p->link_config.autoneg;
516 cmd->maxtxpkt = 0;
517 cmd->maxrxpkt = 0;
518 return 0;
521 static int speed_duplex_to_caps(int speed, int duplex)
523 int cap = 0;
525 switch (speed) {
526 case SPEED_10:
527 if (duplex == DUPLEX_FULL)
528 cap = SUPPORTED_10baseT_Full;
529 else
530 cap = SUPPORTED_10baseT_Half;
531 break;
532 case SPEED_100:
533 if (duplex == DUPLEX_FULL)
534 cap = SUPPORTED_100baseT_Full;
535 else
536 cap = SUPPORTED_100baseT_Half;
537 break;
538 case SPEED_1000:
539 if (duplex == DUPLEX_FULL)
540 cap = SUPPORTED_1000baseT_Full;
541 else
542 cap = SUPPORTED_1000baseT_Half;
543 break;
544 case SPEED_10000:
545 if (duplex == DUPLEX_FULL)
546 cap = SUPPORTED_10000baseT_Full;
548 return cap;
551 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
552 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
553 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
554 ADVERTISED_10000baseT_Full)
556 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
558 struct adapter *adapter = dev->priv;
559 struct port_info *p = &adapter->port[dev->if_port];
560 struct link_config *lc = &p->link_config;
562 if (!(lc->supported & SUPPORTED_Autoneg))
563 return -EOPNOTSUPP; /* can't change speed/duplex */
565 if (cmd->autoneg == AUTONEG_DISABLE) {
566 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
568 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
569 return -EINVAL;
570 lc->requested_speed = cmd->speed;
571 lc->requested_duplex = cmd->duplex;
572 lc->advertising = 0;
573 } else {
574 cmd->advertising &= ADVERTISED_MASK;
575 if (cmd->advertising & (cmd->advertising - 1))
576 cmd->advertising = lc->supported;
577 cmd->advertising &= lc->supported;
578 if (!cmd->advertising)
579 return -EINVAL;
580 lc->requested_speed = SPEED_INVALID;
581 lc->requested_duplex = DUPLEX_INVALID;
582 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
584 lc->autoneg = cmd->autoneg;
585 if (netif_running(dev))
586 t1_link_start(p->phy, p->mac, lc);
587 return 0;
590 static void get_pauseparam(struct net_device *dev,
591 struct ethtool_pauseparam *epause)
593 struct adapter *adapter = dev->priv;
594 struct port_info *p = &adapter->port[dev->if_port];
596 epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
597 epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
598 epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
601 static int set_pauseparam(struct net_device *dev,
602 struct ethtool_pauseparam *epause)
604 struct adapter *adapter = dev->priv;
605 struct port_info *p = &adapter->port[dev->if_port];
606 struct link_config *lc = &p->link_config;
608 if (epause->autoneg == AUTONEG_DISABLE)
609 lc->requested_fc = 0;
610 else if (lc->supported & SUPPORTED_Autoneg)
611 lc->requested_fc = PAUSE_AUTONEG;
612 else
613 return -EINVAL;
615 if (epause->rx_pause)
616 lc->requested_fc |= PAUSE_RX;
617 if (epause->tx_pause)
618 lc->requested_fc |= PAUSE_TX;
619 if (lc->autoneg == AUTONEG_ENABLE) {
620 if (netif_running(dev))
621 t1_link_start(p->phy, p->mac, lc);
622 } else {
623 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
624 if (netif_running(dev))
625 p->mac->ops->set_speed_duplex_fc(p->mac, -1, -1,
626 lc->fc);
628 return 0;
631 static u32 get_rx_csum(struct net_device *dev)
633 struct adapter *adapter = dev->priv;
635 return (adapter->flags & RX_CSUM_ENABLED) != 0;
638 static int set_rx_csum(struct net_device *dev, u32 data)
640 struct adapter *adapter = dev->priv;
642 if (data)
643 adapter->flags |= RX_CSUM_ENABLED;
644 else
645 adapter->flags &= ~RX_CSUM_ENABLED;
646 return 0;
649 static int set_tso(struct net_device *dev, u32 value)
651 struct adapter *adapter = dev->priv;
653 if (!(adapter->flags & TSO_CAPABLE))
654 return value ? -EOPNOTSUPP : 0;
655 return ethtool_op_set_tso(dev, value);
658 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
660 struct adapter *adapter = dev->priv;
661 int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
663 e->rx_max_pending = MAX_RX_BUFFERS;
664 e->rx_mini_max_pending = 0;
665 e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
666 e->tx_max_pending = MAX_CMDQ_ENTRIES;
668 e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl];
669 e->rx_mini_pending = 0;
670 e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl];
671 e->tx_pending = adapter->params.sge.cmdQ_size[0];
674 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
676 struct adapter *adapter = dev->priv;
677 int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
679 if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending ||
680 e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
681 e->tx_pending > MAX_CMDQ_ENTRIES ||
682 e->rx_pending < MIN_FL_ENTRIES ||
683 e->rx_jumbo_pending < MIN_FL_ENTRIES ||
684 e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1))
685 return -EINVAL;
687 if (adapter->flags & FULL_INIT_DONE)
688 return -EBUSY;
690 adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
691 adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
692 adapter->params.sge.cmdQ_size[0] = e->tx_pending;
693 adapter->params.sge.cmdQ_size[1] = e->tx_pending > MAX_CMDQ1_ENTRIES ?
694 MAX_CMDQ1_ENTRIES : e->tx_pending;
695 return 0;
698 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
700 struct adapter *adapter = dev->priv;
703 * If RX coalescing is requested we use NAPI, otherwise interrupts.
704 * This choice can be made only when all ports and the TOE are off.
706 if (adapter->open_device_map == 0)
707 adapter->params.sge.polling = c->use_adaptive_rx_coalesce;
709 if (adapter->params.sge.polling) {
710 adapter->params.sge.rx_coalesce_usecs = 0;
711 } else {
712 adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
714 adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
715 adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
716 t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
717 return 0;
720 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
722 struct adapter *adapter = dev->priv;
724 c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs;
725 c->rate_sample_interval = adapter->params.sge.sample_interval_usecs;
726 c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable;
727 return 0;
730 static int get_eeprom_len(struct net_device *dev)
732 return EEPROM_SIZE;
735 #define EEPROM_MAGIC(ap) \
736 (PCI_VENDOR_ID_CHELSIO | ((ap)->params.chip_version << 16))
738 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
739 u8 *data)
741 int i;
742 u8 buf[EEPROM_SIZE] __attribute__((aligned(4)));
743 struct adapter *adapter = dev->priv;
745 e->magic = EEPROM_MAGIC(adapter);
746 for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32))
747 t1_seeprom_read(adapter, i, (u32 *)&buf[i]);
748 memcpy(data, buf + e->offset, e->len);
749 return 0;
752 static const struct ethtool_ops t1_ethtool_ops = {
753 .get_settings = get_settings,
754 .set_settings = set_settings,
755 .get_drvinfo = get_drvinfo,
756 .get_msglevel = get_msglevel,
757 .set_msglevel = set_msglevel,
758 .get_ringparam = get_sge_param,
759 .set_ringparam = set_sge_param,
760 .get_coalesce = get_coalesce,
761 .set_coalesce = set_coalesce,
762 .get_eeprom_len = get_eeprom_len,
763 .get_eeprom = get_eeprom,
764 .get_pauseparam = get_pauseparam,
765 .set_pauseparam = set_pauseparam,
766 .get_rx_csum = get_rx_csum,
767 .set_rx_csum = set_rx_csum,
768 .get_tx_csum = ethtool_op_get_tx_csum,
769 .set_tx_csum = ethtool_op_set_tx_csum,
770 .get_sg = ethtool_op_get_sg,
771 .set_sg = ethtool_op_set_sg,
772 .get_link = ethtool_op_get_link,
773 .get_strings = get_strings,
774 .get_stats_count = get_stats_count,
775 .get_ethtool_stats = get_stats,
776 .get_regs_len = get_regs_len,
777 .get_regs = get_regs,
778 .get_tso = ethtool_op_get_tso,
779 .set_tso = set_tso,
782 static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
784 struct adapter *adapter = dev->priv;
785 struct mii_ioctl_data *data = if_mii(req);
787 switch (cmd) {
788 case SIOCGMIIPHY:
789 data->phy_id = adapter->port[dev->if_port].phy->addr;
790 /* FALLTHRU */
791 case SIOCGMIIREG: {
792 struct cphy *phy = adapter->port[dev->if_port].phy;
793 u32 val;
795 if (!phy->mdio_read)
796 return -EOPNOTSUPP;
797 phy->mdio_read(adapter, data->phy_id, 0, data->reg_num & 0x1f,
798 &val);
799 data->val_out = val;
800 break;
802 case SIOCSMIIREG: {
803 struct cphy *phy = adapter->port[dev->if_port].phy;
805 if (!capable(CAP_NET_ADMIN))
806 return -EPERM;
807 if (!phy->mdio_write)
808 return -EOPNOTSUPP;
809 phy->mdio_write(adapter, data->phy_id, 0, data->reg_num & 0x1f,
810 data->val_in);
811 break;
814 default:
815 return -EOPNOTSUPP;
817 return 0;
820 static int t1_change_mtu(struct net_device *dev, int new_mtu)
822 int ret;
823 struct adapter *adapter = dev->priv;
824 struct cmac *mac = adapter->port[dev->if_port].mac;
826 if (!mac->ops->set_mtu)
827 return -EOPNOTSUPP;
828 if (new_mtu < 68)
829 return -EINVAL;
830 if ((ret = mac->ops->set_mtu(mac, new_mtu)))
831 return ret;
832 dev->mtu = new_mtu;
833 return 0;
836 static int t1_set_mac_addr(struct net_device *dev, void *p)
838 struct adapter *adapter = dev->priv;
839 struct cmac *mac = adapter->port[dev->if_port].mac;
840 struct sockaddr *addr = p;
842 if (!mac->ops->macaddress_set)
843 return -EOPNOTSUPP;
845 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
846 mac->ops->macaddress_set(mac, dev->dev_addr);
847 return 0;
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
/* Attach/detach a VLAN group and toggle HW VLAN acceleration to match. */
static void vlan_rx_register(struct net_device *dev,
			     struct vlan_group *grp)
{
	struct adapter *adapter = dev->priv;

	/* async_lock keeps this consistent with the interrupt path */
	spin_lock_irq(&adapter->async_lock);
	adapter->vlan_grp = grp;
	t1_set_vlan_accel(adapter, grp != NULL);
	spin_unlock_irq(&adapter->async_lock);
}

/* Remove a VLAN id from the group so stale packets can't reach it. */
static void vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct adapter *adapter = dev->priv;

	spin_lock_irq(&adapter->async_lock);
	if (adapter->vlan_grp)
		adapter->vlan_grp->vlan_devices[vid] = NULL;
	spin_unlock_irq(&adapter->async_lock);
}
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: run the interrupt handler with local interrupts masked. */
static void t1_netpoll(struct net_device *dev)
{
	unsigned long flags;
	struct adapter *adapter = dev->priv;

	local_irq_save(flags);
	t1_select_intr_handler(adapter)(adapter->pdev->irq, adapter);
	local_irq_restore(flags);
}
#endif
886 * Periodic accumulation of MAC statistics. This is used only if the MAC
887 * does not have any other way to prevent stats counter overflow.
889 static void mac_stats_task(void *data)
891 int i;
892 struct adapter *adapter = data;
894 for_each_port(adapter, i) {
895 struct port_info *p = &adapter->port[i];
897 if (netif_running(p->dev))
898 p->mac->ops->statistics_update(p->mac,
899 MAC_STATS_UPDATE_FAST);
902 /* Schedule the next statistics update if any port is active. */
903 spin_lock(&adapter->work_lock);
904 if (adapter->open_device_map & PORT_MASK)
905 schedule_mac_stats_update(adapter,
906 adapter->params.stats_update_period);
907 spin_unlock(&adapter->work_lock);
911 * Processes elmer0 external interrupts in process context.
913 static void ext_intr_task(void *data)
915 struct adapter *adapter = data;
917 elmer0_ext_intr_handler(adapter);
919 /* Now reenable external interrupts */
920 spin_lock_irq(&adapter->async_lock);
921 adapter->slow_intr_mask |= F_PL_INTR_EXT;
922 writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
923 writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
924 adapter->regs + A_PL_ENABLE);
925 spin_unlock_irq(&adapter->async_lock);
929 * Interrupt-context handler for elmer0 external interrupts.
931 void t1_elmer0_ext_intr(struct adapter *adapter)
934 * Schedule a task to handle external interrupts as we require
935 * a process context. We disable EXT interrupts in the interim
936 * and let the task reenable them when it's done.
938 adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
939 writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
940 adapter->regs + A_PL_ENABLE);
941 schedule_work(&adapter->ext_intr_handler_task);
944 void t1_fatal_err(struct adapter *adapter)
946 if (adapter->flags & FULL_INIT_DONE) {
947 t1_sge_stop(adapter->sge);
948 t1_interrupts_disable(adapter);
950 CH_ALERT("%s: encountered fatal error, operation suspended\n",
951 adapter->name);
954 static int __devinit init_one(struct pci_dev *pdev,
955 const struct pci_device_id *ent)
957 static int version_printed;
959 int i, err, pci_using_dac = 0;
960 unsigned long mmio_start, mmio_len;
961 const struct board_info *bi;
962 struct adapter *adapter = NULL;
963 struct port_info *pi;
965 if (!version_printed) {
966 printk(KERN_INFO "%s - version %s\n", DRV_DESCRIPTION,
967 DRV_VERSION);
968 ++version_printed;
971 err = pci_enable_device(pdev);
972 if (err)
973 return err;
975 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
976 CH_ERR("%s: cannot find PCI device memory base address\n",
977 pci_name(pdev));
978 err = -ENODEV;
979 goto out_disable_pdev;
982 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
983 pci_using_dac = 1;
985 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
986 CH_ERR("%s: unable to obtain 64-bit DMA for"
987 "consistent allocations\n", pci_name(pdev));
988 err = -ENODEV;
989 goto out_disable_pdev;
992 } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
993 CH_ERR("%s: no usable DMA configuration\n", pci_name(pdev));
994 goto out_disable_pdev;
997 err = pci_request_regions(pdev, DRV_NAME);
998 if (err) {
999 CH_ERR("%s: cannot obtain PCI resources\n", pci_name(pdev));
1000 goto out_disable_pdev;
1003 pci_set_master(pdev);
1005 mmio_start = pci_resource_start(pdev, 0);
1006 mmio_len = pci_resource_len(pdev, 0);
1007 bi = t1_get_board_info(ent->driver_data);
1009 for (i = 0; i < bi->port_number; ++i) {
1010 struct net_device *netdev;
1012 netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter));
1013 if (!netdev) {
1014 err = -ENOMEM;
1015 goto out_free_dev;
1018 SET_MODULE_OWNER(netdev);
1019 SET_NETDEV_DEV(netdev, &pdev->dev);
1021 if (!adapter) {
1022 adapter = netdev->priv;
1023 adapter->pdev = pdev;
1024 adapter->port[0].dev = netdev; /* so we don't leak it */
1026 adapter->regs = ioremap(mmio_start, mmio_len);
1027 if (!adapter->regs) {
1028 CH_ERR("%s: cannot map device registers\n",
1029 pci_name(pdev));
1030 err = -ENOMEM;
1031 goto out_free_dev;
1034 if (t1_get_board_rev(adapter, bi, &adapter->params)) {
1035 err = -ENODEV; /* Can't handle this chip rev */
1036 goto out_free_dev;
1039 adapter->name = pci_name(pdev);
1040 adapter->msg_enable = dflt_msg_enable;
1041 adapter->mmio_len = mmio_len;
1043 spin_lock_init(&adapter->tpi_lock);
1044 spin_lock_init(&adapter->work_lock);
1045 spin_lock_init(&adapter->async_lock);
1047 INIT_WORK(&adapter->ext_intr_handler_task,
1048 ext_intr_task, adapter);
1049 INIT_WORK(&adapter->stats_update_task, mac_stats_task,
1050 adapter);
1052 pci_set_drvdata(pdev, netdev);
1055 pi = &adapter->port[i];
1056 pi->dev = netdev;
1057 netif_carrier_off(netdev);
1058 netdev->irq = pdev->irq;
1059 netdev->if_port = i;
1060 netdev->mem_start = mmio_start;
1061 netdev->mem_end = mmio_start + mmio_len - 1;
1062 netdev->priv = adapter;
1063 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
1064 netdev->features |= NETIF_F_LLTX;
1066 adapter->flags |= RX_CSUM_ENABLED | TCP_CSUM_CAPABLE;
1067 if (pci_using_dac)
1068 netdev->features |= NETIF_F_HIGHDMA;
1069 if (vlan_tso_capable(adapter)) {
1070 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
1071 adapter->flags |= VLAN_ACCEL_CAPABLE;
1072 netdev->features |=
1073 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1074 netdev->vlan_rx_register = vlan_rx_register;
1075 netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;
1076 #endif
1077 adapter->flags |= TSO_CAPABLE;
1078 netdev->features |= NETIF_F_TSO;
1081 netdev->open = cxgb_open;
1082 netdev->stop = cxgb_close;
1083 netdev->hard_start_xmit = t1_start_xmit;
1084 netdev->hard_header_len += (adapter->flags & TSO_CAPABLE) ?
1085 sizeof(struct cpl_tx_pkt_lso) :
1086 sizeof(struct cpl_tx_pkt);
1087 netdev->get_stats = t1_get_stats;
1088 netdev->set_multicast_list = t1_set_rxmode;
1089 netdev->do_ioctl = t1_ioctl;
1090 netdev->change_mtu = t1_change_mtu;
1091 netdev->set_mac_address = t1_set_mac_addr;
1092 #ifdef CONFIG_NET_POLL_CONTROLLER
1093 netdev->poll_controller = t1_netpoll;
1094 #endif
1095 netdev->weight = 64;
1097 SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
1100 if (t1_init_sw_modules(adapter, bi) < 0) {
1101 err = -ENODEV;
1102 goto out_free_dev;
1106 * The card is now ready to go. If any errors occur during device
1107 * registration we do not fail the whole card but rather proceed only
1108 * with the ports we manage to register successfully. However we must
1109 * register at least one net device.
1111 for (i = 0; i < bi->port_number; ++i) {
1112 err = register_netdev(adapter->port[i].dev);
1113 if (err)
1114 CH_WARN("%s: cannot register net device %s, skipping\n",
1115 pci_name(pdev), adapter->port[i].dev->name);
1116 else {
1118 * Change the name we use for messages to the name of
1119 * the first successfully registered interface.
1121 if (!adapter->registered_device_map)
1122 adapter->name = adapter->port[i].dev->name;
1124 __set_bit(i, &adapter->registered_device_map);
1127 if (!adapter->registered_device_map) {
1128 CH_ERR("%s: could not register any net devices\n",
1129 pci_name(pdev));
1130 goto out_release_adapter_res;
1133 printk(KERN_INFO "%s: %s (rev %d), %s %dMHz/%d-bit\n", adapter->name,
1134 bi->desc, adapter->params.chip_revision,
1135 adapter->params.pci.is_pcix ? "PCIX" : "PCI",
1136 adapter->params.pci.speed, adapter->params.pci.width);
1137 return 0;
1139 out_release_adapter_res:
1140 t1_free_sw_modules(adapter);
1141 out_free_dev:
1142 if (adapter) {
1143 if (adapter->regs)
1144 iounmap(adapter->regs);
1145 for (i = bi->port_number - 1; i >= 0; --i)
1146 if (adapter->port[i].dev)
1147 free_netdev(adapter->port[i].dev);
1149 pci_release_regions(pdev);
1150 out_disable_pdev:
1151 pci_disable_device(pdev);
1152 pci_set_drvdata(pdev, NULL);
1153 return err;
1156 static inline void t1_sw_reset(struct pci_dev *pdev)
1158 pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3);
1159 pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0);
1162 static void __devexit remove_one(struct pci_dev *pdev)
1164 struct net_device *dev = pci_get_drvdata(pdev);
1166 if (dev) {
1167 int i;
1168 struct adapter *adapter = dev->priv;
1170 for_each_port(adapter, i)
1171 if (test_bit(i, &adapter->registered_device_map))
1172 unregister_netdev(adapter->port[i].dev);
1174 t1_free_sw_modules(adapter);
1175 iounmap(adapter->regs);
1176 while (--i >= 0)
1177 if (adapter->port[i].dev)
1178 free_netdev(adapter->port[i].dev);
1180 pci_release_regions(pdev);
1181 pci_disable_device(pdev);
1182 pci_set_drvdata(pdev, NULL);
1183 t1_sw_reset(pdev);
1187 static struct pci_driver driver = {
1188 .name = DRV_NAME,
1189 .id_table = t1_pci_tbl,
1190 .probe = init_one,
1191 .remove = __devexit_p(remove_one),
1194 static int __init t1_init_module(void)
1196 return pci_register_driver(&driver);
1199 static void __exit t1_cleanup_module(void)
1201 pci_unregister_driver(&driver);
1204 module_init(t1_init_module);
1205 module_exit(t1_cleanup_module);