/*****************************************************************************
 *                                                                           *
 * File: cxgb2.c                                                             *
 * $Revision: 1.25 $                                                         *
 * $Date: 2005/06/22 00:43:25 $                                              *
 * Description:                                                              *
 *  Chelsio 10Gb Ethernet Driver.                                            *
 *                                                                           *
 * This program is free software; you can redistribute it and/or modify     *
 * it under the terms of the GNU General Public License, version 2, as      *
 * published by the Free Software Foundation.                               *
 *                                                                           *
 * You should have received a copy of the GNU General Public License along  *
 * with this program; if not, write to the Free Software Foundation, Inc.,  *
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.                 *
 *                                                                           *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED   *
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF     *
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                    *
 *                                                                           *
 * http://www.chelsio.com                                                   *
 *                                                                           *
 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                   *
 * All rights reserved.                                                     *
 *                                                                           *
 * Maintainers: maintainers@chelsio.com                                     *
 *                                                                           *
 * Authors: Dimitrios Michailidis   <dm@chelsio.com>                        *
 *          Tina Yang               <tainay@chelsio.com>                    *
 *          Felix Marti             <felix@chelsio.com>                     *
 *          Scott Bardone           <sbardone@chelsio.com>                  *
 *          Kurt Ottaway            <kottaway@chelsio.com>                  *
 *          Frank DiMambro          <frank@chelsio.com>                     *
 *                                                                           *
 * History:                                                                  *
 *                                                                           *
 ****************************************************************************/

#include "common.h"
#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <linux/proc_fs.h>
#include <linux/dma-mapping.h>
#include <asm/uaccess.h>

#include "cpl5_cmd.h"
#include "regs.h"
#include "gmac.h"
#include "cphy.h"
#include "sge.h"
#include "espi.h"

#ifdef work_struct
#include <linux/tqueue.h>
#define INIT_WORK INIT_TQUEUE
#define schedule_work schedule_task
#define flush_scheduled_work flush_scheduled_tasks

static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
{
        mod_timer(&ap->stats_update_timer, jiffies + secs * HZ);
}

static inline void cancel_mac_stats_update(struct adapter *ap)
{
        del_timer_sync(&ap->stats_update_timer);
        flush_scheduled_tasks();
}

/*
 * Stats update timer for 2.4.  It schedules a task to do the actual update as
 * we need to access MAC statistics in process context.
 */
static void mac_stats_timer(unsigned long data)
{
        struct adapter *ap = (struct adapter *)data;

        schedule_task(&ap->stats_update_task);
}
#else
#include <linux/workqueue.h>

static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
{
        schedule_delayed_work(&ap->stats_update_task, secs * HZ);
}

static inline void cancel_mac_stats_update(struct adapter *ap)
{
        cancel_delayed_work(&ap->stats_update_task);
}
#endif

#define MAX_CMDQ_ENTRIES 16384
#define MAX_CMDQ1_ENTRIES 1024
#define MAX_RX_BUFFERS 16384
#define MAX_RX_JUMBO_BUFFERS 16384
#define MAX_TX_BUFFERS_HIGH     16384U
#define MAX_TX_BUFFERS_LOW      1536U
#define MIN_FL_ENTRIES 32

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

/*
 * The EEPROM is actually bigger but only the first few bytes are used so we
 * only report those.
 */
#define EEPROM_SIZE 32

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("GPL");

static int dflt_msg_enable = DFLT_MSG_ENABLE;

MODULE_PARM(dflt_msg_enable, "i");
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 message enable bitmap");

static const char pci_speed[][4] = {
        "33", "66", "100", "133"
};

/*
 * Setup MAC to receive the types of packets we want.
 */
static void t1_set_rxmode(struct net_device *dev)
{
        struct adapter *adapter = dev->priv;
        struct cmac *mac = adapter->port[dev->if_port].mac;
        struct t1_rx_mode rm;

        rm.dev = dev;
        rm.idx = 0;
        rm.list = dev->mc_list;
        mac->ops->set_rx_mode(mac, &rm);
}

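/*
 * Log the current link state of a port: up (with speed and duplex) or down.
 */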
static void link_report(struct port_info *p)
{
        if (!netif_carrier_ok(p->dev))
                printk(KERN_INFO "%s: link down\n", p->dev->name);
        else {
                const char *s = "10Mbps";

                switch (p->link_config.speed) {
                case SPEED_10000: s = "10Gbps"; break;
                case SPEED_1000:  s = "1000Mbps"; break;
                case SPEED_100:   s = "100Mbps"; break;
                }

                printk(KERN_INFO "%s: link up, %s, %s-duplex\n",
                       p->dev->name, s,
                       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
        }
}

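/*
 * Update a port's carrier state when its link status changes and log the
 * transition.
 */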
void t1_link_changed(struct adapter *adapter, int port_id, int link_stat,
                     int speed, int duplex, int pause)
{
        struct port_info *p = &adapter->port[port_id];

        if (link_stat != netif_carrier_ok(p->dev)) {
                if (link_stat)
                        netif_carrier_on(p->dev);
                else
                        netif_carrier_off(p->dev);
                link_report(p);
        }
}

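/*
 * Bring up a port's MAC: reset it, reprogram the MAC address and RX mode,
 * restart link negotiation, and enable the MAC in both directions.
 */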
static void link_start(struct port_info *p)
{
        struct cmac *mac = p->mac;

        mac->ops->reset(mac);
        if (mac->ops->macaddress_set)
                mac->ops->macaddress_set(mac, p->dev->dev_addr);
        t1_set_rxmode(p->dev);
        t1_link_start(p->phy, mac, &p->link_config);
        mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

static void enable_hw_csum(struct adapter *adapter)
{
        if (adapter->flags & TSO_CAPABLE)
                t1_tp_set_ip_checksum_offload(adapter, 1);      /* for TSO only */
        t1_tp_set_tcp_checksum_offload(adapter, 1);
}

/*
 * Things to do upon first use of a card.
 * This must run with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adapter)
{
        int err = 0;

        if (!(adapter->flags & FULL_INIT_DONE)) {
                err = t1_init_hw_modules(adapter);
                if (err)
                        goto out_err;

                enable_hw_csum(adapter);
                adapter->flags |= FULL_INIT_DONE;
        }

        t1_interrupts_clear(adapter);
        if ((err = request_irq(adapter->pdev->irq,
                               t1_select_intr_handler(adapter), SA_SHIRQ,
                               adapter->name, adapter))) {
                goto out_err;
        }
        t1_sge_start(adapter->sge);
        t1_interrupts_enable(adapter);
 out_err:
        return err;
}

/*
 * Release resources when all the ports have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
        t1_sge_stop(adapter->sge);
        t1_interrupts_disable(adapter);
        free_irq(adapter->pdev->irq, adapter);
}

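/*
 * net_device open handler.  The first port opened brings up the shared
 * adapter state via cxgb_up(); every open enables its port and, if no other
 * port was already open, schedules the periodic MAC statistics update.
 */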
static int cxgb_open(struct net_device *dev)
{
        int err;
        struct adapter *adapter = dev->priv;
        int other_ports = adapter->open_device_map & PORT_MASK;

        if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
                return err;

        __set_bit(dev->if_port, &adapter->open_device_map);
        link_start(&adapter->port[dev->if_port]);
        netif_start_queue(dev);
        if (!other_ports && adapter->params.stats_update_period)
                schedule_mac_stats_update(adapter,
                                          adapter->params.stats_update_period);
        return 0;
}

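/*
 * net_device stop handler.  Quiesces the port's MAC; when the last port is
 * closed the statistics update is cancelled and adapter resources released.
 */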
static int cxgb_close(struct net_device *dev)
{
        struct adapter *adapter = dev->priv;
        struct port_info *p = &adapter->port[dev->if_port];
        struct cmac *mac = p->mac;

        netif_stop_queue(dev);
        mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
        netif_carrier_off(dev);

        clear_bit(dev->if_port, &adapter->open_device_map);
        if (adapter->params.stats_update_period &&
            !(adapter->open_device_map & PORT_MASK)) {
                /* Stop statistics accumulation. */
                smp_mb__after_clear_bit();
                spin_lock(&adapter->work_lock);   /* sync with update task */
                spin_unlock(&adapter->work_lock);
                cancel_mac_stats_update(adapter);
        }

        if (!adapter->open_device_map)
                cxgb_down(adapter);
        return 0;
}

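/*
 * net_device get_stats handler: derive the standard netdev statistics from a
 * full MAC statistics update.
 */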
static struct net_device_stats *t1_get_stats(struct net_device *dev)
{
        struct adapter *adapter = dev->priv;
        struct port_info *p = &adapter->port[dev->if_port];
        struct net_device_stats *ns = &p->netstats;
        const struct cmac_statistics *pstats;

        /* Do a full update of the MAC stats */
        pstats = p->mac->ops->statistics_update(p->mac,
                                                MAC_STATS_UPDATE_FULL);

        ns->tx_packets = pstats->TxUnicastFramesOK +
                pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK;

        ns->rx_packets = pstats->RxUnicastFramesOK +
                pstats->RxMulticastFramesOK + pstats->RxBroadcastFramesOK;

        ns->tx_bytes = pstats->TxOctetsOK;
        ns->rx_bytes = pstats->RxOctetsOK;

        ns->tx_errors = pstats->TxLateCollisions + pstats->TxLengthErrors +
                pstats->TxUnderrun + pstats->TxFramesAbortedDueToXSCollisions;
        ns->rx_errors = pstats->RxDataErrors + pstats->RxJabberErrors +
                pstats->RxFCSErrors + pstats->RxAlignErrors +
                pstats->RxSequenceErrors + pstats->RxFrameTooLongErrors +
                pstats->RxSymbolErrors + pstats->RxRuntErrors;

        ns->multicast  = pstats->RxMulticastFramesOK;
        ns->collisions = pstats->TxTotalCollisions;

        /* detailed rx_errors */
        ns->rx_length_errors = pstats->RxFrameTooLongErrors +
                pstats->RxJabberErrors;
        ns->rx_over_errors   = 0;
        ns->rx_crc_errors    = pstats->RxFCSErrors;
        ns->rx_frame_errors  = pstats->RxAlignErrors;
        ns->rx_fifo_errors   = 0;
        ns->rx_missed_errors = 0;

        /* detailed tx_errors */
        ns->tx_aborted_errors   = pstats->TxFramesAbortedDueToXSCollisions;
        ns->tx_carrier_errors   = 0;
        ns->tx_fifo_errors      = pstats->TxUnderrun;
        ns->tx_heartbeat_errors = 0;
        ns->tx_window_errors    = pstats->TxLateCollisions;
        return ns;
}

static u32 get_msglevel(struct net_device *dev)
{
        struct adapter *adapter = dev->priv;

        return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
        struct adapter *adapter = dev->priv;

        adapter->msg_enable = val;
}

static char stats_strings[][ETH_GSTRING_LEN] = {
        "TxOctetsOK",
        "TxOctetsBad",
        "TxUnicastFramesOK",
        "TxMulticastFramesOK",
        "TxBroadcastFramesOK",
        "TxPauseFrames",
        "TxFramesWithDeferredXmissions",
        "TxLateCollisions",
        "TxTotalCollisions",
        "TxFramesAbortedDueToXSCollisions",
        "TxUnderrun",
        "TxLengthErrors",
        "TxInternalMACXmitError",
        "TxFramesWithExcessiveDeferral",
        "TxFCSErrors",

        "RxOctetsOK",
        "RxOctetsBad",
        "RxUnicastFramesOK",
        "RxMulticastFramesOK",
        "RxBroadcastFramesOK",
        "RxPauseFrames",
        "RxFCSErrors",
        "RxAlignErrors",
        "RxSymbolErrors",
        "RxDataErrors",
        "RxSequenceErrors",
        "RxRuntErrors",
        "RxJabberErrors",
        "RxInternalMACRcvError",
        "RxInRangeLengthErrors",
        "RxOutOfRangeLengthField",
        "RxFrameTooLongErrors",

        "TSO",
        "VLANextractions",
        "VLANinsertions",
        "RxCsumGood",
        "TxCsumOffload",
        "RxDrops",

        "respQ_empty",
        "respQ_overflow",
        "freelistQ_empty",
        "pkt_too_big",
        "pkt_mismatch",
        "cmdQ_full0",
        "cmdQ_full1",
        "tx_ipfrags",
        "tx_reg_pkts",
        "tx_lso_pkts",
        "tx_do_cksum",

        "espi_DIP2ParityErr",
        "espi_DIP4Err",
        "espi_RxDrops",
        "espi_TxDrops",
        "espi_RxOvfl",
        "espi_ParityErr"
};

#define T2_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
        return T2_REGMAP_SIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct adapter *adapter = dev->priv;

        strcpy(info->driver, DRV_NAME);
        strcpy(info->version, DRV_VERSION);
        strcpy(info->fw_version, "N/A");
        strcpy(info->bus_info, pci_name(adapter->pdev));
}

static int get_stats_count(struct net_device *dev)
{
        return ARRAY_SIZE(stats_strings);
}

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        if (stringset == ETH_SS_STATS)
                memcpy(data, stats_strings, sizeof(stats_strings));
}

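/*
 * ethtool get_ethtool_stats handler: fills the statistics array from the MAC
 * counters, the per-port SGE counters, and the SGE interrupt counters.
 */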
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
                      u64 *data)
{
        struct adapter *adapter = dev->priv;
        struct cmac *mac = adapter->port[dev->if_port].mac;
        const struct cmac_statistics *s;
        const struct sge_port_stats *ss;
        const struct sge_intr_counts *t;

        s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
        ss = t1_sge_get_port_stats(adapter->sge, dev->if_port);
        t = t1_sge_get_intr_counts(adapter->sge);

        *data++ = s->TxOctetsOK;
        *data++ = s->TxOctetsBad;
        *data++ = s->TxUnicastFramesOK;
        *data++ = s->TxMulticastFramesOK;
        *data++ = s->TxBroadcastFramesOK;
        *data++ = s->TxPauseFrames;
        *data++ = s->TxFramesWithDeferredXmissions;
        *data++ = s->TxLateCollisions;
        *data++ = s->TxTotalCollisions;
        *data++ = s->TxFramesAbortedDueToXSCollisions;
        *data++ = s->TxUnderrun;
        *data++ = s->TxLengthErrors;
        *data++ = s->TxInternalMACXmitError;
        *data++ = s->TxFramesWithExcessiveDeferral;
        *data++ = s->TxFCSErrors;

        *data++ = s->RxOctetsOK;
        *data++ = s->RxOctetsBad;
        *data++ = s->RxUnicastFramesOK;
        *data++ = s->RxMulticastFramesOK;
        *data++ = s->RxBroadcastFramesOK;
        *data++ = s->RxPauseFrames;
        *data++ = s->RxFCSErrors;
        *data++ = s->RxAlignErrors;
        *data++ = s->RxSymbolErrors;
        *data++ = s->RxDataErrors;
        *data++ = s->RxSequenceErrors;
        *data++ = s->RxRuntErrors;
        *data++ = s->RxJabberErrors;
        *data++ = s->RxInternalMACRcvError;
        *data++ = s->RxInRangeLengthErrors;
        *data++ = s->RxOutOfRangeLengthField;
        *data++ = s->RxFrameTooLongErrors;

        *data++ = ss->tso;
        *data++ = ss->vlan_xtract;
        *data++ = ss->vlan_insert;
        *data++ = ss->rx_cso_good;
        *data++ = ss->tx_cso;
        *data++ = ss->rx_drops;

        *data++ = (u64)t->respQ_empty;
        *data++ = (u64)t->respQ_overflow;
        *data++ = (u64)t->freelistQ_empty;
        *data++ = (u64)t->pkt_too_big;
        *data++ = (u64)t->pkt_mismatch;
        *data++ = (u64)t->cmdQ_full[0];
        *data++ = (u64)t->cmdQ_full[1];
        *data++ = (u64)t->tx_ipfrags;
        *data++ = (u64)t->tx_reg_pkts;
        *data++ = (u64)t->tx_lso_pkts;
        *data++ = (u64)t->tx_do_cksum;
}

static inline void reg_block_dump(struct adapter *ap, void *buf,
                                  unsigned int start, unsigned int end)
{
        u32 *p = buf + start;

        for ( ; start <= end; start += sizeof(u32))
                *p++ = readl(ap->regs + start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
                     void *buf)
{
        struct adapter *ap = dev->priv;

        /*
         * Version scheme: bits 0..9: chip version, bits 10..15: chip revision
         */
        regs->version = 2;

        memset(buf, 0, T2_REGMAP_SIZE);
        reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER);
}

static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct adapter *adapter = dev->priv;
        struct port_info *p = &adapter->port[dev->if_port];

        cmd->supported = p->link_config.supported;
        cmd->advertising = p->link_config.advertising;

        if (netif_carrier_ok(dev)) {
                cmd->speed = p->link_config.speed;
                cmd->duplex = p->link_config.duplex;
        } else {
                cmd->speed = -1;
                cmd->duplex = -1;
        }

        cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
        cmd->phy_address = p->phy->addr;
        cmd->transceiver = XCVR_EXTERNAL;
        cmd->autoneg = p->link_config.autoneg;
        cmd->maxtxpkt = 0;
        cmd->maxrxpkt = 0;
        return 0;
}

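/*
 * Translate a speed/duplex pair into the corresponding SUPPORTED_* capability
 * bit, or 0 if the combination cannot be expressed.
 */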
static int speed_duplex_to_caps(int speed, int duplex)
{
        int cap = 0;

        switch (speed) {
        case SPEED_10:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_10baseT_Full;
                else
                        cap = SUPPORTED_10baseT_Half;
                break;
        case SPEED_100:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_100baseT_Full;
                else
                        cap = SUPPORTED_100baseT_Half;
                break;
        case SPEED_1000:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_1000baseT_Full;
                else
                        cap = SUPPORTED_1000baseT_Half;
                break;
        case SPEED_10000:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_10000baseT_Full;
        }
        return cap;
}

#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
                      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
                      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
                      ADVERTISED_10000baseT_Full)

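/*
 * ethtool set_settings handler.  Ports without autonegotiation support cannot
 * have their speed or duplex changed, and forcing 1Gbps is rejected.
 */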
static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct adapter *adapter = dev->priv;
        struct port_info *p = &adapter->port[dev->if_port];
        struct link_config *lc = &p->link_config;

        if (!(lc->supported & SUPPORTED_Autoneg))
                return -EOPNOTSUPP;             /* can't change speed/duplex */

        if (cmd->autoneg == AUTONEG_DISABLE) {
                int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

                if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
                        return -EINVAL;
                lc->requested_speed = cmd->speed;
                lc->requested_duplex = cmd->duplex;
                lc->advertising = 0;
        } else {
                cmd->advertising &= ADVERTISED_MASK;
                if (cmd->advertising & (cmd->advertising - 1))
                        cmd->advertising = lc->supported;
                cmd->advertising &= lc->supported;
                if (!cmd->advertising)
                        return -EINVAL;
                lc->requested_speed = SPEED_INVALID;
                lc->requested_duplex = DUPLEX_INVALID;
                lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
        }
        lc->autoneg = cmd->autoneg;
        if (netif_running(dev))
                t1_link_start(p->phy, p->mac, lc);
        return 0;
}

static void get_pauseparam(struct net_device *dev,
                           struct ethtool_pauseparam *epause)
{
        struct adapter *adapter = dev->priv;
        struct port_info *p = &adapter->port[dev->if_port];

        epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
        epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
        epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
                          struct ethtool_pauseparam *epause)
{
        struct adapter *adapter = dev->priv;
        struct port_info *p = &adapter->port[dev->if_port];
        struct link_config *lc = &p->link_config;

        if (epause->autoneg == AUTONEG_DISABLE)
                lc->requested_fc = 0;
        else if (lc->supported & SUPPORTED_Autoneg)
                lc->requested_fc = PAUSE_AUTONEG;
        else
                return -EINVAL;

        if (epause->rx_pause)
                lc->requested_fc |= PAUSE_RX;
        if (epause->tx_pause)
                lc->requested_fc |= PAUSE_TX;
        if (lc->autoneg == AUTONEG_ENABLE) {
                if (netif_running(dev))
                        t1_link_start(p->phy, p->mac, lc);
        } else {
                lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
                if (netif_running(dev))
                        p->mac->ops->set_speed_duplex_fc(p->mac, -1, -1,
                                                         lc->fc);
        }
        return 0;
}

static u32 get_rx_csum(struct net_device *dev)
{
        struct adapter *adapter = dev->priv;

        return (adapter->flags & RX_CSUM_ENABLED) != 0;
}

static int set_rx_csum(struct net_device *dev, u32 data)
{
        struct adapter *adapter = dev->priv;

        if (data)
                adapter->flags |= RX_CSUM_ENABLED;
        else
                adapter->flags &= ~RX_CSUM_ENABLED;
        return 0;
}

static int set_tso(struct net_device *dev, u32 value)
{
        struct adapter *adapter = dev->priv;

        if (!(adapter->flags & TSO_CAPABLE))
                return value ? -EOPNOTSUPP : 0;
        return ethtool_op_set_tso(dev, value);
}

static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
        struct adapter *adapter = dev->priv;
        int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;

        e->rx_max_pending = MAX_RX_BUFFERS;
        e->rx_mini_max_pending = 0;
        e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
        e->tx_max_pending = MAX_CMDQ_ENTRIES;

        e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl];
        e->rx_mini_pending = 0;
        e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl];
        e->tx_pending = adapter->params.sge.cmdQ_size[0];
}

static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
        struct adapter *adapter = dev->priv;
        int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;

        if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending ||
            e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
            e->tx_pending > MAX_CMDQ_ENTRIES ||
            e->rx_pending < MIN_FL_ENTRIES ||
            e->rx_jumbo_pending < MIN_FL_ENTRIES ||
            e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1))
                return -EINVAL;

        if (adapter->flags & FULL_INIT_DONE)
                return -EBUSY;

        adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
        adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
        adapter->params.sge.cmdQ_size[0] = e->tx_pending;
        adapter->params.sge.cmdQ_size[1] = e->tx_pending > MAX_CMDQ1_ENTRIES ?
                MAX_CMDQ1_ENTRIES : e->tx_pending;
        return 0;
}

static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
        struct adapter *adapter = dev->priv;

        /*
         * If RX coalescing is requested we use NAPI, otherwise interrupts.
         * This choice can be made only when all ports and the TOE are off.
         */
        if (adapter->open_device_map == 0)
                adapter->params.sge.polling = c->use_adaptive_rx_coalesce;

        if (adapter->params.sge.polling) {
                adapter->params.sge.rx_coalesce_usecs = 0;
        } else {
                adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
        }
        adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
        adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
        t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
        return 0;
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
        struct adapter *adapter = dev->priv;

        c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs;
        c->rate_sample_interval = adapter->params.sge.sample_interval_usecs;
        c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable;
        return 0;
}

static int get_eeprom_len(struct net_device *dev)
{
        return EEPROM_SIZE;
}

#define EEPROM_MAGIC(ap) \
        (PCI_VENDOR_ID_CHELSIO | ((ap)->params.chip_version << 16))

static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
                      u8 *data)
{
        int i;
        u8 buf[EEPROM_SIZE] __attribute__((aligned(4)));
        struct adapter *adapter = dev->priv;

        e->magic = EEPROM_MAGIC(adapter);
        for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32))
                t1_seeprom_read(adapter, i, (u32 *)&buf[i]);
        memcpy(data, buf + e->offset, e->len);
        return 0;
}

static struct ethtool_ops t1_ethtool_ops = {
        .get_settings      = get_settings,
        .set_settings      = set_settings,
        .get_drvinfo       = get_drvinfo,
        .get_msglevel      = get_msglevel,
        .set_msglevel      = set_msglevel,
        .get_ringparam     = get_sge_param,
        .set_ringparam     = set_sge_param,
        .get_coalesce      = get_coalesce,
        .set_coalesce      = set_coalesce,
        .get_eeprom_len    = get_eeprom_len,
        .get_eeprom        = get_eeprom,
        .get_pauseparam    = get_pauseparam,
        .set_pauseparam    = set_pauseparam,
        .get_rx_csum       = get_rx_csum,
        .set_rx_csum       = set_rx_csum,
        .get_tx_csum       = ethtool_op_get_tx_csum,
        .set_tx_csum       = ethtool_op_set_tx_csum,
        .get_sg            = ethtool_op_get_sg,
        .set_sg            = ethtool_op_set_sg,
        .get_link          = ethtool_op_get_link,
        .get_strings       = get_strings,
        .get_stats_count   = get_stats_count,
        .get_ethtool_stats = get_stats,
        .get_regs_len      = get_regs_len,
        .get_regs          = get_regs,
        .get_tso           = ethtool_op_get_tso,
        .set_tso           = set_tso,
};

static void cxgb_proc_cleanup(struct adapter *adapter,
                              struct proc_dir_entry *dir)
{
        const char *name;
        name = adapter->name;
        remove_proc_entry(name, dir);
}
//#define chtoe_setup_toedev(adapter) NULL
#define update_mtu_tab(adapter)
#define write_smt_entry(adapter, idx)

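/*
 * ioctl handler: implements the MII ioctls (SIOCGMIIPHY, SIOCGMIIREG,
 * SIOCSMIIREG) by forwarding them to the port's PHY through its
 * mdio_read/mdio_write operations.
 */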
static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
        struct adapter *adapter = dev->priv;
        struct mii_ioctl_data *data = if_mii(req);

        switch (cmd) {
        case SIOCGMIIPHY:
                data->phy_id = adapter->port[dev->if_port].phy->addr;
                /* FALLTHRU */
        case SIOCGMIIREG: {
                struct cphy *phy = adapter->port[dev->if_port].phy;
                u32 val;

                if (!phy->mdio_read)
                        return -EOPNOTSUPP;
                phy->mdio_read(adapter, data->phy_id, 0, data->reg_num & 0x1f,
                               &val);
                data->val_out = val;
                break;
        }
        case SIOCSMIIREG: {
                struct cphy *phy = adapter->port[dev->if_port].phy;

                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                if (!phy->mdio_write)
                        return -EOPNOTSUPP;
                phy->mdio_write(adapter, data->phy_id, 0, data->reg_num & 0x1f,
                                data->val_in);
                break;
        }

        default:
                return -EOPNOTSUPP;
        }
        return 0;
}

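/*
 * Change a port's MTU.  The new value is handed to the MAC first and only
 * committed to the net_device if the MAC accepts it.
 */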
static int t1_change_mtu(struct net_device *dev, int new_mtu)
{
        int ret;
        struct adapter *adapter = dev->priv;
        struct cmac *mac = adapter->port[dev->if_port].mac;

        if (!mac->ops->set_mtu)
                return -EOPNOTSUPP;
        if (new_mtu < 68)
                return -EINVAL;
        if ((ret = mac->ops->set_mtu(mac, new_mtu)))
                return ret;
        dev->mtu = new_mtu;
        return 0;
}

static int t1_set_mac_addr(struct net_device *dev, void *p)
{
        struct adapter *adapter = dev->priv;
        struct cmac *mac = adapter->port[dev->if_port].mac;
        struct sockaddr *addr = p;

        if (!mac->ops->macaddress_set)
                return -EOPNOTSUPP;

        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
        mac->ops->macaddress_set(mac, dev->dev_addr);
        return 0;
}

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
static void vlan_rx_register(struct net_device *dev,
                             struct vlan_group *grp)
{
        struct adapter *adapter = dev->priv;

        spin_lock_irq(&adapter->async_lock);
        adapter->vlan_grp = grp;
        t1_set_vlan_accel(adapter, grp != NULL);
        spin_unlock_irq(&adapter->async_lock);
}

static void vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
        struct adapter *adapter = dev->priv;

        spin_lock_irq(&adapter->async_lock);
        if (adapter->vlan_grp)
                adapter->vlan_grp->vlan_devices[vid] = NULL;
        spin_unlock_irq(&adapter->async_lock);
}
#endif

#ifdef CONFIG_NET_POLL_CONTROLLER
static void t1_netpoll(struct net_device *dev)
{
        unsigned long flags;
        struct adapter *adapter = dev->priv;

        local_irq_save(flags);
        t1_select_intr_handler(adapter)(adapter->pdev->irq, adapter, NULL);
        local_irq_restore(flags);
}
#endif

/*
 * Periodic accumulation of MAC statistics.  This is used only if the MAC
 * does not have any other way to prevent stats counter overflow.
 */
static void mac_stats_task(void *data)
{
        int i;
        struct adapter *adapter = data;

        for_each_port(adapter, i) {
                struct port_info *p = &adapter->port[i];

                if (netif_running(p->dev))
                        p->mac->ops->statistics_update(p->mac,
                                                       MAC_STATS_UPDATE_FAST);
        }

        /* Schedule the next statistics update if any port is active. */
        spin_lock(&adapter->work_lock);
        if (adapter->open_device_map & PORT_MASK)
                schedule_mac_stats_update(adapter,
                                          adapter->params.stats_update_period);
        spin_unlock(&adapter->work_lock);
}

/*
 * Processes elmer0 external interrupts in process context.
 */
static void ext_intr_task(void *data)
{
        struct adapter *adapter = data;

        elmer0_ext_intr_handler(adapter);

        /* Now reenable external interrupts */
        spin_lock_irq(&adapter->async_lock);
        adapter->slow_intr_mask |= F_PL_INTR_EXT;
        writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
        writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
               adapter->regs + A_PL_ENABLE);
        spin_unlock_irq(&adapter->async_lock);
}

/*
 * Interrupt-context handler for elmer0 external interrupts.
 */
void t1_elmer0_ext_intr(struct adapter *adapter)
{
        /*
         * Schedule a task to handle external interrupts as we require
         * a process context.  We disable EXT interrupts in the interim
         * and let the task reenable them when it's done.
         */
        adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
        writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
               adapter->regs + A_PL_ENABLE);
        schedule_work(&adapter->ext_intr_handler_task);
}

void t1_fatal_err(struct adapter *adapter)
{
        if (adapter->flags & FULL_INIT_DONE) {
                t1_sge_stop(adapter->sge);
                t1_interrupts_disable(adapter);
        }
        CH_ALERT("%s: encountered fatal error, operation suspended\n",
                 adapter->name);
}

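/*
 * PCI probe routine: map the device, allocate one net_device per port, set up
 * the adapter-wide state on the first port, and register as many of the ports
 * as possible.
 */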
static int __devinit init_one(struct pci_dev *pdev,
                              const struct pci_device_id *ent)
{
        static int version_printed;

        int i, err, pci_using_dac = 0;
        unsigned long mmio_start, mmio_len;
        const struct board_info *bi;
        struct adapter *adapter = NULL;
        struct port_info *pi;

        if (!version_printed) {
                printk(KERN_INFO "%s - version %s\n", DRV_DESCRIPTION,
                       DRV_VERSION);
                ++version_printed;
        }

        err = pci_enable_device(pdev);
        if (err)
                return err;

        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
                CH_ERR("%s: cannot find PCI device memory base address\n",
                       pci_name(pdev));
                err = -ENODEV;
                goto out_disable_pdev;
        }

        if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
                pci_using_dac = 1;

                if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
                        CH_ERR("%s: unable to obtain 64-bit DMA for "
                               "consistent allocations\n", pci_name(pdev));
                        err = -ENODEV;
                        goto out_disable_pdev;
                }

        } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
                CH_ERR("%s: no usable DMA configuration\n", pci_name(pdev));
                goto out_disable_pdev;
        }

        err = pci_request_regions(pdev, DRV_NAME);
        if (err) {
                CH_ERR("%s: cannot obtain PCI resources\n", pci_name(pdev));
                goto out_disable_pdev;
        }

        pci_set_master(pdev);

        mmio_start = pci_resource_start(pdev, 0);
        mmio_len = pci_resource_len(pdev, 0);
        bi = t1_get_board_info(ent->driver_data);

        for (i = 0; i < bi->port_number; ++i) {
                struct net_device *netdev;

                netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter));
                if (!netdev) {
                        err = -ENOMEM;
                        goto out_free_dev;
                }

                SET_MODULE_OWNER(netdev);
                SET_NETDEV_DEV(netdev, &pdev->dev);

                if (!adapter) {
                        adapter = netdev->priv;
                        adapter->pdev = pdev;
                        adapter->port[0].dev = netdev;  /* so we don't leak it */

                        adapter->regs = ioremap(mmio_start, mmio_len);
                        if (!adapter->regs) {
                                CH_ERR("%s: cannot map device registers\n",
                                       pci_name(pdev));
                                err = -ENOMEM;
                                goto out_free_dev;
                        }

                        if (t1_get_board_rev(adapter, bi, &adapter->params)) {
                                err = -ENODEV;    /* Can't handle this chip rev */
                                goto out_free_dev;
                        }

                        adapter->name = pci_name(pdev);
                        adapter->msg_enable = dflt_msg_enable;
                        adapter->mmio_len = mmio_len;

                        init_MUTEX(&adapter->mib_mutex);
                        spin_lock_init(&adapter->tpi_lock);
                        spin_lock_init(&adapter->work_lock);
                        spin_lock_init(&adapter->async_lock);

                        INIT_WORK(&adapter->ext_intr_handler_task,
                                  ext_intr_task, adapter);
                        INIT_WORK(&adapter->stats_update_task, mac_stats_task,
                                  adapter);
#ifdef work_struct
                        init_timer(&adapter->stats_update_timer);
                        adapter->stats_update_timer.function = mac_stats_timer;
                        adapter->stats_update_timer.data =
                                (unsigned long)adapter;
#endif

                        pci_set_drvdata(pdev, netdev);
                }

                pi = &adapter->port[i];
                pi->dev = netdev;
                netif_carrier_off(netdev);
                netdev->irq = pdev->irq;
                netdev->if_port = i;
                netdev->mem_start = mmio_start;
                netdev->mem_end = mmio_start + mmio_len - 1;
                netdev->priv = adapter;
                netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
                netdev->features |= NETIF_F_LLTX;

                adapter->flags |= RX_CSUM_ENABLED | TCP_CSUM_CAPABLE;
                if (pci_using_dac)
                        netdev->features |= NETIF_F_HIGHDMA;
                if (vlan_tso_capable(adapter)) {
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
                        adapter->flags |= VLAN_ACCEL_CAPABLE;
                        netdev->features |=
                                NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
                        netdev->vlan_rx_register = vlan_rx_register;
                        netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;
#endif
                        adapter->flags |= TSO_CAPABLE;
                        netdev->features |= NETIF_F_TSO;
                }

                netdev->open = cxgb_open;
                netdev->stop = cxgb_close;
                netdev->hard_start_xmit = t1_start_xmit;
                netdev->hard_header_len += (adapter->flags & TSO_CAPABLE) ?
                        sizeof(struct cpl_tx_pkt_lso) :
                        sizeof(struct cpl_tx_pkt);
                netdev->get_stats = t1_get_stats;
                netdev->set_multicast_list = t1_set_rxmode;
                netdev->do_ioctl = t1_ioctl;
                netdev->change_mtu = t1_change_mtu;
                netdev->set_mac_address = t1_set_mac_addr;
#ifdef CONFIG_NET_POLL_CONTROLLER
                netdev->poll_controller = t1_netpoll;
#endif
                netdev->weight = 64;

                SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
        }

        if (t1_init_sw_modules(adapter, bi) < 0) {
                err = -ENODEV;
                goto out_free_dev;
        }

        /*
         * The card is now ready to go.  If any errors occur during device
         * registration we do not fail the whole card but rather proceed only
         * with the ports we manage to register successfully.  However we must
         * register at least one net device.
         */
        for (i = 0; i < bi->port_number; ++i) {
                err = register_netdev(adapter->port[i].dev);
                if (err)
                        CH_WARN("%s: cannot register net device %s, skipping\n",
                                pci_name(pdev), adapter->port[i].dev->name);
                else {
                        /*
                         * Change the name we use for messages to the name of
                         * the first successfully registered interface.
                         */
                        if (!adapter->registered_device_map)
                                adapter->name = adapter->port[i].dev->name;

                        __set_bit(i, &adapter->registered_device_map);
                }
        }
        if (!adapter->registered_device_map) {
                CH_ERR("%s: could not register any net devices\n",
                       pci_name(pdev));
                goto out_release_adapter_res;
        }

        printk(KERN_INFO "%s: %s (rev %d), %s %dMHz/%d-bit\n", adapter->name,
               bi->desc, adapter->params.chip_revision,
               adapter->params.pci.is_pcix ? "PCIX" : "PCI",
               adapter->params.pci.speed, adapter->params.pci.width);
        return 0;

 out_release_adapter_res:
        t1_free_sw_modules(adapter);
 out_free_dev:
        if (adapter) {
                if (adapter->regs) iounmap(adapter->regs);
                for (i = bi->port_number - 1; i >= 0; --i)
                        if (adapter->port[i].dev) {
                                cxgb_proc_cleanup(adapter, proc_root_driver);
                                kfree(adapter->port[i].dev);
                        }
        }
        pci_release_regions(pdev);
 out_disable_pdev:
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
        return err;
}

static inline void t1_sw_reset(struct pci_dev *pdev)
{
        pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3);
        pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0);
}

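/*
 * PCI remove routine: unregister the ports, free the software state and the
 * per-port net_devices, release PCI resources, and finally reset the chip
 * (t1_sw_reset above cycles the device's power-management control register).
 */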
static void __devexit remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);

        if (dev) {
                int i;
                struct adapter *adapter = dev->priv;

                for_each_port(adapter, i)
                        if (test_bit(i, &adapter->registered_device_map))
                                unregister_netdev(adapter->port[i].dev);

                t1_free_sw_modules(adapter);
                iounmap(adapter->regs);
                while (--i >= 0)
                        if (adapter->port[i].dev) {
                                cxgb_proc_cleanup(adapter, proc_root_driver);
                                kfree(adapter->port[i].dev);
                        }

                pci_release_regions(pdev);
                pci_disable_device(pdev);
                pci_set_drvdata(pdev, NULL);
                t1_sw_reset(pdev);
        }
}

static struct pci_driver driver = {
        .name     = DRV_NAME,
        .id_table = t1_pci_tbl,
        .probe    = init_one,
        .remove   = __devexit_p(remove_one),
};

static int __init t1_init_module(void)
{
        return pci_module_init(&driver);
}

static void __exit t1_cleanup_module(void)
{
        pci_unregister_driver(&driver);
}

module_init(t1_init_module);
module_exit(t1_cleanup_module);