cxgb3 - fix ethtool cmd on multiple queues port
[linux-2.6.git] / drivers / net / cxgb3 / cxgb3_main.c
blobeb0a4e0682002b068be19beadadbe2d6620b0486
1 /*
2 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
32 #include <linux/module.h>
33 #include <linux/moduleparam.h>
34 #include <linux/init.h>
35 #include <linux/pci.h>
36 #include <linux/dma-mapping.h>
37 #include <linux/netdevice.h>
38 #include <linux/etherdevice.h>
39 #include <linux/if_vlan.h>
40 #include <linux/mii.h>
41 #include <linux/sockios.h>
42 #include <linux/workqueue.h>
43 #include <linux/proc_fs.h>
44 #include <linux/rtnetlink.h>
45 #include <asm/uaccess.h>
47 #include "common.h"
48 #include "cxgb3_ioctl.h"
49 #include "regs.h"
50 #include "cxgb3_offload.h"
51 #include "version.h"
53 #include "cxgb3_ctl_defs.h"
54 #include "t3_cpl.h"
55 #include "firmware_exports.h"
57 enum {
58 MAX_TXQ_ENTRIES = 16384,
59 MAX_CTRL_TXQ_ENTRIES = 1024,
60 MAX_RSPQ_ENTRIES = 16384,
61 MAX_RX_BUFFERS = 16384,
62 MAX_RX_JUMBO_BUFFERS = 16384,
63 MIN_TXQ_ENTRIES = 4,
64 MIN_CTRL_TXQ_ENTRIES = 4,
65 MIN_RSPQ_ENTRIES = 32,
66 MIN_FL_ENTRIES = 32
69 #define PORT_MASK ((1 << MAX_NPORTS) - 1)
71 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
72 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
73 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
75 #define EEPROM_MAGIC 0x38E2F10C
77 #define CH_DEVICE(devid, ssid, idx) \
78 { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }
80 static const struct pci_device_id cxgb3_pci_tbl[] = {
81 CH_DEVICE(0x20, 1, 0), /* PE9000 */
82 CH_DEVICE(0x21, 1, 1), /* T302E */
83 CH_DEVICE(0x22, 1, 2), /* T310E */
84 CH_DEVICE(0x23, 1, 3), /* T320X */
85 CH_DEVICE(0x24, 1, 1), /* T302X */
86 CH_DEVICE(0x25, 1, 3), /* T320E */
87 CH_DEVICE(0x26, 1, 2), /* T310X */
88 CH_DEVICE(0x30, 1, 2), /* T3B10 */
89 CH_DEVICE(0x31, 1, 3), /* T3B20 */
90 CH_DEVICE(0x32, 1, 1), /* T3B02 */
91 {0,}
94 MODULE_DESCRIPTION(DRV_DESC);
95 MODULE_AUTHOR("Chelsio Communications");
96 MODULE_LICENSE("Dual BSD/GPL");
97 MODULE_VERSION(DRV_VERSION);
98 MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);
100 static int dflt_msg_enable = DFLT_MSG_ENABLE;
102 module_param(dflt_msg_enable, int, 0644);
103 MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
106 * The driver uses the best interrupt scheme available on a platform in the
107 * order MSI-X, MSI, legacy pin interrupts. This parameter determines which
108 * of these schemes the driver may consider as follows:
110 * msi = 2: choose from among all three options
111 * msi = 1: only consider MSI and pin interrupts
112 * msi = 0: force pin interrupts
114 static int msi = 2;
116 module_param(msi, int, 0644);
117 MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
120 * The driver enables offload as a default.
121 * To disable it, use ofld_disable = 1.
124 static int ofld_disable = 0;
126 module_param(ofld_disable, int, 0644);
127 MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
130 * We have work elements that we need to cancel when an interface is taken
131 * down. Normally the work elements would be executed by keventd but that
132 * can deadlock because of linkwatch. If our close method takes the rtnl
133 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
134 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
135 * for our work to complete. Get our own work queue to solve this.
137 static struct workqueue_struct *cxgb3_wq;
140 * link_report - show link status and link speed/duplex
141 * @p: the port whose settings are to be reported
143 * Shows the link status, speed, and duplex of a port.
145 static void link_report(struct net_device *dev)
147 if (!netif_carrier_ok(dev))
148 printk(KERN_INFO "%s: link down\n", dev->name);
149 else {
150 const char *s = "10Mbps";
151 const struct port_info *p = netdev_priv(dev);
153 switch (p->link_config.speed) {
154 case SPEED_10000:
155 s = "10Gbps";
156 break;
157 case SPEED_1000:
158 s = "1000Mbps";
159 break;
160 case SPEED_100:
161 s = "100Mbps";
162 break;
165 printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
166 p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
171 * t3_os_link_changed - handle link status changes
172 * @adapter: the adapter associated with the link change
173 * @port_id: the port index whose limk status has changed
174 * @link_stat: the new status of the link
175 * @speed: the new speed setting
176 * @duplex: the new duplex setting
177 * @pause: the new flow-control setting
179 * This is the OS-dependent handler for link status changes. The OS
180 * neutral handler takes care of most of the processing for these events,
181 * then calls this handler for any OS-specific processing.
183 void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
184 int speed, int duplex, int pause)
186 struct net_device *dev = adapter->port[port_id];
188 /* Skip changes from disabled ports. */
189 if (!netif_running(dev))
190 return;
192 if (link_stat != netif_carrier_ok(dev)) {
193 if (link_stat)
194 netif_carrier_on(dev);
195 else
196 netif_carrier_off(dev);
197 link_report(dev);
201 static void cxgb_set_rxmode(struct net_device *dev)
203 struct t3_rx_mode rm;
204 struct port_info *pi = netdev_priv(dev);
206 init_rx_mode(&rm, dev, dev->mc_list);
207 t3_mac_set_rx_mode(&pi->mac, &rm);
211 * link_start - enable a port
212 * @dev: the device to enable
214 * Performs the MAC and PHY actions needed to enable a port.
216 static void link_start(struct net_device *dev)
218 struct t3_rx_mode rm;
219 struct port_info *pi = netdev_priv(dev);
220 struct cmac *mac = &pi->mac;
222 init_rx_mode(&rm, dev, dev->mc_list);
223 t3_mac_reset(mac);
224 t3_mac_set_mtu(mac, dev->mtu);
225 t3_mac_set_address(mac, 0, dev->dev_addr);
226 t3_mac_set_rx_mode(mac, &rm);
227 t3_link_start(&pi->phy, mac, &pi->link_config);
228 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
231 static inline void cxgb_disable_msi(struct adapter *adapter)
233 if (adapter->flags & USING_MSIX) {
234 pci_disable_msix(adapter->pdev);
235 adapter->flags &= ~USING_MSIX;
236 } else if (adapter->flags & USING_MSI) {
237 pci_disable_msi(adapter->pdev);
238 adapter->flags &= ~USING_MSI;
243 * Interrupt handler for asynchronous events used with MSI-X.
245 static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
247 t3_slow_intr_handler(cookie);
248 return IRQ_HANDLED;
252 * Name the MSI-X interrupts.
254 static void name_msix_vecs(struct adapter *adap)
256 int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;
258 snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
259 adap->msix_info[0].desc[n] = 0;
261 for_each_port(adap, j) {
262 struct net_device *d = adap->port[j];
263 const struct port_info *pi = netdev_priv(d);
265 for (i = 0; i < pi->nqsets; i++, msi_idx++) {
266 snprintf(adap->msix_info[msi_idx].desc, n,
267 "%s (queue %d)", d->name, i);
268 adap->msix_info[msi_idx].desc[n] = 0;
273 static int request_msix_data_irqs(struct adapter *adap)
275 int i, j, err, qidx = 0;
277 for_each_port(adap, i) {
278 int nqsets = adap2pinfo(adap, i)->nqsets;
280 for (j = 0; j < nqsets; ++j) {
281 err = request_irq(adap->msix_info[qidx + 1].vec,
282 t3_intr_handler(adap,
283 adap->sge.qs[qidx].
284 rspq.polling), 0,
285 adap->msix_info[qidx + 1].desc,
286 &adap->sge.qs[qidx]);
287 if (err) {
288 while (--qidx >= 0)
289 free_irq(adap->msix_info[qidx + 1].vec,
290 &adap->sge.qs[qidx]);
291 return err;
293 qidx++;
296 return 0;
300 * setup_rss - configure RSS
301 * @adap: the adapter
303 * Sets up RSS to distribute packets to multiple receive queues. We
304 * configure the RSS CPU lookup table to distribute to the number of HW
305 * receive queues, and the response queue lookup table to narrow that
306 * down to the response queues actually configured for each port.
307 * We always configure the RSS mapping for two ports since the mapping
308 * table has plenty of entries.
310 static void setup_rss(struct adapter *adap)
312 int i;
313 unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
314 unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
315 u8 cpus[SGE_QSETS + 1];
316 u16 rspq_map[RSS_TABLE_SIZE];
318 for (i = 0; i < SGE_QSETS; ++i)
319 cpus[i] = i;
320 cpus[SGE_QSETS] = 0xff; /* terminator */
322 for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
323 rspq_map[i] = i % nq0;
324 rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
327 t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
328 F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
329 V_RRCPLCPUSIZE(6), cpus, rspq_map);
333 * If we have multiple receive queues per port serviced by NAPI we need one
334 * netdevice per queue as NAPI operates on netdevices. We already have one
335 * netdevice, namely the one associated with the interface, so we use dummy
336 * ones for any additional queues. Note that these netdevices exist purely
337 * so that NAPI has something to work with, they do not represent network
338 * ports and are not registered.
340 static int init_dummy_netdevs(struct adapter *adap)
342 int i, j, dummy_idx = 0;
343 struct net_device *nd;
345 for_each_port(adap, i) {
346 struct net_device *dev = adap->port[i];
347 const struct port_info *pi = netdev_priv(dev);
349 for (j = 0; j < pi->nqsets - 1; j++) {
350 if (!adap->dummy_netdev[dummy_idx]) {
351 nd = alloc_netdev(0, "", ether_setup);
352 if (!nd)
353 goto free_all;
355 nd->priv = adap;
356 nd->weight = 64;
357 set_bit(__LINK_STATE_START, &nd->state);
358 adap->dummy_netdev[dummy_idx] = nd;
360 strcpy(adap->dummy_netdev[dummy_idx]->name, dev->name);
361 dummy_idx++;
364 return 0;
366 free_all:
367 while (--dummy_idx >= 0) {
368 free_netdev(adap->dummy_netdev[dummy_idx]);
369 adap->dummy_netdev[dummy_idx] = NULL;
371 return -ENOMEM;
375 * Wait until all NAPI handlers are descheduled. This includes the handlers of
376 * both netdevices representing interfaces and the dummy ones for the extra
377 * queues.
379 static void quiesce_rx(struct adapter *adap)
381 int i;
382 struct net_device *dev;
384 for_each_port(adap, i) {
385 dev = adap->port[i];
386 while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
387 msleep(1);
390 for (i = 0; i < ARRAY_SIZE(adap->dummy_netdev); i++) {
391 dev = adap->dummy_netdev[i];
392 if (dev)
393 while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
394 msleep(1);
399 * setup_sge_qsets - configure SGE Tx/Rx/response queues
400 * @adap: the adapter
402 * Determines how many sets of SGE queues to use and initializes them.
403 * We support multiple queue sets per port if we have MSI-X, otherwise
404 * just one queue set per port.
406 static int setup_sge_qsets(struct adapter *adap)
408 int i, j, err, irq_idx = 0, qset_idx = 0, dummy_dev_idx = 0;
409 unsigned int ntxq = is_offload(adap) ? SGE_TXQ_PER_SET : 1;
411 if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
412 irq_idx = -1;
414 for_each_port(adap, i) {
415 struct net_device *dev = adap->port[i];
416 const struct port_info *pi = netdev_priv(dev);
418 for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
419 err = t3_sge_alloc_qset(adap, qset_idx, 1,
420 (adap->flags & USING_MSIX) ? qset_idx + 1 :
421 irq_idx,
422 &adap->params.sge.qset[qset_idx], ntxq,
423 j == 0 ? dev :
424 adap-> dummy_netdev[dummy_dev_idx++]);
425 if (err) {
426 t3_free_sge_resources(adap);
427 return err;
432 return 0;
435 static ssize_t attr_show(struct device *d, struct device_attribute *attr,
436 char *buf,
437 ssize_t(*format) (struct net_device *, char *))
439 ssize_t len;
441 /* Synchronize with ioctls that may shut down the device */
442 rtnl_lock();
443 len = (*format) (to_net_dev(d), buf);
444 rtnl_unlock();
445 return len;
448 static ssize_t attr_store(struct device *d, struct device_attribute *attr,
449 const char *buf, size_t len,
450 ssize_t(*set) (struct net_device *, unsigned int),
451 unsigned int min_val, unsigned int max_val)
453 char *endp;
454 ssize_t ret;
455 unsigned int val;
457 if (!capable(CAP_NET_ADMIN))
458 return -EPERM;
460 val = simple_strtoul(buf, &endp, 0);
461 if (endp == buf || val < min_val || val > max_val)
462 return -EINVAL;
464 rtnl_lock();
465 ret = (*set) (to_net_dev(d), val);
466 if (!ret)
467 ret = len;
468 rtnl_unlock();
469 return ret;
472 #define CXGB3_SHOW(name, val_expr) \
473 static ssize_t format_##name(struct net_device *dev, char *buf) \
475 struct adapter *adap = dev->priv; \
476 return sprintf(buf, "%u\n", val_expr); \
478 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
479 char *buf) \
481 return attr_show(d, attr, buf, format_##name); \
484 static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
486 struct adapter *adap = dev->priv;
488 if (adap->flags & FULL_INIT_DONE)
489 return -EBUSY;
490 if (val && adap->params.rev == 0)
491 return -EINVAL;
492 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers)
493 return -EINVAL;
494 adap->params.mc5.nfilters = val;
495 return 0;
498 static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
499 const char *buf, size_t len)
501 return attr_store(d, attr, buf, len, set_nfilters, 0, ~0);
504 static ssize_t set_nservers(struct net_device *dev, unsigned int val)
506 struct adapter *adap = dev->priv;
508 if (adap->flags & FULL_INIT_DONE)
509 return -EBUSY;
510 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters)
511 return -EINVAL;
512 adap->params.mc5.nservers = val;
513 return 0;
516 static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
517 const char *buf, size_t len)
519 return attr_store(d, attr, buf, len, set_nservers, 0, ~0);
522 #define CXGB3_ATTR_R(name, val_expr) \
523 CXGB3_SHOW(name, val_expr) \
524 static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
526 #define CXGB3_ATTR_RW(name, val_expr, store_method) \
527 CXGB3_SHOW(name, val_expr) \
528 static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)
530 CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
531 CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
532 CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);
534 static struct attribute *cxgb3_attrs[] = {
535 &dev_attr_cam_size.attr,
536 &dev_attr_nfilters.attr,
537 &dev_attr_nservers.attr,
538 NULL
541 static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
543 static ssize_t tm_attr_show(struct device *d, struct device_attribute *attr,
544 char *buf, int sched)
546 ssize_t len;
547 unsigned int v, addr, bpt, cpt;
548 struct adapter *adap = to_net_dev(d)->priv;
550 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
551 rtnl_lock();
552 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
553 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
554 if (sched & 1)
555 v >>= 16;
556 bpt = (v >> 8) & 0xff;
557 cpt = v & 0xff;
558 if (!cpt)
559 len = sprintf(buf, "disabled\n");
560 else {
561 v = (adap->params.vpd.cclk * 1000) / cpt;
562 len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
564 rtnl_unlock();
565 return len;
568 static ssize_t tm_attr_store(struct device *d, struct device_attribute *attr,
569 const char *buf, size_t len, int sched)
571 char *endp;
572 ssize_t ret;
573 unsigned int val;
574 struct adapter *adap = to_net_dev(d)->priv;
576 if (!capable(CAP_NET_ADMIN))
577 return -EPERM;
579 val = simple_strtoul(buf, &endp, 0);
580 if (endp == buf || val > 10000000)
581 return -EINVAL;
583 rtnl_lock();
584 ret = t3_config_sched(adap, val, sched);
585 if (!ret)
586 ret = len;
587 rtnl_unlock();
588 return ret;
591 #define TM_ATTR(name, sched) \
592 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
593 char *buf) \
595 return tm_attr_show(d, attr, buf, sched); \
597 static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
598 const char *buf, size_t len) \
600 return tm_attr_store(d, attr, buf, len, sched); \
602 static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
604 TM_ATTR(sched0, 0);
605 TM_ATTR(sched1, 1);
606 TM_ATTR(sched2, 2);
607 TM_ATTR(sched3, 3);
608 TM_ATTR(sched4, 4);
609 TM_ATTR(sched5, 5);
610 TM_ATTR(sched6, 6);
611 TM_ATTR(sched7, 7);
613 static struct attribute *offload_attrs[] = {
614 &dev_attr_sched0.attr,
615 &dev_attr_sched1.attr,
616 &dev_attr_sched2.attr,
617 &dev_attr_sched3.attr,
618 &dev_attr_sched4.attr,
619 &dev_attr_sched5.attr,
620 &dev_attr_sched6.attr,
621 &dev_attr_sched7.attr,
622 NULL
625 static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
628 * Sends an sk_buff to an offload queue driver
629 * after dealing with any active network taps.
631 static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
633 int ret;
635 local_bh_disable();
636 ret = t3_offload_tx(tdev, skb);
637 local_bh_enable();
638 return ret;
641 static int write_smt_entry(struct adapter *adapter, int idx)
643 struct cpl_smt_write_req *req;
644 struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
646 if (!skb)
647 return -ENOMEM;
649 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
650 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
651 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
652 req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */
653 req->iff = idx;
654 memset(req->src_mac1, 0, sizeof(req->src_mac1));
655 memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
656 skb->priority = 1;
657 offload_tx(&adapter->tdev, skb);
658 return 0;
661 static int init_smt(struct adapter *adapter)
663 int i;
665 for_each_port(adapter, i)
666 write_smt_entry(adapter, i);
667 return 0;
670 static void init_port_mtus(struct adapter *adapter)
672 unsigned int mtus = adapter->port[0]->mtu;
674 if (adapter->port[1])
675 mtus |= adapter->port[1]->mtu << 16;
676 t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
679 static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
680 int hi, int port)
682 struct sk_buff *skb;
683 struct mngt_pktsched_wr *req;
685 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
686 req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
687 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
688 req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
689 req->sched = sched;
690 req->idx = qidx;
691 req->min = lo;
692 req->max = hi;
693 req->binding = port;
694 t3_mgmt_tx(adap, skb);
697 static void bind_qsets(struct adapter *adap)
699 int i, j;
701 for_each_port(adap, i) {
702 const struct port_info *pi = adap2pinfo(adap, i);
704 for (j = 0; j < pi->nqsets; ++j)
705 send_pktsched_cmd(adap, 1, pi->first_qset + j, -1,
706 -1, i);
711 * cxgb_up - enable the adapter
712 * @adapter: adapter being enabled
714 * Called when the first port is enabled, this function performs the
715 * actions necessary to make an adapter operational, such as completing
716 * the initialization of HW modules, and enabling interrupts.
718 * Must be called with the rtnl lock held.
720 static int cxgb_up(struct adapter *adap)
722 int err = 0;
724 if (!(adap->flags & FULL_INIT_DONE)) {
725 err = t3_check_fw_version(adap);
726 if (err)
727 goto out;
729 err = init_dummy_netdevs(adap);
730 if (err)
731 goto out;
733 err = t3_init_hw(adap, 0);
734 if (err)
735 goto out;
737 err = setup_sge_qsets(adap);
738 if (err)
739 goto out;
741 setup_rss(adap);
742 adap->flags |= FULL_INIT_DONE;
745 t3_intr_clear(adap);
747 if (adap->flags & USING_MSIX) {
748 name_msix_vecs(adap);
749 err = request_irq(adap->msix_info[0].vec,
750 t3_async_intr_handler, 0,
751 adap->msix_info[0].desc, adap);
752 if (err)
753 goto irq_err;
755 if (request_msix_data_irqs(adap)) {
756 free_irq(adap->msix_info[0].vec, adap);
757 goto irq_err;
759 } else if ((err = request_irq(adap->pdev->irq,
760 t3_intr_handler(adap,
761 adap->sge.qs[0].rspq.
762 polling),
763 (adap->flags & USING_MSI) ?
764 0 : IRQF_SHARED,
765 adap->name, adap)))
766 goto irq_err;
768 t3_sge_start(adap);
769 t3_intr_enable(adap);
771 if ((adap->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
772 bind_qsets(adap);
773 adap->flags |= QUEUES_BOUND;
775 out:
776 return err;
777 irq_err:
778 CH_ERR(adap, "request_irq failed, err %d\n", err);
779 goto out;
783 * Release resources when all the ports and offloading have been stopped.
785 static void cxgb_down(struct adapter *adapter)
787 t3_sge_stop(adapter);
788 spin_lock_irq(&adapter->work_lock); /* sync with PHY intr task */
789 t3_intr_disable(adapter);
790 spin_unlock_irq(&adapter->work_lock);
792 if (adapter->flags & USING_MSIX) {
793 int i, n = 0;
795 free_irq(adapter->msix_info[0].vec, adapter);
796 for_each_port(adapter, i)
797 n += adap2pinfo(adapter, i)->nqsets;
799 for (i = 0; i < n; ++i)
800 free_irq(adapter->msix_info[i + 1].vec,
801 &adapter->sge.qs[i]);
802 } else
803 free_irq(adapter->pdev->irq, adapter);
805 flush_workqueue(cxgb3_wq); /* wait for external IRQ handler */
806 quiesce_rx(adapter);
809 static void schedule_chk_task(struct adapter *adap)
811 unsigned int timeo;
813 timeo = adap->params.linkpoll_period ?
814 (HZ * adap->params.linkpoll_period) / 10 :
815 adap->params.stats_update_period * HZ;
816 if (timeo)
817 queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
820 static int offload_open(struct net_device *dev)
822 struct adapter *adapter = dev->priv;
823 struct t3cdev *tdev = T3CDEV(dev);
824 int adap_up = adapter->open_device_map & PORT_MASK;
825 int err = 0;
827 if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
828 return 0;
830 if (!adap_up && (err = cxgb_up(adapter)) < 0)
831 return err;
833 t3_tp_set_offload_mode(adapter, 1);
834 tdev->lldev = adapter->port[0];
835 err = cxgb3_offload_activate(adapter);
836 if (err)
837 goto out;
839 init_port_mtus(adapter);
840 t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
841 adapter->params.b_wnd,
842 adapter->params.rev == 0 ?
843 adapter->port[0]->mtu : 0xffff);
844 init_smt(adapter);
846 /* Never mind if the next step fails */
847 sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group);
849 /* Call back all registered clients */
850 cxgb3_add_clients(tdev);
852 out:
853 /* restore them in case the offload module has changed them */
854 if (err) {
855 t3_tp_set_offload_mode(adapter, 0);
856 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
857 cxgb3_set_dummy_ops(tdev);
859 return err;
862 static int offload_close(struct t3cdev *tdev)
864 struct adapter *adapter = tdev2adap(tdev);
866 if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
867 return 0;
869 /* Call back all registered clients */
870 cxgb3_remove_clients(tdev);
872 sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
874 tdev->lldev = NULL;
875 cxgb3_set_dummy_ops(tdev);
876 t3_tp_set_offload_mode(adapter, 0);
877 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
879 if (!adapter->open_device_map)
880 cxgb_down(adapter);
882 cxgb3_offload_deactivate(adapter);
883 return 0;
886 static int cxgb_open(struct net_device *dev)
888 int err;
889 struct adapter *adapter = dev->priv;
890 struct port_info *pi = netdev_priv(dev);
891 int other_ports = adapter->open_device_map & PORT_MASK;
893 if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
894 return err;
896 set_bit(pi->port_id, &adapter->open_device_map);
897 if (!ofld_disable) {
898 err = offload_open(dev);
899 if (err)
900 printk(KERN_WARNING
901 "Could not initialize offload capabilities\n");
904 link_start(dev);
905 t3_port_intr_enable(adapter, pi->port_id);
906 netif_start_queue(dev);
907 if (!other_ports)
908 schedule_chk_task(adapter);
910 return 0;
913 static int cxgb_close(struct net_device *dev)
915 struct adapter *adapter = dev->priv;
916 struct port_info *p = netdev_priv(dev);
918 t3_port_intr_disable(adapter, p->port_id);
919 netif_stop_queue(dev);
920 p->phy.ops->power_down(&p->phy, 1);
921 netif_carrier_off(dev);
922 t3_mac_disable(&p->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
924 spin_lock(&adapter->work_lock); /* sync with update task */
925 clear_bit(p->port_id, &adapter->open_device_map);
926 spin_unlock(&adapter->work_lock);
928 if (!(adapter->open_device_map & PORT_MASK))
929 cancel_rearming_delayed_workqueue(cxgb3_wq,
930 &adapter->adap_check_task);
932 if (!adapter->open_device_map)
933 cxgb_down(adapter);
935 return 0;
938 static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
940 struct adapter *adapter = dev->priv;
941 struct port_info *p = netdev_priv(dev);
942 struct net_device_stats *ns = &p->netstats;
943 const struct mac_stats *pstats;
945 spin_lock(&adapter->stats_lock);
946 pstats = t3_mac_update_stats(&p->mac);
947 spin_unlock(&adapter->stats_lock);
949 ns->tx_bytes = pstats->tx_octets;
950 ns->tx_packets = pstats->tx_frames;
951 ns->rx_bytes = pstats->rx_octets;
952 ns->rx_packets = pstats->rx_frames;
953 ns->multicast = pstats->rx_mcast_frames;
955 ns->tx_errors = pstats->tx_underrun;
956 ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
957 pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
958 pstats->rx_fifo_ovfl;
960 /* detailed rx_errors */
961 ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
962 ns->rx_over_errors = 0;
963 ns->rx_crc_errors = pstats->rx_fcs_errs;
964 ns->rx_frame_errors = pstats->rx_symbol_errs;
965 ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
966 ns->rx_missed_errors = pstats->rx_cong_drops;
968 /* detailed tx_errors */
969 ns->tx_aborted_errors = 0;
970 ns->tx_carrier_errors = 0;
971 ns->tx_fifo_errors = pstats->tx_underrun;
972 ns->tx_heartbeat_errors = 0;
973 ns->tx_window_errors = 0;
974 return ns;
977 static u32 get_msglevel(struct net_device *dev)
979 struct adapter *adapter = dev->priv;
981 return adapter->msg_enable;
984 static void set_msglevel(struct net_device *dev, u32 val)
986 struct adapter *adapter = dev->priv;
988 adapter->msg_enable = val;
991 static char stats_strings[][ETH_GSTRING_LEN] = {
992 "TxOctetsOK ",
993 "TxFramesOK ",
994 "TxMulticastFramesOK",
995 "TxBroadcastFramesOK",
996 "TxPauseFrames ",
997 "TxUnderrun ",
998 "TxExtUnderrun ",
1000 "TxFrames64 ",
1001 "TxFrames65To127 ",
1002 "TxFrames128To255 ",
1003 "TxFrames256To511 ",
1004 "TxFrames512To1023 ",
1005 "TxFrames1024To1518 ",
1006 "TxFrames1519ToMax ",
1008 "RxOctetsOK ",
1009 "RxFramesOK ",
1010 "RxMulticastFramesOK",
1011 "RxBroadcastFramesOK",
1012 "RxPauseFrames ",
1013 "RxFCSErrors ",
1014 "RxSymbolErrors ",
1015 "RxShortErrors ",
1016 "RxJabberErrors ",
1017 "RxLengthErrors ",
1018 "RxFIFOoverflow ",
1020 "RxFrames64 ",
1021 "RxFrames65To127 ",
1022 "RxFrames128To255 ",
1023 "RxFrames256To511 ",
1024 "RxFrames512To1023 ",
1025 "RxFrames1024To1518 ",
1026 "RxFrames1519ToMax ",
1028 "PhyFIFOErrors ",
1029 "TSO ",
1030 "VLANextractions ",
1031 "VLANinsertions ",
1032 "TxCsumOffload ",
1033 "RxCsumGood ",
1034 "RxDrops "
1037 static int get_stats_count(struct net_device *dev)
1039 return ARRAY_SIZE(stats_strings);
1042 #define T3_REGMAP_SIZE (3 * 1024)
1044 static int get_regs_len(struct net_device *dev)
1046 return T3_REGMAP_SIZE;
1049 static int get_eeprom_len(struct net_device *dev)
1051 return EEPROMSIZE;
1054 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1056 u32 fw_vers = 0;
1057 struct adapter *adapter = dev->priv;
1059 t3_get_fw_version(adapter, &fw_vers);
1061 strcpy(info->driver, DRV_NAME);
1062 strcpy(info->version, DRV_VERSION);
1063 strcpy(info->bus_info, pci_name(adapter->pdev));
1064 if (!fw_vers)
1065 strcpy(info->fw_version, "N/A");
1066 else {
1067 snprintf(info->fw_version, sizeof(info->fw_version),
1068 "%s %u.%u.%u",
1069 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1070 G_FW_VERSION_MAJOR(fw_vers),
1071 G_FW_VERSION_MINOR(fw_vers),
1072 G_FW_VERSION_MICRO(fw_vers));
1076 static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
1078 if (stringset == ETH_SS_STATS)
1079 memcpy(data, stats_strings, sizeof(stats_strings));
1082 static unsigned long collect_sge_port_stats(struct adapter *adapter,
1083 struct port_info *p, int idx)
1085 int i;
1086 unsigned long tot = 0;
1088 for (i = 0; i < p->nqsets; ++i)
1089 tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
1090 return tot;
1093 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1094 u64 *data)
1096 struct adapter *adapter = dev->priv;
1097 struct port_info *pi = netdev_priv(dev);
1098 const struct mac_stats *s;
1100 spin_lock(&adapter->stats_lock);
1101 s = t3_mac_update_stats(&pi->mac);
1102 spin_unlock(&adapter->stats_lock);
1104 *data++ = s->tx_octets;
1105 *data++ = s->tx_frames;
1106 *data++ = s->tx_mcast_frames;
1107 *data++ = s->tx_bcast_frames;
1108 *data++ = s->tx_pause;
1109 *data++ = s->tx_underrun;
1110 *data++ = s->tx_fifo_urun;
1112 *data++ = s->tx_frames_64;
1113 *data++ = s->tx_frames_65_127;
1114 *data++ = s->tx_frames_128_255;
1115 *data++ = s->tx_frames_256_511;
1116 *data++ = s->tx_frames_512_1023;
1117 *data++ = s->tx_frames_1024_1518;
1118 *data++ = s->tx_frames_1519_max;
1120 *data++ = s->rx_octets;
1121 *data++ = s->rx_frames;
1122 *data++ = s->rx_mcast_frames;
1123 *data++ = s->rx_bcast_frames;
1124 *data++ = s->rx_pause;
1125 *data++ = s->rx_fcs_errs;
1126 *data++ = s->rx_symbol_errs;
1127 *data++ = s->rx_short;
1128 *data++ = s->rx_jabber;
1129 *data++ = s->rx_too_long;
1130 *data++ = s->rx_fifo_ovfl;
1132 *data++ = s->rx_frames_64;
1133 *data++ = s->rx_frames_65_127;
1134 *data++ = s->rx_frames_128_255;
1135 *data++ = s->rx_frames_256_511;
1136 *data++ = s->rx_frames_512_1023;
1137 *data++ = s->rx_frames_1024_1518;
1138 *data++ = s->rx_frames_1519_max;
1140 *data++ = pi->phy.fifo_errors;
1142 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1143 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1144 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1145 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1146 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
1147 *data++ = s->rx_cong_drops;
1150 static inline void reg_block_dump(struct adapter *ap, void *buf,
1151 unsigned int start, unsigned int end)
1153 u32 *p = buf + start;
1155 for (; start <= end; start += sizeof(u32))
1156 *p++ = t3_read_reg(ap, start);
1159 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1160 void *buf)
1162 struct adapter *ap = dev->priv;
1165 * Version scheme:
1166 * bits 0..9: chip version
1167 * bits 10..15: chip revision
1168 * bit 31: set for PCIe cards
1170 regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1173 * We skip the MAC statistics registers because they are clear-on-read.
1174 * Also reading multi-register stats would need to synchronize with the
1175 * periodic mac stats accumulation. Hard to justify the complexity.
1177 memset(buf, 0, T3_REGMAP_SIZE);
1178 reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1179 reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1180 reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1181 reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1182 reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1183 reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1184 XGM_REG(A_XGM_SERDES_STAT3, 1));
1185 reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1186 XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1189 static int restart_autoneg(struct net_device *dev)
1191 struct port_info *p = netdev_priv(dev);
1193 if (!netif_running(dev))
1194 return -EAGAIN;
1195 if (p->link_config.autoneg != AUTONEG_ENABLE)
1196 return -EINVAL;
1197 p->phy.ops->autoneg_restart(&p->phy);
1198 return 0;
1201 static int cxgb3_phys_id(struct net_device *dev, u32 data)
1203 int i;
1204 struct adapter *adapter = dev->priv;
1206 if (data == 0)
1207 data = 2;
1209 for (i = 0; i < data * 2; i++) {
1210 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1211 (i & 1) ? F_GPIO0_OUT_VAL : 0);
1212 if (msleep_interruptible(500))
1213 break;
1215 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1216 F_GPIO0_OUT_VAL);
1217 return 0;
1220 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1222 struct port_info *p = netdev_priv(dev);
1224 cmd->supported = p->link_config.supported;
1225 cmd->advertising = p->link_config.advertising;
1227 if (netif_carrier_ok(dev)) {
1228 cmd->speed = p->link_config.speed;
1229 cmd->duplex = p->link_config.duplex;
1230 } else {
1231 cmd->speed = -1;
1232 cmd->duplex = -1;
1235 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1236 cmd->phy_address = p->phy.addr;
1237 cmd->transceiver = XCVR_EXTERNAL;
1238 cmd->autoneg = p->link_config.autoneg;
1239 cmd->maxtxpkt = 0;
1240 cmd->maxrxpkt = 0;
1241 return 0;
1244 static int speed_duplex_to_caps(int speed, int duplex)
1246 int cap = 0;
1248 switch (speed) {
1249 case SPEED_10:
1250 if (duplex == DUPLEX_FULL)
1251 cap = SUPPORTED_10baseT_Full;
1252 else
1253 cap = SUPPORTED_10baseT_Half;
1254 break;
1255 case SPEED_100:
1256 if (duplex == DUPLEX_FULL)
1257 cap = SUPPORTED_100baseT_Full;
1258 else
1259 cap = SUPPORTED_100baseT_Half;
1260 break;
1261 case SPEED_1000:
1262 if (duplex == DUPLEX_FULL)
1263 cap = SUPPORTED_1000baseT_Full;
1264 else
1265 cap = SUPPORTED_1000baseT_Half;
1266 break;
1267 case SPEED_10000:
1268 if (duplex == DUPLEX_FULL)
1269 cap = SUPPORTED_10000baseT_Full;
1271 return cap;
1274 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1275 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1276 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1277 ADVERTISED_10000baseT_Full)
1279 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1281 struct port_info *p = netdev_priv(dev);
1282 struct link_config *lc = &p->link_config;
1284 if (!(lc->supported & SUPPORTED_Autoneg))
1285 return -EOPNOTSUPP; /* can't change speed/duplex */
1287 if (cmd->autoneg == AUTONEG_DISABLE) {
1288 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1290 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
1291 return -EINVAL;
1292 lc->requested_speed = cmd->speed;
1293 lc->requested_duplex = cmd->duplex;
1294 lc->advertising = 0;
1295 } else {
1296 cmd->advertising &= ADVERTISED_MASK;
1297 cmd->advertising &= lc->supported;
1298 if (!cmd->advertising)
1299 return -EINVAL;
1300 lc->requested_speed = SPEED_INVALID;
1301 lc->requested_duplex = DUPLEX_INVALID;
1302 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1304 lc->autoneg = cmd->autoneg;
1305 if (netif_running(dev))
1306 t3_link_start(&p->phy, &p->mac, lc);
1307 return 0;
1310 static void get_pauseparam(struct net_device *dev,
1311 struct ethtool_pauseparam *epause)
1313 struct port_info *p = netdev_priv(dev);
1315 epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1316 epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1317 epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1320 static int set_pauseparam(struct net_device *dev,
1321 struct ethtool_pauseparam *epause)
1323 struct port_info *p = netdev_priv(dev);
1324 struct link_config *lc = &p->link_config;
1326 if (epause->autoneg == AUTONEG_DISABLE)
1327 lc->requested_fc = 0;
1328 else if (lc->supported & SUPPORTED_Autoneg)
1329 lc->requested_fc = PAUSE_AUTONEG;
1330 else
1331 return -EINVAL;
1333 if (epause->rx_pause)
1334 lc->requested_fc |= PAUSE_RX;
1335 if (epause->tx_pause)
1336 lc->requested_fc |= PAUSE_TX;
1337 if (lc->autoneg == AUTONEG_ENABLE) {
1338 if (netif_running(dev))
1339 t3_link_start(&p->phy, &p->mac, lc);
1340 } else {
1341 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1342 if (netif_running(dev))
1343 t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1345 return 0;
1348 static u32 get_rx_csum(struct net_device *dev)
1350 struct port_info *p = netdev_priv(dev);
1352 return p->rx_csum_offload;
1355 static int set_rx_csum(struct net_device *dev, u32 data)
1357 struct port_info *p = netdev_priv(dev);
1359 p->rx_csum_offload = data;
1360 return 0;
1363 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1365 const struct adapter *adapter = dev->priv;
1366 const struct port_info *pi = netdev_priv(dev);
1367 const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1369 e->rx_max_pending = MAX_RX_BUFFERS;
1370 e->rx_mini_max_pending = 0;
1371 e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1372 e->tx_max_pending = MAX_TXQ_ENTRIES;
1374 e->rx_pending = q->fl_size;
1375 e->rx_mini_pending = q->rspq_size;
1376 e->rx_jumbo_pending = q->jumbo_size;
1377 e->tx_pending = q->txq_size[0];
1380 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1382 int i;
1383 struct qset_params *q;
1384 struct adapter *adapter = dev->priv;
1385 const struct port_info *pi = netdev_priv(dev);
1387 if (e->rx_pending > MAX_RX_BUFFERS ||
1388 e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1389 e->tx_pending > MAX_TXQ_ENTRIES ||
1390 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1391 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1392 e->rx_pending < MIN_FL_ENTRIES ||
1393 e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1394 e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1395 return -EINVAL;
1397 if (adapter->flags & FULL_INIT_DONE)
1398 return -EBUSY;
1400 q = &adapter->params.sge.qset[pi->first_qset];
1401 for (i = 0; i < pi->nqsets; ++i, ++q) {
1402 q->rspq_size = e->rx_mini_pending;
1403 q->fl_size = e->rx_pending;
1404 q->jumbo_size = e->rx_jumbo_pending;
1405 q->txq_size[0] = e->tx_pending;
1406 q->txq_size[1] = e->tx_pending;
1407 q->txq_size[2] = e->tx_pending;
1409 return 0;
1412 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1414 struct adapter *adapter = dev->priv;
1415 struct qset_params *qsp = &adapter->params.sge.qset[0];
1416 struct sge_qset *qs = &adapter->sge.qs[0];
1418 if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1419 return -EINVAL;
1421 qsp->coalesce_usecs = c->rx_coalesce_usecs;
1422 t3_update_qset_coalesce(qs, qsp);
1423 return 0;
1426 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1428 struct adapter *adapter = dev->priv;
1429 struct qset_params *q = adapter->params.sge.qset;
1431 c->rx_coalesce_usecs = q->coalesce_usecs;
1432 return 0;
1435 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1436 u8 * data)
1438 int i, err = 0;
1439 struct adapter *adapter = dev->priv;
1441 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1442 if (!buf)
1443 return -ENOMEM;
1445 e->magic = EEPROM_MAGIC;
1446 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1447 err = t3_seeprom_read(adapter, i, (u32 *) & buf[i]);
1449 if (!err)
1450 memcpy(data, buf + e->offset, e->len);
1451 kfree(buf);
1452 return err;
1455 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1456 u8 * data)
1458 u8 *buf;
1459 int err = 0;
1460 u32 aligned_offset, aligned_len, *p;
1461 struct adapter *adapter = dev->priv;
1463 if (eeprom->magic != EEPROM_MAGIC)
1464 return -EINVAL;
1466 aligned_offset = eeprom->offset & ~3;
1467 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1469 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1470 buf = kmalloc(aligned_len, GFP_KERNEL);
1471 if (!buf)
1472 return -ENOMEM;
1473 err = t3_seeprom_read(adapter, aligned_offset, (u32 *) buf);
1474 if (!err && aligned_len > 4)
1475 err = t3_seeprom_read(adapter,
1476 aligned_offset + aligned_len - 4,
1477 (u32 *) & buf[aligned_len - 4]);
1478 if (err)
1479 goto out;
1480 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1481 } else
1482 buf = data;
1484 err = t3_seeprom_wp(adapter, 0);
1485 if (err)
1486 goto out;
1488 for (p = (u32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
1489 err = t3_seeprom_write(adapter, aligned_offset, *p);
1490 aligned_offset += 4;
1493 if (!err)
1494 err = t3_seeprom_wp(adapter, 1);
1495 out:
1496 if (buf != data)
1497 kfree(buf);
1498 return err;
1501 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1503 wol->supported = 0;
1504 wol->wolopts = 0;
1505 memset(&wol->sopass, 0, sizeof(wol->sopass));
1508 static const struct ethtool_ops cxgb_ethtool_ops = {
1509 .get_settings = get_settings,
1510 .set_settings = set_settings,
1511 .get_drvinfo = get_drvinfo,
1512 .get_msglevel = get_msglevel,
1513 .set_msglevel = set_msglevel,
1514 .get_ringparam = get_sge_param,
1515 .set_ringparam = set_sge_param,
1516 .get_coalesce = get_coalesce,
1517 .set_coalesce = set_coalesce,
1518 .get_eeprom_len = get_eeprom_len,
1519 .get_eeprom = get_eeprom,
1520 .set_eeprom = set_eeprom,
1521 .get_pauseparam = get_pauseparam,
1522 .set_pauseparam = set_pauseparam,
1523 .get_rx_csum = get_rx_csum,
1524 .set_rx_csum = set_rx_csum,
1525 .get_tx_csum = ethtool_op_get_tx_csum,
1526 .set_tx_csum = ethtool_op_set_tx_csum,
1527 .get_sg = ethtool_op_get_sg,
1528 .set_sg = ethtool_op_set_sg,
1529 .get_link = ethtool_op_get_link,
1530 .get_strings = get_strings,
1531 .phys_id = cxgb3_phys_id,
1532 .nway_reset = restart_autoneg,
1533 .get_stats_count = get_stats_count,
1534 .get_ethtool_stats = get_stats,
1535 .get_regs_len = get_regs_len,
1536 .get_regs = get_regs,
1537 .get_wol = get_wol,
1538 .get_tso = ethtool_op_get_tso,
1539 .set_tso = ethtool_op_set_tso,
1540 .get_perm_addr = ethtool_op_get_perm_addr
1543 static int in_range(int val, int lo, int hi)
1545 return val < 0 || (val <= hi && val >= lo);
1548 static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1550 int ret;
1551 u32 cmd;
1552 struct adapter *adapter = dev->priv;
1554 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
1555 return -EFAULT;
1557 switch (cmd) {
1558 case CHELSIO_SET_QSET_PARAMS:{
1559 int i;
1560 struct qset_params *q;
1561 struct ch_qset_params t;
1563 if (!capable(CAP_NET_ADMIN))
1564 return -EPERM;
1565 if (copy_from_user(&t, useraddr, sizeof(t)))
1566 return -EFAULT;
1567 if (t.qset_idx >= SGE_QSETS)
1568 return -EINVAL;
1569 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
1570 !in_range(t.cong_thres, 0, 255) ||
1571 !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
1572 MAX_TXQ_ENTRIES) ||
1573 !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
1574 MAX_TXQ_ENTRIES) ||
1575 !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
1576 MAX_CTRL_TXQ_ENTRIES) ||
1577 !in_range(t.fl_size[0], MIN_FL_ENTRIES,
1578 MAX_RX_BUFFERS)
1579 || !in_range(t.fl_size[1], MIN_FL_ENTRIES,
1580 MAX_RX_JUMBO_BUFFERS)
1581 || !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
1582 MAX_RSPQ_ENTRIES))
1583 return -EINVAL;
1584 if ((adapter->flags & FULL_INIT_DONE) &&
1585 (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
1586 t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
1587 t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
1588 t.polling >= 0 || t.cong_thres >= 0))
1589 return -EBUSY;
1591 q = &adapter->params.sge.qset[t.qset_idx];
1593 if (t.rspq_size >= 0)
1594 q->rspq_size = t.rspq_size;
1595 if (t.fl_size[0] >= 0)
1596 q->fl_size = t.fl_size[0];
1597 if (t.fl_size[1] >= 0)
1598 q->jumbo_size = t.fl_size[1];
1599 if (t.txq_size[0] >= 0)
1600 q->txq_size[0] = t.txq_size[0];
1601 if (t.txq_size[1] >= 0)
1602 q->txq_size[1] = t.txq_size[1];
1603 if (t.txq_size[2] >= 0)
1604 q->txq_size[2] = t.txq_size[2];
1605 if (t.cong_thres >= 0)
1606 q->cong_thres = t.cong_thres;
1607 if (t.intr_lat >= 0) {
1608 struct sge_qset *qs =
1609 &adapter->sge.qs[t.qset_idx];
1611 q->coalesce_usecs = t.intr_lat;
1612 t3_update_qset_coalesce(qs, q);
1614 if (t.polling >= 0) {
1615 if (adapter->flags & USING_MSIX)
1616 q->polling = t.polling;
1617 else {
1618 /* No polling with INTx for T3A */
1619 if (adapter->params.rev == 0 &&
1620 !(adapter->flags & USING_MSI))
1621 t.polling = 0;
1623 for (i = 0; i < SGE_QSETS; i++) {
1624 q = &adapter->params.sge.
1625 qset[i];
1626 q->polling = t.polling;
1630 break;
1632 case CHELSIO_GET_QSET_PARAMS:{
1633 struct qset_params *q;
1634 struct ch_qset_params t;
1636 if (copy_from_user(&t, useraddr, sizeof(t)))
1637 return -EFAULT;
1638 if (t.qset_idx >= SGE_QSETS)
1639 return -EINVAL;
1641 q = &adapter->params.sge.qset[t.qset_idx];
1642 t.rspq_size = q->rspq_size;
1643 t.txq_size[0] = q->txq_size[0];
1644 t.txq_size[1] = q->txq_size[1];
1645 t.txq_size[2] = q->txq_size[2];
1646 t.fl_size[0] = q->fl_size;
1647 t.fl_size[1] = q->jumbo_size;
1648 t.polling = q->polling;
1649 t.intr_lat = q->coalesce_usecs;
1650 t.cong_thres = q->cong_thres;
1652 if (copy_to_user(useraddr, &t, sizeof(t)))
1653 return -EFAULT;
1654 break;
1656 case CHELSIO_SET_QSET_NUM:{
1657 struct ch_reg edata;
1658 struct port_info *pi = netdev_priv(dev);
1659 unsigned int i, first_qset = 0, other_qsets = 0;
1661 if (!capable(CAP_NET_ADMIN))
1662 return -EPERM;
1663 if (adapter->flags & FULL_INIT_DONE)
1664 return -EBUSY;
1665 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1666 return -EFAULT;
1667 if (edata.val < 1 ||
1668 (edata.val > 1 && !(adapter->flags & USING_MSIX)))
1669 return -EINVAL;
1671 for_each_port(adapter, i)
1672 if (adapter->port[i] && adapter->port[i] != dev)
1673 other_qsets += adap2pinfo(adapter, i)->nqsets;
1675 if (edata.val + other_qsets > SGE_QSETS)
1676 return -EINVAL;
1678 pi->nqsets = edata.val;
1680 for_each_port(adapter, i)
1681 if (adapter->port[i]) {
1682 pi = adap2pinfo(adapter, i);
1683 pi->first_qset = first_qset;
1684 first_qset += pi->nqsets;
1686 break;
1688 case CHELSIO_GET_QSET_NUM:{
1689 struct ch_reg edata;
1690 struct port_info *pi = netdev_priv(dev);
1692 edata.cmd = CHELSIO_GET_QSET_NUM;
1693 edata.val = pi->nqsets;
1694 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1695 return -EFAULT;
1696 break;
1698 case CHELSIO_LOAD_FW:{
1699 u8 *fw_data;
1700 struct ch_mem_range t;
1702 if (!capable(CAP_NET_ADMIN))
1703 return -EPERM;
1704 if (copy_from_user(&t, useraddr, sizeof(t)))
1705 return -EFAULT;
1707 fw_data = kmalloc(t.len, GFP_KERNEL);
1708 if (!fw_data)
1709 return -ENOMEM;
1711 if (copy_from_user
1712 (fw_data, useraddr + sizeof(t), t.len)) {
1713 kfree(fw_data);
1714 return -EFAULT;
1717 ret = t3_load_fw(adapter, fw_data, t.len);
1718 kfree(fw_data);
1719 if (ret)
1720 return ret;
1721 break;
1723 case CHELSIO_SETMTUTAB:{
1724 struct ch_mtus m;
1725 int i;
1727 if (!is_offload(adapter))
1728 return -EOPNOTSUPP;
1729 if (!capable(CAP_NET_ADMIN))
1730 return -EPERM;
1731 if (offload_running(adapter))
1732 return -EBUSY;
1733 if (copy_from_user(&m, useraddr, sizeof(m)))
1734 return -EFAULT;
1735 if (m.nmtus != NMTUS)
1736 return -EINVAL;
1737 if (m.mtus[0] < 81) /* accommodate SACK */
1738 return -EINVAL;
1740 /* MTUs must be in ascending order */
1741 for (i = 1; i < NMTUS; ++i)
1742 if (m.mtus[i] < m.mtus[i - 1])
1743 return -EINVAL;
1745 memcpy(adapter->params.mtus, m.mtus,
1746 sizeof(adapter->params.mtus));
1747 break;
1749 case CHELSIO_GET_PM:{
1750 struct tp_params *p = &adapter->params.tp;
1751 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
1753 if (!is_offload(adapter))
1754 return -EOPNOTSUPP;
1755 m.tx_pg_sz = p->tx_pg_size;
1756 m.tx_num_pg = p->tx_num_pgs;
1757 m.rx_pg_sz = p->rx_pg_size;
1758 m.rx_num_pg = p->rx_num_pgs;
1759 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
1760 if (copy_to_user(useraddr, &m, sizeof(m)))
1761 return -EFAULT;
1762 break;
1764 case CHELSIO_SET_PM:{
1765 struct ch_pm m;
1766 struct tp_params *p = &adapter->params.tp;
1768 if (!is_offload(adapter))
1769 return -EOPNOTSUPP;
1770 if (!capable(CAP_NET_ADMIN))
1771 return -EPERM;
1772 if (adapter->flags & FULL_INIT_DONE)
1773 return -EBUSY;
1774 if (copy_from_user(&m, useraddr, sizeof(m)))
1775 return -EFAULT;
1776 if (!m.rx_pg_sz || (m.rx_pg_sz & (m.rx_pg_sz - 1)) ||
1777 !m.tx_pg_sz || (m.tx_pg_sz & (m.tx_pg_sz - 1)))
1778 return -EINVAL; /* not power of 2 */
1779 if (!(m.rx_pg_sz & 0x14000))
1780 return -EINVAL; /* not 16KB or 64KB */
1781 if (!(m.tx_pg_sz & 0x1554000))
1782 return -EINVAL;
1783 if (m.tx_num_pg == -1)
1784 m.tx_num_pg = p->tx_num_pgs;
1785 if (m.rx_num_pg == -1)
1786 m.rx_num_pg = p->rx_num_pgs;
1787 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
1788 return -EINVAL;
1789 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
1790 m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
1791 return -EINVAL;
1792 p->rx_pg_size = m.rx_pg_sz;
1793 p->tx_pg_size = m.tx_pg_sz;
1794 p->rx_num_pgs = m.rx_num_pg;
1795 p->tx_num_pgs = m.tx_num_pg;
1796 break;
1798 case CHELSIO_GET_MEM:{
1799 struct ch_mem_range t;
1800 struct mc7 *mem;
1801 u64 buf[32];
1803 if (!is_offload(adapter))
1804 return -EOPNOTSUPP;
1805 if (!(adapter->flags & FULL_INIT_DONE))
1806 return -EIO; /* need the memory controllers */
1807 if (copy_from_user(&t, useraddr, sizeof(t)))
1808 return -EFAULT;
1809 if ((t.addr & 7) || (t.len & 7))
1810 return -EINVAL;
1811 if (t.mem_id == MEM_CM)
1812 mem = &adapter->cm;
1813 else if (t.mem_id == MEM_PMRX)
1814 mem = &adapter->pmrx;
1815 else if (t.mem_id == MEM_PMTX)
1816 mem = &adapter->pmtx;
1817 else
1818 return -EINVAL;
1821 * Version scheme:
1822 * bits 0..9: chip version
1823 * bits 10..15: chip revision
1825 t.version = 3 | (adapter->params.rev << 10);
1826 if (copy_to_user(useraddr, &t, sizeof(t)))
1827 return -EFAULT;
1830 * Read 256 bytes at a time as len can be large and we don't
1831 * want to use huge intermediate buffers.
1833 useraddr += sizeof(t); /* advance to start of buffer */
1834 while (t.len) {
1835 unsigned int chunk =
1836 min_t(unsigned int, t.len, sizeof(buf));
1838 ret =
1839 t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
1840 buf);
1841 if (ret)
1842 return ret;
1843 if (copy_to_user(useraddr, buf, chunk))
1844 return -EFAULT;
1845 useraddr += chunk;
1846 t.addr += chunk;
1847 t.len -= chunk;
1849 break;
1851 case CHELSIO_SET_TRACE_FILTER:{
1852 struct ch_trace t;
1853 const struct trace_params *tp;
1855 if (!capable(CAP_NET_ADMIN))
1856 return -EPERM;
1857 if (!offload_running(adapter))
1858 return -EAGAIN;
1859 if (copy_from_user(&t, useraddr, sizeof(t)))
1860 return -EFAULT;
1862 tp = (const struct trace_params *)&t.sip;
1863 if (t.config_tx)
1864 t3_config_trace_filter(adapter, tp, 0,
1865 t.invert_match,
1866 t.trace_tx);
1867 if (t.config_rx)
1868 t3_config_trace_filter(adapter, tp, 1,
1869 t.invert_match,
1870 t.trace_rx);
1871 break;
1873 default:
1874 return -EOPNOTSUPP;
1876 return 0;
1879 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
1881 int ret, mmd;
1882 struct adapter *adapter = dev->priv;
1883 struct port_info *pi = netdev_priv(dev);
1884 struct mii_ioctl_data *data = if_mii(req);
1886 switch (cmd) {
1887 case SIOCGMIIPHY:
1888 data->phy_id = pi->phy.addr;
1889 /* FALLTHRU */
1890 case SIOCGMIIREG:{
1891 u32 val;
1892 struct cphy *phy = &pi->phy;
1894 if (!phy->mdio_read)
1895 return -EOPNOTSUPP;
1896 if (is_10G(adapter)) {
1897 mmd = data->phy_id >> 8;
1898 if (!mmd)
1899 mmd = MDIO_DEV_PCS;
1900 else if (mmd > MDIO_DEV_XGXS)
1901 return -EINVAL;
1903 ret =
1904 phy->mdio_read(adapter, data->phy_id & 0x1f,
1905 mmd, data->reg_num, &val);
1906 } else
1907 ret =
1908 phy->mdio_read(adapter, data->phy_id & 0x1f,
1909 0, data->reg_num & 0x1f,
1910 &val);
1911 if (!ret)
1912 data->val_out = val;
1913 break;
1915 case SIOCSMIIREG:{
1916 struct cphy *phy = &pi->phy;
1918 if (!capable(CAP_NET_ADMIN))
1919 return -EPERM;
1920 if (!phy->mdio_write)
1921 return -EOPNOTSUPP;
1922 if (is_10G(adapter)) {
1923 mmd = data->phy_id >> 8;
1924 if (!mmd)
1925 mmd = MDIO_DEV_PCS;
1926 else if (mmd > MDIO_DEV_XGXS)
1927 return -EINVAL;
1929 ret =
1930 phy->mdio_write(adapter,
1931 data->phy_id & 0x1f, mmd,
1932 data->reg_num,
1933 data->val_in);
1934 } else
1935 ret =
1936 phy->mdio_write(adapter,
1937 data->phy_id & 0x1f, 0,
1938 data->reg_num & 0x1f,
1939 data->val_in);
1940 break;
1942 case SIOCCHIOCTL:
1943 return cxgb_extension_ioctl(dev, req->ifr_data);
1944 default:
1945 return -EOPNOTSUPP;
1947 return ret;
1950 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
1952 int ret;
1953 struct adapter *adapter = dev->priv;
1954 struct port_info *pi = netdev_priv(dev);
1956 if (new_mtu < 81) /* accommodate SACK */
1957 return -EINVAL;
1958 if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
1959 return ret;
1960 dev->mtu = new_mtu;
1961 init_port_mtus(adapter);
1962 if (adapter->params.rev == 0 && offload_running(adapter))
1963 t3_load_mtus(adapter, adapter->params.mtus,
1964 adapter->params.a_wnd, adapter->params.b_wnd,
1965 adapter->port[0]->mtu);
1966 return 0;
1969 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
1971 struct adapter *adapter = dev->priv;
1972 struct port_info *pi = netdev_priv(dev);
1973 struct sockaddr *addr = p;
1975 if (!is_valid_ether_addr(addr->sa_data))
1976 return -EINVAL;
1978 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1979 t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
1980 if (offload_running(adapter))
1981 write_smt_entry(adapter, pi->port_id);
1982 return 0;
1986 * t3_synchronize_rx - wait for current Rx processing on a port to complete
1987 * @adap: the adapter
1988 * @p: the port
1990 * Ensures that current Rx processing on any of the queues associated with
1991 * the given port completes before returning. We do this by acquiring and
1992 * releasing the locks of the response queues associated with the port.
1994 static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
1996 int i;
1998 for (i = 0; i < p->nqsets; i++) {
1999 struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;
2001 spin_lock_irq(&q->lock);
2002 spin_unlock_irq(&q->lock);
2006 static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2008 struct adapter *adapter = dev->priv;
2009 struct port_info *pi = netdev_priv(dev);
2011 pi->vlan_grp = grp;
2012 if (adapter->params.rev > 0)
2013 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2014 else {
2015 /* single control for all ports */
2016 unsigned int i, have_vlans = 0;
2017 for_each_port(adapter, i)
2018 have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2020 t3_set_vlan_accel(adapter, 1, have_vlans);
2022 t3_synchronize_rx(adapter, pi);
2025 static void vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
2027 /* nothing */
2030 #ifdef CONFIG_NET_POLL_CONTROLLER
2031 static void cxgb_netpoll(struct net_device *dev)
2033 struct adapter *adapter = dev->priv;
2034 struct sge_qset *qs = dev2qset(dev);
2036 t3_intr_handler(adapter, qs->rspq.polling) (adapter->pdev->irq,
2037 adapter);
2039 #endif
2042 * Periodic accumulation of MAC statistics.
2044 static void mac_stats_update(struct adapter *adapter)
2046 int i;
2048 for_each_port(adapter, i) {
2049 struct net_device *dev = adapter->port[i];
2050 struct port_info *p = netdev_priv(dev);
2052 if (netif_running(dev)) {
2053 spin_lock(&adapter->stats_lock);
2054 t3_mac_update_stats(&p->mac);
2055 spin_unlock(&adapter->stats_lock);
2060 static void check_link_status(struct adapter *adapter)
2062 int i;
2064 for_each_port(adapter, i) {
2065 struct net_device *dev = adapter->port[i];
2066 struct port_info *p = netdev_priv(dev);
2068 if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev))
2069 t3_link_changed(adapter, i);
2073 static void t3_adap_check_task(struct work_struct *work)
2075 struct adapter *adapter = container_of(work, struct adapter,
2076 adap_check_task.work);
2077 const struct adapter_params *p = &adapter->params;
2079 adapter->check_task_cnt++;
2081 /* Check link status for PHYs without interrupts */
2082 if (p->linkpoll_period)
2083 check_link_status(adapter);
2085 /* Accumulate MAC stats if needed */
2086 if (!p->linkpoll_period ||
2087 (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2088 p->stats_update_period) {
2089 mac_stats_update(adapter);
2090 adapter->check_task_cnt = 0;
2093 /* Schedule the next check update if any port is active. */
2094 spin_lock(&adapter->work_lock);
2095 if (adapter->open_device_map & PORT_MASK)
2096 schedule_chk_task(adapter);
2097 spin_unlock(&adapter->work_lock);
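/*
 * linkpoll_period appears to be in tenths of a second, so after
 * check_task_cnt polls, check_task_cnt * linkpoll_period / 10 is the
 * elapsed time in seconds and MAC statistics get folded in roughly every
 * stats_update_period seconds (or on every run when link polling is
 * disabled).  Rescheduling is done under work_lock, presumably so a
 * concurrent port close can cancel the work without racing.
 */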
2100 /*
2101 * Processes external (PHY) interrupts in process context.
2102 */
2103 static void ext_intr_task(struct work_struct *work)
2105 struct adapter *adapter = container_of(work, struct adapter,
2106 ext_intr_handler_task);
2108 t3_phy_intr_handler(adapter);
2110 /* Now reenable external interrupts */
2111 spin_lock_irq(&adapter->work_lock);
2112 if (adapter->slow_intr_mask) {
2113 adapter->slow_intr_mask |= F_T3DBG;
2114 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2115 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2116 adapter->slow_intr_mask);
2118 spin_unlock_irq(&adapter->work_lock);
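/*
 * This pairs with t3_os_ext_intr_handler() below: the hard-irq handler
 * clears F_T3DBG from slow_intr_mask and queues this task; once the PHY
 * interrupt has been serviced in process context we clear the latched
 * cause bit and put F_T3DBG back into the enable mask.  work_lock is
 * taken with spin_lock_irq() here because the interrupt handler takes the
 * same lock from hard-irq context.
 */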
2121 /*
2122 * Interrupt-context handler for external (PHY) interrupts.
2123 */
2124 void t3_os_ext_intr_handler(struct adapter *adapter)
2126 /*
2127 * Schedule a task to handle external interrupts as they may be slow
2128 * and we use a mutex to protect MDIO registers. We disable PHY
2129 * interrupts in the meantime and let the task reenable them when
2130 * it's done.
2131 */
2132 spin_lock(&adapter->work_lock);
2133 if (adapter->slow_intr_mask) {
2134 adapter->slow_intr_mask &= ~F_T3DBG;
2135 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2136 adapter->slow_intr_mask);
2137 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2139 spin_unlock(&adapter->work_lock);
2142 void t3_fatal_err(struct adapter *adapter)
2144 unsigned int fw_status[4];
2146 if (adapter->flags & FULL_INIT_DONE) {
2147 t3_sge_stop(adapter);
2148 t3_intr_disable(adapter);
2150 CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2151 if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2152 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2153 fw_status[0], fw_status[1],
2154 fw_status[2], fw_status[3]);
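/*
 * On a fatal error the SGE and chip interrupts are shut down (only if
 * full initialization had completed) and, for post-mortem debugging, four
 * words of firmware status are read from the CIM control block at offset
 * 0xa0 and logged.
 */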
2158 static int __devinit cxgb_enable_msix(struct adapter *adap)
2160 struct msix_entry entries[SGE_QSETS + 1];
2161 int i, err;
2163 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2164 entries[i].entry = i;
2166 err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
2167 if (!err) {
2168 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2169 adap->msix_info[i].vec = entries[i].vector;
2170 } else if (err > 0)
2171 dev_info(&adap->pdev->dev,
2172 "only %d MSI-X vectors left, not using MSI-X\n", err);
2173 return err;
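/*
 * One MSI-X vector is requested per SGE queue set plus one extra for the
 * non-data (slow path) interrupt.  With the pci_enable_msix() of this
 * era a positive return value means fewer vectors than requested are
 * available; the caller treats any non-zero return as "fall back to
 * MSI/INTx".
 */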
2176 static void __devinit print_port_info(struct adapter *adap,
2177 const struct adapter_info *ai)
2179 static const char *pci_variant[] = {
2180 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
2183 int i;
2184 char buf[80];
2186 if (is_pcie(adap))
2187 snprintf(buf, sizeof(buf), "%s x%d",
2188 pci_variant[adap->params.pci.variant],
2189 adap->params.pci.width);
2190 else
2191 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2192 pci_variant[adap->params.pci.variant],
2193 adap->params.pci.speed, adap->params.pci.width);
2195 for_each_port(adap, i) {
2196 struct net_device *dev = adap->port[i];
2197 const struct port_info *pi = netdev_priv(dev);
2199 if (!test_bit(i, &adap->registered_device_map))
2200 continue;
2201 printk(KERN_INFO "%s: %s %s RNIC (rev %d) %s%s\n",
2202 dev->name, ai->desc, pi->port_type->desc,
2203 adap->params.rev, buf,
2204 (adap->flags & USING_MSIX) ? " MSI-X" :
2205 (adap->flags & USING_MSI) ? " MSI" : "");
2206 if (adap->name == dev->name && adap->params.vpd.mclk)
2207 printk(KERN_INFO "%s: %uMB CM, %uMB PMTX, %uMB PMRX\n",
2208 adap->name, t3_mc7_size(&adap->cm) >> 20,
2209 t3_mc7_size(&adap->pmtx) >> 20,
2210 t3_mc7_size(&adap->pmrx) >> 20);
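/*
 * The pointer comparison adap->name == dev->name above is deliberate:
 * adap->name is set to the name of the first successfully registered
 * port (see init_one() below), so the CM/PMTX/PMRX memory sizes are
 * printed only once per adapter rather than once per port.
 */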
2214 static int __devinit init_one(struct pci_dev *pdev,
2215 const struct pci_device_id *ent)
2217 static int version_printed;
2219 int i, err, pci_using_dac = 0;
2220 unsigned long mmio_start, mmio_len;
2221 const struct adapter_info *ai;
2222 struct adapter *adapter = NULL;
2223 struct port_info *pi;
2225 if (!version_printed) {
2226 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
2227 ++version_printed;
2230 if (!cxgb3_wq) {
2231 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
2232 if (!cxgb3_wq) {
2233 printk(KERN_ERR DRV_NAME
2234 ": cannot initialize work queue\n");
2235 return -ENOMEM;
2239 err = pci_request_regions(pdev, DRV_NAME);
2240 if (err) {
2241 /* Just info, some other driver may have claimed the device. */
2242 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
2243 return err;
2246 err = pci_enable_device(pdev);
2247 if (err) {
2248 dev_err(&pdev->dev, "cannot enable PCI device\n");
2249 goto out_release_regions;
2252 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2253 pci_using_dac = 1;
2254 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2255 if (err) {
2256 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
2257 "coherent allocations\n");
2258 goto out_disable_device;
2260 } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
2261 dev_err(&pdev->dev, "no usable DMA configuration\n");
2262 goto out_disable_device;
2265 pci_set_master(pdev);
2267 mmio_start = pci_resource_start(pdev, 0);
2268 mmio_len = pci_resource_len(pdev, 0);
2269 ai = t3_get_adapter_info(ent->driver_data);
2271 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2272 if (!adapter) {
2273 err = -ENOMEM;
2274 goto out_disable_device;
2277 adapter->regs = ioremap_nocache(mmio_start, mmio_len);
2278 if (!adapter->regs) {
2279 dev_err(&pdev->dev, "cannot map device registers\n");
2280 err = -ENOMEM;
2281 goto out_free_adapter;
2284 adapter->pdev = pdev;
2285 adapter->name = pci_name(pdev);
2286 adapter->msg_enable = dflt_msg_enable;
2287 adapter->mmio_len = mmio_len;
2289 mutex_init(&adapter->mdio_lock);
2290 spin_lock_init(&adapter->work_lock);
2291 spin_lock_init(&adapter->stats_lock);
2293 INIT_LIST_HEAD(&adapter->adapter_list);
2294 INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
2295 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
2297 for (i = 0; i < ai->nports; ++i) {
2298 struct net_device *netdev;
2300 netdev = alloc_etherdev(sizeof(struct port_info));
2301 if (!netdev) {
2302 err = -ENOMEM;
2303 goto out_free_dev;
2306 SET_MODULE_OWNER(netdev);
2307 SET_NETDEV_DEV(netdev, &pdev->dev);
2309 adapter->port[i] = netdev;
2310 pi = netdev_priv(netdev);
2311 pi->rx_csum_offload = 1;
2312 pi->nqsets = 1;
2313 pi->first_qset = i;
2314 pi->activity = 0;
2315 pi->port_id = i;
2316 netif_carrier_off(netdev);
2317 netdev->irq = pdev->irq;
2318 netdev->mem_start = mmio_start;
2319 netdev->mem_end = mmio_start + mmio_len - 1;
2320 netdev->priv = adapter;
2321 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
2322 netdev->features |= NETIF_F_LLTX;
2323 if (pci_using_dac)
2324 netdev->features |= NETIF_F_HIGHDMA;
2326 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2327 netdev->vlan_rx_register = vlan_rx_register;
2328 netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;
2330 netdev->open = cxgb_open;
2331 netdev->stop = cxgb_close;
2332 netdev->hard_start_xmit = t3_eth_xmit;
2333 netdev->get_stats = cxgb_get_stats;
2334 netdev->set_multicast_list = cxgb_set_rxmode;
2335 netdev->do_ioctl = cxgb_ioctl;
2336 netdev->change_mtu = cxgb_change_mtu;
2337 netdev->set_mac_address = cxgb_set_mac_addr;
2338 #ifdef CONFIG_NET_POLL_CONTROLLER
2339 netdev->poll_controller = cxgb_netpoll;
2340 #endif
2341 netdev->weight = 64;
2343 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
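/*
 * Each port's net_device ends up with two private areas: netdev_priv()
 * returns the per-port struct port_info allocated along with the netdev,
 * while the legacy dev->priv pointer is aimed at the shared struct
 * adapter.  Both are used throughout this file (cxgb_set_mac_addr()
 * above is one example).
 */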
2346 pci_set_drvdata(pdev, adapter->port[0]);
2347 if (t3_prep_adapter(adapter, ai, 1) < 0) {
2348 err = -ENODEV;
2349 goto out_free_dev;
2352 /*
2353 * The card is now ready to go. If any errors occur during device
2354 * registration we do not fail the whole card but rather proceed only
2355 * with the ports we manage to register successfully. However, we must
2356 * register at least one net device.
2357 */
2358 for_each_port(adapter, i) {
2359 err = register_netdev(adapter->port[i]);
2360 if (err)
2361 dev_warn(&pdev->dev,
2362 "cannot register net device %s, skipping\n",
2363 adapter->port[i]->name);
2364 else {
2365 /*
2366 * Change the name we use for messages to the name of
2367 * the first successfully registered interface.
2368 */
2369 if (!adapter->registered_device_map)
2370 adapter->name = adapter->port[i]->name;
2372 __set_bit(i, &adapter->registered_device_map);
2375 if (!adapter->registered_device_map) {
2376 dev_err(&pdev->dev, "could not register any net devices\n");
2377 goto out_free_dev;
2380 /* Driver's ready. Reflect it on LEDs */
2381 t3_led_ready(adapter);
2383 if (is_offload(adapter)) {
2384 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
2385 cxgb3_adapter_ofld(adapter);
2388 /* See what interrupts we'll be using */
2389 if (msi > 1 && cxgb_enable_msix(adapter) == 0)
2390 adapter->flags |= USING_MSIX;
2391 else if (msi > 0 && pci_enable_msi(pdev) == 0)
2392 adapter->flags |= USING_MSI;
2394 err = sysfs_create_group(&adapter->port[0]->dev.kobj,
2395 &cxgb3_attr_group);
2397 print_port_info(adapter, ai);
2398 return 0;
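/*
 * Error unwinding: the labels below release resources in reverse order
 * of acquisition.  out_free_dev also unmaps the register window and
 * frees any net_devices allocated so far; none of them have been
 * registered when it is reached, so free_netdev() alone is sufficient.
 */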
2400 out_free_dev:
2401 iounmap(adapter->regs);
2402 for (i = ai->nports - 1; i >= 0; --i)
2403 if (adapter->port[i])
2404 free_netdev(adapter->port[i]);
2406 out_free_adapter:
2407 kfree(adapter);
2409 out_disable_device:
2410 pci_disable_device(pdev);
2411 out_release_regions:
2412 pci_release_regions(pdev);
2413 pci_set_drvdata(pdev, NULL);
2414 return err;
2417 static void __devexit remove_one(struct pci_dev *pdev)
2419 struct net_device *dev = pci_get_drvdata(pdev);
2421 if (dev) {
2422 int i;
2423 struct adapter *adapter = dev->priv;
2425 t3_sge_stop(adapter);
2426 sysfs_remove_group(&adapter->port[0]->dev.kobj,
2427 &cxgb3_attr_group);
2429 for_each_port(adapter, i)
2430 if (test_bit(i, &adapter->registered_device_map))
2431 unregister_netdev(adapter->port[i]);
2433 if (is_offload(adapter)) {
2434 cxgb3_adapter_unofld(adapter);
2435 if (test_bit(OFFLOAD_DEVMAP_BIT,
2436 &adapter->open_device_map))
2437 offload_close(&adapter->tdev);
2440 t3_free_sge_resources(adapter);
2441 cxgb_disable_msi(adapter);
2443 for (i = 0; i < ARRAY_SIZE(adapter->dummy_netdev); i++)
2444 if (adapter->dummy_netdev[i]) {
2445 free_netdev(adapter->dummy_netdev[i]);
2446 adapter->dummy_netdev[i] = NULL;
2449 for_each_port(adapter, i)
2450 if (adapter->port[i])
2451 free_netdev(adapter->port[i]);
2453 iounmap(adapter->regs);
2454 kfree(adapter);
2455 pci_release_regions(pdev);
2456 pci_disable_device(pdev);
2457 pci_set_drvdata(pdev, NULL);
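/*
 * Teardown mirrors the probe path: stop the SGE, remove the sysfs group,
 * unregister only the ports that registered successfully, detach the
 * offload device, then release queues, interrupts, netdevs and PCI
 * resources.  The dummy_netdev array appears to hold the placeholder
 * devices used to provide NAPI contexts for queue sets beyond each
 * port's first; they are freed here but were never registered.
 */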
2461 static struct pci_driver driver = {
2462 .name = DRV_NAME,
2463 .id_table = cxgb3_pci_tbl,
2464 .probe = init_one,
2465 .remove = __devexit_p(remove_one),
2468 static int __init cxgb3_init_module(void)
2470 int ret;
2472 cxgb3_offload_init();
2474 ret = pci_register_driver(&driver);
2475 return ret;
2478 static void __exit cxgb3_cleanup_module(void)
2480 pci_unregister_driver(&driver);
2481 if (cxgb3_wq)
2482 destroy_workqueue(cxgb3_wq);
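/*
 * cxgb3_wq is created lazily on the first probe (see init_one() above),
 * so module cleanup must tolerate it never having been allocated, hence
 * the NULL check before destroy_workqueue().
 */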
2485 module_init(cxgb3_init_module);
2486 module_exit(cxgb3_cleanup_module);