cxgb3 - Limit multiqueue setting to msi-x
drivers/net/cxgb3/cxgb3_main.c
/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"
enum {
	MAX_TXQ_ENTRIES = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES = 16384,
	MAX_RX_BUFFERS = 16384,
	MAX_RX_JUMBO_BUFFERS = 16384,
	MIN_TXQ_ENTRIES = 4,
	MIN_CTRL_TXQ_ENTRIES = 4,
	MIN_RSPQ_ENTRIES = 32,
	MIN_FL_ENTRIES = 32
};

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, idx) \
	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }

static const struct pci_device_id cxgb3_pci_tbl[] = {
	CH_DEVICE(0x20, 0),	/* PE9000 */
	CH_DEVICE(0x21, 1),	/* T302E */
	CH_DEVICE(0x22, 2),	/* T310E */
	CH_DEVICE(0x23, 3),	/* T320X */
	CH_DEVICE(0x24, 1),	/* T302X */
	CH_DEVICE(0x25, 3),	/* T320E */
	CH_DEVICE(0x26, 2),	/* T310X */
	CH_DEVICE(0x30, 2),	/* T3B10 */
	CH_DEVICE(0x31, 3),	/* T3B20 */
	CH_DEVICE(0x32, 1),	/* T3B02 */
	{0,}
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");

/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */

static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
static struct workqueue_struct *cxgb3_wq;
/**
 *	link_report - show link status and link speed/duplex
 *	@dev: the port whose settings are to be reported
 *
 *	Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		printk(KERN_INFO "%s: link down\n", dev->name);
	else {
		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_config.speed) {
		case SPEED_10000:
			s = "10Gbps";
			break;
		case SPEED_1000:
			s = "1000Mbps";
			break;
		case SPEED_100:
			s = "100Mbps";
			break;
		}

		printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
		       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
	}
}
/**
 *	t3_os_link_changed - handle link status changes
 *	@adapter: the adapter associated with the link change
 *	@port_id: the port index whose link status has changed
 *	@link_stat: the new status of the link
 *	@speed: the new speed setting
 *	@duplex: the new duplex setting
 *	@pause: the new flow-control setting
 *
 *	This is the OS-dependent handler for link status changes.  The OS
 *	neutral handler takes care of most of the processing for these events,
 *	then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
			int speed, int duplex, int pause)
{
	struct net_device *dev = adapter->port[port_id];
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	/* Skip changes from disabled ports. */
	if (!netif_running(dev))
		return;

	if (link_stat != netif_carrier_ok(dev)) {
		if (link_stat) {
			t3_mac_enable(mac, MAC_DIRECTION_RX);
			netif_carrier_on(dev);
		} else {
			netif_carrier_off(dev);
			pi->phy.ops->power_down(&pi->phy, 1);
			t3_mac_disable(mac, MAC_DIRECTION_RX);
			t3_link_start(&pi->phy, mac, &pi->link_config);
		}

		link_report(dev);
	}
}
/**
 *	t3_os_phymod_changed - handle PHY module changes
 *	@adap: the adapter whose PHY reported the module change
 *	@port_id: the port index of the port with the changed module
 *
 *	This is the OS-dependent handler for PHY module changes.  It is
 *	invoked when a PHY module is removed or inserted for any OS-specific
 *	processing.
 */
void t3_os_phymod_changed(struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
	};

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->phy.modtype == phy_modtype_none)
		printk(KERN_INFO "%s: PHY module unplugged\n", dev->name);
	else
		printk(KERN_INFO "%s: %s PHY module inserted\n", dev->name,
		       mod_str[pi->phy.modtype]);
}
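
/*
 * Rebuild a port's Rx filtering state (promiscuity, multicast list) from
 * its net_device and program it into the MAC.
 */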
static void cxgb_set_rxmode(struct net_device *dev)
{
	struct t3_rx_mode rm;
	struct port_info *pi = netdev_priv(dev);

	init_rx_mode(&rm, dev, dev->mc_list);
	t3_mac_set_rx_mode(&pi->mac, &rm);
}
/**
 *	link_start - enable a port
 *	@dev: the device to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
	struct t3_rx_mode rm;
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	init_rx_mode(&rm, dev, dev->mc_list);
	t3_mac_reset(mac);
	t3_mac_set_mtu(mac, dev->mtu);
	t3_mac_set_address(mac, 0, dev->dev_addr);
	t3_mac_set_rx_mode(mac, &rm);
	t3_link_start(&pi->phy, mac, &pi->link_config);
	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}
static inline void cxgb_disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
	t3_slow_intr_handler(cookie);
	return IRQ_HANDLED;
}
/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

	snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
	adap->msix_info[0].desc[n] = 0;

	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++) {
			snprintf(adap->msix_info[msi_idx].desc, n,
				 "%s-%d", d->name, pi->first_qset + i);
			adap->msix_info[msi_idx].desc[n] = 0;
		}
	}
}

static int request_msix_data_irqs(struct adapter *adap)
{
	int i, j, err, qidx = 0;

	for_each_port(adap, i) {
		int nqsets = adap2pinfo(adap, i)->nqsets;

		for (j = 0; j < nqsets; ++j) {
			err = request_irq(adap->msix_info[qidx + 1].vec,
					  t3_intr_handler(adap,
							  adap->sge.qs[qidx].
							  rspq.polling), 0,
					  adap->msix_info[qidx + 1].desc,
					  &adap->sge.qs[qidx]);
			if (err) {
				while (--qidx >= 0)
					free_irq(adap->msix_info[qidx + 1].vec,
						 &adap->sge.qs[qidx]);
				return err;
			}
			qidx++;
		}
	}
	return 0;
}
static void free_irq_resources(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		int i, n = 0;

		free_irq(adapter->msix_info[0].vec, adapter);
		for_each_port(adapter, i)
			n += adap2pinfo(adapter, i)->nqsets;

		for (i = 0; i < n; ++i)
			free_irq(adapter->msix_info[i + 1].vec,
				 &adapter->sge.qs[i]);
	} else
		free_irq(adapter->pdev->irq, adapter);
}

static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
			      unsigned long n)
{
	int attempts = 5;

	while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
		if (!--attempts)
			return -ETIMEDOUT;
		msleep(10);
	}
	return 0;
}
static int init_tp_parity(struct adapter *adap)
{
	int i;
	struct sk_buff *skb;
	struct cpl_set_tcb_field *greq;
	unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

	t3_tp_set_offload_mode(adap, 1);

	for (i = 0; i < 16; i++) {
		struct cpl_smt_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
		req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
		req->iff = i;
		t3_mgmt_tx(adap, skb);
	}

	for (i = 0; i < 2048; i++) {
		struct cpl_l2t_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
		req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
		req->params = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
	}

	for (i = 0; i < 2048; i++) {
		struct cpl_rte_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
		req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
		req->l2t_idx = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
	}

	skb = alloc_skb(sizeof(*greq), GFP_KERNEL | __GFP_NOFAIL);
	greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
	memset(greq, 0, sizeof(*greq));
	greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
	greq->mask = cpu_to_be64(1);
	t3_mgmt_tx(adap, skb);

	i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
	t3_tp_set_offload_mode(adap, 0);
	return i;
}
/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS to distribute packets to multiple receive queues.  We
 *	configure the RSS CPU lookup table to distribute to the number of HW
 *	receive queues, and the response queue lookup table to narrow that
 *	down to the response queues actually configured for each port.
 *	We always configure the RSS mapping for two ports since the mapping
 *	table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
	int i;
	unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
	unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
	u8 cpus[SGE_QSETS + 1];
	u16 rspq_map[RSS_TABLE_SIZE];

	for (i = 0; i < SGE_QSETS; ++i)
		cpus[i] = i;
	cpus[SGE_QSETS] = 0xff;	/* terminator */

	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
		rspq_map[i] = i % nq0;
		rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
	}

	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}
static void init_napi(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++) {
		struct sge_qset *qs = &adap->sge.qs[i];

		if (qs->adap)
			netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
				       64);
	}

	/*
	 * netif_napi_add() can be called only once per napi_struct because it
	 * adds each new napi_struct to a list.  Be careful not to call it a
	 * second time, e.g., during EEH recovery, by making a note of it.
	 */
	adap->flags |= NAPI_INIT;
}

/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_disable(&adap->sge.qs[i].napi);
}

static void enable_all_napi(struct adapter *adap)
{
	int i;
	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_enable(&adap->sge.qs[i].napi);
}
/**
 *	setup_sge_qsets - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
	int i, j, err, irq_idx = 0, qset_idx = 0;
	unsigned int ntxq = SGE_TXQ_PER_SET;

	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
		irq_idx = -1;

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);

		pi->qs = &adap->sge.qs[pi->first_qset];
		for (j = pi->first_qset; j < pi->first_qset + pi->nqsets;
		     ++j, ++qset_idx) {
			if (!pi->rx_csum_offload)
				adap->params.sge.qset[qset_idx].lro = 0;
			err = t3_sge_alloc_qset(adap, qset_idx, 1,
				(adap->flags & USING_MSIX) ? qset_idx + 1 :
							     irq_idx,
				&adap->params.sge.qset[qset_idx], ntxq, dev);
			if (err) {
				t3_stop_sge_timers(adap);
				t3_free_sge_resources(adap);
				return err;
			}
		}
	}

	return 0;
}
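
/*
 * Common handler for the sysfs "show" attributes below; @format produces
 * the attribute text for a given net device.
 */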
static ssize_t attr_show(struct device *d, char *buf,
			 ssize_t(*format) (struct net_device *, char *))
{
	ssize_t len;

	/* Synchronize with ioctls that may shut down the device */
	rtnl_lock();
	len = (*format) (to_net_dev(d), buf);
	rtnl_unlock();
	return len;
}
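
/*
 * Common handler for the sysfs "store" attributes below.  Parses the value,
 * range-checks it against [min_val, max_val], and applies it via @set under
 * the RTNL lock.
 */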
static ssize_t attr_store(struct device *d,
			  const char *buf, size_t len,
			  ssize_t(*set) (struct net_device *, unsigned int),
			  unsigned int min_val, unsigned int max_val)
{
	char *endp;
	ssize_t ret;
	unsigned int val;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val < min_val || val > max_val)
		return -EINVAL;

	rtnl_lock();
	ret = (*set) (to_net_dev(d), val);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}
#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
	struct port_info *pi = netdev_priv(dev); \
	struct adapter *adap = pi->adapter; \
	return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return attr_show(d, buf, format_##name); \
}
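
/*
 * Resize the portion of the MC5 TCAM reserved for filters.  Only permitted
 * before the adapter is fully initialized, and the new size must leave room
 * for the server and TID regions.
 */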
static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val && adap->params.rev == 0)
		return -EINVAL;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
	    min_tids)
		return -EINVAL;
	adap->params.mc5.nfilters = val;
	return 0;
}
static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
	    MC5_MIN_TIDS)
		return -EINVAL;
	adap->params.mc5.nservers = val;
	return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nservers, 0, ~0);
}
#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
	&dev_attr_cam_size.attr,
	&dev_attr_nfilters.attr,
	&dev_attr_nservers.attr,
	NULL
};

static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
static ssize_t tm_attr_show(struct device *d,
			    char *buf, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int v, addr, bpt, cpt;
	ssize_t len;

	addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
	rtnl_lock();
	t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v >>= 16;
	bpt = (v >> 8) & 0xff;
	cpt = v & 0xff;
	if (!cpt)
		len = sprintf(buf, "disabled\n");
	else {
		v = (adap->params.vpd.cclk * 1000) / cpt;
		len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
	}
	rtnl_unlock();
	return len;
}
static ssize_t tm_attr_store(struct device *d,
			     const char *buf, size_t len, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int val;
	char *endp;
	ssize_t ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val > 10000000)
		return -EINVAL;

	rtnl_lock();
	ret = t3_config_sched(adap, val, sched);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}
#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return tm_attr_show(d, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
			    const char *buf, size_t len) \
{ \
	return tm_attr_store(d, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
	&dev_attr_sched0.attr,
	&dev_attr_sched1.attr,
	&dev_attr_sched2.attr,
	&dev_attr_sched3.attr,
	&dev_attr_sched4.attr,
	&dev_attr_sched5.attr,
	&dev_attr_sched6.attr,
	&dev_attr_sched7.attr,
	NULL
};

static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = t3_offload_tx(tdev, skb);
	local_bh_enable();
	return ret;
}
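
/*
 * Program one entry of the source MAC table (SMT) with the MAC address of
 * the corresponding port.
 */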
static int write_smt_entry(struct adapter *adapter, int idx)
{
	struct cpl_smt_write_req *req;
	struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;

	req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
	req->mtu_idx = NMTUS - 1;	/* should be 0 but there's a T3 bug */
	req->iff = idx;
	memset(req->src_mac1, 0, sizeof(req->src_mac1));
	memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
	skb->priority = 1;
	offload_tx(&adapter->tdev, skb);
	return 0;
}
static int init_smt(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i)
		write_smt_entry(adapter, i);
	return 0;
}
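
/*
 * Write the MTUs of the (up to two) ports into the TP per-port MTU table
 * register.
 */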
static void init_port_mtus(struct adapter *adapter)
{
	unsigned int mtus = adapter->port[0]->mtu;

	if (adapter->port[1])
		mtus |= adapter->port[1]->mtu << 16;
	t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}
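
/*
 * Send a firmware management work request configuring one of the packet
 * scheduler's bindings.
 */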
static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
			     int hi, int port)
{
	struct sk_buff *skb;
	struct mngt_pktsched_wr *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
	req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
	req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
	req->sched = sched;
	req->idx = qidx;
	req->min = lo;
	req->max = hi;
	req->binding = port;
	ret = t3_mgmt_tx(adap, skb);

	return ret;
}
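
/*
 * Bind each SGE queue set to its port via packet scheduler commands.
 */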
static int bind_qsets(struct adapter *adap)
{
	int i, j, err = 0;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		for (j = 0; j < pi->nqsets; ++j) {
			int ret = send_pktsched_cmd(adap, 1,
						    pi->first_qset + j, -1,
						    -1, i);
			if (ret)
				err = ret;
		}
	}

	return err;
}

#define FW_FNAME "t3fw-%d.%d.%d.bin"
#define TPSRAM_NAME "t3%c_protocol_sram-%d.%d.%d.bin"
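
/*
 * Load the firmware image matching the version the driver was built against,
 * using the kernel firmware loader.
 */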
static int upgrade_fw(struct adapter *adap)
{
	int ret;
	char buf[64];
	const struct firmware *fw;
	struct device *dev = &adap->pdev->dev;

	snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
		 FW_VERSION_MINOR, FW_VERSION_MICRO);
	ret = request_firmware(&fw, buf, dev);
	if (ret < 0) {
		dev_err(dev, "could not upgrade firmware: unable to load %s\n",
			buf);
		return ret;
	}
	ret = t3_load_fw(adap, fw->data, fw->size);
	release_firmware(fw);

	if (ret == 0)
		dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
			 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
	else
		dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
			FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

	return ret;
}
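
/*
 * Map the chip revision to the character used in protocol SRAM image file
 * names; returns 0 for revisions that take no protocol SRAM image.
 */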
static inline char t3rev2char(struct adapter *adapter)
{
	char rev = 0;

	switch(adapter->params.rev) {
	case T3_REV_B:
	case T3_REV_B2:
		rev = 'b';
		break;
	case T3_REV_C:
		rev = 'c';
		break;
	}
	return rev;
}
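
/*
 * Locate, validate, and load the protocol SRAM image appropriate for this
 * chip revision and driver version.
 */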
static int update_tpsram(struct adapter *adap)
{
	const struct firmware *tpsram;
	char buf[64];
	struct device *dev = &adap->pdev->dev;
	int ret;
	char rev;

	rev = t3rev2char(adap);
	if (!rev)
		return 0;

	snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
		 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);

	ret = request_firmware(&tpsram, buf, dev);
	if (ret < 0) {
		dev_err(dev, "could not load TP SRAM: unable to load %s\n",
			buf);
		return ret;
	}

	ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
	if (ret)
		goto release_tpsram;

	ret = t3_set_proto_sram(adap, tpsram->data);
	if (ret == 0)
		dev_info(dev,
			 "successful update of protocol engine "
			 "to %d.%d.%d\n",
			 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	else
		dev_err(dev, "failed to update protocol engine to %d.%d.%d\n",
			TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	if (ret)
		dev_err(dev, "loading protocol SRAM failed\n");

release_tpsram:
	release_firmware(tpsram);

	return ret;
}
/**
 *	cxgb_up - enable the adapter
 *	@adapter: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err;
	int must_load;

	if (!(adap->flags & FULL_INIT_DONE)) {
		err = t3_check_fw_version(adap, &must_load);
		if (err == -EINVAL) {
			err = upgrade_fw(adap);
			if (err && must_load)
				goto out;
		}

		err = t3_check_tpsram_version(adap, &must_load);
		if (err == -EINVAL) {
			err = update_tpsram(adap);
			if (err && must_load)
				goto out;
		}

		/*
		 * Clear interrupts now to catch errors if t3_init_hw fails.
		 * We clear them again later as initialization may trigger
		 * conditions that can interrupt.
		 */
		t3_intr_clear(adap);

		err = t3_init_hw(adap, 0);
		if (err)
			goto out;

		t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
		t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

		err = setup_sge_qsets(adap);
		if (err)
			goto out;

		setup_rss(adap);
		if (!(adap->flags & NAPI_INIT))
			init_napi(adap);
		adap->flags |= FULL_INIT_DONE;
	}

	t3_intr_clear(adap);

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec,
				  t3_async_intr_handler, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		err = request_msix_data_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else if ((err = request_irq(adap->pdev->irq,
				      t3_intr_handler(adap,
						      adap->sge.qs[0].rspq.
						      polling),
				      (adap->flags & USING_MSI) ?
				      0 : IRQF_SHARED,
				      adap->name, adap)))
		goto irq_err;

	enable_all_napi(adap);
	t3_sge_start(adap);
	t3_intr_enable(adap);

	if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
	    is_offload(adap) && init_tp_parity(adap) == 0)
		adap->flags |= TP_PARITY_INIT;

	if (adap->flags & TP_PARITY_INIT) {
		t3_write_reg(adap, A_TP_INT_CAUSE,
			     F_CMCACHEPERR | F_ARPLUTPERR);
		t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
	}

	if (!(adap->flags & QUEUES_BOUND)) {
		err = bind_qsets(adap);
		if (err) {
			CH_ERR(adap, "failed to bind qsets, err %d\n", err);
			t3_intr_disable(adap);
			free_irq_resources(adap);
			goto out;
		}
		adap->flags |= QUEUES_BOUND;
	}

out:
	return err;
irq_err:
	CH_ERR(adap, "request_irq failed, err %d\n", err);
	goto out;
}
/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
	t3_sge_stop(adapter);
	spin_lock_irq(&adapter->work_lock);	/* sync with PHY intr task */
	t3_intr_disable(adapter);
	spin_unlock_irq(&adapter->work_lock);

	free_irq_resources(adapter);
	flush_workqueue(cxgb3_wq);	/* wait for external IRQ handler */
	quiesce_rx(adapter);
}
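
/*
 * Schedule the periodic adapter check task; the interval comes from the
 * link polling period if set, otherwise from the stats update period.
 */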
static void schedule_chk_task(struct adapter *adap)
{
	unsigned int timeo;

	timeo = adap->params.linkpoll_period ?
	    (HZ * adap->params.linkpoll_period) / 10 :
	    adap->params.stats_update_period * HZ;
	if (timeo)
		queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}
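
/*
 * Bring up the adapter's offload capabilities: enable offload mode, activate
 * the offload module, program MTUs and the SMT, and notify registered
 * clients.
 */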
static int offload_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct t3cdev *tdev = dev2t3cdev(dev);
	int adap_up = adapter->open_device_map & PORT_MASK;
	int err;

	if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	if (!adap_up && (err = cxgb_up(adapter)) < 0)
		goto out;

	t3_tp_set_offload_mode(adapter, 1);
	tdev->lldev = adapter->port[0];
	err = cxgb3_offload_activate(adapter);
	if (err)
		goto out;

	init_port_mtus(adapter);
	t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
		     adapter->params.b_wnd,
		     adapter->params.rev == 0 ?
		     adapter->port[0]->mtu : 0xffff);
	init_smt(adapter);

	if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
		dev_dbg(&dev->dev, "cannot create sysfs group\n");

	/* Call back all registered clients */
	cxgb3_add_clients(tdev);

out:
	/* restore them in case the offload module has changed them */
	if (err) {
		t3_tp_set_offload_mode(adapter, 0);
		clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
		cxgb3_set_dummy_ops(tdev);
	}
	return err;
}
static int offload_close(struct t3cdev *tdev)
{
	struct adapter *adapter = tdev2adap(tdev);

	if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	/* Call back all registered clients */
	cxgb3_remove_clients(tdev);

	sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

	tdev->lldev = NULL;
	cxgb3_set_dummy_ops(tdev);
	t3_tp_set_offload_mode(adapter, 0);
	clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

	if (!adapter->open_device_map)
		cxgb_down(adapter);

	cxgb3_offload_deactivate(adapter);
	return 0;
}
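
/*
 * net_device open handler: brings up the adapter on the first open, marks
 * the port open, and optionally initializes offload.
 */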
static int cxgb_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int other_ports = adapter->open_device_map & PORT_MASK;
	int err;

	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
		return err;

	set_bit(pi->port_id, &adapter->open_device_map);
	if (is_offload(adapter) && !ofld_disable) {
		err = offload_open(dev);
		if (err)
			printk(KERN_WARNING
			       "Could not initialize offload capabilities\n");
	}

	link_start(dev);
	t3_port_intr_enable(adapter, pi->port_id);
	netif_start_queue(dev);
	if (!other_ports)
		schedule_chk_task(adapter);

	return 0;
}

static int cxgb_close(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	t3_port_intr_disable(adapter, pi->port_id);
	netif_stop_queue(dev);
	pi->phy.ops->power_down(&pi->phy, 1);
	netif_carrier_off(dev);
	t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

	spin_lock_irq(&adapter->work_lock);	/* sync with update task */
	clear_bit(pi->port_id, &adapter->open_device_map);
	spin_unlock_irq(&adapter->work_lock);

	if (!(adapter->open_device_map & PORT_MASK))
		cancel_rearming_delayed_workqueue(cxgb3_wq,
						  &adapter->adap_check_task);

	if (!adapter->open_device_map)
		cxgb_down(adapter);

	return 0;
}
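
/*
 * net_device stats handler: folds the periodically accumulated MAC
 * statistics into the generic net_device_stats format.
 */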
static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct net_device_stats *ns = &pi->netstats;
	const struct mac_stats *pstats;

	spin_lock(&adapter->stats_lock);
	pstats = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes = pstats->tx_octets;
	ns->tx_packets = pstats->tx_frames;
	ns->rx_bytes = pstats->rx_octets;
	ns->rx_packets = pstats->rx_frames;
	ns->multicast = pstats->rx_mcast_frames;

	ns->tx_errors = pstats->tx_underrun;
	ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
	    pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
	    pstats->rx_fifo_ovfl;

	/* detailed rx_errors */
	ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
	ns->rx_over_errors = 0;
	ns->rx_crc_errors = pstats->rx_fcs_errs;
	ns->rx_frame_errors = pstats->rx_symbol_errs;
	ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
	ns->rx_missed_errors = pstats->rx_cong_drops;

	/* detailed tx_errors */
	ns->tx_aborted_errors = 0;
	ns->tx_carrier_errors = 0;
	ns->tx_fifo_errors = pstats->tx_underrun;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors = 0;
	return ns;
}

static u32 get_msglevel(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	adapter->msg_enable = val;
}
static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK         ",
	"TxFramesOK         ",
	"TxMulticastFramesOK",
	"TxBroadcastFramesOK",
	"TxPauseFrames      ",
	"TxUnderrun         ",
	"TxExtUnderrun      ",

	"TxFrames64         ",
	"TxFrames65To127    ",
	"TxFrames128To255   ",
	"TxFrames256To511   ",
	"TxFrames512To1023  ",
	"TxFrames1024To1518 ",
	"TxFrames1519ToMax  ",

	"RxOctetsOK         ",
	"RxFramesOK         ",
	"RxMulticastFramesOK",
	"RxBroadcastFramesOK",
	"RxPauseFrames      ",
	"RxFCSErrors        ",
	"RxSymbolErrors     ",
	"RxShortErrors      ",
	"RxJabberErrors     ",
	"RxLengthErrors     ",
	"RxFIFOoverflow     ",

	"RxFrames64         ",
	"RxFrames65To127    ",
	"RxFrames128To255   ",
	"RxFrames256To511   ",
	"RxFrames512To1023  ",
	"RxFrames1024To1518 ",
	"RxFrames1519ToMax  ",

	"PhyFIFOErrors      ",
	"TSO                ",
	"VLANextractions    ",
	"VLANinsertions     ",
	"TxCsumOffload      ",
	"RxCsumGood         ",
	"LroAggregated      ",
	"LroFlushed         ",
	"LroNoDesc          ",
	"RxDrops            ",

	"CheckTXEnToggled   ",
	"CheckResets        ",
};

static int get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(stats_strings);
	default:
		return -EOPNOTSUPP;
	}
}

#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
	return T3_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
	return EEPROMSIZE;
}
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 fw_vers = 0;
	u32 tp_vers = 0;

	t3_get_fw_version(adapter, &fw_vers);
	t3_get_tp_version(adapter, &tp_vers);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(adapter->pdev));
	if (!fw_vers)
		strcpy(info->fw_version, "N/A");
	else {
		snprintf(info->fw_version, sizeof(info->fw_version),
			 "%s %u.%u.%u TP %u.%u.%u",
			 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
			 G_FW_VERSION_MAJOR(fw_vers),
			 G_FW_VERSION_MINOR(fw_vers),
			 G_FW_VERSION_MICRO(fw_vers),
			 G_TP_VERSION_MAJOR(tp_vers),
			 G_TP_VERSION_MINOR(tp_vers),
			 G_TP_VERSION_MICRO(tp_vers));
	}
}

static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}

static unsigned long collect_sge_port_stats(struct adapter *adapter,
					    struct port_info *p, int idx)
{
	int i;
	unsigned long tot = 0;

	for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
		tot += adapter->sge.qs[i].port_stats[idx];
	return tot;
}
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	const struct mac_stats *s;

	spin_lock(&adapter->stats_lock);
	s = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	*data++ = s->tx_octets;
	*data++ = s->tx_frames;
	*data++ = s->tx_mcast_frames;
	*data++ = s->tx_bcast_frames;
	*data++ = s->tx_pause;
	*data++ = s->tx_underrun;
	*data++ = s->tx_fifo_urun;

	*data++ = s->tx_frames_64;
	*data++ = s->tx_frames_65_127;
	*data++ = s->tx_frames_128_255;
	*data++ = s->tx_frames_256_511;
	*data++ = s->tx_frames_512_1023;
	*data++ = s->tx_frames_1024_1518;
	*data++ = s->tx_frames_1519_max;

	*data++ = s->rx_octets;
	*data++ = s->rx_frames;
	*data++ = s->rx_mcast_frames;
	*data++ = s->rx_bcast_frames;
	*data++ = s->rx_pause;
	*data++ = s->rx_fcs_errs;
	*data++ = s->rx_symbol_errs;
	*data++ = s->rx_short;
	*data++ = s->rx_jabber;
	*data++ = s->rx_too_long;
	*data++ = s->rx_fifo_ovfl;

	*data++ = s->rx_frames_64;
	*data++ = s->rx_frames_65_127;
	*data++ = s->rx_frames_128_255;
	*data++ = s->rx_frames_256_511;
	*data++ = s->rx_frames_512_1023;
	*data++ = s->rx_frames_1024_1518;
	*data++ = s->rx_frames_1519_max;

	*data++ = pi->phy.fifo_errors;

	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_AGGR);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_FLUSHED);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_NO_DESC);
	*data++ = s->rx_cong_drops;

	*data++ = s->num_toggled;
	*data++ = s->num_resets;
}
static inline void reg_block_dump(struct adapter *ap, void *buf,
				  unsigned int start, unsigned int end)
{
	u32 *p = buf + start;

	for (; start <= end; start += sizeof(u32))
		*p++ = t3_read_reg(ap, start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *ap = pi->adapter;

	/*
	 * Version scheme:
	 * bits 0..9: chip version
	 * bits 10..15: chip revision
	 * bit 31: set for PCIe cards
	 */
	regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);

	/*
	 * We skip the MAC statistics registers because they are clear-on-read.
	 * Also reading multi-register stats would need to synchronize with the
	 * periodic mac stats accumulation.  Hard to justify the complexity.
	 */
	memset(buf, 0, T3_REGMAP_SIZE);
	reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
	reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
	reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
	reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
	reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
	reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
		       XGM_REG(A_XGM_SERDES_STAT3, 1));
	reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
		       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}
static int restart_autoneg(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;
	if (p->link_config.autoneg != AUTONEG_ENABLE)
		return -EINVAL;
	p->phy.ops->autoneg_restart(&p->phy);
	return 0;
}
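
/*
 * ethtool LED identification: blink the port LED by toggling GPIO0 every
 * half second for roughly @data seconds (2 seconds when @data is 0).
 */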
static int cxgb3_phys_id(struct net_device *dev, u32 data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int i;

	if (data == 0)
		data = 2;

	for (i = 0; i < data * 2; i++) {
		t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
				 (i & 1) ? F_GPIO0_OUT_VAL : 0);
		if (msleep_interruptible(500))
			break;
	}
	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
			 F_GPIO0_OUT_VAL);
	return 0;
}
static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port_info *p = netdev_priv(dev);

	cmd->supported = p->link_config.supported;
	cmd->advertising = p->link_config.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = p->link_config.speed;
		cmd->duplex = p->link_config.duplex;
	} else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}

	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
	cmd->phy_address = p->phy.addr;
	cmd->transceiver = XCVR_EXTERNAL;
	cmd->autoneg = p->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
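
/*
 * Translate an ethtool speed/duplex pair into the corresponding SUPPORTED_*
 * capability bit, or 0 if the combination is not supported.
 */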
static int speed_duplex_to_caps(int speed, int duplex)
{
	int cap = 0;

	switch (speed) {
	case SPEED_10:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10baseT_Full;
		else
			cap = SUPPORTED_10baseT_Half;
		break;
	case SPEED_100:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_100baseT_Full;
		else
			cap = SUPPORTED_100baseT_Half;
		break;
	case SPEED_1000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_1000baseT_Full;
		else
			cap = SUPPORTED_1000baseT_Half;
		break;
	case SPEED_10000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10000baseT_Full;
	}
	return cap;
}

#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
		      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
		      ADVERTISED_10000baseT_Full)
static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	int cap;
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (!(lc->supported & SUPPORTED_Autoneg)) {
		/*
		 * PHY offers a single speed/duplex.  See if that's what's
		 * being requested.
		 */
		if (cmd->autoneg == AUTONEG_DISABLE) {
			cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
			if (lc->supported & cap)
				return 0;
		}
		return -EINVAL;
	}

	if (cmd->autoneg == AUTONEG_DISABLE) {
		int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

		if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
			return -EINVAL;
		lc->requested_speed = cmd->speed;
		lc->requested_duplex = cmd->duplex;
		lc->advertising = 0;
	} else {
		cmd->advertising &= ADVERTISED_MASK;
		cmd->advertising &= lc->supported;
		if (!cmd->advertising)
			return -EINVAL;
		lc->requested_speed = SPEED_INVALID;
		lc->requested_duplex = DUPLEX_INVALID;
		lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
	}
	lc->autoneg = cmd->autoneg;
	if (netif_running(dev))
		t3_link_start(&p->phy, &p->mac, lc);
	return 0;
}
static void get_pauseparam(struct net_device *dev,
			   struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);

	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & SUPPORTED_Autoneg)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (lc->autoneg == AUTONEG_ENABLE) {
		if (netif_running(dev))
			t3_link_start(&p->phy, &p->mac, lc);
	} else {
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
		if (netif_running(dev))
			t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
	}
	return 0;
}
static u32 get_rx_csum(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	return p->rx_csum_offload;
}

static int set_rx_csum(struct net_device *dev, u32 data)
{
	struct port_info *p = netdev_priv(dev);

	p->rx_csum_offload = data;
	if (!data) {
		struct adapter *adap = p->adapter;
		int i;

		for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
			adap->params.sge.qset[i].lro = 0;
			adap->sge.qs[i].lro_enabled = 0;
		}
	}
	return 0;
}

static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_mini_max_pending = 0;
	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
	e->tx_max_pending = MAX_TXQ_ENTRIES;

	e->rx_pending = q->fl_size;
	e->rx_mini_pending = q->rspq_size;
	e->rx_jumbo_pending = q->jumbo_size;
	e->tx_pending = q->txq_size[0];
}
static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *q;
	int i;

	if (e->rx_pending > MAX_RX_BUFFERS ||
	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
	    e->tx_pending > MAX_TXQ_ENTRIES ||
	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES ||
	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
	    e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	q = &adapter->params.sge.qset[pi->first_qset];
	for (i = 0; i < pi->nqsets; ++i, ++q) {
		q->rspq_size = e->rx_mini_pending;
		q->fl_size = e->rx_pending;
		q->jumbo_size = e->rx_jumbo_pending;
		q->txq_size[0] = e->tx_pending;
		q->txq_size[1] = e->tx_pending;
		q->txq_size[2] = e->tx_pending;
	}
	return 0;
}

static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *qsp = &adapter->params.sge.qset[0];
	struct sge_qset *qs = &adapter->sge.qs[0];

	if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
		return -EINVAL;

	qsp->coalesce_usecs = c->rx_coalesce_usecs;
	t3_update_qset_coalesce(qs, qsp);
	return 0;
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *q = adapter->params.sge.qset;

	c->rx_coalesce_usecs = q->coalesce_usecs;
	return 0;
}
static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 * data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int i, err = 0;

	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	e->magic = EEPROM_MAGIC;
	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
		err = t3_seeprom_read(adapter, i, (__le32 *) & buf[i]);

	if (!err)
		memcpy(data, buf + e->offset, e->len);
	kfree(buf);
	return err;
}

static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		      u8 * data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 aligned_offset, aligned_len;
	__le32 *p;
	u8 *buf;
	int err;

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
		buf = kmalloc(aligned_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
		if (!err && aligned_len > 4)
			err = t3_seeprom_read(adapter,
					      aligned_offset + aligned_len - 4,
					      (__le32 *) & buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
	} else
		buf = data;

	err = t3_seeprom_wp(adapter, 0);
	if (err)
		goto out;

	for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = t3_seeprom_write(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t3_seeprom_wp(adapter, 1);
out:
	if (buf != data)
		kfree(buf);
	return err;
}
static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	wol->supported = 0;
	wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static const struct ethtool_ops cxgb_ethtool_ops = {
	.get_settings = get_settings,
	.set_settings = set_settings,
	.get_drvinfo = get_drvinfo,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_ringparam = get_sge_param,
	.set_ringparam = set_sge_param,
	.get_coalesce = get_coalesce,
	.set_coalesce = set_coalesce,
	.get_eeprom_len = get_eeprom_len,
	.get_eeprom = get_eeprom,
	.set_eeprom = set_eeprom,
	.get_pauseparam = get_pauseparam,
	.set_pauseparam = set_pauseparam,
	.get_rx_csum = get_rx_csum,
	.set_rx_csum = set_rx_csum,
	.set_tx_csum = ethtool_op_set_tx_csum,
	.set_sg = ethtool_op_set_sg,
	.get_link = ethtool_op_get_link,
	.get_strings = get_strings,
	.phys_id = cxgb3_phys_id,
	.nway_reset = restart_autoneg,
	.get_sset_count = get_sset_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len = get_regs_len,
	.get_regs = get_regs,
	.get_wol = get_wol,
	.set_tso = ethtool_op_set_tso,
};
/* A negative value means "leave unchanged"; otherwise require lo <= val <= hi. */
static int in_range(int val, int lo, int hi)
{
	return val < 0 || (val <= hi && val >= lo);
}
1832 static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1834 struct port_info *pi = netdev_priv(dev);
1835 struct adapter *adapter = pi->adapter;
1836 u32 cmd;
1837 int ret;
1839 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
1840 return -EFAULT;
1842 switch (cmd) {
1843 case CHELSIO_SET_QSET_PARAMS:{
1844 int i;
1845 struct qset_params *q;
1846 struct ch_qset_params t;
1847 int q1 = pi->first_qset;
1848 int nqsets = pi->nqsets;
1850 if (!capable(CAP_NET_ADMIN))
1851 return -EPERM;
1852 if (copy_from_user(&t, useraddr, sizeof(t)))
1853 return -EFAULT;
1854 if (t.qset_idx >= SGE_QSETS)
1855 return -EINVAL;
1856 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
1857 !in_range(t.cong_thres, 0, 255) ||
1858 !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
1859 MAX_TXQ_ENTRIES) ||
1860 !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
1861 MAX_TXQ_ENTRIES) ||
1862 !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
1863 MAX_CTRL_TXQ_ENTRIES) ||
1864 !in_range(t.fl_size[0], MIN_FL_ENTRIES,
1865 MAX_RX_BUFFERS)
1866 || !in_range(t.fl_size[1], MIN_FL_ENTRIES,
1867 MAX_RX_JUMBO_BUFFERS)
1868 || !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
1869 MAX_RSPQ_ENTRIES))
1870 return -EINVAL;
1872 if ((adapter->flags & FULL_INIT_DONE) && t.lro > 0)
1873 for_each_port(adapter, i) {
1874 pi = adap2pinfo(adapter, i);
1875 if (t.qset_idx >= pi->first_qset &&
1876 t.qset_idx < pi->first_qset + pi->nqsets &&
1877 !pi->rx_csum_offload)
1878 return -EINVAL;
1881 if ((adapter->flags & FULL_INIT_DONE) &&
1882 (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
1883 t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
1884 t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
1885 t.polling >= 0 || t.cong_thres >= 0))
1886 return -EBUSY;
1888 /* Allow setting of any available qset when offload enabled */
1889 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
1890 q1 = 0;
1891 for_each_port(adapter, i) {
1892 pi = adap2pinfo(adapter, i);
1893 nqsets += pi->first_qset + pi->nqsets;
1897 if (t.qset_idx < q1)
1898 return -EINVAL;
1899 if (t.qset_idx > q1 + nqsets - 1)
1900 return -EINVAL;
1902 q = &adapter->params.sge.qset[t.qset_idx];
1904 if (t.rspq_size >= 0)
1905 q->rspq_size = t.rspq_size;
1906 if (t.fl_size[0] >= 0)
1907 q->fl_size = t.fl_size[0];
1908 if (t.fl_size[1] >= 0)
1909 q->jumbo_size = t.fl_size[1];
1910 if (t.txq_size[0] >= 0)
1911 q->txq_size[0] = t.txq_size[0];
1912 if (t.txq_size[1] >= 0)
1913 q->txq_size[1] = t.txq_size[1];
1914 if (t.txq_size[2] >= 0)
1915 q->txq_size[2] = t.txq_size[2];
1916 if (t.cong_thres >= 0)
1917 q->cong_thres = t.cong_thres;
1918 if (t.intr_lat >= 0) {
1919 struct sge_qset *qs =
1920 &adapter->sge.qs[t.qset_idx];
1922 q->coalesce_usecs = t.intr_lat;
1923 t3_update_qset_coalesce(qs, q);
1925 if (t.polling >= 0) {
1926 if (adapter->flags & USING_MSIX)
1927 q->polling = t.polling;
1928 else {
1929 /* No polling with INTx for T3A */
1930 if (adapter->params.rev == 0 &&
1931 !(adapter->flags & USING_MSI))
1932 t.polling = 0;
1934 for (i = 0; i < SGE_QSETS; i++) {
1935 q = &adapter->params.sge.
1936 qset[i];
1937 q->polling = t.polling;
1941 if (t.lro >= 0) {
1942 struct sge_qset *qs = &adapter->sge.qs[t.qset_idx];
1943 q->lro = t.lro;
1944 qs->lro_enabled = t.lro;
1946 break;
1948 case CHELSIO_GET_QSET_PARAMS:{
1949 struct qset_params *q;
1950 struct ch_qset_params t;
1951 int q1 = pi->first_qset;
1952 int nqsets = pi->nqsets;
1953 int i;
1955 if (copy_from_user(&t, useraddr, sizeof(t)))
1956 return -EFAULT;
1958 /* Display qsets for all ports when offload enabled */
1959 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
1960 q1 = 0;
1961 for_each_port(adapter, i) {
1962 pi = adap2pinfo(adapter, i);
1963 nqsets = pi->first_qset + pi->nqsets;
1967 if (t.qset_idx >= nqsets)
1968 return -EINVAL;
1970 q = &adapter->params.sge.qset[q1 + t.qset_idx];
1971 t.rspq_size = q->rspq_size;
1972 t.txq_size[0] = q->txq_size[0];
1973 t.txq_size[1] = q->txq_size[1];
1974 t.txq_size[2] = q->txq_size[2];
1975 t.fl_size[0] = q->fl_size;
1976 t.fl_size[1] = q->jumbo_size;
1977 t.polling = q->polling;
1978 t.lro = q->lro;
1979 t.intr_lat = q->coalesce_usecs;
1980 t.cong_thres = q->cong_thres;
1981 t.qnum = q1;
1983 if (adapter->flags & USING_MSIX)
1984 t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
1985 else
1986 t.vector = adapter->pdev->irq;
1988 if (copy_to_user(useraddr, &t, sizeof(t)))
1989 return -EFAULT;
1990 break;
1992 case CHELSIO_SET_QSET_NUM:{
1993 struct ch_reg edata;
1994 unsigned int i, first_qset = 0, other_qsets = 0;
1996 if (!capable(CAP_NET_ADMIN))
1997 return -EPERM;
1998 if (adapter->flags & FULL_INIT_DONE)
1999 return -EBUSY;
2000 if (copy_from_user(&edata, useraddr, sizeof(edata)))
2001 return -EFAULT;
2002 if (edata.val < 1 ||
2003 (edata.val > 1 && !(adapter->flags & USING_MSIX)))
2004 return -EINVAL;
2006 for_each_port(adapter, i)
2007 if (adapter->port[i] && adapter->port[i] != dev)
2008 other_qsets += adap2pinfo(adapter, i)->nqsets;
2010 if (edata.val + other_qsets > SGE_QSETS)
2011 return -EINVAL;
2013 pi->nqsets = edata.val;
2015 for_each_port(adapter, i)
2016 if (adapter->port[i]) {
2017 pi = adap2pinfo(adapter, i);
2018 pi->first_qset = first_qset;
2019 first_qset += pi->nqsets;
2021 break;
2023 case CHELSIO_GET_QSET_NUM:{
2024 struct ch_reg edata;
2026 edata.cmd = CHELSIO_GET_QSET_NUM;
2027 edata.val = pi->nqsets;
2028 if (copy_to_user(useraddr, &edata, sizeof(edata)))
2029 return -EFAULT;
2030 break;
2032 case CHELSIO_LOAD_FW:{
2033 u8 *fw_data;
2034 struct ch_mem_range t;
2036 if (!capable(CAP_SYS_RAWIO))
2037 return -EPERM;
2038 if (copy_from_user(&t, useraddr, sizeof(t)))
2039 return -EFAULT;
2040 /* Check t.len sanity ? */
2041 fw_data = kmalloc(t.len, GFP_KERNEL);
2042 if (!fw_data)
2043 return -ENOMEM;
2045 if (copy_from_user
2046 (fw_data, useraddr + sizeof(t), t.len)) {
2047 kfree(fw_data);
2048 return -EFAULT;
2051 ret = t3_load_fw(adapter, fw_data, t.len);
2052 kfree(fw_data);
2053 if (ret)
2054 return ret;
2055 break;
2057 case CHELSIO_SETMTUTAB:{
2058 struct ch_mtus m;
2059 int i;
2061 if (!is_offload(adapter))
2062 return -EOPNOTSUPP;
2063 if (!capable(CAP_NET_ADMIN))
2064 return -EPERM;
2065 if (offload_running(adapter))
2066 return -EBUSY;
2067 if (copy_from_user(&m, useraddr, sizeof(m)))
2068 return -EFAULT;
2069 if (m.nmtus != NMTUS)
2070 return -EINVAL;
2071 if (m.mtus[0] < 81) /* accommodate SACK */
2072 return -EINVAL;
2074 /* MTUs must be in ascending order */
2075 for (i = 1; i < NMTUS; ++i)
2076 if (m.mtus[i] < m.mtus[i - 1])
2077 return -EINVAL;
2079 memcpy(adapter->params.mtus, m.mtus,
2080 sizeof(adapter->params.mtus));
2081 break;
2083 case CHELSIO_GET_PM:{
2084 struct tp_params *p = &adapter->params.tp;
2085 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
2087 if (!is_offload(adapter))
2088 return -EOPNOTSUPP;
2089 m.tx_pg_sz = p->tx_pg_size;
2090 m.tx_num_pg = p->tx_num_pgs;
2091 m.rx_pg_sz = p->rx_pg_size;
2092 m.rx_num_pg = p->rx_num_pgs;
2093 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
2094 if (copy_to_user(useraddr, &m, sizeof(m)))
2095 return -EFAULT;
2096 break;
2098 case CHELSIO_SET_PM:{
2099 struct ch_pm m;
2100 struct tp_params *p = &adapter->params.tp;
2102 if (!is_offload(adapter))
2103 return -EOPNOTSUPP;
2104 if (!capable(CAP_NET_ADMIN))
2105 return -EPERM;
2106 if (adapter->flags & FULL_INIT_DONE)
2107 return -EBUSY;
2108 if (copy_from_user(&m, useraddr, sizeof(m)))
2109 return -EFAULT;
2110 if (!is_power_of_2(m.rx_pg_sz) ||
2111 !is_power_of_2(m.tx_pg_sz))
2112 return -EINVAL; /* not power of 2 */
2113 if (!(m.rx_pg_sz & 0x14000))
2114 return -EINVAL; /* not 16KB or 64KB */
2115 if (!(m.tx_pg_sz & 0x1554000))
2116 return -EINVAL;
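/*
 * Editor's note: 0x14000 has bits 14 and 16 set, and 0x1554000 has bits
 * 14, 16, 18, 20, 22 and 24 set, so combined with the power-of-2 test the
 * accepted page sizes are 16KB or 64KB for rx, and 16KB through 16MB in
 * powers of 4 for tx.
 */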
2117 if (m.tx_num_pg == -1)
2118 m.tx_num_pg = p->tx_num_pgs;
2119 if (m.rx_num_pg == -1)
2120 m.rx_num_pg = p->rx_num_pgs;
2121 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
2122 return -EINVAL;
2123 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
2124 m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
2125 return -EINVAL;
2126 p->rx_pg_size = m.rx_pg_sz;
2127 p->tx_pg_size = m.tx_pg_sz;
2128 p->rx_num_pgs = m.rx_num_pg;
2129 p->tx_num_pgs = m.tx_num_pg;
2130 break;
2132 case CHELSIO_GET_MEM:{
2133 struct ch_mem_range t;
2134 struct mc7 *mem;
2135 u64 buf[32];
2137 if (!is_offload(adapter))
2138 return -EOPNOTSUPP;
2139 if (!(adapter->flags & FULL_INIT_DONE))
2140 return -EIO; /* need the memory controllers */
2141 if (copy_from_user(&t, useraddr, sizeof(t)))
2142 return -EFAULT;
2143 if ((t.addr & 7) || (t.len & 7))
2144 return -EINVAL;
2145 if (t.mem_id == MEM_CM)
2146 mem = &adapter->cm;
2147 else if (t.mem_id == MEM_PMRX)
2148 mem = &adapter->pmrx;
2149 else if (t.mem_id == MEM_PMTX)
2150 mem = &adapter->pmtx;
2151 else
2152 return -EINVAL;
2154 /*
2155 * Version scheme:
2156 * bits 0..9: chip version
2157 * bits 10..15: chip revision
2158 */
2159 t.version = 3 | (adapter->params.rev << 10);
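/* e.g. a rev-2 part yields t.version = 3 | (2 << 10) = 0x803 */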
2160 if (copy_to_user(useraddr, &t, sizeof(t)))
2161 return -EFAULT;
2163 /*
2164 * Read 256 bytes at a time as len can be large and we don't
2165 * want to use huge intermediate buffers.
2166 */
2167 useraddr += sizeof(t); /* advance to start of buffer */
2168 while (t.len) {
2169 unsigned int chunk =
2170 min_t(unsigned int, t.len, sizeof(buf));
2172 ret =
2173 t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
2174 buf);
2175 if (ret)
2176 return ret;
2177 if (copy_to_user(useraddr, buf, chunk))
2178 return -EFAULT;
2179 useraddr += chunk;
2180 t.addr += chunk;
2181 t.len -= chunk;
2182 }
2183 break;
2184 }
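/*
 * Editor's sketch (illustrative only, not driver code): the bounded-buffer
 * copy-out pattern used by CHELSIO_GET_MEM above, with a hypothetical
 * read_fn() standing in for t3_mc7_bd_read().  Setup and includes follow
 * what this file already uses (min_t, copy_to_user).
 */
#if 0
static int copy_out_chunked(void __user *useraddr, unsigned long addr,
			    unsigned int len,
			    int (*read_fn)(unsigned long start,
					   unsigned int nwords, u64 *buf))
{
	u64 buf[32];	/* small 256-byte bounce buffer keeps allocations tiny */
	int ret;

	while (len) {
		unsigned int chunk = min_t(unsigned int, len, sizeof(buf));

		ret = read_fn(addr / 8, chunk / 8, buf);	/* device -> buffer */
		if (ret)
			return ret;
		if (copy_to_user(useraddr, buf, chunk))		/* buffer -> user */
			return -EFAULT;
		useraddr += chunk;
		addr += chunk;
		len -= chunk;
	}
	return 0;
}
#endif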
2185 case CHELSIO_SET_TRACE_FILTER:{
2186 struct ch_trace t;
2187 const struct trace_params *tp;
2189 if (!capable(CAP_NET_ADMIN))
2190 return -EPERM;
2191 if (!offload_running(adapter))
2192 return -EAGAIN;
2193 if (copy_from_user(&t, useraddr, sizeof(t)))
2194 return -EFAULT;
2196 tp = (const struct trace_params *)&t.sip;
2197 if (t.config_tx)
2198 t3_config_trace_filter(adapter, tp, 0,
2199 t.invert_match,
2200 t.trace_tx);
2201 if (t.config_rx)
2202 t3_config_trace_filter(adapter, tp, 1,
2203 t.invert_match,
2204 t.trace_rx);
2205 break;
2207 default:
2208 return -EOPNOTSUPP;
2210 return 0;
2213 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2215 struct mii_ioctl_data *data = if_mii(req);
2216 struct port_info *pi = netdev_priv(dev);
2217 struct adapter *adapter = pi->adapter;
2218 int ret, mmd;
2220 switch (cmd) {
2221 case SIOCGMIIPHY:
2222 data->phy_id = pi->phy.addr;
2223 /* FALLTHRU */
2224 case SIOCGMIIREG:{
2225 u32 val;
2226 struct cphy *phy = &pi->phy;
2228 if (!phy->mdio_read)
2229 return -EOPNOTSUPP;
2230 if (is_10G(adapter)) {
2231 mmd = data->phy_id >> 8;
2232 if (!mmd)
2233 mmd = MDIO_DEV_PCS;
2234 else if (mmd > MDIO_DEV_VEND2)
2235 return -EINVAL;
2237 ret =
2238 phy->mdio_read(adapter, data->phy_id & 0x1f,
2239 mmd, data->reg_num, &val);
2240 } else
2241 ret =
2242 phy->mdio_read(adapter, data->phy_id & 0x1f,
2243 0, data->reg_num & 0x1f,
2244 &val);
2245 if (!ret)
2246 data->val_out = val;
2247 break;
2249 case SIOCSMIIREG:{
2250 struct cphy *phy = &pi->phy;
2252 if (!capable(CAP_NET_ADMIN))
2253 return -EPERM;
2254 if (!phy->mdio_write)
2255 return -EOPNOTSUPP;
2256 if (is_10G(adapter)) {
2257 mmd = data->phy_id >> 8;
2258 if (!mmd)
2259 mmd = MDIO_DEV_PCS;
2260 else if (mmd > MDIO_DEV_VEND2)
2261 return -EINVAL;
2263 ret =
2264 phy->mdio_write(adapter,
2265 data->phy_id & 0x1f, mmd,
2266 data->reg_num,
2267 data->val_in);
2268 } else
2269 ret =
2270 phy->mdio_write(adapter,
2271 data->phy_id & 0x1f, 0,
2272 data->reg_num & 0x1f,
2273 data->val_in);
2274 break;
2276 case SIOCCHIOCTL:
2277 return cxgb_extension_ioctl(dev, req->ifr_data);
2278 default:
2279 return -EOPNOTSUPP;
2280 }
2281 return ret;
2282 }
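/*
 * Editor's note: for 10G PHYs the SIOC[GS]MIIREG phy_id is treated as a
 * clause-45 address above: bits 4..0 carry the PHY address and bits 15..8
 * the MMD, with MMD 0 mapped to the PCS device.
 */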
2284 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2286 struct port_info *pi = netdev_priv(dev);
2287 struct adapter *adapter = pi->adapter;
2288 int ret;
2290 if (new_mtu < 81) /* accommodate SACK */
2291 return -EINVAL;
2292 if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2293 return ret;
2294 dev->mtu = new_mtu;
2295 init_port_mtus(adapter);
2296 if (adapter->params.rev == 0 && offload_running(adapter))
2297 t3_load_mtus(adapter, adapter->params.mtus,
2298 adapter->params.a_wnd, adapter->params.b_wnd,
2299 adapter->port[0]->mtu);
2300 return 0;
2301 }
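/*
 * Editor's note (assumption): 81 appears to be the smallest MTU that still
 * leaves one byte of payload under a 20-byte IP header plus a maximal
 * 60-byte TCP header carrying SACK options.
 */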
2303 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2305 struct port_info *pi = netdev_priv(dev);
2306 struct adapter *adapter = pi->adapter;
2307 struct sockaddr *addr = p;
2309 if (!is_valid_ether_addr(addr->sa_data))
2310 return -EINVAL;
2312 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2313 t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
2314 if (offload_running(adapter))
2315 write_smt_entry(adapter, pi->port_id);
2316 return 0;
2317 }
2319 /**
2320 * t3_synchronize_rx - wait for current Rx processing on a port to complete
2321 * @adap: the adapter
2322 * @p: the port
2323 *
2324 * Ensures that current Rx processing on any of the queues associated with
2325 * the given port completes before returning.  We do this by acquiring and
2326 * releasing the locks of the response queues associated with the port.
2327 */
2328 static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2330 int i;
2332 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
2333 struct sge_rspq *q = &adap->sge.qs[i].rspq;
2335 spin_lock_irq(&q->lock);
2336 spin_unlock_irq(&q->lock);
2337 }
2338 }
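/*
 * Editor's note: the empty lock/unlock pair above is a quiesce idiom, not a
 * no-op.  spin_lock_irq() cannot return until any Rx handler currently
 * holding the queue lock has dropped it, so once the loop finishes no
 * pre-existing Rx processing for the port can still be in flight.
 */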
2340 static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2342 struct port_info *pi = netdev_priv(dev);
2343 struct adapter *adapter = pi->adapter;
2345 pi->vlan_grp = grp;
2346 if (adapter->params.rev > 0)
2347 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2348 else {
2349 /* single control for all ports */
2350 unsigned int i, have_vlans = 0;
2351 for_each_port(adapter, i)
2352 have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2354 t3_set_vlan_accel(adapter, 1, have_vlans);
2356 t3_synchronize_rx(adapter, pi);
2359 #ifdef CONFIG_NET_POLL_CONTROLLER
2360 static void cxgb_netpoll(struct net_device *dev)
2362 struct port_info *pi = netdev_priv(dev);
2363 struct adapter *adapter = pi->adapter;
2364 int qidx;
2366 for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2367 struct sge_qset *qs = &adapter->sge.qs[qidx];
2368 void *source;
2370 if (adapter->flags & USING_MSIX)
2371 source = qs;
2372 else
2373 source = adapter;
2375 t3_intr_handler(adapter, qs->rspq.polling) (0, source);
2376 }
2377 }
2378 #endif
2380 /*
2381 * Periodic accumulation of MAC statistics.
2382 */
2383 static void mac_stats_update(struct adapter *adapter)
2385 int i;
2387 for_each_port(adapter, i) {
2388 struct net_device *dev = adapter->port[i];
2389 struct port_info *p = netdev_priv(dev);
2391 if (netif_running(dev)) {
2392 spin_lock(&adapter->stats_lock);
2393 t3_mac_update_stats(&p->mac);
2394 spin_unlock(&adapter->stats_lock);
2399 static void check_link_status(struct adapter *adapter)
2401 int i;
2403 for_each_port(adapter, i) {
2404 struct net_device *dev = adapter->port[i];
2405 struct port_info *p = netdev_priv(dev);
2407 if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev))
2408 t3_link_changed(adapter, i);
2412 static void check_t3b2_mac(struct adapter *adapter)
2414 int i;
2416 if (!rtnl_trylock()) /* synchronize with ifdown */
2417 return;
2419 for_each_port(adapter, i) {
2420 struct net_device *dev = adapter->port[i];
2421 struct port_info *p = netdev_priv(dev);
2422 int status;
2424 if (!netif_running(dev))
2425 continue;
2427 status = 0;
2428 if (netif_carrier_ok(dev)) /* netif_running() already checked above */
2429 status = t3b2_mac_watchdog_task(&p->mac);
2430 if (status == 1)
2431 p->mac.stats.num_toggled++;
2432 else if (status == 2) {
2433 struct cmac *mac = &p->mac;
2435 t3_mac_set_mtu(mac, dev->mtu);
2436 t3_mac_set_address(mac, 0, dev->dev_addr);
2437 cxgb_set_rxmode(dev);
2438 t3_link_start(&p->phy, mac, &p->link_config);
2439 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2440 t3_port_intr_enable(adapter, p->port_id);
2441 p->mac.stats.num_resets++;
2444 rtnl_unlock();
2448 static void t3_adap_check_task(struct work_struct *work)
2450 struct adapter *adapter = container_of(work, struct adapter,
2451 adap_check_task.work);
2452 const struct adapter_params *p = &adapter->params;
2454 adapter->check_task_cnt++;
2456 /* Check link status for PHYs without interrupts */
2457 if (p->linkpoll_period)
2458 check_link_status(adapter);
2460 /* Accumulate MAC stats if needed */
2461 if (!p->linkpoll_period ||
2462 (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2463 p->stats_update_period) {
2464 mac_stats_update(adapter);
2465 adapter->check_task_cnt = 0;
2468 if (p->rev == T3_REV_B2)
2469 check_t3b2_mac(adapter);
2471 /* Schedule the next check update if any port is active. */
2472 spin_lock_irq(&adapter->work_lock);
2473 if (adapter->open_device_map & PORT_MASK)
2474 schedule_chk_task(adapter);
2475 spin_unlock_irq(&adapter->work_lock);
2476 }
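/*
 * Editor's note (assumption): the divide-by-10 above suggests that
 * linkpoll_period is kept in units of 0.1 s while stats_update_period is in
 * seconds, so MAC stats get accumulated roughly once per
 * stats_update_period even when link polling runs more often.
 */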
2478 /*
2479 * Processes external (PHY) interrupts in process context.
2480 */
2481 static void ext_intr_task(struct work_struct *work)
2483 struct adapter *adapter = container_of(work, struct adapter,
2484 ext_intr_handler_task);
2486 t3_phy_intr_handler(adapter);
2488 /* Now reenable external interrupts */
2489 spin_lock_irq(&adapter->work_lock);
2490 if (adapter->slow_intr_mask) {
2491 adapter->slow_intr_mask |= F_T3DBG;
2492 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2493 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2494 adapter->slow_intr_mask);
2496 spin_unlock_irq(&adapter->work_lock);
2497 }
2499 /*
2500 * Interrupt-context handler for external (PHY) interrupts.
2501 */
2502 void t3_os_ext_intr_handler(struct adapter *adapter)
2503 {
2504 /*
2505 * Schedule a task to handle external interrupts as they may be slow
2506 * and we use a mutex to protect MDIO registers.  We disable PHY
2507 * interrupts in the meantime and let the task reenable them when
2508 * it's done.
2509 */
2510 spin_lock(&adapter->work_lock);
2511 if (adapter->slow_intr_mask) {
2512 adapter->slow_intr_mask &= ~F_T3DBG;
2513 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2514 adapter->slow_intr_mask);
2515 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2517 spin_unlock(&adapter->work_lock);
2518 }
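/*
 * Editor's sketch (illustrative only, hypothetical names): the
 * mask-then-defer idiom used by ext_intr_task() and t3_os_ext_intr_handler()
 * above.  The hard-IRQ side masks the slow source and queues work; the work
 * item does the sleeping part and unmasks again.  INIT_WORK()/request_irq()
 * setup is elided.
 */
#if 0
struct slow_irq_demo {
	struct work_struct work;
	void __iomem *enable_reg;	/* per-source interrupt enable */
};

static void slow_irq_work(struct work_struct *work)
{
	struct slow_irq_demo *d =
		container_of(work, struct slow_irq_demo, work);

	/* slow handling that may sleep (e.g. MDIO access under a mutex) */
	writel(1, d->enable_reg);	/* re-enable the source when done */
}

static irqreturn_t slow_irq_top(int irq, void *data)
{
	struct slow_irq_demo *d = data;

	writel(0, d->enable_reg);	/* mask the slow source */
	schedule_work(&d->work);	/* defer to process context */
	return IRQ_HANDLED;
}
#endif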
2520 static int t3_adapter_error(struct adapter *adapter, int reset)
2522 int i, ret = 0;
2524 /* Stop all ports */
2525 for_each_port(adapter, i) {
2526 struct net_device *netdev = adapter->port[i];
2528 if (netif_running(netdev))
2529 cxgb_close(netdev);
2532 if (is_offload(adapter) &&
2533 test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
2534 offload_close(&adapter->tdev);
2536 /* Stop SGE timers */
2537 t3_stop_sge_timers(adapter);
2539 adapter->flags &= ~FULL_INIT_DONE;
2541 if (reset)
2542 ret = t3_reset_adapter(adapter);
2544 pci_disable_device(adapter->pdev);
2546 return ret;
2549 static int t3_reenable_adapter(struct adapter *adapter)
2551 if (pci_enable_device(adapter->pdev)) {
2552 dev_err(&adapter->pdev->dev,
2553 "Cannot re-enable PCI device after reset.\n");
2554 goto err;
2556 pci_set_master(adapter->pdev);
2557 pci_restore_state(adapter->pdev);
2559 /* Free sge resources */
2560 t3_free_sge_resources(adapter);
2562 if (t3_replay_prep_adapter(adapter))
2563 goto err;
2565 return 0;
2566 err:
2567 return -1;
2570 static void t3_resume_ports(struct adapter *adapter)
2572 int i;
2574 /* Restart the ports */
2575 for_each_port(adapter, i) {
2576 struct net_device *netdev = adapter->port[i];
2578 if (netif_running(netdev)) {
2579 if (cxgb_open(netdev)) {
2580 dev_err(&adapter->pdev->dev,
2581 "can't bring device back up"
2582 " after reset\n");
2583 continue;
2584 }
2585 }
2586 }
2587 }
2589 /*
2590 * Processes a fatal error: bring the ports down, reset the chip, then
2591 * bring the ports back up.
2592 */
2593 static void fatal_error_task(struct work_struct *work)
2595 struct adapter *adapter = container_of(work, struct adapter,
2596 fatal_error_handler_task);
2597 int err = 0;
2599 rtnl_lock();
2600 err = t3_adapter_error(adapter, 1);
2601 if (!err)
2602 err = t3_reenable_adapter(adapter);
2603 if (!err)
2604 t3_resume_ports(adapter);
2606 CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
2607 rtnl_unlock();
2610 void t3_fatal_err(struct adapter *adapter)
2612 unsigned int fw_status[4];
2614 if (adapter->flags & FULL_INIT_DONE) {
2615 t3_sge_stop(adapter);
2616 t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
2617 t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
2618 t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
2619 t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
2621 spin_lock(&adapter->work_lock);
2622 t3_intr_disable(adapter);
2623 queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
2624 spin_unlock(&adapter->work_lock);
2626 CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2627 if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2628 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2629 fw_status[0], fw_status[1],
2630 fw_status[2], fw_status[3]);
2631 }
2634 /**
2635 * t3_io_error_detected - called when PCI error is detected
2636 * @pdev: Pointer to PCI device
2637 * @state: The current pci connection state
2638 *
2639 * This function is called after a PCI bus error affecting
2640 * this device has been detected.
2641 */
2642 static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
2643 pci_channel_state_t state)
2645 struct adapter *adapter = pci_get_drvdata(pdev);
2646 int ret;
2648 ret = t3_adapter_error(adapter, 0);
2650 /* Request a slot reset. */
2651 return PCI_ERS_RESULT_NEED_RESET;
2652 }
2654 /**
2655 * t3_io_slot_reset - called after the pci bus has been reset.
2656 * @pdev: Pointer to PCI device
2657 *
2658 * Restart the card from scratch, as if from a cold-boot.
2659 */
2660 static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
2662 struct adapter *adapter = pci_get_drvdata(pdev);
2664 if (!t3_reenable_adapter(adapter))
2665 return PCI_ERS_RESULT_RECOVERED;
2667 return PCI_ERS_RESULT_DISCONNECT;
2668 }
2670 /**
2671 * t3_io_resume - called when traffic can start flowing again.
2672 * @pdev: Pointer to PCI device
2673 *
2674 * This callback is called when the error recovery driver tells us that
2675 * it's OK to resume normal operation.
2676 */
2677 static void t3_io_resume(struct pci_dev *pdev)
2679 struct adapter *adapter = pci_get_drvdata(pdev);
2681 t3_resume_ports(adapter);
2682 }
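/*
 * Editor's note: the three callbacks below follow the standard PCI error
 * recovery sequence (Documentation/pci-error-recovery.txt): the core calls
 * ->error_detected() when the error is seen, ->slot_reset() after the link
 * has been reset, and ->resume() once traffic may flow again.
 */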
2684 static struct pci_error_handlers t3_err_handler = {
2685 .error_detected = t3_io_error_detected,
2686 .slot_reset = t3_io_slot_reset,
2687 .resume = t3_io_resume,
2688 };
2690 /*
2691 * Set the number of qsets based on the number of CPUs and the number of ports,
2692 * not to exceed the number of available qsets, assuming there are enough qsets
2693 * per port in HW.
2694 */
2695 static void set_nqsets(struct adapter *adap)
2697 int i, j = 0;
2698 int num_cpus = num_online_cpus();
2699 int hwports = adap->params.nports;
2700 int nqsets = SGE_QSETS;
2702 if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
2703 if (hwports == 2 &&
2704 (hwports * nqsets > SGE_QSETS ||
2705 num_cpus >= nqsets / hwports))
2706 nqsets /= hwports;
2707 if (nqsets > num_cpus)
2708 nqsets = num_cpus;
2709 if (nqsets < 1 || hwports == 4)
2710 nqsets = 1;
2711 } else
2712 nqsets = 1;
2714 for_each_port(adap, i) {
2715 struct port_info *pi = adap2pinfo(adap, i);
2717 pi->first_qset = j;
2718 pi->nqsets = nqsets;
2719 j = pi->first_qset + nqsets;
2721 dev_info(&adap->pdev->dev,
2722 "Port %d using %d queue sets.\n", i, nqsets);
2726 static int __devinit cxgb_enable_msix(struct adapter *adap)
2728 struct msix_entry entries[SGE_QSETS + 1];
2729 int i, err;
2731 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2732 entries[i].entry = i;
2734 err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
2735 if (!err) {
2736 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2737 adap->msix_info[i].vec = entries[i].vector;
2738 } else if (err > 0)
2739 dev_info(&adap->pdev->dev,
2740 "only %d MSI-X vectors left, not using MSI-X\n", err);
2741 return err;
2742 }
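/*
 * Editor's note: with this era's pci_enable_msix() a return of 0 means all
 * requested vectors were allocated, a negative value is an error, and a
 * positive value is how many vectors were actually available; the driver
 * treats the positive case as "fall back to MSI/INTx" rather than retrying
 * with fewer vectors.
 */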
2744 static void __devinit print_port_info(struct adapter *adap,
2745 const struct adapter_info *ai)
2747 static const char *pci_variant[] = {
2748 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
2751 int i;
2752 char buf[80];
2754 if (is_pcie(adap))
2755 snprintf(buf, sizeof(buf), "%s x%d",
2756 pci_variant[adap->params.pci.variant],
2757 adap->params.pci.width);
2758 else
2759 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2760 pci_variant[adap->params.pci.variant],
2761 adap->params.pci.speed, adap->params.pci.width);
2763 for_each_port(adap, i) {
2764 struct net_device *dev = adap->port[i];
2765 const struct port_info *pi = netdev_priv(dev);
2767 if (!test_bit(i, &adap->registered_device_map))
2768 continue;
2769 printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
2770 dev->name, ai->desc, pi->phy.desc,
2771 is_offload(adap) ? "R" : "", adap->params.rev, buf,
2772 (adap->flags & USING_MSIX) ? " MSI-X" :
2773 (adap->flags & USING_MSI) ? " MSI" : "");
2774 if (adap->name == dev->name && adap->params.vpd.mclk)
2775 printk(KERN_INFO
2776 "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
2777 adap->name, t3_mc7_size(&adap->cm) >> 20,
2778 t3_mc7_size(&adap->pmtx) >> 20,
2779 t3_mc7_size(&adap->pmrx) >> 20,
2780 adap->params.vpd.sn);
2784 static int __devinit init_one(struct pci_dev *pdev,
2785 const struct pci_device_id *ent)
2787 static int version_printed;
2789 int i, err, pci_using_dac = 0;
2790 unsigned long mmio_start, mmio_len;
2791 const struct adapter_info *ai;
2792 struct adapter *adapter = NULL;
2793 struct port_info *pi;
2795 if (!version_printed) {
2796 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
2797 ++version_printed;
2800 if (!cxgb3_wq) {
2801 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
2802 if (!cxgb3_wq) {
2803 printk(KERN_ERR DRV_NAME
2804 ": cannot initialize work queue\n");
2805 return -ENOMEM;
2809 err = pci_request_regions(pdev, DRV_NAME);
2810 if (err) {
2811 /* Just info, some other driver may have claimed the device. */
2812 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
2813 return err;
2816 err = pci_enable_device(pdev);
2817 if (err) {
2818 dev_err(&pdev->dev, "cannot enable PCI device\n");
2819 goto out_release_regions;
2822 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2823 pci_using_dac = 1;
2824 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2825 if (err) {
2826 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
2827 "coherent allocations\n");
2828 goto out_disable_device;
2830 } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
2831 dev_err(&pdev->dev, "no usable DMA configuration\n");
2832 goto out_disable_device;
2835 pci_set_master(pdev);
2836 pci_save_state(pdev);
2838 mmio_start = pci_resource_start(pdev, 0);
2839 mmio_len = pci_resource_len(pdev, 0);
2840 ai = t3_get_adapter_info(ent->driver_data);
2842 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2843 if (!adapter) {
2844 err = -ENOMEM;
2845 goto out_disable_device;
2848 adapter->regs = ioremap_nocache(mmio_start, mmio_len);
2849 if (!adapter->regs) {
2850 dev_err(&pdev->dev, "cannot map device registers\n");
2851 err = -ENOMEM;
2852 goto out_free_adapter;
2855 adapter->pdev = pdev;
2856 adapter->name = pci_name(pdev);
2857 adapter->msg_enable = dflt_msg_enable;
2858 adapter->mmio_len = mmio_len;
2860 mutex_init(&adapter->mdio_lock);
2861 spin_lock_init(&adapter->work_lock);
2862 spin_lock_init(&adapter->stats_lock);
2864 INIT_LIST_HEAD(&adapter->adapter_list);
2865 INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
2866 INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
2867 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
2869 for (i = 0; i < ai->nports; ++i) {
2870 struct net_device *netdev;
2872 netdev = alloc_etherdev(sizeof(struct port_info));
2873 if (!netdev) {
2874 err = -ENOMEM;
2875 goto out_free_dev;
2878 SET_NETDEV_DEV(netdev, &pdev->dev);
2880 adapter->port[i] = netdev;
2881 pi = netdev_priv(netdev);
2882 pi->adapter = adapter;
2883 pi->rx_csum_offload = 1;
2884 pi->port_id = i;
2885 netif_carrier_off(netdev);
2886 netdev->irq = pdev->irq;
2887 netdev->mem_start = mmio_start;
2888 netdev->mem_end = mmio_start + mmio_len - 1;
2889 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
2890 netdev->features |= NETIF_F_LLTX;
2891 if (pci_using_dac)
2892 netdev->features |= NETIF_F_HIGHDMA;
2894 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2895 netdev->vlan_rx_register = vlan_rx_register;
2897 netdev->open = cxgb_open;
2898 netdev->stop = cxgb_close;
2899 netdev->hard_start_xmit = t3_eth_xmit;
2900 netdev->get_stats = cxgb_get_stats;
2901 netdev->set_multicast_list = cxgb_set_rxmode;
2902 netdev->do_ioctl = cxgb_ioctl;
2903 netdev->change_mtu = cxgb_change_mtu;
2904 netdev->set_mac_address = cxgb_set_mac_addr;
2905 #ifdef CONFIG_NET_POLL_CONTROLLER
2906 netdev->poll_controller = cxgb_netpoll;
2907 #endif
2909 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
2912 pci_set_drvdata(pdev, adapter);
2913 if (t3_prep_adapter(adapter, ai, 1) < 0) {
2914 err = -ENODEV;
2915 goto out_free_dev;
2916 }
2918 /*
2919 * The card is now ready to go.  If any errors occur during device
2920 * registration we do not fail the whole card but rather proceed only
2921 * with the ports we manage to register successfully.  However we must
2922 * register at least one net device.
2923 */
2924 for_each_port(adapter, i) {
2925 err = register_netdev(adapter->port[i]);
2926 if (err)
2927 dev_warn(&pdev->dev,
2928 "cannot register net device %s, skipping\n",
2929 adapter->port[i]->name);
2930 else {
2931 /*
2932 * Change the name we use for messages to the name of
2933 * the first successfully registered interface.
2934 */
2935 if (!adapter->registered_device_map)
2936 adapter->name = adapter->port[i]->name;
2938 __set_bit(i, &adapter->registered_device_map);
2941 if (!adapter->registered_device_map) {
2942 dev_err(&pdev->dev, "could not register any net devices\n");
2943 goto out_free_dev;
2946 /* Driver's ready. Reflect it on LEDs */
2947 t3_led_ready(adapter);
2949 if (is_offload(adapter)) {
2950 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
2951 cxgb3_adapter_ofld(adapter);
2954 /* See what interrupts we'll be using */
2955 if (msi > 1 && cxgb_enable_msix(adapter) == 0)
2956 adapter->flags |= USING_MSIX;
2957 else if (msi > 0 && pci_enable_msi(pdev) == 0)
2958 adapter->flags |= USING_MSI;
2960 set_nqsets(adapter);
2962 err = sysfs_create_group(&adapter->port[0]->dev.kobj,
2963 &cxgb3_attr_group);
2965 print_port_info(adapter, ai);
2966 return 0;
2968 out_free_dev:
2969 iounmap(adapter->regs);
2970 for (i = ai->nports - 1; i >= 0; --i)
2971 if (adapter->port[i])
2972 free_netdev(adapter->port[i]);
2974 out_free_adapter:
2975 kfree(adapter);
2977 out_disable_device:
2978 pci_disable_device(pdev);
2979 out_release_regions:
2980 pci_release_regions(pdev);
2981 pci_set_drvdata(pdev, NULL);
2982 return err;
2985 static void __devexit remove_one(struct pci_dev *pdev)
2987 struct adapter *adapter = pci_get_drvdata(pdev);
2989 if (adapter) {
2990 int i;
2992 t3_sge_stop(adapter);
2993 sysfs_remove_group(&adapter->port[0]->dev.kobj,
2994 &cxgb3_attr_group);
2996 if (is_offload(adapter)) {
2997 cxgb3_adapter_unofld(adapter);
2998 if (test_bit(OFFLOAD_DEVMAP_BIT,
2999 &adapter->open_device_map))
3000 offload_close(&adapter->tdev);
3003 for_each_port(adapter, i)
3004 if (test_bit(i, &adapter->registered_device_map))
3005 unregister_netdev(adapter->port[i]);
3007 t3_stop_sge_timers(adapter);
3008 t3_free_sge_resources(adapter);
3009 cxgb_disable_msi(adapter);
3011 for_each_port(adapter, i)
3012 if (adapter->port[i])
3013 free_netdev(adapter->port[i]);
3015 iounmap(adapter->regs);
3016 kfree(adapter);
3017 pci_release_regions(pdev);
3018 pci_disable_device(pdev);
3019 pci_set_drvdata(pdev, NULL);
3023 static struct pci_driver driver = {
3024 .name = DRV_NAME,
3025 .id_table = cxgb3_pci_tbl,
3026 .probe = init_one,
3027 .remove = __devexit_p(remove_one),
3028 .err_handler = &t3_err_handler,
3031 static int __init cxgb3_init_module(void)
3033 int ret;
3035 cxgb3_offload_init();
3037 ret = pci_register_driver(&driver);
3038 return ret;
3041 static void __exit cxgb3_cleanup_module(void)
3043 pci_unregister_driver(&driver);
3044 if (cxgb3_wq)
3045 destroy_workqueue(cxgb3_wq);
3048 module_init(cxgb3_init_module);
3049 module_exit(cxgb3_cleanup_module);