cxgb3: ease msi-x settings conditions
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / drivers / net / cxgb3 / cxgb3_main.c
blob52131bd4cc70bfc65e6f8c9f98d87abf34493e94
1 /*
2 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
32 #include <linux/module.h>
33 #include <linux/moduleparam.h>
34 #include <linux/init.h>
35 #include <linux/pci.h>
36 #include <linux/dma-mapping.h>
37 #include <linux/netdevice.h>
38 #include <linux/etherdevice.h>
39 #include <linux/if_vlan.h>
40 #include <linux/mii.h>
41 #include <linux/sockios.h>
42 #include <linux/workqueue.h>
43 #include <linux/proc_fs.h>
44 #include <linux/rtnetlink.h>
45 #include <linux/firmware.h>
46 #include <linux/log2.h>
47 #include <asm/uaccess.h>
49 #include "common.h"
50 #include "cxgb3_ioctl.h"
51 #include "regs.h"
52 #include "cxgb3_offload.h"
53 #include "version.h"
55 #include "cxgb3_ctl_defs.h"
56 #include "t3_cpl.h"
57 #include "firmware_exports.h"
/* Bounds on user-configurable SGE queue sizes (checked in ethtool handlers). */
enum {
	MAX_TXQ_ENTRIES = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES = 16384,
	MAX_RX_BUFFERS = 16384,
	MAX_RX_JUMBO_BUFFERS = 16384,
	MIN_TXQ_ENTRIES = 4,
	MIN_CTRL_TXQ_ENTRIES = 4,
	MIN_RSPQ_ENTRIES = 32,
	MIN_FL_ENTRIES = 32
};

/* Mask of the per-port bits in adapter->open_device_map. */
#define PORT_MASK ((1 << MAX_NPORTS) - 1)

/* Default netif message-level bitmap for new interfaces. */
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, idx) \
	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }

/* PCI IDs of supported T3 adapters; driver_data is the board index. */
static const struct pci_device_id cxgb3_pci_tbl[] = {
	CH_DEVICE(0x20, 0),	/* PE9000 */
	CH_DEVICE(0x21, 1),	/* T302E */
	CH_DEVICE(0x22, 2),	/* T310E */
	CH_DEVICE(0x23, 3),	/* T320X */
	CH_DEVICE(0x24, 1),	/* T302X */
	CH_DEVICE(0x25, 3),	/* T320E */
	CH_DEVICE(0x26, 2),	/* T310X */
	CH_DEVICE(0x30, 2),	/* T3B10 */
	CH_DEVICE(0x31, 3),	/* T3B20 */
	CH_DEVICE(0x32, 1),	/* T3B02 */
	{0,}
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");

/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */
static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
static struct workqueue_struct *cxgb3_wq;
/**
 *	link_report - show link status and link speed/duplex
 *	@dev: the net device whose port settings are to be reported
 *
 *	Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		printk(KERN_INFO "%s: link down\n", dev->name);
	else {
		const char *s = "10Mbps";	/* default if speed is unrecognized */
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_config.speed) {
		case SPEED_10000:
			s = "10Gbps";
			break;
		case SPEED_1000:
			s = "1000Mbps";
			break;
		case SPEED_100:
			s = "100Mbps";
			break;
		}

		printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
		       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
	}
}
/**
 *	t3_os_link_changed - handle link status changes
 *	@adapter: the adapter associated with the link change
 *	@port_id: the port index whose link status has changed
 *	@link_stat: the new status of the link
 *	@speed: the new speed setting
 *	@duplex: the new duplex setting
 *	@pause: the new flow-control setting
 *
 *	This is the OS-dependent handler for link status changes.  The OS
 *	neutral handler takes care of most of the processing for these events,
 *	then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
			int speed, int duplex, int pause)
{
	struct net_device *dev = adapter->port[port_id];
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	/* Skip changes from disabled ports. */
	if (!netif_running(dev))
		return;

	if (link_stat != netif_carrier_ok(dev)) {
		if (link_stat) {
			/* Link came up: start receiving before announcing carrier. */
			t3_mac_enable(mac, MAC_DIRECTION_RX);
			netif_carrier_on(dev);
		} else {
			netif_carrier_off(dev);
			pi->phy.ops->power_down(&pi->phy, 1);
			t3_mac_disable(mac, MAC_DIRECTION_RX);
			/* Re-arm autonegotiation so the link can come back. */
			t3_link_start(&pi->phy, mac, &pi->link_config);
		}

		link_report(dev);
	}
}
/**
 *	t3_os_phymod_changed - handle PHY module changes
 *	@adap: the adapter whose PHY reported the module change
 *	@port_id: the port index whose module changed
 *
 *	This is the OS-dependent handler for PHY module changes.  It is
 *	invoked when a PHY module is removed or inserted for any OS-specific
 *	processing.
 */
void t3_os_phymod_changed(struct adapter *adap, int port_id)
{
	/* Indexed by enum phy_modtype; NULL slot corresponds to "none". */
	static const char *mod_str[] = {
		NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
	};

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->phy.modtype == phy_modtype_none)
		printk(KERN_INFO "%s: PHY module unplugged\n", dev->name);
	else
		printk(KERN_INFO "%s: %s PHY module inserted\n", dev->name,
		       mod_str[pi->phy.modtype]);
}
236 static void cxgb_set_rxmode(struct net_device *dev)
238 struct t3_rx_mode rm;
239 struct port_info *pi = netdev_priv(dev);
241 init_rx_mode(&rm, dev, dev->mc_list);
242 t3_mac_set_rx_mode(&pi->mac, &rm);
/**
 *	link_start - enable a port
 *	@dev: the device to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
	struct t3_rx_mode rm;
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	init_rx_mode(&rm, dev, dev->mc_list);
	/* Reset the MAC, then reprogram MTU, address and RX filters before
	 * starting the link and enabling both traffic directions. */
	t3_mac_reset(mac);
	t3_mac_set_mtu(mac, dev->mtu);
	t3_mac_set_address(mac, 0, dev->dev_addr);
	t3_mac_set_rx_mode(mac, &rm);
	t3_link_start(&pi->phy, mac, &pi->link_config);
	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}
266 static inline void cxgb_disable_msi(struct adapter *adapter)
268 if (adapter->flags & USING_MSIX) {
269 pci_disable_msix(adapter->pdev);
270 adapter->flags &= ~USING_MSIX;
271 } else if (adapter->flags & USING_MSI) {
272 pci_disable_msi(adapter->pdev);
273 adapter->flags &= ~USING_MSI;
/*
 * Interrupt handler for asynchronous events used with MSI-X.
 * @cookie is the adapter pointer passed at request_irq() time.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
	t3_slow_intr_handler(cookie);
	return IRQ_HANDLED;
}
/*
 * Name the MSI-X interrupts: vector 0 gets the adapter name (async events),
 * each data vector gets "<ifname>-<qset>".  Descriptions are explicitly
 * NUL-terminated since snprintf is given a length one short of the buffer.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

	snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
	adap->msix_info[0].desc[n] = 0;

	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++) {
			snprintf(adap->msix_info[msi_idx].desc, n,
				 "%s-%d", d->name, pi->first_qset + i);
			adap->msix_info[msi_idx].desc[n] = 0;
		}
	}
}
/*
 * Request one MSI-X interrupt per SGE queue set.  Vector 0 is reserved for
 * async events, so data qset i uses msix_info[i + 1].  On failure all
 * previously requested data vectors are freed before returning the error.
 */
static int request_msix_data_irqs(struct adapter *adap)
{
	int i, j, err, qidx = 0;

	for_each_port(adap, i) {
		int nqsets = adap2pinfo(adap, i)->nqsets;

		for (j = 0; j < nqsets; ++j) {
			/* handler depends on whether the qset uses NAPI polling */
			err = request_irq(adap->msix_info[qidx + 1].vec,
					  t3_intr_handler(adap,
							  adap->sge.qs[qidx].
							  rspq.polling), 0,
					  adap->msix_info[qidx + 1].desc,
					  &adap->sge.qs[qidx]);
			if (err) {
				/* unwind: release every vector acquired so far */
				while (--qidx >= 0)
					free_irq(adap->msix_info[qidx + 1].vec,
						 &adap->sge.qs[qidx]);
				return err;
			}
			qidx++;
		}
	}
	return 0;
}
/*
 * Release the IRQs acquired in cxgb_up(): the async vector plus one vector
 * per qset under MSI-X, or the single device IRQ otherwise.
 */
static void free_irq_resources(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		int i, n = 0;

		free_irq(adapter->msix_info[0].vec, adapter);
		for_each_port(adapter, i)
			n += adap2pinfo(adapter, i)->nqsets;

		for (i = 0; i < n; ++i)
			free_irq(adapter->msix_info[i + 1].vec,
				 &adapter->sge.qs[i]);
	} else
		free_irq(adapter->pdev->irq, adapter);
}
/*
 * Wait (polling, up to ~50ms) until qset 0 has seen @n more offload packets
 * than @init_cnt, i.e. the replies to @n management requests have arrived.
 * Returns 0 on success or -ETIMEDOUT.
 */
static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
			      unsigned long n)
{
	int attempts = 5;

	while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
		if (!--attempts)
			return -ETIMEDOUT;
		msleep(10);
	}
	return 0;
}
/*
 * Initialize TP memories' parity by writing every SMT, L2T and RTE entry
 * plus one TCB field via management CPLs, then waiting for all replies.
 * Offload mode is enabled only for the duration of the exercise.
 * Returns 0 on success or -ETIMEDOUT from await_mgmt_replies().
 */
static int init_tp_parity(struct adapter *adap)
{
	int i;
	struct sk_buff *skb;
	struct cpl_set_tcb_field *greq;
	unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

	t3_tp_set_offload_mode(adap, 1);

	/* touch all 16 SMT entries */
	for (i = 0; i < 16; i++) {
		struct cpl_smt_write_req *req;

		/* __GFP_NOFAIL: this runs once at init and must not fail */
		skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
		req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
		req->iff = i;
		t3_mgmt_tx(adap, skb);
	}

	/* touch all 2048 L2T entries */
	for (i = 0; i < 2048; i++) {
		struct cpl_l2t_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
		req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
		req->params = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
	}

	/* touch all 2048 routing-table entries */
	for (i = 0; i < 2048; i++) {
		struct cpl_rte_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
		req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
		req->l2t_idx = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
	}

	/* one TCB-field write to initialize TCB memory parity */
	skb = alloc_skb(sizeof(*greq), GFP_KERNEL | __GFP_NOFAIL);
	greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
	memset(greq, 0, sizeof(*greq));
	greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
	greq->mask = cpu_to_be64(1);
	t3_mgmt_tx(adap, skb);

	i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
	t3_tp_set_offload_mode(adap, 0);
	return i;
}
/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS to distribute packets to multiple receive queues.  We
 *	configure the RSS CPU lookup table to distribute to the number of HW
 *	receive queues, and the response queue lookup table to narrow that
 *	down to the response queues actually configured for each port.
 *	We always configure the RSS mapping for two ports since the mapping
 *	table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
	int i;
	unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
	/* single-port adapters: treat port 1 as having 1 qset to avoid mod-0 */
	unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
	u8 cpus[SGE_QSETS + 1];
	u16 rspq_map[RSS_TABLE_SIZE];

	for (i = 0; i < SGE_QSETS; ++i)
		cpus[i] = i;
	cpus[SGE_QSETS] = 0xff;	/* terminator */

	/* lower half of the table maps to port 0's qsets, upper to port 1's */
	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
		rspq_map[i] = i % nq0;
		rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
	}

	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}
/*
 * Register a NAPI context for every qset that has been allocated
 * (qs->adap is only set for allocated qsets).
 */
static void init_napi(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++) {
		struct sge_qset *qs = &adap->sge.qs[i];

		if (qs->adap)
			netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
				       64);
	}

	/*
	 * netif_napi_add() can be called only once per napi_struct because it
	 * adds each new napi_struct to a list.  Be careful not to call it a
	 * second time, e.g., during EEH recovery, by making a note of it.
	 */
	adap->flags |= NAPI_INIT;
}
/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_disable(&adap->sge.qs[i].napi);
}
488 static void enable_all_napi(struct adapter *adap)
490 int i;
491 for (i = 0; i < SGE_QSETS; i++)
492 if (adap->sge.qs[i].adap)
493 napi_enable(&adap->sge.qs[i].napi);
/**
 *	set_qset_lro - Turn a queue set's LRO capability on and off
 *	@dev: the device the qset is attached to
 *	@qset_idx: the queue set index
 *	@val: the LRO switch
 *
 *	Sets LRO on or off for a particular queue set.
 *	the device's features flag is updated to reflect the LRO
 *	capability when all queues belonging to the device are
 *	in the same state.
 */
static void set_qset_lro(struct net_device *dev, int qset_idx, int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int i, lro_on = 1;

	adapter->params.sge.qset[qset_idx].lro = !!val;
	adapter->sge.qs[qset_idx].lro_enabled = !!val;

	/* let ethtool report LRO on only if all queues are LRO enabled */
	for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; ++i)
		lro_on &= adapter->params.sge.qset[i].lro;

	if (lro_on)
		dev->features |= NETIF_F_LRO;
	else
		dev->features &= ~NETIF_F_LRO;
}
/**
 *	setup_sge_qsets - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
	int i, j, err, irq_idx = 0, qset_idx = 0;
	unsigned int ntxq = SGE_TXQ_PER_SET;

	/* rev > 0 parts without MSI don't need a per-qset IRQ index */
	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
		irq_idx = -1;

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);

		pi->qs = &adap->sge.qs[pi->first_qset];
		for (j = pi->first_qset; j < pi->first_qset + pi->nqsets;
		     ++j, ++qset_idx) {
			set_qset_lro(dev, qset_idx, pi->rx_offload & T3_LRO);
			/* MSI-X data vectors start at 1 (0 is async events) */
			err = t3_sge_alloc_qset(adap, qset_idx, 1,
				(adap->flags & USING_MSIX) ? qset_idx + 1 :
							     irq_idx,
				&adap->params.sge.qset[qset_idx], ntxq, dev,
				netdev_get_tx_queue(dev, j));
			if (err) {
				/* tear down everything allocated so far */
				t3_stop_sge_timers(adap);
				t3_free_sge_resources(adap);
				return err;
			}
		}
	}

	return 0;
}
/*
 * Generic sysfs show helper: formats a value via @format under the rtnl
 * lock and returns the formatted length.
 */
static ssize_t attr_show(struct device *d, char *buf,
			 ssize_t(*format) (struct net_device *, char *))
{
	ssize_t len;

	/* Synchronize with ioctls that may shut down the device */
	rtnl_lock();
	len = (*format) (to_net_dev(d), buf);
	rtnl_unlock();
	return len;
}
/*
 * Generic sysfs store helper: parses an unsigned value, range-checks it
 * against [@min_val, @max_val], and applies it via @set under the rtnl lock.
 * Requires CAP_NET_ADMIN.  Returns @len on success or a negative errno.
 */
static ssize_t attr_store(struct device *d,
			  const char *buf, size_t len,
			  ssize_t(*set) (struct net_device *, unsigned int),
			  unsigned int min_val, unsigned int max_val)
{
	char *endp;
	ssize_t ret;
	unsigned int val;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val < min_val || val > max_val)
		return -EINVAL;

	rtnl_lock();
	ret = (*set) (to_net_dev(d), val);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}
/*
 * Generate format_<name>() and show_<name>() for a sysfs attribute whose
 * value is the expression @val_expr (evaluated with pi/adap in scope).
 */
#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
	struct port_info *pi = netdev_priv(dev); \
	struct adapter *adap = pi->adapter; \
	return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return attr_show(d, buf, format_##name); \
}
/*
 * sysfs backend: set the number of MC5 TCAM filter entries.  Rejected after
 * full initialization, on rev-0 parts, and when it would not leave room for
 * the configured servers and the minimum TID count.
 */
static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val && adap->params.rev == 0)
		return -EINVAL;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
	    min_tids)
		return -EINVAL;
	adap->params.mc5.nfilters = val;
	return 0;
}
/* sysfs store entry point for the nfilters attribute. */
static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nfilters, 0, ~0);
}
/*
 * sysfs backend: set the number of MC5 server entries.  Rejected after full
 * initialization or when it would not fit alongside filters and minimum TIDs.
 */
static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
	    MC5_MIN_TIDS)
		return -EINVAL;
	adap->params.mc5.nservers = val;
	return 0;
}
/* sysfs store entry point for the nservers attribute. */
static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nservers, 0, ~0);
}
/* Read-only sysfs attribute: show handler only. */
#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

/* Read-write sysfs attribute: show handler plus @store_method. */
#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

/* Attribute group registered on port 0's device in the probe path. */
static struct attribute *cxgb3_attrs[] = {
	&dev_attr_cam_size.attr,
	&dev_attr_nfilters.attr,
	&dev_attr_nservers.attr,
	NULL
};

static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
/*
 * Show the rate of TX traffic scheduler @sched.  Two schedulers share each
 * 32-bit TM PIO word (odd scheduler in the upper 16 bits); bpt/cpt are the
 * bytes-per-tick and clocks-per-tick fields.  A zero cpt means disabled.
 */
static ssize_t tm_attr_show(struct device *d,
			    char *buf, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int v, addr, bpt, cpt;
	ssize_t len;

	addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
	rtnl_lock();
	/* indirect register access: write address, then read data */
	t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v >>= 16;
	bpt = (v >> 8) & 0xff;
	cpt = v & 0xff;
	if (!cpt)
		len = sprintf(buf, "disabled\n");
	else {
		/* convert core-clock ticks to Kbps: cclk is in MHz */
		v = (adap->params.vpd.cclk * 1000) / cpt;
		len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
	}
	rtnl_unlock();
	return len;
}
/*
 * Store a new rate (in Kbps, max 10 Gbps) for TX traffic scheduler @sched.
 * Requires CAP_NET_ADMIN; applies the change under the rtnl lock.
 */
static ssize_t tm_attr_store(struct device *d,
			     const char *buf, size_t len, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int val;
	char *endp;
	ssize_t ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val > 10000000)
		return -EINVAL;

	rtnl_lock();
	ret = t3_config_sched(adap, val, sched);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}
/* Generate show/store handlers and a DEVICE_ATTR for TX scheduler @sched. */
#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return tm_attr_show(d, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
			    const char *buf, size_t len) \
{ \
	return tm_attr_store(d, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

/* Attribute group registered/removed in offload_open()/offload_close(). */
static struct attribute *offload_attrs[] = {
	&dev_attr_sched0.attr,
	&dev_attr_sched1.attr,
	&dev_attr_sched2.attr,
	&dev_attr_sched3.attr,
	&dev_attr_sched4.attr,
	&dev_attr_sched5.attr,
	&dev_attr_sched6.attr,
	&dev_attr_sched7.attr,
	NULL
};

static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
/*
 * Sends an sk_buff to an offload queue driver after dealing with any
 * active network taps.  Bottom halves are disabled around the send.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
	int rc;

	local_bh_disable();
	rc = t3_offload_tx(tdev, skb);
	local_bh_enable();

	return rc;
}
/*
 * Program SMT (source MAC table) entry @idx with the MAC address of port
 * @idx via a management CPL sent on the offload path.
 * Returns 0 on success or -ENOMEM.
 */
static int write_smt_entry(struct adapter *adapter, int idx)
{
	struct cpl_smt_write_req *req;
	struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;

	req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
	req->mtu_idx = NMTUS - 1;	/* should be 0 but there's a T3 bug */
	req->iff = idx;
	memset(req->src_mac1, 0, sizeof(req->src_mac1));
	memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
	skb->priority = 1;	/* send on the control/high-priority queue */
	offload_tx(&adapter->tdev, skb);
	return 0;
}
799 static int init_smt(struct adapter *adapter)
801 int i;
803 for_each_port(adapter, i)
804 write_smt_entry(adapter, i);
805 return 0;
808 static void init_port_mtus(struct adapter *adapter)
810 unsigned int mtus = adapter->port[0]->mtu;
812 if (adapter->port[1])
813 mtus |= adapter->port[1]->mtu << 16;
814 t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
/*
 * Send a firmware packet-scheduler management command binding queue @qidx of
 * scheduler @sched to @port with range [@lo, @hi].
 * Allocation uses __GFP_NOFAIL, so it cannot fail; returns t3_mgmt_tx()'s
 * result.
 */
static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
			     int hi, int port)
{
	struct sk_buff *skb;
	struct mngt_pktsched_wr *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
	req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
	req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
	req->sched = sched;
	req->idx = qidx;
	req->min = lo;
	req->max = hi;
	req->binding = port;
	ret = t3_mgmt_tx(adap, skb);

	return ret;
}
/*
 * Bind every qset of every port to its port's TX scheduler (scheduler 1,
 * unlimited rate range).  Continues past failures; returns the last error
 * encountered, or 0 if all bindings succeeded.
 */
static int bind_qsets(struct adapter *adap)
{
	int i, j, err = 0;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		for (j = 0; j < pi->nqsets; ++j) {
			int ret = send_pktsched_cmd(adap, 1,
						    pi->first_qset + j, -1,
						    -1, i);
			if (ret)
				err = ret;
		}
	}

	return err;
}
/* Firmware image name templates (filled with version / revision fields). */
#define FW_FNAME "cxgb3/t3fw-%d.%d.%d.bin"
#define TPSRAM_NAME "cxgb3/t3%c_psram-%d.%d.%d.bin"

/*
 * Load the firmware image matching the driver's expected version from
 * userspace via request_firmware() and flash it to the adapter.
 * Returns 0 on success or a negative errno.
 */
static int upgrade_fw(struct adapter *adap)
{
	int ret;
	char buf[64];
	const struct firmware *fw;
	struct device *dev = &adap->pdev->dev;

	snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
		 FW_VERSION_MINOR, FW_VERSION_MICRO);
	ret = request_firmware(&fw, buf, dev);
	if (ret < 0) {
		dev_err(dev, "could not upgrade firmware: unable to load %s\n",
			buf);
		return ret;
	}
	ret = t3_load_fw(adap, fw->data, fw->size);
	release_firmware(fw);

	if (ret == 0)
		dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
			 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
	else
		dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
			FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

	return ret;
}
888 static inline char t3rev2char(struct adapter *adapter)
890 char rev = 0;
892 switch(adapter->params.rev) {
893 case T3_REV_B:
894 case T3_REV_B2:
895 rev = 'b';
896 break;
897 case T3_REV_C:
898 rev = 'c';
899 break;
901 return rev;
904 static int update_tpsram(struct adapter *adap)
906 const struct firmware *tpsram;
907 char buf[64];
908 struct device *dev = &adap->pdev->dev;
909 int ret;
910 char rev;
912 rev = t3rev2char(adap);
913 if (!rev)
914 return 0;
916 snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
917 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
919 ret = request_firmware(&tpsram, buf, dev);
920 if (ret < 0) {
921 dev_err(dev, "could not load TP SRAM: unable to load %s\n",
922 buf);
923 return ret;
926 ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
927 if (ret)
928 goto release_tpsram;
930 ret = t3_set_proto_sram(adap, tpsram->data);
931 if (ret == 0)
932 dev_info(dev,
933 "successful update of protocol engine "
934 "to %d.%d.%d\n",
935 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
936 else
937 dev_err(dev, "failed to update of protocol engine %d.%d.%d\n",
938 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
939 if (ret)
940 dev_err(dev, "loading protocol SRAM failed\n");
942 release_tpsram:
943 release_firmware(tpsram);
945 return ret;
/**
 *	cxgb_up - enable the adapter
 *	@adap: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err;

	/* one-time full initialization: FW/TP upgrades, HW init, SGE, RSS */
	if (!(adap->flags & FULL_INIT_DONE)) {
		err = t3_check_fw_version(adap);
		if (err == -EINVAL) {
			/* version mismatch: attempt an in-place FW upgrade */
			err = upgrade_fw(adap);
			CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
				FW_VERSION_MAJOR, FW_VERSION_MINOR,
				FW_VERSION_MICRO, err ? "failed" : "succeeded");
		}

		err = t3_check_tpsram_version(adap);
		if (err == -EINVAL) {
			err = update_tpsram(adap);
			CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
				TP_VERSION_MAJOR, TP_VERSION_MINOR,
				TP_VERSION_MICRO, err ? "failed" : "succeeded");
		}

		/*
		 * Clear interrupts now to catch errors if t3_init_hw fails.
		 * We clear them again later as initialization may trigger
		 * conditions that can interrupt.
		 */
		t3_intr_clear(adap);

		err = t3_init_hw(adap, 0);
		if (err)
			goto out;

		t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
		t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

		err = setup_sge_qsets(adap);
		if (err)
			goto out;

		setup_rss(adap);
		if (!(adap->flags & NAPI_INIT))
			init_napi(adap);
		adap->flags |= FULL_INIT_DONE;
	}

	t3_intr_clear(adap);

	/* request IRQs: async vector + per-qset data vectors under MSI-X,
	 * otherwise a single (possibly shared) device interrupt */
	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec,
				  t3_async_intr_handler, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		err = request_msix_data_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else if ((err = request_irq(adap->pdev->irq,
				      t3_intr_handler(adap,
						      adap->sge.qs[0].rspq.
						      polling),
				      (adap->flags & USING_MSI) ?
				       0 : IRQF_SHARED,
				      adap->name, adap)))
		goto irq_err;

	enable_all_napi(adap);
	t3_sge_start(adap);
	t3_intr_enable(adap);

	/* rev-C parts: run TP parity init once, then unmask parity interrupts */
	if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
	    is_offload(adap) && init_tp_parity(adap) == 0)
		adap->flags |= TP_PARITY_INIT;

	if (adap->flags & TP_PARITY_INIT) {
		t3_write_reg(adap, A_TP_INT_CAUSE,
			     F_CMCACHEPERR | F_ARPLUTPERR);
		t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
	}

	if (!(adap->flags & QUEUES_BOUND)) {
		err = bind_qsets(adap);
		if (err) {
			CH_ERR(adap, "failed to bind qsets, err %d\n", err);
			t3_intr_disable(adap);
			free_irq_resources(adap);
			goto out;
		}
		adap->flags |= QUEUES_BOUND;
	}

out:
	return err;
irq_err:
	CH_ERR(adap, "request_irq failed, err %d\n", err);
	goto out;
}
/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
	t3_sge_stop(adapter);
	spin_lock_irq(&adapter->work_lock);	/* sync with PHY intr task */
	t3_intr_disable(adapter);
	spin_unlock_irq(&adapter->work_lock);

	free_irq_resources(adapter);
	flush_workqueue(cxgb3_wq);	/* wait for external IRQ handler */
	quiesce_rx(adapter);
}
/*
 * Queue the periodic adapter-check task on the driver workqueue.  The period
 * is the link-poll interval (in tenths of a second) if configured, otherwise
 * the statistics update period; a zero period disables the task.
 */
static void schedule_chk_task(struct adapter *adap)
{
	unsigned int timeo;

	timeo = adap->params.linkpoll_period ?
	    (HZ * adap->params.linkpoll_period) / 10 :
	    adap->params.stats_update_period * HZ;
	if (timeo)
		queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}
/*
 * Bring up the offload side of the adapter: enables offload mode, activates
 * the offload module, programs MTU and SMT tables, exposes the scheduler
 * sysfs attributes and notifies registered ULP clients.  Idempotent via the
 * OFFLOAD_DEVMAP_BIT test-and-set.  On error all offload state is unwound.
 */
static int offload_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct t3cdev *tdev = dev2t3cdev(dev);
	int adap_up = adapter->open_device_map & PORT_MASK;
	int err;

	if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	/* bring the adapter up first if no port has done so yet */
	if (!adap_up && (err = cxgb_up(adapter)) < 0)
		goto out;

	t3_tp_set_offload_mode(adapter, 1);
	tdev->lldev = adapter->port[0];
	err = cxgb3_offload_activate(adapter);
	if (err)
		goto out;

	init_port_mtus(adapter);
	t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
		     adapter->params.b_wnd,
		     adapter->params.rev == 0 ?
		     adapter->port[0]->mtu : 0xffff);
	init_smt(adapter);

	if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
		dev_dbg(&dev->dev, "cannot create sysfs group\n");

	/* Call back all registered clients */
	cxgb3_add_clients(tdev);

out:
	/* restore them in case the offload module has changed them */
	if (err) {
		t3_tp_set_offload_mode(adapter, 0);
		clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
		cxgb3_set_dummy_ops(tdev);
	}
	return err;
}
/*
 * Tear down the offload side: notify clients, remove the scheduler sysfs
 * group, disable offload mode and, if no port remains open, bring the whole
 * adapter down.  No-op if offload was not open.
 */
static int offload_close(struct t3cdev *tdev)
{
	struct adapter *adapter = tdev2adap(tdev);

	if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	/* Call back all registered clients */
	cxgb3_remove_clients(tdev);

	sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

	tdev->lldev = NULL;
	cxgb3_set_dummy_ops(tdev);
	t3_tp_set_offload_mode(adapter, 0);
	clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

	if (!adapter->open_device_map)
		cxgb_down(adapter);

	cxgb3_offload_deactivate(adapter);
	return 0;
}
/*
 * net_device open handler: brings the adapter up on first open, marks the
 * port open, optionally starts offload, and enables the link, port
 * interrupts and TX queues.  Offload failure is logged but not fatal.
 */
static int cxgb_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int other_ports = adapter->open_device_map & PORT_MASK;
	int err;

	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
		return err;

	set_bit(pi->port_id, &adapter->open_device_map);
	if (is_offload(adapter) && !ofld_disable) {
		err = offload_open(dev);
		if (err)
			printk(KERN_WARNING
			       "Could not initialize offload capabilities\n");
	}

	dev->real_num_tx_queues = pi->nqsets;
	link_start(dev);
	t3_port_intr_enable(adapter, pi->port_id);
	netif_tx_start_all_queues(dev);
	/* first port up starts the periodic adapter-check task */
	if (!other_ports)
		schedule_chk_task(adapter);

	return 0;
}
/*
 * net_device stop handler.  Quiesces the port (interrupts, Tx queues,
 * PHY, MAC), removes it from the open map under work_lock so the update
 * task sees a consistent view, and tears the adapter down when the last
 * user goes away.
 */
static int cxgb_close(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	t3_port_intr_disable(adapter, pi->port_id);
	netif_tx_stop_all_queues(dev);
	pi->phy.ops->power_down(&pi->phy, 1);
	netif_carrier_off(dev);
	t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

	spin_lock_irq(&adapter->work_lock);	/* sync with update task */
	clear_bit(pi->port_id, &adapter->open_device_map);
	spin_unlock_irq(&adapter->work_lock);

	/* Last Ethernet port closed: stop the periodic check task. */
	if (!(adapter->open_device_map & PORT_MASK))
		cancel_rearming_delayed_workqueue(cxgb3_wq,
						  &adapter->adap_check_task);

	/* No ports and no offload users left: bring the adapter down. */
	if (!adapter->open_device_map)
		cxgb_down(adapter);

	return 0;
}
1205 static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
1207 struct port_info *pi = netdev_priv(dev);
1208 struct adapter *adapter = pi->adapter;
1209 struct net_device_stats *ns = &pi->netstats;
1210 const struct mac_stats *pstats;
1212 spin_lock(&adapter->stats_lock);
1213 pstats = t3_mac_update_stats(&pi->mac);
1214 spin_unlock(&adapter->stats_lock);
1216 ns->tx_bytes = pstats->tx_octets;
1217 ns->tx_packets = pstats->tx_frames;
1218 ns->rx_bytes = pstats->rx_octets;
1219 ns->rx_packets = pstats->rx_frames;
1220 ns->multicast = pstats->rx_mcast_frames;
1222 ns->tx_errors = pstats->tx_underrun;
1223 ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
1224 pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
1225 pstats->rx_fifo_ovfl;
1227 /* detailed rx_errors */
1228 ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
1229 ns->rx_over_errors = 0;
1230 ns->rx_crc_errors = pstats->rx_fcs_errs;
1231 ns->rx_frame_errors = pstats->rx_symbol_errs;
1232 ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
1233 ns->rx_missed_errors = pstats->rx_cong_drops;
1235 /* detailed tx_errors */
1236 ns->tx_aborted_errors = 0;
1237 ns->tx_carrier_errors = 0;
1238 ns->tx_fifo_errors = pstats->tx_underrun;
1239 ns->tx_heartbeat_errors = 0;
1240 ns->tx_window_errors = 0;
1241 return ns;
1244 static u32 get_msglevel(struct net_device *dev)
1246 struct port_info *pi = netdev_priv(dev);
1247 struct adapter *adapter = pi->adapter;
1249 return adapter->msg_enable;
1252 static void set_msglevel(struct net_device *dev, u32 val)
1254 struct port_info *pi = netdev_priv(dev);
1255 struct adapter *adapter = pi->adapter;
1257 adapter->msg_enable = val;
/*
 * Names for the ETH_SS_STATS string set.  The order here MUST match the
 * order in which get_stats() fills its output array, entry for entry.
 */
static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK         ",
	"TxFramesOK         ",
	"TxMulticastFramesOK",
	"TxBroadcastFramesOK",
	"TxPauseFrames      ",
	"TxUnderrun         ",
	"TxExtUnderrun      ",

	"TxFrames64         ",
	"TxFrames65To127    ",
	"TxFrames128To255   ",
	"TxFrames256To511   ",
	"TxFrames512To1023  ",
	"TxFrames1024To1518 ",
	"TxFrames1519ToMax  ",

	"RxOctetsOK         ",
	"RxFramesOK         ",
	"RxMulticastFramesOK",
	"RxBroadcastFramesOK",
	"RxPauseFrames      ",
	"RxFCSErrors        ",
	"RxSymbolErrors     ",
	"RxShortErrors      ",
	"RxJabberErrors     ",
	"RxLengthErrors     ",
	"RxFIFOoverflow     ",

	"RxFrames64         ",
	"RxFrames65To127    ",
	"RxFrames128To255   ",
	"RxFrames256To511   ",
	"RxFrames512To1023  ",
	"RxFrames1024To1518 ",
	"RxFrames1519ToMax  ",

	"PhyFIFOErrors      ",
	"TSO                ",
	"VLANextractions    ",
	"VLANinsertions     ",
	"TxCsumOffload      ",
	"RxCsumGood         ",
	"LroAggregated      ",
	"LroFlushed         ",
	"LroNoDesc          ",
	"RxDrops            ",

	"CheckTXEnToggled   ",
	"CheckResets        ",
};
1313 static int get_sset_count(struct net_device *dev, int sset)
1315 switch (sset) {
1316 case ETH_SS_STATS:
1317 return ARRAY_SIZE(stats_strings);
1318 default:
1319 return -EOPNOTSUPP;
/* Size in bytes of the register dump produced by get_regs(). */
#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
	return T3_REGMAP_SIZE;
}
/* ethtool get_eeprom_len: size in bytes of the serial EEPROM. */
static int get_eeprom_len(struct net_device *dev)
{
	return EEPROMSIZE;
}
1335 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1337 struct port_info *pi = netdev_priv(dev);
1338 struct adapter *adapter = pi->adapter;
1339 u32 fw_vers = 0;
1340 u32 tp_vers = 0;
1342 spin_lock(&adapter->stats_lock);
1343 t3_get_fw_version(adapter, &fw_vers);
1344 t3_get_tp_version(adapter, &tp_vers);
1345 spin_unlock(&adapter->stats_lock);
1347 strcpy(info->driver, DRV_NAME);
1348 strcpy(info->version, DRV_VERSION);
1349 strcpy(info->bus_info, pci_name(adapter->pdev));
1350 if (!fw_vers)
1351 strcpy(info->fw_version, "N/A");
1352 else {
1353 snprintf(info->fw_version, sizeof(info->fw_version),
1354 "%s %u.%u.%u TP %u.%u.%u",
1355 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1356 G_FW_VERSION_MAJOR(fw_vers),
1357 G_FW_VERSION_MINOR(fw_vers),
1358 G_FW_VERSION_MICRO(fw_vers),
1359 G_TP_VERSION_MAJOR(tp_vers),
1360 G_TP_VERSION_MINOR(tp_vers),
1361 G_TP_VERSION_MICRO(tp_vers));
1365 static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
1367 if (stringset == ETH_SS_STATS)
1368 memcpy(data, stats_strings, sizeof(stats_strings));
1371 static unsigned long collect_sge_port_stats(struct adapter *adapter,
1372 struct port_info *p, int idx)
1374 int i;
1375 unsigned long tot = 0;
1377 for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
1378 tot += adapter->sge.qs[i].port_stats[idx];
1379 return tot;
/*
 * ethtool get_ethtool_stats handler.  Refreshes the MAC counters and
 * writes them out in EXACTLY the order declared in stats_strings[].
 */
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	const struct mac_stats *s;

	spin_lock(&adapter->stats_lock);
	s = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	/* Tx MAC counters */
	*data++ = s->tx_octets;
	*data++ = s->tx_frames;
	*data++ = s->tx_mcast_frames;
	*data++ = s->tx_bcast_frames;
	*data++ = s->tx_pause;
	*data++ = s->tx_underrun;
	*data++ = s->tx_fifo_urun;

	/* Tx frame-size histogram */
	*data++ = s->tx_frames_64;
	*data++ = s->tx_frames_65_127;
	*data++ = s->tx_frames_128_255;
	*data++ = s->tx_frames_256_511;
	*data++ = s->tx_frames_512_1023;
	*data++ = s->tx_frames_1024_1518;
	*data++ = s->tx_frames_1519_max;

	/* Rx MAC counters */
	*data++ = s->rx_octets;
	*data++ = s->rx_frames;
	*data++ = s->rx_mcast_frames;
	*data++ = s->rx_bcast_frames;
	*data++ = s->rx_pause;
	*data++ = s->rx_fcs_errs;
	*data++ = s->rx_symbol_errs;
	*data++ = s->rx_short;
	*data++ = s->rx_jabber;
	*data++ = s->rx_too_long;
	*data++ = s->rx_fifo_ovfl;

	/* Rx frame-size histogram */
	*data++ = s->rx_frames_64;
	*data++ = s->rx_frames_65_127;
	*data++ = s->rx_frames_128_255;
	*data++ = s->rx_frames_256_511;
	*data++ = s->rx_frames_512_1023;
	*data++ = s->rx_frames_1024_1518;
	*data++ = s->rx_frames_1519_max;

	*data++ = pi->phy.fifo_errors;

	/* software (SGE) per-port counters, summed over the port's qsets */
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_AGGR);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_FLUSHED);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_NO_DESC);
	*data++ = s->rx_cong_drops;

	*data++ = s->num_toggled;
	*data++ = s->num_resets;
}
1445 static inline void reg_block_dump(struct adapter *ap, void *buf,
1446 unsigned int start, unsigned int end)
1448 u32 *p = buf + start;
1450 for (; start <= end; start += sizeof(u32))
1451 *p++ = t3_read_reg(ap, start);
/*
 * ethtool get_regs handler: dump the interesting adapter register
 * ranges into @buf (T3_REGMAP_SIZE bytes, pre-zeroed).
 */
static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *ap = pi->adapter;

	/*
	 * Version scheme:
	 * bits 0..9: chip version
	 * bits 10..15: chip revision
	 * bit 31: set for PCIe cards
	 */
	regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);

	/*
	 * We skip the MAC statistics registers because they are clear-on-read.
	 * Also reading multi-register stats would need to synchronize with the
	 * periodic mac stats accumulation. Hard to justify the complexity.
	 */
	memset(buf, 0, T3_REGMAP_SIZE);
	reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
	reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
	reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
	reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
	reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
	/* second XGMAC instance: same registers at the XGM_REG(...,1) offsets */
	reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
		       XGM_REG(A_XGM_SERDES_STAT3, 1));
	reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
		       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}
1485 static int restart_autoneg(struct net_device *dev)
1487 struct port_info *p = netdev_priv(dev);
1489 if (!netif_running(dev))
1490 return -EAGAIN;
1491 if (p->link_config.autoneg != AUTONEG_ENABLE)
1492 return -EINVAL;
1493 p->phy.ops->autoneg_restart(&p->phy);
1494 return 0;
1497 static int cxgb3_phys_id(struct net_device *dev, u32 data)
1499 struct port_info *pi = netdev_priv(dev);
1500 struct adapter *adapter = pi->adapter;
1501 int i;
1503 if (data == 0)
1504 data = 2;
1506 for (i = 0; i < data * 2; i++) {
1507 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1508 (i & 1) ? F_GPIO0_OUT_VAL : 0);
1509 if (msleep_interruptible(500))
1510 break;
1512 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1513 F_GPIO0_OUT_VAL);
1514 return 0;
1517 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1519 struct port_info *p = netdev_priv(dev);
1521 cmd->supported = p->link_config.supported;
1522 cmd->advertising = p->link_config.advertising;
1524 if (netif_carrier_ok(dev)) {
1525 cmd->speed = p->link_config.speed;
1526 cmd->duplex = p->link_config.duplex;
1527 } else {
1528 cmd->speed = -1;
1529 cmd->duplex = -1;
1532 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1533 cmd->phy_address = p->phy.addr;
1534 cmd->transceiver = XCVR_EXTERNAL;
1535 cmd->autoneg = p->link_config.autoneg;
1536 cmd->maxtxpkt = 0;
1537 cmd->maxrxpkt = 0;
1538 return 0;
1541 static int speed_duplex_to_caps(int speed, int duplex)
1543 int cap = 0;
1545 switch (speed) {
1546 case SPEED_10:
1547 if (duplex == DUPLEX_FULL)
1548 cap = SUPPORTED_10baseT_Full;
1549 else
1550 cap = SUPPORTED_10baseT_Half;
1551 break;
1552 case SPEED_100:
1553 if (duplex == DUPLEX_FULL)
1554 cap = SUPPORTED_100baseT_Full;
1555 else
1556 cap = SUPPORTED_100baseT_Half;
1557 break;
1558 case SPEED_1000:
1559 if (duplex == DUPLEX_FULL)
1560 cap = SUPPORTED_1000baseT_Full;
1561 else
1562 cap = SUPPORTED_1000baseT_Half;
1563 break;
1564 case SPEED_10000:
1565 if (duplex == DUPLEX_FULL)
1566 cap = SUPPORTED_10000baseT_Full;
1568 return cap;
/* All link-speed advertisement bits this hardware can negotiate. */
#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
			 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
			 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
			 ADVERTISED_10000baseT_Full)

/*
 * ethtool set_settings handler.  Validates the requested speed/duplex/
 * autoneg combination against the PHY's capabilities and programs the
 * link if the interface is running.
 */
static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	int cap;
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (!(lc->supported & SUPPORTED_Autoneg)) {
		/*
		 * PHY offers a single speed/duplex. See if that's what's
		 * being requested.
		 */
		if (cmd->autoneg == AUTONEG_DISABLE) {
			cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
			if (lc->supported & cap)
				return 0;
		}
		return -EINVAL;
	}

	if (cmd->autoneg == AUTONEG_DISABLE) {
		int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

		/* 1Gb/s may only be negotiated, never forced (802.3 clause 28) */
		if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
			return -EINVAL;
		lc->requested_speed = cmd->speed;
		lc->requested_duplex = cmd->duplex;
		lc->advertising = 0;
	} else {
		/* keep only modes both requested and supported */
		cmd->advertising &= ADVERTISED_MASK;
		cmd->advertising &= lc->supported;
		if (!cmd->advertising)
			return -EINVAL;
		lc->requested_speed = SPEED_INVALID;
		lc->requested_duplex = DUPLEX_INVALID;
		lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
	}
	lc->autoneg = cmd->autoneg;
	if (netif_running(dev))
		t3_link_start(&p->phy, &p->mac, lc);
	return 0;
}
1618 static void get_pauseparam(struct net_device *dev,
1619 struct ethtool_pauseparam *epause)
1621 struct port_info *p = netdev_priv(dev);
1623 epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1624 epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1625 epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
/*
 * ethtool set_pauseparam handler.  Records the requested flow-control
 * configuration; if link autoneg is enabled the setting is negotiated
 * via t3_link_start(), otherwise it is applied to the MAC immediately.
 */
static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & SUPPORTED_Autoneg)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		/* pause autoneg requested but PHY cannot autonegotiate */
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (lc->autoneg == AUTONEG_ENABLE) {
		/* renegotiate so the peer learns the new pause settings */
		if (netif_running(dev))
			t3_link_start(&p->phy, &p->mac, lc);
	} else {
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
		/* -1, -1 keeps the current speed and duplex */
		if (netif_running(dev))
			t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
	}
	return 0;
}
1656 static u32 get_rx_csum(struct net_device *dev)
1658 struct port_info *p = netdev_priv(dev);
1660 return p->rx_offload & T3_RX_CSUM;
1663 static int set_rx_csum(struct net_device *dev, u32 data)
1665 struct port_info *p = netdev_priv(dev);
1667 if (data) {
1668 p->rx_offload |= T3_RX_CSUM;
1669 } else {
1670 int i;
1672 p->rx_offload &= ~(T3_RX_CSUM | T3_LRO);
1673 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++)
1674 set_qset_lro(dev, i, 0);
1676 return 0;
1679 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1681 struct port_info *pi = netdev_priv(dev);
1682 struct adapter *adapter = pi->adapter;
1683 const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1685 e->rx_max_pending = MAX_RX_BUFFERS;
1686 e->rx_mini_max_pending = 0;
1687 e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1688 e->tx_max_pending = MAX_TXQ_ENTRIES;
1690 e->rx_pending = q->fl_size;
1691 e->rx_mini_pending = q->rspq_size;
1692 e->rx_jumbo_pending = q->jumbo_size;
1693 e->tx_pending = q->txq_size[0];
1696 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1698 struct port_info *pi = netdev_priv(dev);
1699 struct adapter *adapter = pi->adapter;
1700 struct qset_params *q;
1701 int i;
1703 if (e->rx_pending > MAX_RX_BUFFERS ||
1704 e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1705 e->tx_pending > MAX_TXQ_ENTRIES ||
1706 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1707 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1708 e->rx_pending < MIN_FL_ENTRIES ||
1709 e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1710 e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1711 return -EINVAL;
1713 if (adapter->flags & FULL_INIT_DONE)
1714 return -EBUSY;
1716 q = &adapter->params.sge.qset[pi->first_qset];
1717 for (i = 0; i < pi->nqsets; ++i, ++q) {
1718 q->rspq_size = e->rx_mini_pending;
1719 q->fl_size = e->rx_pending;
1720 q->jumbo_size = e->rx_jumbo_pending;
1721 q->txq_size[0] = e->tx_pending;
1722 q->txq_size[1] = e->tx_pending;
1723 q->txq_size[2] = e->tx_pending;
1725 return 0;
1728 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1730 struct port_info *pi = netdev_priv(dev);
1731 struct adapter *adapter = pi->adapter;
1732 struct qset_params *qsp = &adapter->params.sge.qset[0];
1733 struct sge_qset *qs = &adapter->sge.qs[0];
1735 if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1736 return -EINVAL;
1738 qsp->coalesce_usecs = c->rx_coalesce_usecs;
1739 t3_update_qset_coalesce(qs, qsp);
1740 return 0;
1743 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1745 struct port_info *pi = netdev_priv(dev);
1746 struct adapter *adapter = pi->adapter;
1747 struct qset_params *q = adapter->params.sge.qset;
1749 c->rx_coalesce_usecs = q->coalesce_usecs;
1750 return 0;
1753 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1754 u8 * data)
1756 struct port_info *pi = netdev_priv(dev);
1757 struct adapter *adapter = pi->adapter;
1758 int i, err = 0;
1760 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1761 if (!buf)
1762 return -ENOMEM;
1764 e->magic = EEPROM_MAGIC;
1765 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1766 err = t3_seeprom_read(adapter, i, (__le32 *) & buf[i]);
1768 if (!err)
1769 memcpy(data, buf + e->offset, e->len);
1770 kfree(buf);
1771 return err;
/*
 * ethtool set_eeprom handler.  The EEPROM is written 32 bits at a time,
 * so an unaligned request is widened to 4-byte boundaries: the partial
 * first and last words are read back first and merged with the user
 * data.  Write protection is lifted for the duration of the update and
 * restored afterwards.
 */
static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		      u8 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 aligned_offset, aligned_len;
	__le32 *p;
	u8 *buf;
	int err;

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	/* widen [offset, offset+len) to 4-byte alignment */
	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
		/* unaligned: read-modify-write via a scratch buffer */
		buf = kmalloc(aligned_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
		if (!err && aligned_len > 4)
			err = t3_seeprom_read(adapter,
					      aligned_offset + aligned_len - 4,
					      (__le32 *) & buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
	} else
		buf = data;

	/* disable write protection while updating */
	err = t3_seeprom_wp(adapter, 0);
	if (err)
		goto out;

	for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = t3_seeprom_write(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	/* re-enable write protection even on a partially failed write */
	if (!err)
		err = t3_seeprom_wp(adapter, 1);
out:
	if (buf != data)
		kfree(buf);
	return err;
}
/* ethtool get_wol: Wake-on-LAN is not supported by this driver. */
static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	wol->supported = 0;
	wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}
1829 static int cxgb3_set_flags(struct net_device *dev, u32 data)
1831 struct port_info *pi = netdev_priv(dev);
1832 int i;
1834 if (data & ETH_FLAG_LRO) {
1835 if (!(pi->rx_offload & T3_RX_CSUM))
1836 return -EINVAL;
1838 pi->rx_offload |= T3_LRO;
1839 for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++)
1840 set_qset_lro(dev, i, 1);
1842 } else {
1843 pi->rx_offload &= ~T3_LRO;
1844 for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++)
1845 set_qset_lro(dev, i, 0);
1848 return 0;
/* ethtool operations exported for each cxgb3 port net_device */
static const struct ethtool_ops cxgb_ethtool_ops = {
	.get_settings = get_settings,
	.set_settings = set_settings,
	.get_drvinfo = get_drvinfo,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_ringparam = get_sge_param,
	.set_ringparam = set_sge_param,
	.get_coalesce = get_coalesce,
	.set_coalesce = set_coalesce,
	.get_eeprom_len = get_eeprom_len,
	.get_eeprom = get_eeprom,
	.set_eeprom = set_eeprom,
	.get_pauseparam = get_pauseparam,
	.set_pauseparam = set_pauseparam,
	.get_rx_csum = get_rx_csum,
	.set_rx_csum = set_rx_csum,
	.set_tx_csum = ethtool_op_set_tx_csum,
	.set_sg = ethtool_op_set_sg,
	.get_link = ethtool_op_get_link,
	.get_strings = get_strings,
	.phys_id = cxgb3_phys_id,
	.nway_reset = restart_autoneg,
	.get_sset_count = get_sset_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len = get_regs_len,
	.get_regs = get_regs,
	.get_wol = get_wol,
	.set_tso = ethtool_op_set_tso,
	.get_flags = ethtool_op_get_flags,
	.set_flags = cxgb3_set_flags,
};
/*
 * Range check used by the extension ioctls: a negative value means
 * "parameter not supplied" and is always accepted; otherwise the value
 * must lie within [lo, hi].
 */
static int in_range(int val, int lo, int hi)
{
	if (val < 0)
		return 1;

	return val >= lo && val <= hi;
}
/*
 * Handler for the SIOCCHIOCTL private ioctl.  Dispatches the Chelsio
 * extension commands (queue-set configuration, firmware load, MTU
 * table, payload-memory setup, memory dump, trace filters).  @useraddr
 * points at a command-specific structure whose first field is the
 * 32-bit command code.
 */
static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 cmd;
	int ret;

	if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
		return -EFAULT;

	switch (cmd) {
	case CHELSIO_SET_QSET_PARAMS:{
		int i;
		struct qset_params *q;
		struct ch_qset_params t;
		int q1 = pi->first_qset;
		int nqsets = pi->nqsets;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if (t.qset_idx >= SGE_QSETS)
			return -EINVAL;
		/* negative fields mean "leave unchanged" (see in_range()) */
		if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
			!in_range(t.cong_thres, 0, 255) ||
			!in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
				  MAX_TXQ_ENTRIES) ||
			!in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
				  MAX_TXQ_ENTRIES) ||
			!in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
				  MAX_CTRL_TXQ_ENTRIES) ||
			!in_range(t.fl_size[0], MIN_FL_ENTRIES,
				  MAX_RX_BUFFERS)
			|| !in_range(t.fl_size[1], MIN_FL_ENTRIES,
					MAX_RX_JUMBO_BUFFERS)
			|| !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
					MAX_RSPQ_ENTRIES))
			return -EINVAL;

		/* LRO may only be enabled on qsets whose port has rx csum on */
		if ((adapter->flags & FULL_INIT_DONE) && t.lro > 0)
			for_each_port(adapter, i) {
				pi = adap2pinfo(adapter, i);
				if (t.qset_idx >= pi->first_qset &&
				    t.qset_idx < pi->first_qset + pi->nqsets &&
				    !(pi->rx_offload & T3_RX_CSUM))
					return -EINVAL;
			}

		/* ring sizes and polling mode are fixed once queues exist */
		if ((adapter->flags & FULL_INIT_DONE) &&
			(t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
			t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
			t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
			t.polling >= 0 || t.cong_thres >= 0))
			return -EBUSY;

		/* Allow setting of any available qset when offload enabled */
		if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
			q1 = 0;
			for_each_port(adapter, i) {
				pi = adap2pinfo(adapter, i);
				/*
				 * NOTE(review): this accumulates with "+=",
				 * while the GET case below uses "=".  The
				 * resulting nqsets can overshoot the real
				 * count -- looks suspicious; confirm intent.
				 */
				nqsets += pi->first_qset + pi->nqsets;
			}
		}

		if (t.qset_idx < q1)
			return -EINVAL;
		if (t.qset_idx > q1 + nqsets - 1)
			return -EINVAL;

		q = &adapter->params.sge.qset[t.qset_idx];

		if (t.rspq_size >= 0)
			q->rspq_size = t.rspq_size;
		if (t.fl_size[0] >= 0)
			q->fl_size = t.fl_size[0];
		if (t.fl_size[1] >= 0)
			q->jumbo_size = t.fl_size[1];
		if (t.txq_size[0] >= 0)
			q->txq_size[0] = t.txq_size[0];
		if (t.txq_size[1] >= 0)
			q->txq_size[1] = t.txq_size[1];
		if (t.txq_size[2] >= 0)
			q->txq_size[2] = t.txq_size[2];
		if (t.cong_thres >= 0)
			q->cong_thres = t.cong_thres;
		if (t.intr_lat >= 0) {
			struct sge_qset *qs =
				&adapter->sge.qs[t.qset_idx];

			q->coalesce_usecs = t.intr_lat;
			t3_update_qset_coalesce(qs, q);
		}
		if (t.polling >= 0) {
			if (adapter->flags & USING_MSIX)
				q->polling = t.polling;
			else {
				/* No polling with INTx for T3A */
				if (adapter->params.rev == 0 &&
				    !(adapter->flags & USING_MSI))
					t.polling = 0;

				/* with a shared interrupt all qsets must agree */
				for (i = 0; i < SGE_QSETS; i++) {
					q = &adapter->params.sge.
						qset[i];
					q->polling = t.polling;
				}
			}
		}
		if (t.lro >= 0)
			set_qset_lro(dev, t.qset_idx, t.lro);

		break;
	}
	case CHELSIO_GET_QSET_PARAMS:{
		struct qset_params *q;
		struct ch_qset_params t;
		int q1 = pi->first_qset;
		int nqsets = pi->nqsets;
		int i;

		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;

		/* Display qsets for all ports when offload enabled */
		if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
			q1 = 0;
			for_each_port(adapter, i) {
				pi = adap2pinfo(adapter, i);
				nqsets = pi->first_qset + pi->nqsets;
			}
		}

		if (t.qset_idx >= nqsets)
			return -EINVAL;

		q = &adapter->params.sge.qset[q1 + t.qset_idx];
		t.rspq_size = q->rspq_size;
		t.txq_size[0] = q->txq_size[0];
		t.txq_size[1] = q->txq_size[1];
		t.txq_size[2] = q->txq_size[2];
		t.fl_size[0] = q->fl_size;
		t.fl_size[1] = q->jumbo_size;
		t.polling = q->polling;
		t.lro = q->lro;
		t.intr_lat = q->coalesce_usecs;
		t.cong_thres = q->cong_thres;
		t.qnum = q1;

		/* MSI-X vector 0 is the async/error interrupt; qsets start at 1 */
		if (adapter->flags & USING_MSIX)
			t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
		else
			t.vector = adapter->pdev->irq;

		if (copy_to_user(useraddr, &t, sizeof(t)))
			return -EFAULT;
		break;
	}
	case CHELSIO_SET_QSET_NUM:{
		struct ch_reg edata;
		unsigned int i, first_qset = 0, other_qsets = 0;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (adapter->flags & FULL_INIT_DONE)
			return -EBUSY;
		if (copy_from_user(&edata, useraddr, sizeof(edata)))
			return -EFAULT;
		/* multiple qsets per port require per-qset MSI-X vectors */
		if (edata.val < 1 ||
			(edata.val > 1 && !(adapter->flags & USING_MSIX)))
			return -EINVAL;

		for_each_port(adapter, i)
			if (adapter->port[i] && adapter->port[i] != dev)
				other_qsets += adap2pinfo(adapter, i)->nqsets;

		if (edata.val + other_qsets > SGE_QSETS)
			return -EINVAL;

		pi->nqsets = edata.val;

		/* reassign contiguous first_qset ranges to all ports */
		for_each_port(adapter, i)
			if (adapter->port[i]) {
				pi = adap2pinfo(adapter, i);
				pi->first_qset = first_qset;
				first_qset += pi->nqsets;
			}
		break;
	}
	case CHELSIO_GET_QSET_NUM:{
		struct ch_reg edata;

		edata.cmd = CHELSIO_GET_QSET_NUM;
		edata.val = pi->nqsets;
		if (copy_to_user(useraddr, &edata, sizeof(edata)))
			return -EFAULT;
		break;
	}
	case CHELSIO_LOAD_FW:{
		u8 *fw_data;
		struct ch_mem_range t;

		if (!capable(CAP_SYS_RAWIO))
			return -EPERM;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		/*
		 * Check t.len sanity ?
		 * NOTE(review): t.len is user-controlled and unchecked
		 * before kmalloc(); an upper bound should be enforced.
		 */
		fw_data = kmalloc(t.len, GFP_KERNEL);
		if (!fw_data)
			return -ENOMEM;

		if (copy_from_user
			(fw_data, useraddr + sizeof(t), t.len)) {
			kfree(fw_data);
			return -EFAULT;
		}

		ret = t3_load_fw(adapter, fw_data, t.len);
		kfree(fw_data);
		if (ret)
			return ret;
		break;
	}
	case CHELSIO_SETMTUTAB:{
		struct ch_mtus m;
		int i;

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (offload_running(adapter))
			return -EBUSY;
		if (copy_from_user(&m, useraddr, sizeof(m)))
			return -EFAULT;
		if (m.nmtus != NMTUS)
			return -EINVAL;
		if (m.mtus[0] < 81)	/* accommodate SACK */
			return -EINVAL;

		/* MTUs must be in ascending order */
		for (i = 1; i < NMTUS; ++i)
			if (m.mtus[i] < m.mtus[i - 1])
				return -EINVAL;

		memcpy(adapter->params.mtus, m.mtus,
			sizeof(adapter->params.mtus));
		break;
	}
	case CHELSIO_GET_PM:{
		struct tp_params *p = &adapter->params.tp;
		struct ch_pm m = {.cmd = CHELSIO_GET_PM };

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		m.tx_pg_sz = p->tx_pg_size;
		m.tx_num_pg = p->tx_num_pgs;
		m.rx_pg_sz = p->rx_pg_size;
		m.rx_num_pg = p->rx_num_pgs;
		m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
		if (copy_to_user(useraddr, &m, sizeof(m)))
			return -EFAULT;
		break;
	}
	case CHELSIO_SET_PM:{
		struct ch_pm m;
		struct tp_params *p = &adapter->params.tp;

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (adapter->flags & FULL_INIT_DONE)
			return -EBUSY;
		if (copy_from_user(&m, useraddr, sizeof(m)))
			return -EFAULT;
		if (!is_power_of_2(m.rx_pg_sz) ||
			!is_power_of_2(m.tx_pg_sz))
			return -EINVAL;	/* not power of 2 */
		if (!(m.rx_pg_sz & 0x14000))
			return -EINVAL;	/* not 16KB or 64KB */
		if (!(m.tx_pg_sz & 0x1554000))
			return -EINVAL;
		if (m.tx_num_pg == -1)
			m.tx_num_pg = p->tx_num_pgs;
		if (m.rx_num_pg == -1)
			m.rx_num_pg = p->rx_num_pgs;
		if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
			return -EINVAL;
		if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
			m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
			return -EINVAL;
		p->rx_pg_size = m.rx_pg_sz;
		p->tx_pg_size = m.tx_pg_sz;
		p->rx_num_pgs = m.rx_num_pg;
		p->tx_num_pgs = m.tx_num_pg;
		break;
	}
	case CHELSIO_GET_MEM:{
		struct ch_mem_range t;
		struct mc7 *mem;
		u64 buf[32];

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!(adapter->flags & FULL_INIT_DONE))
			return -EIO;	/* need the memory controllers */
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if ((t.addr & 7) || (t.len & 7))
			return -EINVAL;
		if (t.mem_id == MEM_CM)
			mem = &adapter->cm;
		else if (t.mem_id == MEM_PMRX)
			mem = &adapter->pmrx;
		else if (t.mem_id == MEM_PMTX)
			mem = &adapter->pmtx;
		else
			return -EINVAL;

		/*
		 * Version scheme:
		 * bits 0..9: chip version
		 * bits 10..15: chip revision
		 */
		t.version = 3 | (adapter->params.rev << 10);
		if (copy_to_user(useraddr, &t, sizeof(t)))
			return -EFAULT;

		/*
		 * Read 256 bytes at a time as len can be large and we don't
		 * want to use huge intermediate buffers.
		 */
		useraddr += sizeof(t);	/* advance to start of buffer */
		while (t.len) {
			unsigned int chunk =
				min_t(unsigned int, t.len, sizeof(buf));

			ret =
				t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
						buf);
			if (ret)
				return ret;
			if (copy_to_user(useraddr, buf, chunk))
				return -EFAULT;
			useraddr += chunk;
			t.addr += chunk;
			t.len -= chunk;
		}
		break;
	}
	case CHELSIO_SET_TRACE_FILTER:{
		struct ch_trace t;
		const struct trace_params *tp;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (!offload_running(adapter))
			return -EAGAIN;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;

		/* the filter fields of ch_trace mirror trace_params */
		tp = (const struct trace_params *)&t.sip;
		if (t.config_tx)
			t3_config_trace_filter(adapter, tp, 0,
						t.invert_match,
						t.trace_tx);
		if (t.config_rx)
			t3_config_trace_filter(adapter, tp, 1,
						t.invert_match,
						t.trace_rx);
		break;
	}
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
/*
 * net_device ioctl handler.  Implements the standard MII register
 * access ioctls (for 10G ports the MMD is taken from bits 8+ of phy_id,
 * defaulting to the PCS device) and forwards SIOCCHIOCTL to the Chelsio
 * extension handler.
 */
static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct mii_ioctl_data *data = if_mii(req);
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret, mmd;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = pi->phy.addr;
		/* FALLTHRU */
	case SIOCGMIIREG:{
		u32 val;
		struct cphy *phy = &pi->phy;

		if (!phy->mdio_read)
			return -EOPNOTSUPP;
		if (is_10G(adapter)) {
			/* clause 45: MMD encoded in the upper phy_id bits */
			mmd = data->phy_id >> 8;
			if (!mmd)
				mmd = MDIO_DEV_PCS;
			else if (mmd > MDIO_DEV_VEND2)
				return -EINVAL;

			ret =
				phy->mdio_read(adapter, data->phy_id & 0x1f,
						mmd, data->reg_num, &val);
		} else
			ret =
				phy->mdio_read(adapter, data->phy_id & 0x1f,
						0, data->reg_num & 0x1f,
						&val);
		if (!ret)
			data->val_out = val;
		break;
	}
	case SIOCSMIIREG:{
		struct cphy *phy = &pi->phy;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (!phy->mdio_write)
			return -EOPNOTSUPP;
		if (is_10G(adapter)) {
			mmd = data->phy_id >> 8;
			if (!mmd)
				mmd = MDIO_DEV_PCS;
			else if (mmd > MDIO_DEV_VEND2)
				return -EINVAL;

			ret =
				phy->mdio_write(adapter,
						data->phy_id & 0x1f, mmd,
						data->reg_num,
						data->val_in);
		} else
			ret =
				phy->mdio_write(adapter,
						data->phy_id & 0x1f, 0,
						data->reg_num & 0x1f,
						data->val_in);
		break;
	}
	case SIOCCHIOCTL:
		return cxgb_extension_ioctl(dev, req->ifr_data);
	default:
		return -EOPNOTSUPP;
	}
	return ret;
}
2339 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2341 struct port_info *pi = netdev_priv(dev);
2342 struct adapter *adapter = pi->adapter;
2343 int ret;
2345 if (new_mtu < 81) /* accommodate SACK */
2346 return -EINVAL;
2347 if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2348 return ret;
2349 dev->mtu = new_mtu;
2350 init_port_mtus(adapter);
2351 if (adapter->params.rev == 0 && offload_running(adapter))
2352 t3_load_mtus(adapter, adapter->params.mtus,
2353 adapter->params.a_wnd, adapter->params.b_wnd,
2354 adapter->port[0]->mtu);
2355 return 0;
2358 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2360 struct port_info *pi = netdev_priv(dev);
2361 struct adapter *adapter = pi->adapter;
2362 struct sockaddr *addr = p;
2364 if (!is_valid_ether_addr(addr->sa_data))
2365 return -EINVAL;
2367 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2368 t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
2369 if (offload_running(adapter))
2370 write_smt_entry(adapter, pi->port_id);
2371 return 0;
/**
 *	t3_synchronize_rx - wait for current Rx processing on a port to complete
 *	@adap: the adapter
 *	@p: the port
 *
 *	Ensures that current Rx processing on any of the queues associated with
 *	the given port completes before returning.  We do this by acquiring and
 *	releasing the locks of the response queues associated with the port.
 */
static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
{
	int i;

	for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
		struct sge_rspq *q = &adap->sge.qs[i].rspq;

		/* take and drop each rspq lock: any in-flight Rx handler
		 * holding the lock must finish before we can acquire it */
		spin_lock_irq(&q->lock);
		spin_unlock_irq(&q->lock);
	}
}
/*
 * VLAN group registration callback.  Records the group and enables or
 * disables HW VLAN acceleration accordingly; rev-0 parts have only a
 * single global control shared by all ports.
 */
static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	pi->vlan_grp = grp;
	if (adapter->params.rev > 0)
		t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
	else {
		/* single control for all ports */
		unsigned int i, have_vlans = 0;
		for_each_port(adapter, i)
		    have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;

		t3_set_vlan_accel(adapter, 1, have_vlans);
	}
	/* make sure no Rx handler still sees the old group setting */
	t3_synchronize_rx(adapter, pi);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * netpoll callback: invoke the interrupt handler for each of the port's
 * queue sets.  The handler's "source" cookie differs by interrupt mode:
 * per-qset for MSI-X, the whole adapter otherwise.
 */
static void cxgb_netpoll(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int qidx;

	for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
		struct sge_qset *qs = &adapter->sge.qs[qidx];
		void *source;

		if (adapter->flags & USING_MSIX)
			source = qs;
		else
			source = adapter;

		/* t3_intr_handler() returns the irq handler to call */
		t3_intr_handler(adapter, qs->rspq.polling) (0, source);
	}
}
#endif
2436 * Periodic accumulation of MAC statistics.
2438 static void mac_stats_update(struct adapter *adapter)
2440 int i;
2442 for_each_port(adapter, i) {
2443 struct net_device *dev = adapter->port[i];
2444 struct port_info *p = netdev_priv(dev);
2446 if (netif_running(dev)) {
2447 spin_lock(&adapter->stats_lock);
2448 t3_mac_update_stats(&p->mac);
2449 spin_unlock(&adapter->stats_lock);
2454 static void check_link_status(struct adapter *adapter)
2456 int i;
2458 for_each_port(adapter, i) {
2459 struct net_device *dev = adapter->port[i];
2460 struct port_info *p = netdev_priv(dev);
2462 if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev))
2463 t3_link_changed(adapter, i);
2467 static void check_t3b2_mac(struct adapter *adapter)
2469 int i;
2471 if (!rtnl_trylock()) /* synchronize with ifdown */
2472 return;
2474 for_each_port(adapter, i) {
2475 struct net_device *dev = adapter->port[i];
2476 struct port_info *p = netdev_priv(dev);
2477 int status;
2479 if (!netif_running(dev))
2480 continue;
2482 status = 0;
2483 if (netif_running(dev) && netif_carrier_ok(dev))
2484 status = t3b2_mac_watchdog_task(&p->mac);
2485 if (status == 1)
2486 p->mac.stats.num_toggled++;
2487 else if (status == 2) {
2488 struct cmac *mac = &p->mac;
2490 t3_mac_set_mtu(mac, dev->mtu);
2491 t3_mac_set_address(mac, 0, dev->dev_addr);
2492 cxgb_set_rxmode(dev);
2493 t3_link_start(&p->phy, mac, &p->link_config);
2494 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2495 t3_port_intr_enable(adapter, p->port_id);
2496 p->mac.stats.num_resets++;
2499 rtnl_unlock();
/*
 * Periodic housekeeping work item: polls link state for interruptless
 * PHYs, accumulates MAC statistics at the configured rate, runs the T3B2
 * MAC watchdog, and reschedules itself while any port is open.
 */
static void t3_adap_check_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       adap_check_task.work);
	const struct adapter_params *p = &adapter->params;

	adapter->check_task_cnt++;

	/* Check link status for PHYs without interrupts */
	if (p->linkpoll_period)
		check_link_status(adapter);

	/* Accumulate MAC stats if needed */
	if (!p->linkpoll_period ||
	    (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
	    p->stats_update_period) {
		mac_stats_update(adapter);
		adapter->check_task_cnt = 0;
	}

	if (p->rev == T3_REV_B2)
		check_t3b2_mac(adapter);

	/* Schedule the next check update if any port is active. */
	spin_lock_irq(&adapter->work_lock);
	if (adapter->open_device_map & PORT_MASK)
		schedule_chk_task(adapter);
	spin_unlock_irq(&adapter->work_lock);
}
/*
 * Processes external (PHY) interrupts in process context.
 */
static void ext_intr_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       ext_intr_handler_task);

	t3_phy_intr_handler(adapter);

	/* Now reenable external interrupts */
	spin_lock_irq(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		/* re-add the F_T3DBG bit that t3_os_ext_intr_handler()
		 * cleared, ack any pending cause, and rewrite the enable */
		adapter->slow_intr_mask |= F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
	}
	spin_unlock_irq(&adapter->work_lock);
}
/*
 * Interrupt-context handler for external (PHY) interrupts.
 */
void t3_os_ext_intr_handler(struct adapter *adapter)
{
	/*
	 * Schedule a task to handle external interrupts as they may be slow
	 * and we use a mutex to protect MDIO registers. We disable PHY
	 * interrupts in the meantime and let the task reenable them when
	 * it's done.
	 */
	spin_lock(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask &= ~F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
		queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
	}
	spin_unlock(&adapter->work_lock);
}
/*
 * Quiesce the adapter after a fatal or PCI error: close all running
 * ports, shut down the offload stack if it is up, stop the SGE timers,
 * optionally reset the chip, and disable the PCI device.  Returns 0 or
 * the result of the chip reset.
 */
static int t3_adapter_error(struct adapter *adapter, int reset)
{
	int i, ret = 0;

	/* Stop all ports */
	for_each_port(adapter, i) {
		struct net_device *netdev = adapter->port[i];

		if (netif_running(netdev))
			cxgb_close(netdev);
	}

	if (is_offload(adapter) &&
	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		offload_close(&adapter->tdev);

	/* Stop SGE timers */
	t3_stop_sge_timers(adapter);

	adapter->flags &= ~FULL_INIT_DONE;

	if (reset)
		ret = t3_reset_adapter(adapter);

	pci_disable_device(adapter->pdev);

	return ret;
}
2604 static int t3_reenable_adapter(struct adapter *adapter)
2606 if (pci_enable_device(adapter->pdev)) {
2607 dev_err(&adapter->pdev->dev,
2608 "Cannot re-enable PCI device after reset.\n");
2609 goto err;
2611 pci_set_master(adapter->pdev);
2612 pci_restore_state(adapter->pdev);
2614 /* Free sge resources */
2615 t3_free_sge_resources(adapter);
2617 if (t3_replay_prep_adapter(adapter))
2618 goto err;
2620 return 0;
2621 err:
2622 return -1;
/*
 * Reopen every port that was running before the adapter error; ports
 * that fail to come back up are logged and skipped rather than aborting
 * the whole recovery.
 */
static void t3_resume_ports(struct adapter *adapter)
{
	int i;

	/* Restart the ports */
	for_each_port(adapter, i) {
		struct net_device *netdev = adapter->port[i];

		if (netif_running(netdev)) {
			if (cxgb_open(netdev)) {
				dev_err(&adapter->pdev->dev,
					"can't bring device back up"
					" after reset\n");
				continue;
			}
		}
	}
}
/*
 * processes a fatal error.
 * Bring the ports down, reset the chip, bring the ports back up.
 */
static void fatal_error_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       fatal_error_handler_task);
	int err = 0;

	/* rtnl serializes against concurrent ifup/ifdown */
	rtnl_lock();
	err = t3_adapter_error(adapter, 1);
	if (!err)
		err = t3_reenable_adapter(adapter);
	if (!err)
		t3_resume_ports(adapter);

	CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
	rtnl_unlock();
}
/*
 * Handle a fatal HW error: stop the SGE and both ports' MACs, disable
 * interrupts, and hand recovery off to fatal_error_task.  Also dumps the
 * firmware status words for post-mortem analysis.
 */
void t3_fatal_err(struct adapter *adapter)
{
	unsigned int fw_status[4];

	if (adapter->flags & FULL_INIT_DONE) {
		t3_sge_stop(adapter);
		/* silence both MACs (port 0 and port 1 register copies) */
		t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
		t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
		t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
		t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);

		spin_lock(&adapter->work_lock);
		t3_intr_disable(adapter);
		queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
		spin_unlock(&adapter->work_lock);
	}
	CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
	/* 0xa0 is the CIM scratch area holding the FW status words */
	if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
		CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
			 fw_status[0], fw_status[1],
			 fw_status[2], fw_status[3]);
}
2690 * t3_io_error_detected - called when PCI error is detected
2691 * @pdev: Pointer to PCI device
2692 * @state: The current pci connection state
2694 * This function is called after a PCI bus error affecting
2695 * this device has been detected.
2697 static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
2698 pci_channel_state_t state)
2700 struct adapter *adapter = pci_get_drvdata(pdev);
2701 int ret;
2703 ret = t3_adapter_error(adapter, 0);
2705 /* Request a slot reset. */
2706 return PCI_ERS_RESULT_NEED_RESET;
2710 * t3_io_slot_reset - called after the pci bus has been reset.
2711 * @pdev: Pointer to PCI device
2713 * Restart the card from scratch, as if from a cold-boot.
2715 static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
2717 struct adapter *adapter = pci_get_drvdata(pdev);
2719 if (!t3_reenable_adapter(adapter))
2720 return PCI_ERS_RESULT_RECOVERED;
2722 return PCI_ERS_RESULT_DISCONNECT;
/**
 * t3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.
 */
static void t3_io_resume(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	/* bring previously-running ports back up */
	t3_resume_ports(adapter);
}
/* PCI error-recovery callbacks (AER): quiesce, slot reset, resume. */
static struct pci_error_handlers t3_err_handler = {
	.error_detected = t3_io_error_detected,
	.slot_reset = t3_io_slot_reset,
	.resume = t3_io_resume,
};
/*
 * Set the number of qsets based on the number of CPUs and the number of ports,
 * not to exceed the number of available qsets, assuming there are enough qsets
 * per port in HW.
 */
static void set_nqsets(struct adapter *adap)
{
	int i, j = 0;
	int num_cpus = num_online_cpus();
	int hwports = adap->params.nports;
	/* one MSI-X vector is reserved for the slow/async interrupt */
	int nqsets = adap->msix_nvectors - 1;

	if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
		/* on 2-port parts, split vectors between the ports when
		 * either HW qsets or CPUs would otherwise be exceeded */
		if (hwports == 2 &&
		    (hwports * nqsets > SGE_QSETS ||
		     num_cpus >= nqsets / hwports))
			nqsets /= hwports;
		if (nqsets > num_cpus)
			nqsets = num_cpus;
		/* 4-port parts and degenerate cases get one qset per port */
		if (nqsets < 1 || hwports == 4)
			nqsets = 1;
	} else
		nqsets = 1;

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = j;
		pi->nqsets = nqsets;
		j = pi->first_qset + nqsets;

		dev_info(&adap->pdev->dev,
			 "Port %d using %d queue sets.\n", i, nqsets);
	}
}
2781 static int __devinit cxgb_enable_msix(struct adapter *adap)
2783 struct msix_entry entries[SGE_QSETS + 1];
2784 int vectors;
2785 int i, err;
2787 vectors = ARRAY_SIZE(entries);
2788 for (i = 0; i < vectors; ++i)
2789 entries[i].entry = i;
2791 while ((err = pci_enable_msix(adap->pdev, entries, vectors)) > 0)
2792 vectors = err;
2794 if (!err && vectors < (adap->params.nports + 1))
2795 err = -1;
2797 if (!err) {
2798 for (i = 0; i < vectors; ++i)
2799 adap->msix_info[i].vec = entries[i].vector;
2800 adap->msix_nvectors = vectors;
2803 return err;
/*
 * Log a one-line summary for each registered port (adapter type, PHY,
 * bus variant/speed, interrupt mode) and, for the primary port, the
 * memory sizes and serial number.
 */
static void __devinit print_port_info(struct adapter *adap,
				      const struct adapter_info *ai)
{
	static const char *pci_variant[] = {
		"PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
	};

	int i;
	char buf[80];

	if (is_pcie(adap))
		snprintf(buf, sizeof(buf), "%s x%d",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.width);
	else
		snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.speed, adap->params.pci.width);

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		const struct port_info *pi = netdev_priv(dev);

		/* skip ports whose netdev registration failed */
		if (!test_bit(i, &adap->registered_device_map))
			continue;
		printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
		       dev->name, ai->desc, pi->phy.desc,
		       is_offload(adap) ? "R" : "", adap->params.rev, buf,
		       (adap->flags & USING_MSIX) ? " MSI-X" :
		       (adap->flags & USING_MSI) ? " MSI" : "");
		/* memory/serial details only for the adapter's name-giving
		 * port, and only when a CM clock was detected */
		if (adap->name == dev->name && adap->params.vpd.mclk)
			printk(KERN_INFO
			       "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
			       adap->name, t3_mc7_size(&adap->cm) >> 20,
			       t3_mc7_size(&adap->pmtx) >> 20,
			       t3_mc7_size(&adap->pmrx) >> 20,
			       adap->params.vpd.sn);
	}
}
/* net_device callbacks for a cxgb3 port. */
static const struct net_device_ops cxgb_netdev_ops = {
	.ndo_open = cxgb_open,
	.ndo_stop = cxgb_close,
	.ndo_start_xmit = t3_eth_xmit,
	.ndo_get_stats = cxgb_get_stats,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_multicast_list = cxgb_set_rxmode,
	.ndo_do_ioctl = cxgb_ioctl,
	.ndo_change_mtu = cxgb_change_mtu,
	.ndo_set_mac_address = cxgb_set_mac_addr,
	.ndo_vlan_rx_register = vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cxgb_netpoll,
#endif
};
/*
 * PCI probe routine: claims the device, sets up DMA, maps BAR0,
 * allocates the adapter and one net_device per port, prepares the HW,
 * registers the netdevs (proceeding with whatever subset registers
 * successfully), then picks the interrupt mode and queue-set layout.
 * Returns 0 on success or a negative errno; on failure all acquired
 * resources are unwound via the goto chain at the bottom.
 */
static int __devinit init_one(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	static int version_printed;

	int i, err, pci_using_dac = 0;
	unsigned long mmio_start, mmio_len;
	const struct adapter_info *ai;
	struct adapter *adapter = NULL;
	struct port_info *pi;

	if (!version_printed) {
		printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
		++version_printed;
	}

	/* lazily create the driver-wide workqueue on first probe */
	if (!cxgb3_wq) {
		cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
		if (!cxgb3_wq) {
			printk(KERN_ERR DRV_NAME
			       ": cannot initialize work queue\n");
			return -ENOMEM;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	/* prefer 64-bit DMA, fall back to 32-bit */
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_disable_device;
		}
	} else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
		dev_err(&pdev->dev, "no usable DMA configuration\n");
		goto out_disable_device;
	}

	pci_set_master(pdev);
	pci_save_state(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	ai = t3_get_adapter_info(ent->driver_data);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_disable_device;
	}

	adapter->regs = ioremap_nocache(mmio_start, mmio_len);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->pdev = pdev;
	adapter->name = pci_name(pdev);
	adapter->msg_enable = dflt_msg_enable;
	adapter->mmio_len = mmio_len;

	mutex_init(&adapter->mdio_lock);
	spin_lock_init(&adapter->work_lock);
	spin_lock_init(&adapter->stats_lock);

	INIT_LIST_HEAD(&adapter->adapter_list);
	INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
	INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
	INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);

	for (i = 0; i < ai->nports; ++i) {
		struct net_device *netdev;

		netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->rx_offload = T3_RX_CSUM | T3_LRO;
		pi->port_id = i;
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		netdev->irq = pdev->irq;
		netdev->mem_start = mmio_start;
		netdev->mem_end = mmio_start + mmio_len - 1;
		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
		netdev->features |= NETIF_F_LLTX;
		netdev->features |= NETIF_F_LRO;
		if (pci_using_dac)
			netdev->features |= NETIF_F_HIGHDMA;

		netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
		netdev->netdev_ops = &cxgb_netdev_ops;
		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
	}

	pci_set_drvdata(pdev, adapter);
	if (t3_prep_adapter(adapter, ai, 1) < 0) {
		err = -ENODEV;
		goto out_free_dev;
	}

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		err = register_netdev(adapter->port[i]);
		if (err)
			dev_warn(&pdev->dev,
				 "cannot register net device %s, skipping\n",
				 adapter->port[i]->name);
		else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i]->name;

			__set_bit(i, &adapter->registered_device_map);
		}
	}
	if (!adapter->registered_device_map) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}

	/* Driver's ready. Reflect it on LEDs */
	t3_led_ready(adapter);

	if (is_offload(adapter)) {
		__set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
		cxgb3_adapter_ofld(adapter);
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && cxgb_enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	set_nqsets(adapter);

	/* NOTE(review): the sysfs_create_group() result is assigned to err
	 * but the function returns 0 regardless — consider logging or
	 * propagating a failure here. */
	err = sysfs_create_group(&adapter->port[0]->dev.kobj,
				 &cxgb3_attr_group);

	print_port_info(adapter, ai);
	return 0;

out_free_dev:
	iounmap(adapter->regs);
	for (i = ai->nports - 1; i >= 0; --i)
		if (adapter->port[i])
			free_netdev(adapter->port[i]);

out_free_adapter:
	kfree(adapter);

out_disable_device:
	pci_disable_device(pdev);
out_release_regions:
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
/*
 * PCI remove routine: undoes init_one in reverse order — stops the SGE,
 * tears down sysfs and the offload stack, unregisters and frees the port
 * netdevs, releases SGE resources and interrupts, then unmaps and frees
 * the adapter.
 */
static void __devexit remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (adapter) {
		int i;

		t3_sge_stop(adapter);
		sysfs_remove_group(&adapter->port[0]->dev.kobj,
				   &cxgb3_attr_group);

		if (is_offload(adapter)) {
			cxgb3_adapter_unofld(adapter);
			if (test_bit(OFFLOAD_DEVMAP_BIT,
				     &adapter->open_device_map))
				offload_close(&adapter->tdev);
		}

		/* only ports that registered successfully were visible */
		for_each_port(adapter, i)
		    if (test_bit(i, &adapter->registered_device_map))
			unregister_netdev(adapter->port[i]);

		t3_stop_sge_timers(adapter);
		t3_free_sge_resources(adapter);
		cxgb_disable_msi(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i])
				free_netdev(adapter->port[i]);

		iounmap(adapter->regs);
		kfree(adapter);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
/* PCI driver descriptor tying probe/remove/error handlers together. */
static struct pci_driver driver = {
	.name = DRV_NAME,
	.id_table = cxgb3_pci_tbl,
	.probe = init_one,
	.remove = __devexit_p(remove_one),
	.err_handler = &t3_err_handler,
};
3098 static int __init cxgb3_init_module(void)
3100 int ret;
3102 cxgb3_offload_init();
3104 ret = pci_register_driver(&driver);
3105 return ret;
3108 static void __exit cxgb3_cleanup_module(void)
3110 pci_unregister_driver(&driver);
3111 if (cxgb3_wq)
3112 destroy_workqueue(cxgb3_wq);
/* Hook module load/unload into the entry/exit points above. */
module_init(cxgb3_init_module);
module_exit(cxgb3_cleanup_module);