cxgb3 - TP SRAM update
drivers/net/cxgb3/cxgb3_main.c

/*
 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

enum {
	MAX_TXQ_ENTRIES = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES = 16384,
	MAX_RX_BUFFERS = 16384,
	MAX_RX_JUMBO_BUFFERS = 16384,
	MIN_TXQ_ENTRIES = 4,
	MIN_CTRL_TXQ_ENTRIES = 4,
	MIN_RSPQ_ENTRIES = 32,
	MIN_FL_ENTRIES = 32
};

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, ssid, idx) \
	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }

static const struct pci_device_id cxgb3_pci_tbl[] = {
	CH_DEVICE(0x20, 1, 0),	/* PE9000 */
	CH_DEVICE(0x21, 1, 1),	/* T302E */
	CH_DEVICE(0x22, 1, 2),	/* T310E */
	CH_DEVICE(0x23, 1, 3),	/* T320X */
	CH_DEVICE(0x24, 1, 1),	/* T302X */
	CH_DEVICE(0x25, 1, 3),	/* T320E */
	CH_DEVICE(0x26, 1, 2),	/* T310X */
	CH_DEVICE(0x30, 1, 2),	/* T3B10 */
	CH_DEVICE(0x31, 1, 3),	/* T3B20 */
	CH_DEVICE(0x32, 1, 1),	/* T3B02 */
	{0,}
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");

/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */
static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
static struct workqueue_struct *cxgb3_wq;

/**
 *	link_report - show link status and link speed/duplex
 *	@dev: the port whose settings are to be reported
 *
 *	Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		printk(KERN_INFO "%s: link down\n", dev->name);
	else {
		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_config.speed) {
		case SPEED_10000:
			s = "10Gbps";
			break;
		case SPEED_1000:
			s = "1000Mbps";
			break;
		case SPEED_100:
			s = "100Mbps";
			break;
		}

		printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
		       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
	}
}

/**
 *	t3_os_link_changed - handle link status changes
 *	@adapter: the adapter associated with the link change
 *	@port_id: the port index whose link status has changed
 *	@link_stat: the new status of the link
 *	@speed: the new speed setting
 *	@duplex: the new duplex setting
 *	@pause: the new flow-control setting
 *
 *	This is the OS-dependent handler for link status changes.  The OS
 *	neutral handler takes care of most of the processing for these events,
 *	then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
			int speed, int duplex, int pause)
{
	struct net_device *dev = adapter->port[port_id];
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	/* Skip changes from disabled ports. */
	if (!netif_running(dev))
		return;

	if (link_stat != netif_carrier_ok(dev)) {
		if (link_stat) {
			t3_mac_enable(mac, MAC_DIRECTION_RX);
			netif_carrier_on(dev);
		} else {
			netif_carrier_off(dev);
			pi->phy.ops->power_down(&pi->phy, 1);
			t3_mac_disable(mac, MAC_DIRECTION_RX);
			t3_link_start(&pi->phy, mac, &pi->link_config);
		}

		link_report(dev);
	}
}

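/* Program the port's MAC Rx mode from the net_device's current multicast list. */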
static void cxgb_set_rxmode(struct net_device *dev)
{
	struct t3_rx_mode rm;
	struct port_info *pi = netdev_priv(dev);

	init_rx_mode(&rm, dev, dev->mc_list);
	t3_mac_set_rx_mode(&pi->mac, &rm);
}

/**
 *	link_start - enable a port
 *	@dev: the device to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
	struct t3_rx_mode rm;
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	init_rx_mode(&rm, dev, dev->mc_list);
	t3_mac_reset(mac);
	t3_mac_set_mtu(mac, dev->mtu);
	t3_mac_set_address(mac, 0, dev->dev_addr);
	t3_mac_set_rx_mode(mac, &rm);
	t3_link_start(&pi->phy, mac, &pi->link_config);
	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

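/*
 * Release whichever of MSI-X or MSI the adapter is currently using and clear
 * the corresponding flag.
 */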
static inline void cxgb_disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
	t3_slow_intr_handler(cookie);
	return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

	snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
	adap->msix_info[0].desc[n] = 0;

	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++) {
			snprintf(adap->msix_info[msi_idx].desc, n,
				 "%s (queue %d)", d->name, i);
			adap->msix_info[msi_idx].desc[n] = 0;
		}
	}
}

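/*
 * Request one MSI-X vector per SGE queue set, undoing the requests already
 * made if any request fails.
 */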
static int request_msix_data_irqs(struct adapter *adap)
{
	int i, j, err, qidx = 0;

	for_each_port(adap, i) {
		int nqsets = adap2pinfo(adap, i)->nqsets;

		for (j = 0; j < nqsets; ++j) {
			err = request_irq(adap->msix_info[qidx + 1].vec,
					  t3_intr_handler(adap,
							  adap->sge.qs[qidx].
							  rspq.polling), 0,
					  adap->msix_info[qidx + 1].desc,
					  &adap->sge.qs[qidx]);
			if (err) {
				while (--qidx >= 0)
					free_irq(adap->msix_info[qidx + 1].vec,
						 &adap->sge.qs[qidx]);
				return err;
			}
			qidx++;
		}
	}
	return 0;
}

/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS to distribute packets to multiple receive queues.  We
 *	configure the RSS CPU lookup table to distribute to the number of HW
 *	receive queues, and the response queue lookup table to narrow that
 *	down to the response queues actually configured for each port.
 *	We always configure the RSS mapping for two ports since the mapping
 *	table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
	int i;
	unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
	unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
	u8 cpus[SGE_QSETS + 1];
	u16 rspq_map[RSS_TABLE_SIZE];

	for (i = 0; i < SGE_QSETS; ++i)
		cpus[i] = i;
	cpus[SGE_QSETS] = 0xff;	/* terminator */

	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
		rspq_map[i] = i % nq0;
		rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
	}

	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
		      V_RRCPLCPUSIZE(6), cpus, rspq_map);
}

/*
 * If we have multiple receive queues per port serviced by NAPI we need one
 * netdevice per queue as NAPI operates on netdevices.  We already have one
 * netdevice, namely the one associated with the interface, so we use dummy
 * ones for any additional queues.  Note that these netdevices exist purely
 * so that NAPI has something to work with, they do not represent network
 * ports and are not registered.
 */
static int init_dummy_netdevs(struct adapter *adap)
{
	int i, j, dummy_idx = 0;
	struct net_device *nd;

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		const struct port_info *pi = netdev_priv(dev);

		for (j = 0; j < pi->nqsets - 1; j++) {
			if (!adap->dummy_netdev[dummy_idx]) {
				nd = alloc_netdev(0, "", ether_setup);
				if (!nd)
					goto free_all;

				nd->priv = adap;
				nd->weight = 64;
				set_bit(__LINK_STATE_START, &nd->state);
				adap->dummy_netdev[dummy_idx] = nd;
			}
			strcpy(adap->dummy_netdev[dummy_idx]->name, dev->name);
			dummy_idx++;
		}
	}
	return 0;

free_all:
	while (--dummy_idx >= 0) {
		free_netdev(adap->dummy_netdev[dummy_idx]);
		adap->dummy_netdev[dummy_idx] = NULL;
	}
	return -ENOMEM;
}

/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;
	struct net_device *dev;

	for_each_port(adap, i) {
		dev = adap->port[i];
		while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
			msleep(1);
	}

	for (i = 0; i < ARRAY_SIZE(adap->dummy_netdev); i++) {
		dev = adap->dummy_netdev[i];
		if (dev)
			while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
				msleep(1);
	}
}

/**
 *	setup_sge_qsets - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
	int i, j, err, irq_idx = 0, qset_idx = 0, dummy_dev_idx = 0;
	unsigned int ntxq = SGE_TXQ_PER_SET;

	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
		irq_idx = -1;

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		const struct port_info *pi = netdev_priv(dev);

		for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
			err = t3_sge_alloc_qset(adap, qset_idx, 1,
				(adap->flags & USING_MSIX) ? qset_idx + 1 :
							     irq_idx,
				&adap->params.sge.qset[qset_idx], ntxq,
				j == 0 ? dev :
					 adap->dummy_netdev[dummy_dev_idx++]);
			if (err) {
				t3_free_sge_resources(adap);
				return err;
			}
		}
	}

	return 0;
}

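/*
 * sysfs attribute helpers: run the per-attribute format/set callbacks under
 * the RTNL so they are serialized against ioctls that reconfigure the device.
 */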
444 static ssize_t attr_show(struct device *d, struct device_attribute *attr,
445 char *buf,
446 ssize_t(*format) (struct net_device *, char *))
448 ssize_t len;
450 /* Synchronize with ioctls that may shut down the device */
451 rtnl_lock();
452 len = (*format) (to_net_dev(d), buf);
453 rtnl_unlock();
454 return len;
457 static ssize_t attr_store(struct device *d, struct device_attribute *attr,
458 const char *buf, size_t len,
459 ssize_t(*set) (struct net_device *, unsigned int),
460 unsigned int min_val, unsigned int max_val)
462 char *endp;
463 ssize_t ret;
464 unsigned int val;
466 if (!capable(CAP_NET_ADMIN))
467 return -EPERM;
469 val = simple_strtoul(buf, &endp, 0);
470 if (endp == buf || val < min_val || val > max_val)
471 return -EINVAL;
473 rtnl_lock();
474 ret = (*set) (to_net_dev(d), val);
475 if (!ret)
476 ret = len;
477 rtnl_unlock();
478 return ret;
481 #define CXGB3_SHOW(name, val_expr) \
482 static ssize_t format_##name(struct net_device *dev, char *buf) \
484 struct adapter *adap = dev->priv; \
485 return sprintf(buf, "%u\n", val_expr); \
487 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
488 char *buf) \
490 return attr_show(d, attr, buf, format_##name); \
493 static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
495 struct adapter *adap = dev->priv;
496 int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;
498 if (adap->flags & FULL_INIT_DONE)
499 return -EBUSY;
500 if (val && adap->params.rev == 0)
501 return -EINVAL;
502 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
503 min_tids)
504 return -EINVAL;
505 adap->params.mc5.nfilters = val;
506 return 0;
509 static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
510 const char *buf, size_t len)
512 return attr_store(d, attr, buf, len, set_nfilters, 0, ~0);
515 static ssize_t set_nservers(struct net_device *dev, unsigned int val)
517 struct adapter *adap = dev->priv;
519 if (adap->flags & FULL_INIT_DONE)
520 return -EBUSY;
521 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
522 MC5_MIN_TIDS)
523 return -EINVAL;
524 adap->params.mc5.nservers = val;
525 return 0;
528 static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
529 const char *buf, size_t len)
531 return attr_store(d, attr, buf, len, set_nservers, 0, ~0);
534 #define CXGB3_ATTR_R(name, val_expr) \
535 CXGB3_SHOW(name, val_expr) \
536 static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
538 #define CXGB3_ATTR_RW(name, val_expr, store_method) \
539 CXGB3_SHOW(name, val_expr) \
540 static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)
542 CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
543 CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
544 CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);
546 static struct attribute *cxgb3_attrs[] = {
547 &dev_attr_cam_size.attr,
548 &dev_attr_nfilters.attr,
549 &dev_attr_nservers.attr,
550 NULL
553 static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
555 static ssize_t tm_attr_show(struct device *d, struct device_attribute *attr,
556 char *buf, int sched)
558 ssize_t len;
559 unsigned int v, addr, bpt, cpt;
560 struct adapter *adap = to_net_dev(d)->priv;
562 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
563 rtnl_lock();
564 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
565 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
566 if (sched & 1)
567 v >>= 16;
568 bpt = (v >> 8) & 0xff;
569 cpt = v & 0xff;
570 if (!cpt)
571 len = sprintf(buf, "disabled\n");
572 else {
573 v = (adap->params.vpd.cclk * 1000) / cpt;
574 len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
576 rtnl_unlock();
577 return len;
580 static ssize_t tm_attr_store(struct device *d, struct device_attribute *attr,
581 const char *buf, size_t len, int sched)
583 char *endp;
584 ssize_t ret;
585 unsigned int val;
586 struct adapter *adap = to_net_dev(d)->priv;
588 if (!capable(CAP_NET_ADMIN))
589 return -EPERM;
591 val = simple_strtoul(buf, &endp, 0);
592 if (endp == buf || val > 10000000)
593 return -EINVAL;
595 rtnl_lock();
596 ret = t3_config_sched(adap, val, sched);
597 if (!ret)
598 ret = len;
599 rtnl_unlock();
600 return ret;
603 #define TM_ATTR(name, sched) \
604 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
605 char *buf) \
607 return tm_attr_show(d, attr, buf, sched); \
609 static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
610 const char *buf, size_t len) \
612 return tm_attr_store(d, attr, buf, len, sched); \
614 static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
616 TM_ATTR(sched0, 0);
617 TM_ATTR(sched1, 1);
618 TM_ATTR(sched2, 2);
619 TM_ATTR(sched3, 3);
620 TM_ATTR(sched4, 4);
621 TM_ATTR(sched5, 5);
622 TM_ATTR(sched6, 6);
623 TM_ATTR(sched7, 7);
625 static struct attribute *offload_attrs[] = {
626 &dev_attr_sched0.attr,
627 &dev_attr_sched1.attr,
628 &dev_attr_sched2.attr,
629 &dev_attr_sched3.attr,
630 &dev_attr_sched4.attr,
631 &dev_attr_sched5.attr,
632 &dev_attr_sched6.attr,
633 &dev_attr_sched7.attr,
634 NULL
637 static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
640 * Sends an sk_buff to an offload queue driver
641 * after dealing with any active network taps.
643 static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
645 int ret;
647 local_bh_disable();
648 ret = t3_offload_tx(tdev, skb);
649 local_bh_enable();
650 return ret;
653 static int write_smt_entry(struct adapter *adapter, int idx)
655 struct cpl_smt_write_req *req;
656 struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
658 if (!skb)
659 return -ENOMEM;
661 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
662 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
663 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
664 req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */
665 req->iff = idx;
666 memset(req->src_mac1, 0, sizeof(req->src_mac1));
667 memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
668 skb->priority = 1;
669 offload_tx(&adapter->tdev, skb);
670 return 0;
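/* Write an SMT entry for each port's current MAC address. */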
673 static int init_smt(struct adapter *adapter)
675 int i;
677 for_each_port(adapter, i)
678 write_smt_entry(adapter, i);
679 return 0;
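/* Program the TP's per-port MTU table register from the netdevs' current MTUs. */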
682 static void init_port_mtus(struct adapter *adapter)
684 unsigned int mtus = adapter->port[0]->mtu;
686 if (adapter->port[1])
687 mtus |= adapter->port[1]->mtu << 16;
688 t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
691 static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
692 int hi, int port)
694 struct sk_buff *skb;
695 struct mngt_pktsched_wr *req;
697 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
698 req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
699 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
700 req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
701 req->sched = sched;
702 req->idx = qidx;
703 req->min = lo;
704 req->max = hi;
705 req->binding = port;
706 t3_mgmt_tx(adap, skb);
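/*
 * Bind each port's queue sets to that port by issuing a packet-scheduler
 * management command to the firmware for every queue set.
 */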
709 static void bind_qsets(struct adapter *adap)
711 int i, j;
713 for_each_port(adap, i) {
714 const struct port_info *pi = adap2pinfo(adap, i);
716 for (j = 0; j < pi->nqsets; ++j)
717 send_pktsched_cmd(adap, 1, pi->first_qset + j, -1,
718 -1, i);
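/*
 * upgrade_fw() requests the firmware image named by FW_FNAME from userspace
 * and loads it into the adapter with t3_load_fw().
 */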
#define FW_FNAME "t3fw-%d.%d.%d.bin"

static int upgrade_fw(struct adapter *adap)
{
	int ret;
	char buf[64];
	const struct firmware *fw;
	struct device *dev = &adap->pdev->dev;

	snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
		 FW_VERSION_MINOR, FW_VERSION_MICRO);
	ret = request_firmware(&fw, buf, dev);
	if (ret < 0) {
		dev_err(dev, "could not upgrade firmware: unable to load %s\n",
			buf);
		return ret;
	}
	ret = t3_load_fw(adap, fw->data, fw->size);
	release_firmware(fw);
	return ret;
}

/**
 *	cxgb_up - enable the adapter
 *	@adap: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err = 0;

	if (!(adap->flags & FULL_INIT_DONE)) {
		err = t3_check_fw_version(adap);
		if (err == -EINVAL)
			err = upgrade_fw(adap);
		if (err)
			goto out;

		err = init_dummy_netdevs(adap);
		if (err)
			goto out;

		err = t3_init_hw(adap, 0);
		if (err)
			goto out;

		t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

		err = setup_sge_qsets(adap);
		if (err)
			goto out;

		setup_rss(adap);
		adap->flags |= FULL_INIT_DONE;
	}

	t3_intr_clear(adap);

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec,
				  t3_async_intr_handler, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		if (request_msix_data_irqs(adap)) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else if ((err = request_irq(adap->pdev->irq,
				      t3_intr_handler(adap,
						      adap->sge.qs[0].rspq.
						      polling),
				      (adap->flags & USING_MSI) ?
				      0 : IRQF_SHARED,
				      adap->name, adap)))
		goto irq_err;

	t3_sge_start(adap);
	t3_intr_enable(adap);

	if ((adap->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
		bind_qsets(adap);
	adap->flags |= QUEUES_BOUND;

out:
	return err;
irq_err:
	CH_ERR(adap, "request_irq failed, err %d\n", err);
	goto out;
}

/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
	t3_sge_stop(adapter);
	spin_lock_irq(&adapter->work_lock);	/* sync with PHY intr task */
	t3_intr_disable(adapter);
	spin_unlock_irq(&adapter->work_lock);

	if (adapter->flags & USING_MSIX) {
		int i, n = 0;

		free_irq(adapter->msix_info[0].vec, adapter);
		for_each_port(adapter, i)
			n += adap2pinfo(adapter, i)->nqsets;

		for (i = 0; i < n; ++i)
			free_irq(adapter->msix_info[i + 1].vec,
				 &adapter->sge.qs[i]);
	} else
		free_irq(adapter->pdev->irq, adapter);

	flush_workqueue(cxgb3_wq);	/* wait for external IRQ handler */
	quiesce_rx(adapter);
}

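/*
 * Schedule the periodic adapter check task, using the link-poll period if
 * link polling is enabled and the statistics update period otherwise.
 */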
847 static void schedule_chk_task(struct adapter *adap)
849 unsigned int timeo;
851 timeo = adap->params.linkpoll_period ?
852 (HZ * adap->params.linkpoll_period) / 10 :
853 adap->params.stats_update_period * HZ;
854 if (timeo)
855 queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
858 static int offload_open(struct net_device *dev)
860 struct adapter *adapter = dev->priv;
861 struct t3cdev *tdev = T3CDEV(dev);
862 int adap_up = adapter->open_device_map & PORT_MASK;
863 int err = 0;
865 if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
866 return 0;
868 if (!adap_up && (err = cxgb_up(adapter)) < 0)
869 return err;
871 t3_tp_set_offload_mode(adapter, 1);
872 tdev->lldev = adapter->port[0];
873 err = cxgb3_offload_activate(adapter);
874 if (err)
875 goto out;
877 init_port_mtus(adapter);
878 t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
879 adapter->params.b_wnd,
880 adapter->params.rev == 0 ?
881 adapter->port[0]->mtu : 0xffff);
882 init_smt(adapter);
884 /* Never mind if the next step fails */
885 sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group);
887 /* Call back all registered clients */
888 cxgb3_add_clients(tdev);
890 out:
891 /* restore them in case the offload module has changed them */
892 if (err) {
893 t3_tp_set_offload_mode(adapter, 0);
894 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
895 cxgb3_set_dummy_ops(tdev);
897 return err;
900 static int offload_close(struct t3cdev *tdev)
902 struct adapter *adapter = tdev2adap(tdev);
904 if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
905 return 0;
907 /* Call back all registered clients */
908 cxgb3_remove_clients(tdev);
910 sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
912 tdev->lldev = NULL;
913 cxgb3_set_dummy_ops(tdev);
914 t3_tp_set_offload_mode(adapter, 0);
915 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
917 if (!adapter->open_device_map)
918 cxgb_down(adapter);
920 cxgb3_offload_deactivate(adapter);
921 return 0;
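/* net_device open handler: bring up the adapter if needed, then enable the port. */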
924 static int cxgb_open(struct net_device *dev)
926 int err;
927 struct adapter *adapter = dev->priv;
928 struct port_info *pi = netdev_priv(dev);
929 int other_ports = adapter->open_device_map & PORT_MASK;
931 if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
932 return err;
934 set_bit(pi->port_id, &adapter->open_device_map);
935 if (is_offload(adapter) && !ofld_disable) {
936 err = offload_open(dev);
937 if (err)
938 printk(KERN_WARNING
939 "Could not initialize offload capabilities\n");
942 link_start(dev);
943 t3_port_intr_enable(adapter, pi->port_id);
944 netif_start_queue(dev);
945 if (!other_ports)
946 schedule_chk_task(adapter);
948 return 0;
951 static int cxgb_close(struct net_device *dev)
953 struct adapter *adapter = dev->priv;
954 struct port_info *p = netdev_priv(dev);
956 t3_port_intr_disable(adapter, p->port_id);
957 netif_stop_queue(dev);
958 p->phy.ops->power_down(&p->phy, 1);
959 netif_carrier_off(dev);
960 t3_mac_disable(&p->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
962 spin_lock(&adapter->work_lock); /* sync with update task */
963 clear_bit(p->port_id, &adapter->open_device_map);
964 spin_unlock(&adapter->work_lock);
966 if (!(adapter->open_device_map & PORT_MASK))
967 cancel_rearming_delayed_workqueue(cxgb3_wq,
968 &adapter->adap_check_task);
970 if (!adapter->open_device_map)
971 cxgb_down(adapter);
973 return 0;
976 static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
978 struct adapter *adapter = dev->priv;
979 struct port_info *p = netdev_priv(dev);
980 struct net_device_stats *ns = &p->netstats;
981 const struct mac_stats *pstats;
983 spin_lock(&adapter->stats_lock);
984 pstats = t3_mac_update_stats(&p->mac);
985 spin_unlock(&adapter->stats_lock);
987 ns->tx_bytes = pstats->tx_octets;
988 ns->tx_packets = pstats->tx_frames;
989 ns->rx_bytes = pstats->rx_octets;
990 ns->rx_packets = pstats->rx_frames;
991 ns->multicast = pstats->rx_mcast_frames;
993 ns->tx_errors = pstats->tx_underrun;
994 ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
995 pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
996 pstats->rx_fifo_ovfl;
998 /* detailed rx_errors */
999 ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
1000 ns->rx_over_errors = 0;
1001 ns->rx_crc_errors = pstats->rx_fcs_errs;
1002 ns->rx_frame_errors = pstats->rx_symbol_errs;
1003 ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
1004 ns->rx_missed_errors = pstats->rx_cong_drops;
1006 /* detailed tx_errors */
1007 ns->tx_aborted_errors = 0;
1008 ns->tx_carrier_errors = 0;
1009 ns->tx_fifo_errors = pstats->tx_underrun;
1010 ns->tx_heartbeat_errors = 0;
1011 ns->tx_window_errors = 0;
1012 return ns;
1015 static u32 get_msglevel(struct net_device *dev)
1017 struct adapter *adapter = dev->priv;
1019 return adapter->msg_enable;
1022 static void set_msglevel(struct net_device *dev, u32 val)
1024 struct adapter *adapter = dev->priv;
1026 adapter->msg_enable = val;
1029 static char stats_strings[][ETH_GSTRING_LEN] = {
1030 "TxOctetsOK ",
1031 "TxFramesOK ",
1032 "TxMulticastFramesOK",
1033 "TxBroadcastFramesOK",
1034 "TxPauseFrames ",
1035 "TxUnderrun ",
1036 "TxExtUnderrun ",
1038 "TxFrames64 ",
1039 "TxFrames65To127 ",
1040 "TxFrames128To255 ",
1041 "TxFrames256To511 ",
1042 "TxFrames512To1023 ",
1043 "TxFrames1024To1518 ",
1044 "TxFrames1519ToMax ",
1046 "RxOctetsOK ",
1047 "RxFramesOK ",
1048 "RxMulticastFramesOK",
1049 "RxBroadcastFramesOK",
1050 "RxPauseFrames ",
1051 "RxFCSErrors ",
1052 "RxSymbolErrors ",
1053 "RxShortErrors ",
1054 "RxJabberErrors ",
1055 "RxLengthErrors ",
1056 "RxFIFOoverflow ",
1058 "RxFrames64 ",
1059 "RxFrames65To127 ",
1060 "RxFrames128To255 ",
1061 "RxFrames256To511 ",
1062 "RxFrames512To1023 ",
1063 "RxFrames1024To1518 ",
1064 "RxFrames1519ToMax ",
1066 "PhyFIFOErrors ",
1067 "TSO ",
1068 "VLANextractions ",
1069 "VLANinsertions ",
1070 "TxCsumOffload ",
1071 "RxCsumGood ",
1072 "RxDrops ",
1074 "CheckTXEnToggled ",
1075 "CheckResets ",
1079 static int get_stats_count(struct net_device *dev)
1081 return ARRAY_SIZE(stats_strings);
1084 #define T3_REGMAP_SIZE (3 * 1024)
1086 static int get_regs_len(struct net_device *dev)
1088 return T3_REGMAP_SIZE;
1091 static int get_eeprom_len(struct net_device *dev)
1093 return EEPROMSIZE;
1096 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1098 u32 fw_vers = 0;
1099 struct adapter *adapter = dev->priv;
1101 t3_get_fw_version(adapter, &fw_vers);
1103 strcpy(info->driver, DRV_NAME);
1104 strcpy(info->version, DRV_VERSION);
1105 strcpy(info->bus_info, pci_name(adapter->pdev));
1106 if (!fw_vers)
1107 strcpy(info->fw_version, "N/A");
1108 else {
1109 snprintf(info->fw_version, sizeof(info->fw_version),
1110 "%s %u.%u.%u",
1111 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1112 G_FW_VERSION_MAJOR(fw_vers),
1113 G_FW_VERSION_MINOR(fw_vers),
1114 G_FW_VERSION_MICRO(fw_vers));
1118 static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
1120 if (stringset == ETH_SS_STATS)
1121 memcpy(data, stats_strings, sizeof(stats_strings));
1124 static unsigned long collect_sge_port_stats(struct adapter *adapter,
1125 struct port_info *p, int idx)
1127 int i;
1128 unsigned long tot = 0;
1130 for (i = 0; i < p->nqsets; ++i)
1131 tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
1132 return tot;
1135 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1136 u64 *data)
1138 struct adapter *adapter = dev->priv;
1139 struct port_info *pi = netdev_priv(dev);
1140 const struct mac_stats *s;
1142 spin_lock(&adapter->stats_lock);
1143 s = t3_mac_update_stats(&pi->mac);
1144 spin_unlock(&adapter->stats_lock);
1146 *data++ = s->tx_octets;
1147 *data++ = s->tx_frames;
1148 *data++ = s->tx_mcast_frames;
1149 *data++ = s->tx_bcast_frames;
1150 *data++ = s->tx_pause;
1151 *data++ = s->tx_underrun;
1152 *data++ = s->tx_fifo_urun;
1154 *data++ = s->tx_frames_64;
1155 *data++ = s->tx_frames_65_127;
1156 *data++ = s->tx_frames_128_255;
1157 *data++ = s->tx_frames_256_511;
1158 *data++ = s->tx_frames_512_1023;
1159 *data++ = s->tx_frames_1024_1518;
1160 *data++ = s->tx_frames_1519_max;
1162 *data++ = s->rx_octets;
1163 *data++ = s->rx_frames;
1164 *data++ = s->rx_mcast_frames;
1165 *data++ = s->rx_bcast_frames;
1166 *data++ = s->rx_pause;
1167 *data++ = s->rx_fcs_errs;
1168 *data++ = s->rx_symbol_errs;
1169 *data++ = s->rx_short;
1170 *data++ = s->rx_jabber;
1171 *data++ = s->rx_too_long;
1172 *data++ = s->rx_fifo_ovfl;
1174 *data++ = s->rx_frames_64;
1175 *data++ = s->rx_frames_65_127;
1176 *data++ = s->rx_frames_128_255;
1177 *data++ = s->rx_frames_256_511;
1178 *data++ = s->rx_frames_512_1023;
1179 *data++ = s->rx_frames_1024_1518;
1180 *data++ = s->rx_frames_1519_max;
1182 *data++ = pi->phy.fifo_errors;
1184 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1185 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1186 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1187 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1188 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
1189 *data++ = s->rx_cong_drops;
1191 *data++ = s->num_toggled;
1192 *data++ = s->num_resets;
1195 static inline void reg_block_dump(struct adapter *ap, void *buf,
1196 unsigned int start, unsigned int end)
1198 u32 *p = buf + start;
1200 for (; start <= end; start += sizeof(u32))
1201 *p++ = t3_read_reg(ap, start);
1204 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1205 void *buf)
1207 struct adapter *ap = dev->priv;
1210 * Version scheme:
1211 * bits 0..9: chip version
1212 * bits 10..15: chip revision
1213 * bit 31: set for PCIe cards
1215 regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1218 * We skip the MAC statistics registers because they are clear-on-read.
1219 * Also reading multi-register stats would need to synchronize with the
1220 * periodic mac stats accumulation. Hard to justify the complexity.
1222 memset(buf, 0, T3_REGMAP_SIZE);
1223 reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1224 reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1225 reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1226 reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1227 reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1228 reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1229 XGM_REG(A_XGM_SERDES_STAT3, 1));
1230 reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1231 XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1234 static int restart_autoneg(struct net_device *dev)
1236 struct port_info *p = netdev_priv(dev);
1238 if (!netif_running(dev))
1239 return -EAGAIN;
1240 if (p->link_config.autoneg != AUTONEG_ENABLE)
1241 return -EINVAL;
1242 p->phy.ops->autoneg_restart(&p->phy);
1243 return 0;
1246 static int cxgb3_phys_id(struct net_device *dev, u32 data)
1248 int i;
1249 struct adapter *adapter = dev->priv;
1251 if (data == 0)
1252 data = 2;
1254 for (i = 0; i < data * 2; i++) {
1255 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1256 (i & 1) ? F_GPIO0_OUT_VAL : 0);
1257 if (msleep_interruptible(500))
1258 break;
1260 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1261 F_GPIO0_OUT_VAL);
1262 return 0;
1265 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1267 struct port_info *p = netdev_priv(dev);
1269 cmd->supported = p->link_config.supported;
1270 cmd->advertising = p->link_config.advertising;
1272 if (netif_carrier_ok(dev)) {
1273 cmd->speed = p->link_config.speed;
1274 cmd->duplex = p->link_config.duplex;
1275 } else {
1276 cmd->speed = -1;
1277 cmd->duplex = -1;
1280 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1281 cmd->phy_address = p->phy.addr;
1282 cmd->transceiver = XCVR_EXTERNAL;
1283 cmd->autoneg = p->link_config.autoneg;
1284 cmd->maxtxpkt = 0;
1285 cmd->maxrxpkt = 0;
1286 return 0;
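/* Translate a speed/duplex pair into the corresponding SUPPORTED_* capability bit. */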
1289 static int speed_duplex_to_caps(int speed, int duplex)
1291 int cap = 0;
1293 switch (speed) {
1294 case SPEED_10:
1295 if (duplex == DUPLEX_FULL)
1296 cap = SUPPORTED_10baseT_Full;
1297 else
1298 cap = SUPPORTED_10baseT_Half;
1299 break;
1300 case SPEED_100:
1301 if (duplex == DUPLEX_FULL)
1302 cap = SUPPORTED_100baseT_Full;
1303 else
1304 cap = SUPPORTED_100baseT_Half;
1305 break;
1306 case SPEED_1000:
1307 if (duplex == DUPLEX_FULL)
1308 cap = SUPPORTED_1000baseT_Full;
1309 else
1310 cap = SUPPORTED_1000baseT_Half;
1311 break;
1312 case SPEED_10000:
1313 if (duplex == DUPLEX_FULL)
1314 cap = SUPPORTED_10000baseT_Full;
1316 return cap;
1319 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1320 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1321 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1322 ADVERTISED_10000baseT_Full)
1324 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1326 struct port_info *p = netdev_priv(dev);
1327 struct link_config *lc = &p->link_config;
1329 if (!(lc->supported & SUPPORTED_Autoneg))
1330 return -EOPNOTSUPP; /* can't change speed/duplex */
1332 if (cmd->autoneg == AUTONEG_DISABLE) {
1333 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1335 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
1336 return -EINVAL;
1337 lc->requested_speed = cmd->speed;
1338 lc->requested_duplex = cmd->duplex;
1339 lc->advertising = 0;
1340 } else {
1341 cmd->advertising &= ADVERTISED_MASK;
1342 cmd->advertising &= lc->supported;
1343 if (!cmd->advertising)
1344 return -EINVAL;
1345 lc->requested_speed = SPEED_INVALID;
1346 lc->requested_duplex = DUPLEX_INVALID;
1347 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1349 lc->autoneg = cmd->autoneg;
1350 if (netif_running(dev))
1351 t3_link_start(&p->phy, &p->mac, lc);
1352 return 0;
1355 static void get_pauseparam(struct net_device *dev,
1356 struct ethtool_pauseparam *epause)
1358 struct port_info *p = netdev_priv(dev);
1360 epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1361 epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1362 epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1365 static int set_pauseparam(struct net_device *dev,
1366 struct ethtool_pauseparam *epause)
1368 struct port_info *p = netdev_priv(dev);
1369 struct link_config *lc = &p->link_config;
1371 if (epause->autoneg == AUTONEG_DISABLE)
1372 lc->requested_fc = 0;
1373 else if (lc->supported & SUPPORTED_Autoneg)
1374 lc->requested_fc = PAUSE_AUTONEG;
1375 else
1376 return -EINVAL;
1378 if (epause->rx_pause)
1379 lc->requested_fc |= PAUSE_RX;
1380 if (epause->tx_pause)
1381 lc->requested_fc |= PAUSE_TX;
1382 if (lc->autoneg == AUTONEG_ENABLE) {
1383 if (netif_running(dev))
1384 t3_link_start(&p->phy, &p->mac, lc);
1385 } else {
1386 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1387 if (netif_running(dev))
1388 t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1390 return 0;
1393 static u32 get_rx_csum(struct net_device *dev)
1395 struct port_info *p = netdev_priv(dev);
1397 return p->rx_csum_offload;
1400 static int set_rx_csum(struct net_device *dev, u32 data)
1402 struct port_info *p = netdev_priv(dev);
1404 p->rx_csum_offload = data;
1405 return 0;
1408 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1410 const struct adapter *adapter = dev->priv;
1411 const struct port_info *pi = netdev_priv(dev);
1412 const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1414 e->rx_max_pending = MAX_RX_BUFFERS;
1415 e->rx_mini_max_pending = 0;
1416 e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1417 e->tx_max_pending = MAX_TXQ_ENTRIES;
1419 e->rx_pending = q->fl_size;
1420 e->rx_mini_pending = q->rspq_size;
1421 e->rx_jumbo_pending = q->jumbo_size;
1422 e->tx_pending = q->txq_size[0];
1425 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1427 int i;
1428 struct qset_params *q;
1429 struct adapter *adapter = dev->priv;
1430 const struct port_info *pi = netdev_priv(dev);
1432 if (e->rx_pending > MAX_RX_BUFFERS ||
1433 e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1434 e->tx_pending > MAX_TXQ_ENTRIES ||
1435 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1436 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1437 e->rx_pending < MIN_FL_ENTRIES ||
1438 e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1439 e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1440 return -EINVAL;
1442 if (adapter->flags & FULL_INIT_DONE)
1443 return -EBUSY;
1445 q = &adapter->params.sge.qset[pi->first_qset];
1446 for (i = 0; i < pi->nqsets; ++i, ++q) {
1447 q->rspq_size = e->rx_mini_pending;
1448 q->fl_size = e->rx_pending;
1449 q->jumbo_size = e->rx_jumbo_pending;
1450 q->txq_size[0] = e->tx_pending;
1451 q->txq_size[1] = e->tx_pending;
1452 q->txq_size[2] = e->tx_pending;
1454 return 0;
1457 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1459 struct adapter *adapter = dev->priv;
1460 struct qset_params *qsp = &adapter->params.sge.qset[0];
1461 struct sge_qset *qs = &adapter->sge.qs[0];
1463 if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1464 return -EINVAL;
1466 qsp->coalesce_usecs = c->rx_coalesce_usecs;
1467 t3_update_qset_coalesce(qs, qsp);
1468 return 0;
1471 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1473 struct adapter *adapter = dev->priv;
1474 struct qset_params *q = adapter->params.sge.qset;
1476 c->rx_coalesce_usecs = q->coalesce_usecs;
1477 return 0;
1480 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1481 u8 * data)
1483 int i, err = 0;
1484 struct adapter *adapter = dev->priv;
1486 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1487 if (!buf)
1488 return -ENOMEM;
1490 e->magic = EEPROM_MAGIC;
1491 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1492 err = t3_seeprom_read(adapter, i, (u32 *) & buf[i]);
1494 if (!err)
1495 memcpy(data, buf + e->offset, e->len);
1496 kfree(buf);
1497 return err;
1500 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1501 u8 * data)
1503 u8 *buf;
1504 int err = 0;
1505 u32 aligned_offset, aligned_len, *p;
1506 struct adapter *adapter = dev->priv;
1508 if (eeprom->magic != EEPROM_MAGIC)
1509 return -EINVAL;
1511 aligned_offset = eeprom->offset & ~3;
1512 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1514 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1515 buf = kmalloc(aligned_len, GFP_KERNEL);
1516 if (!buf)
1517 return -ENOMEM;
1518 err = t3_seeprom_read(adapter, aligned_offset, (u32 *) buf);
1519 if (!err && aligned_len > 4)
1520 err = t3_seeprom_read(adapter,
1521 aligned_offset + aligned_len - 4,
1522 (u32 *) & buf[aligned_len - 4]);
1523 if (err)
1524 goto out;
1525 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1526 } else
1527 buf = data;
1529 err = t3_seeprom_wp(adapter, 0);
1530 if (err)
1531 goto out;
1533 for (p = (u32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
1534 err = t3_seeprom_write(adapter, aligned_offset, *p);
1535 aligned_offset += 4;
1538 if (!err)
1539 err = t3_seeprom_wp(adapter, 1);
1540 out:
1541 if (buf != data)
1542 kfree(buf);
1543 return err;
1546 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1548 wol->supported = 0;
1549 wol->wolopts = 0;
1550 memset(&wol->sopass, 0, sizeof(wol->sopass));
1553 static const struct ethtool_ops cxgb_ethtool_ops = {
1554 .get_settings = get_settings,
1555 .set_settings = set_settings,
1556 .get_drvinfo = get_drvinfo,
1557 .get_msglevel = get_msglevel,
1558 .set_msglevel = set_msglevel,
1559 .get_ringparam = get_sge_param,
1560 .set_ringparam = set_sge_param,
1561 .get_coalesce = get_coalesce,
1562 .set_coalesce = set_coalesce,
1563 .get_eeprom_len = get_eeprom_len,
1564 .get_eeprom = get_eeprom,
1565 .set_eeprom = set_eeprom,
1566 .get_pauseparam = get_pauseparam,
1567 .set_pauseparam = set_pauseparam,
1568 .get_rx_csum = get_rx_csum,
1569 .set_rx_csum = set_rx_csum,
1570 .get_tx_csum = ethtool_op_get_tx_csum,
1571 .set_tx_csum = ethtool_op_set_tx_csum,
1572 .get_sg = ethtool_op_get_sg,
1573 .set_sg = ethtool_op_set_sg,
1574 .get_link = ethtool_op_get_link,
1575 .get_strings = get_strings,
1576 .phys_id = cxgb3_phys_id,
1577 .nway_reset = restart_autoneg,
1578 .get_stats_count = get_stats_count,
1579 .get_ethtool_stats = get_stats,
1580 .get_regs_len = get_regs_len,
1581 .get_regs = get_regs,
1582 .get_wol = get_wol,
1583 .get_tso = ethtool_op_get_tso,
1584 .set_tso = ethtool_op_set_tso,
1585 .get_perm_addr = ethtool_op_get_perm_addr
1588 static int in_range(int val, int lo, int hi)
1590 return val < 0 || (val <= hi && val >= lo);
1593 static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1595 int ret;
1596 u32 cmd;
1597 struct adapter *adapter = dev->priv;
1599 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
1600 return -EFAULT;
1602 switch (cmd) {
1603 case CHELSIO_SET_QSET_PARAMS:{
1604 int i;
1605 struct qset_params *q;
1606 struct ch_qset_params t;
1608 if (!capable(CAP_NET_ADMIN))
1609 return -EPERM;
1610 if (copy_from_user(&t, useraddr, sizeof(t)))
1611 return -EFAULT;
1612 if (t.qset_idx >= SGE_QSETS)
1613 return -EINVAL;
1614 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
1615 !in_range(t.cong_thres, 0, 255) ||
1616 !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
1617 MAX_TXQ_ENTRIES) ||
1618 !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
1619 MAX_TXQ_ENTRIES) ||
1620 !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
1621 MAX_CTRL_TXQ_ENTRIES) ||
1622 !in_range(t.fl_size[0], MIN_FL_ENTRIES,
1623 MAX_RX_BUFFERS)
1624 || !in_range(t.fl_size[1], MIN_FL_ENTRIES,
1625 MAX_RX_JUMBO_BUFFERS)
1626 || !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
1627 MAX_RSPQ_ENTRIES))
1628 return -EINVAL;
1629 if ((adapter->flags & FULL_INIT_DONE) &&
1630 (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
1631 t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
1632 t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
1633 t.polling >= 0 || t.cong_thres >= 0))
1634 return -EBUSY;
1636 q = &adapter->params.sge.qset[t.qset_idx];
1638 if (t.rspq_size >= 0)
1639 q->rspq_size = t.rspq_size;
1640 if (t.fl_size[0] >= 0)
1641 q->fl_size = t.fl_size[0];
1642 if (t.fl_size[1] >= 0)
1643 q->jumbo_size = t.fl_size[1];
1644 if (t.txq_size[0] >= 0)
1645 q->txq_size[0] = t.txq_size[0];
1646 if (t.txq_size[1] >= 0)
1647 q->txq_size[1] = t.txq_size[1];
1648 if (t.txq_size[2] >= 0)
1649 q->txq_size[2] = t.txq_size[2];
1650 if (t.cong_thres >= 0)
1651 q->cong_thres = t.cong_thres;
1652 if (t.intr_lat >= 0) {
1653 struct sge_qset *qs =
1654 &adapter->sge.qs[t.qset_idx];
1656 q->coalesce_usecs = t.intr_lat;
1657 t3_update_qset_coalesce(qs, q);
1659 if (t.polling >= 0) {
1660 if (adapter->flags & USING_MSIX)
1661 q->polling = t.polling;
1662 else {
1663 /* No polling with INTx for T3A */
1664 if (adapter->params.rev == 0 &&
1665 !(adapter->flags & USING_MSI))
1666 t.polling = 0;
1668 for (i = 0; i < SGE_QSETS; i++) {
1669 q = &adapter->params.sge.
1670 qset[i];
1671 q->polling = t.polling;
1675 break;
1677 case CHELSIO_GET_QSET_PARAMS:{
1678 struct qset_params *q;
1679 struct ch_qset_params t;
1681 if (copy_from_user(&t, useraddr, sizeof(t)))
1682 return -EFAULT;
1683 if (t.qset_idx >= SGE_QSETS)
1684 return -EINVAL;
1686 q = &adapter->params.sge.qset[t.qset_idx];
1687 t.rspq_size = q->rspq_size;
1688 t.txq_size[0] = q->txq_size[0];
1689 t.txq_size[1] = q->txq_size[1];
1690 t.txq_size[2] = q->txq_size[2];
1691 t.fl_size[0] = q->fl_size;
1692 t.fl_size[1] = q->jumbo_size;
1693 t.polling = q->polling;
1694 t.intr_lat = q->coalesce_usecs;
1695 t.cong_thres = q->cong_thres;
1697 if (copy_to_user(useraddr, &t, sizeof(t)))
1698 return -EFAULT;
1699 break;
1701 case CHELSIO_SET_QSET_NUM:{
1702 struct ch_reg edata;
1703 struct port_info *pi = netdev_priv(dev);
1704 unsigned int i, first_qset = 0, other_qsets = 0;
1706 if (!capable(CAP_NET_ADMIN))
1707 return -EPERM;
1708 if (adapter->flags & FULL_INIT_DONE)
1709 return -EBUSY;
1710 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1711 return -EFAULT;
1712 if (edata.val < 1 ||
1713 (edata.val > 1 && !(adapter->flags & USING_MSIX)))
1714 return -EINVAL;
1716 for_each_port(adapter, i)
1717 if (adapter->port[i] && adapter->port[i] != dev)
1718 other_qsets += adap2pinfo(adapter, i)->nqsets;
1720 if (edata.val + other_qsets > SGE_QSETS)
1721 return -EINVAL;
1723 pi->nqsets = edata.val;
1725 for_each_port(adapter, i)
1726 if (adapter->port[i]) {
1727 pi = adap2pinfo(adapter, i);
1728 pi->first_qset = first_qset;
1729 first_qset += pi->nqsets;
1731 break;
1733 case CHELSIO_GET_QSET_NUM:{
1734 struct ch_reg edata;
1735 struct port_info *pi = netdev_priv(dev);
1737 edata.cmd = CHELSIO_GET_QSET_NUM;
1738 edata.val = pi->nqsets;
1739 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1740 return -EFAULT;
1741 break;
1743 case CHELSIO_LOAD_FW:{
1744 u8 *fw_data;
1745 struct ch_mem_range t;
1747 if (!capable(CAP_NET_ADMIN))
1748 return -EPERM;
1749 if (copy_from_user(&t, useraddr, sizeof(t)))
1750 return -EFAULT;
1752 fw_data = kmalloc(t.len, GFP_KERNEL);
1753 if (!fw_data)
1754 return -ENOMEM;
1756 if (copy_from_user
1757 (fw_data, useraddr + sizeof(t), t.len)) {
1758 kfree(fw_data);
1759 return -EFAULT;
1762 ret = t3_load_fw(adapter, fw_data, t.len);
1763 kfree(fw_data);
1764 if (ret)
1765 return ret;
1766 break;
1768 case CHELSIO_SETMTUTAB:{
1769 struct ch_mtus m;
1770 int i;
1772 if (!is_offload(adapter))
1773 return -EOPNOTSUPP;
1774 if (!capable(CAP_NET_ADMIN))
1775 return -EPERM;
1776 if (offload_running(adapter))
1777 return -EBUSY;
1778 if (copy_from_user(&m, useraddr, sizeof(m)))
1779 return -EFAULT;
1780 if (m.nmtus != NMTUS)
1781 return -EINVAL;
1782 if (m.mtus[0] < 81) /* accommodate SACK */
1783 return -EINVAL;
1785 /* MTUs must be in ascending order */
1786 for (i = 1; i < NMTUS; ++i)
1787 if (m.mtus[i] < m.mtus[i - 1])
1788 return -EINVAL;
1790 memcpy(adapter->params.mtus, m.mtus,
1791 sizeof(adapter->params.mtus));
1792 break;
1794 case CHELSIO_GET_PM:{
1795 struct tp_params *p = &adapter->params.tp;
1796 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
1798 if (!is_offload(adapter))
1799 return -EOPNOTSUPP;
1800 m.tx_pg_sz = p->tx_pg_size;
1801 m.tx_num_pg = p->tx_num_pgs;
1802 m.rx_pg_sz = p->rx_pg_size;
1803 m.rx_num_pg = p->rx_num_pgs;
1804 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
1805 if (copy_to_user(useraddr, &m, sizeof(m)))
1806 return -EFAULT;
1807 break;
1809 case CHELSIO_SET_PM:{
1810 struct ch_pm m;
1811 struct tp_params *p = &adapter->params.tp;
1813 if (!is_offload(adapter))
1814 return -EOPNOTSUPP;
1815 if (!capable(CAP_NET_ADMIN))
1816 return -EPERM;
1817 if (adapter->flags & FULL_INIT_DONE)
1818 return -EBUSY;
1819 if (copy_from_user(&m, useraddr, sizeof(m)))
1820 return -EFAULT;
1821 if (!m.rx_pg_sz || (m.rx_pg_sz & (m.rx_pg_sz - 1)) ||
1822 !m.tx_pg_sz || (m.tx_pg_sz & (m.tx_pg_sz - 1)))
1823 return -EINVAL; /* not power of 2 */
1824 if (!(m.rx_pg_sz & 0x14000))
1825 return -EINVAL; /* not 16KB or 64KB */
1826 if (!(m.tx_pg_sz & 0x1554000))
1827 return -EINVAL;
1828 if (m.tx_num_pg == -1)
1829 m.tx_num_pg = p->tx_num_pgs;
1830 if (m.rx_num_pg == -1)
1831 m.rx_num_pg = p->rx_num_pgs;
1832 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
1833 return -EINVAL;
1834 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
1835 m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
1836 return -EINVAL;
1837 p->rx_pg_size = m.rx_pg_sz;
1838 p->tx_pg_size = m.tx_pg_sz;
1839 p->rx_num_pgs = m.rx_num_pg;
1840 p->tx_num_pgs = m.tx_num_pg;
1841 break;
1843 case CHELSIO_GET_MEM:{
1844 struct ch_mem_range t;
1845 struct mc7 *mem;
1846 u64 buf[32];
1848 if (!is_offload(adapter))
1849 return -EOPNOTSUPP;
1850 if (!(adapter->flags & FULL_INIT_DONE))
1851 return -EIO; /* need the memory controllers */
1852 if (copy_from_user(&t, useraddr, sizeof(t)))
1853 return -EFAULT;
1854 if ((t.addr & 7) || (t.len & 7))
1855 return -EINVAL;
1856 if (t.mem_id == MEM_CM)
1857 mem = &adapter->cm;
1858 else if (t.mem_id == MEM_PMRX)
1859 mem = &adapter->pmrx;
1860 else if (t.mem_id == MEM_PMTX)
1861 mem = &adapter->pmtx;
1862 else
1863 return -EINVAL;
1866 * Version scheme:
1867 * bits 0..9: chip version
1868 * bits 10..15: chip revision
1870 t.version = 3 | (adapter->params.rev << 10);
1871 if (copy_to_user(useraddr, &t, sizeof(t)))
1872 return -EFAULT;
1875 * Read 256 bytes at a time as len can be large and we don't
1876 * want to use huge intermediate buffers.
1878 useraddr += sizeof(t); /* advance to start of buffer */
1879 while (t.len) {
1880 unsigned int chunk =
1881 min_t(unsigned int, t.len, sizeof(buf));
1883 ret =
1884 t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
1885 buf);
1886 if (ret)
1887 return ret;
1888 if (copy_to_user(useraddr, buf, chunk))
1889 return -EFAULT;
1890 useraddr += chunk;
1891 t.addr += chunk;
1892 t.len -= chunk;
1894 break;
1896 case CHELSIO_SET_TRACE_FILTER:{
1897 struct ch_trace t;
1898 const struct trace_params *tp;
1900 if (!capable(CAP_NET_ADMIN))
1901 return -EPERM;
1902 if (!offload_running(adapter))
1903 return -EAGAIN;
1904 if (copy_from_user(&t, useraddr, sizeof(t)))
1905 return -EFAULT;
1907 tp = (const struct trace_params *)&t.sip;
1908 if (t.config_tx)
1909 t3_config_trace_filter(adapter, tp, 0,
1910 t.invert_match,
1911 t.trace_tx);
1912 if (t.config_rx)
1913 t3_config_trace_filter(adapter, tp, 1,
1914 t.invert_match,
1915 t.trace_rx);
1916 break;
1918 default:
1919 return -EOPNOTSUPP;
1921 return 0;
1924 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
1926 int ret, mmd;
1927 struct adapter *adapter = dev->priv;
1928 struct port_info *pi = netdev_priv(dev);
1929 struct mii_ioctl_data *data = if_mii(req);
1931 switch (cmd) {
1932 case SIOCGMIIPHY:
1933 data->phy_id = pi->phy.addr;
1934 /* FALLTHRU */
1935 case SIOCGMIIREG:{
1936 u32 val;
1937 struct cphy *phy = &pi->phy;
1939 if (!phy->mdio_read)
1940 return -EOPNOTSUPP;
1941 if (is_10G(adapter)) {
1942 mmd = data->phy_id >> 8;
1943 if (!mmd)
1944 mmd = MDIO_DEV_PCS;
1945 else if (mmd > MDIO_DEV_XGXS)
1946 return -EINVAL;
1948 ret =
1949 phy->mdio_read(adapter, data->phy_id & 0x1f,
1950 mmd, data->reg_num, &val);
1951 } else
1952 ret =
1953 phy->mdio_read(adapter, data->phy_id & 0x1f,
1954 0, data->reg_num & 0x1f,
1955 &val);
1956 if (!ret)
1957 data->val_out = val;
1958 break;
1960 case SIOCSMIIREG:{
1961 struct cphy *phy = &pi->phy;
1963 if (!capable(CAP_NET_ADMIN))
1964 return -EPERM;
1965 if (!phy->mdio_write)
1966 return -EOPNOTSUPP;
1967 if (is_10G(adapter)) {
1968 mmd = data->phy_id >> 8;
1969 if (!mmd)
1970 mmd = MDIO_DEV_PCS;
1971 else if (mmd > MDIO_DEV_XGXS)
1972 return -EINVAL;
1974 ret =
1975 phy->mdio_write(adapter,
1976 data->phy_id & 0x1f, mmd,
1977 data->reg_num,
1978 data->val_in);
1979 } else
1980 ret =
1981 phy->mdio_write(adapter,
1982 data->phy_id & 0x1f, 0,
1983 data->reg_num & 0x1f,
1984 data->val_in);
1985 break;
1987 case SIOCCHIOCTL:
1988 return cxgb_extension_ioctl(dev, req->ifr_data);
1989 default:
1990 return -EOPNOTSUPP;
1992 return ret;
1995 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
1997 int ret;
1998 struct adapter *adapter = dev->priv;
1999 struct port_info *pi = netdev_priv(dev);
2001 if (new_mtu < 81) /* accommodate SACK */
2002 return -EINVAL;
2003 if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2004 return ret;
2005 dev->mtu = new_mtu;
2006 init_port_mtus(adapter);
2007 if (adapter->params.rev == 0 && offload_running(adapter))
2008 t3_load_mtus(adapter, adapter->params.mtus,
2009 adapter->params.a_wnd, adapter->params.b_wnd,
2010 adapter->port[0]->mtu);
2011 return 0;
2014 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2016 struct adapter *adapter = dev->priv;
2017 struct port_info *pi = netdev_priv(dev);
2018 struct sockaddr *addr = p;
2020 if (!is_valid_ether_addr(addr->sa_data))
2021 return -EINVAL;
2023 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2024 t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
2025 if (offload_running(adapter))
2026 write_smt_entry(adapter, pi->port_id);
2027 return 0;
2031 * t3_synchronize_rx - wait for current Rx processing on a port to complete
2032 * @adap: the adapter
2033 * @p: the port
2035 * Ensures that current Rx processing on any of the queues associated with
2036 * the given port completes before returning. We do this by acquiring and
2037 * releasing the locks of the response queues associated with the port.
2039 static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2041 int i;
2043 for (i = 0; i < p->nqsets; i++) {
2044 struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;
2046 spin_lock_irq(&q->lock);
2047 spin_unlock_irq(&q->lock);
2051 static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2053 struct adapter *adapter = dev->priv;
2054 struct port_info *pi = netdev_priv(dev);
2056 pi->vlan_grp = grp;
2057 if (adapter->params.rev > 0)
2058 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2059 else {
2060 /* single control for all ports */
2061 unsigned int i, have_vlans = 0;
2062 for_each_port(adapter, i)
2063 have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2065 t3_set_vlan_accel(adapter, 1, have_vlans);
2067 t3_synchronize_rx(adapter, pi);
2070 #ifdef CONFIG_NET_POLL_CONTROLLER
2071 static void cxgb_netpoll(struct net_device *dev)
2073 struct adapter *adapter = dev->priv;
2074 struct port_info *pi = netdev_priv(dev);
2075 int qidx;
2077 for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2078 struct sge_qset *qs = &adapter->sge.qs[qidx];
2079 void *source;
2081 if (adapter->flags & USING_MSIX)
2082 source = qs;
2083 else
2084 source = adapter;
2086 t3_intr_handler(adapter, qs->rspq.polling) (0, source);
2089 #endif
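/*
 * update_tpsram() requests the protocol SRAM image named by TPSRAM_NAME from
 * userspace, validates it with t3_check_tpsram(), and writes it to the TP's
 * SRAM with t3_set_proto_sram().
 */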
2091 #define TPSRAM_NAME "t3%c_protocol_sram-%d.%d.%d.bin"
2092 int update_tpsram(struct adapter *adap)
2094 const struct firmware *tpsram;
2095 char buf[64];
2096 struct device *dev = &adap->pdev->dev;
2097 int ret;
2098 char rev;
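/* Pick the protocol-SRAM image matching the board revision and the TP
 * microcode version this driver was built against. */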
2100 rev = adap->params.rev == T3_REV_B2 ? 'b' : 'a';
2102 snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
2103 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
2105 ret = request_firmware(&tpsram, buf, dev);
2106 if (ret < 0) {
2107 dev_err(dev, "could not load TP SRAM: unable to load %s\n",
2108 buf);
2109 return ret;
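/* Sanity-check the image before committing it to the chip. */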
2112 ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
2113 if (ret)
2114 goto release_tpsram;
2116 ret = t3_set_proto_sram(adap, tpsram->data);
2117 if (ret)
2118 dev_err(dev, "loading protocol SRAM failed\n");
2120 release_tpsram:
2121 release_firmware(tpsram);
2123 return ret;
2127 /*
2128 * Periodic accumulation of MAC statistics.
2129 */
2130 static void mac_stats_update(struct adapter *adapter)
2132 int i;
2134 for_each_port(adapter, i) {
2135 struct net_device *dev = adapter->port[i];
2136 struct port_info *p = netdev_priv(dev);
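/* stats_lock serializes these periodic updates with other readers of the
 * MAC counters, e.g. the get_stats path. */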
2138 if (netif_running(dev)) {
2139 spin_lock(&adapter->stats_lock);
2140 t3_mac_update_stats(&p->mac);
2141 spin_unlock(&adapter->stats_lock);
2146 static void check_link_status(struct adapter *adapter)
2148 int i;
2150 for_each_port(adapter, i) {
2151 struct net_device *dev = adapter->port[i];
2152 struct port_info *p = netdev_priv(dev);
2154 if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev))
2155 t3_link_changed(adapter, i);
2159 static void check_t3b2_mac(struct adapter *adapter)
2161 int i;
2163 if (!rtnl_trylock()) /* synchronize with ifdown */
2164 return;
2166 for_each_port(adapter, i) {
2167 struct net_device *dev = adapter->port[i];
2168 struct port_info *p = netdev_priv(dev);
2169 int status;
2171 if (!netif_running(dev))
2172 continue;
2174 status = 0;
2175 if (netif_carrier_ok(dev))
2176 status = t3b2_mac_watchdog_task(&p->mac);
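/* 1: the watchdog toggled the MAC to recover it; 2: the MAC was reset and
 * must be fully reprogrammed below. */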
2177 if (status == 1)
2178 p->mac.stats.num_toggled++;
2179 else if (status == 2) {
2180 struct cmac *mac = &p->mac;
2182 t3_mac_set_mtu(mac, dev->mtu);
2183 t3_mac_set_address(mac, 0, dev->dev_addr);
2184 cxgb_set_rxmode(dev);
2185 t3_link_start(&p->phy, mac, &p->link_config);
2186 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2187 t3_port_intr_enable(adapter, p->port_id);
2188 p->mac.stats.num_resets++;
2191 rtnl_unlock();
2195 static void t3_adap_check_task(struct work_struct *work)
2197 struct adapter *adapter = container_of(work, struct adapter,
2198 adap_check_task.work);
2199 const struct adapter_params *p = &adapter->params;
2201 adapter->check_task_cnt++;
2203 /* Check link status for PHYs without interrupts */
2204 if (p->linkpoll_period)
2205 check_link_status(adapter);
2207 /* Accumulate MAC stats if needed */
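/* linkpoll_period is in tenths of a second, so this works out to roughly
 * one stats pass every stats_update_period seconds. */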
2208 if (!p->linkpoll_period ||
2209 (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2210 p->stats_update_period) {
2211 mac_stats_update(adapter);
2212 adapter->check_task_cnt = 0;
2215 if (p->rev == T3_REV_B2)
2216 check_t3b2_mac(adapter);
2218 /* Schedule the next check update if any port is active. */
2219 spin_lock(&adapter->work_lock);
2220 if (adapter->open_device_map & PORT_MASK)
2221 schedule_chk_task(adapter);
2222 spin_unlock(&adapter->work_lock);
2225 /*
2226 * Processes external (PHY) interrupts in process context.
2227 */
2228 static void ext_intr_task(struct work_struct *work)
2230 struct adapter *adapter = container_of(work, struct adapter,
2231 ext_intr_handler_task);
2233 t3_phy_intr_handler(adapter);
2235 /* Now reenable external interrupts */
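/* External PHY interrupts are signalled through the T3DBG bit, which was
 * masked off in t3_os_ext_intr_handler(). */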
2236 spin_lock_irq(&adapter->work_lock);
2237 if (adapter->slow_intr_mask) {
2238 adapter->slow_intr_mask |= F_T3DBG;
2239 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2240 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2241 adapter->slow_intr_mask);
2243 spin_unlock_irq(&adapter->work_lock);
2246 /*
2247 * Interrupt-context handler for external (PHY) interrupts.
2248 */
2249 void t3_os_ext_intr_handler(struct adapter *adapter)
2250 {
2251 /*
2252 * Schedule a task to handle external interrupts as they may be slow
2253 * and we use a mutex to protect MDIO registers. We disable PHY
2254 * interrupts in the meantime and let the task reenable them when
2255 * it's done.
2256 */
2257 spin_lock(&adapter->work_lock);
2258 if (adapter->slow_intr_mask) {
2259 adapter->slow_intr_mask &= ~F_T3DBG;
2260 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2261 adapter->slow_intr_mask);
2262 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2264 spin_unlock(&adapter->work_lock);
2267 void t3_fatal_err(struct adapter *adapter)
2269 unsigned int fw_status[4];
2271 if (adapter->flags & FULL_INIT_DONE) {
2272 t3_sge_stop(adapter);
2273 t3_intr_disable(adapter);
2275 CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
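/* Dump a few firmware status words from the CIM control block to aid
 * post-mortem analysis. */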
2276 if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2277 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2278 fw_status[0], fw_status[1],
2279 fw_status[2], fw_status[3]);
2283 static int __devinit cxgb_enable_msix(struct adapter *adap)
2285 struct msix_entry entries[SGE_QSETS + 1];
2286 int i, err;
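/* Ask for one vector per SGE queue set plus one for slow-path/error
 * interrupts. */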
2288 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2289 entries[i].entry = i;
2291 err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
2292 if (!err) {
2293 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2294 adap->msix_info[i].vec = entries[i].vector;
2295 } else if (err > 0)
2296 dev_info(&adap->pdev->dev,
2297 "only %d MSI-X vectors left, not using MSI-X\n", err);
2298 return err;
2301 static void __devinit print_port_info(struct adapter *adap,
2302 const struct adapter_info *ai)
2304 static const char *pci_variant[] = {
2305 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
2308 int i;
2309 char buf[80];
2311 if (is_pcie(adap))
2312 snprintf(buf, sizeof(buf), "%s x%d",
2313 pci_variant[adap->params.pci.variant],
2314 adap->params.pci.width);
2315 else
2316 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2317 pci_variant[adap->params.pci.variant],
2318 adap->params.pci.speed, adap->params.pci.width);
2320 for_each_port(adap, i) {
2321 struct net_device *dev = adap->port[i];
2322 const struct port_info *pi = netdev_priv(dev);
2324 if (!test_bit(i, &adap->registered_device_map))
2325 continue;
2326 printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
2327 dev->name, ai->desc, pi->port_type->desc,
2328 is_offload(adap) ? "R" : "", adap->params.rev, buf,
2329 (adap->flags & USING_MSIX) ? " MSI-X" :
2330 (adap->flags & USING_MSI) ? " MSI" : "");
2331 if (adap->name == dev->name && adap->params.vpd.mclk)
2332 printk(KERN_INFO "%s: %uMB CM, %uMB PMTX, %uMB PMRX\n",
2333 adap->name, t3_mc7_size(&adap->cm) >> 20,
2334 t3_mc7_size(&adap->pmtx) >> 20,
2335 t3_mc7_size(&adap->pmrx) >> 20);
2339 static int __devinit init_one(struct pci_dev *pdev,
2340 const struct pci_device_id *ent)
2342 static int version_printed;
2344 int i, err, pci_using_dac = 0;
2345 unsigned long mmio_start, mmio_len;
2346 const struct adapter_info *ai;
2347 struct adapter *adapter = NULL;
2348 struct port_info *pi;
2350 if (!version_printed) {
2351 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
2352 ++version_printed;
2355 if (!cxgb3_wq) {
2356 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
2357 if (!cxgb3_wq) {
2358 printk(KERN_ERR DRV_NAME
2359 ": cannot initialize work queue\n");
2360 return -ENOMEM;
2364 err = pci_request_regions(pdev, DRV_NAME);
2365 if (err) {
2366 /* Just info, some other driver may have claimed the device. */
2367 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
2368 return err;
2371 err = pci_enable_device(pdev);
2372 if (err) {
2373 dev_err(&pdev->dev, "cannot enable PCI device\n");
2374 goto out_release_regions;
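/* Prefer 64-bit DMA and fall back to a 32-bit mask if the platform cannot
 * provide it; pci_using_dac later gates NETIF_F_HIGHDMA. */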
2377 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2378 pci_using_dac = 1;
2379 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2380 if (err) {
2381 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
2382 "coherent allocations\n");
2383 goto out_disable_device;
2385 } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
2386 dev_err(&pdev->dev, "no usable DMA configuration\n");
2387 goto out_disable_device;
2390 pci_set_master(pdev);
2392 mmio_start = pci_resource_start(pdev, 0);
2393 mmio_len = pci_resource_len(pdev, 0);
2394 ai = t3_get_adapter_info(ent->driver_data);
2396 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2397 if (!adapter) {
2398 err = -ENOMEM;
2399 goto out_disable_device;
2402 adapter->regs = ioremap_nocache(mmio_start, mmio_len);
2403 if (!adapter->regs) {
2404 dev_err(&pdev->dev, "cannot map device registers\n");
2405 err = -ENOMEM;
2406 goto out_free_adapter;
2409 adapter->pdev = pdev;
2410 adapter->name = pci_name(pdev);
2411 adapter->msg_enable = dflt_msg_enable;
2412 adapter->mmio_len = mmio_len;
2414 mutex_init(&adapter->mdio_lock);
2415 spin_lock_init(&adapter->work_lock);
2416 spin_lock_init(&adapter->stats_lock);
2418 INIT_LIST_HEAD(&adapter->adapter_list);
2419 INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
2420 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
2422 for (i = 0; i < ai->nports; ++i) {
2423 struct net_device *netdev;
2425 netdev = alloc_etherdev(sizeof(struct port_info));
2426 if (!netdev) {
2427 err = -ENOMEM;
2428 goto out_free_dev;
2431 SET_MODULE_OWNER(netdev);
2432 SET_NETDEV_DEV(netdev, &pdev->dev);
2434 adapter->port[i] = netdev;
2435 pi = netdev_priv(netdev);
2436 pi->rx_csum_offload = 1;
2437 pi->nqsets = 1;
2438 pi->first_qset = i;
2439 pi->activity = 0;
2440 pi->port_id = i;
2441 netif_carrier_off(netdev);
2442 netdev->irq = pdev->irq;
2443 netdev->mem_start = mmio_start;
2444 netdev->mem_end = mmio_start + mmio_len - 1;
2445 netdev->priv = adapter;
2446 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
2447 netdev->features |= NETIF_F_LLTX;
2448 if (pci_using_dac)
2449 netdev->features |= NETIF_F_HIGHDMA;
2451 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2452 netdev->vlan_rx_register = vlan_rx_register;
2454 netdev->open = cxgb_open;
2455 netdev->stop = cxgb_close;
2456 netdev->hard_start_xmit = t3_eth_xmit;
2457 netdev->get_stats = cxgb_get_stats;
2458 netdev->set_multicast_list = cxgb_set_rxmode;
2459 netdev->do_ioctl = cxgb_ioctl;
2460 netdev->change_mtu = cxgb_change_mtu;
2461 netdev->set_mac_address = cxgb_set_mac_addr;
2462 #ifdef CONFIG_NET_POLL_CONTROLLER
2463 netdev->poll_controller = cxgb_netpoll;
2464 #endif
2465 netdev->weight = 64;
2467 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
2470 pci_set_drvdata(pdev, adapter->port[0]);
2471 if (t3_prep_adapter(adapter, ai, 1) < 0) {
2472 err = -ENODEV;
2473 goto out_free_dev;
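/* A version mismatch (-EINVAL) means the protocol SRAM on the card is stale
 * for this driver; try to refresh it from the firmware image. */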
2476 err = t3_check_tpsram_version(adapter);
2477 if (err == -EINVAL)
2478 err = update_tpsram(adapter);
2480 if (err)
2481 goto out_free_dev;
2483 /*
2484 * The card is now ready to go. If any errors occur during device
2485 * registration we do not fail the whole card but rather proceed only
2486 * with the ports we manage to register successfully. However we must
2487 * register at least one net device.
2488 */
2489 for_each_port(adapter, i) {
2490 err = register_netdev(adapter->port[i]);
2491 if (err)
2492 dev_warn(&pdev->dev,
2493 "cannot register net device %s, skipping\n",
2494 adapter->port[i]->name);
2495 else {
2496 /*
2497 * Change the name we use for messages to the name of
2498 * the first successfully registered interface.
2499 */
2500 if (!adapter->registered_device_map)
2501 adapter->name = adapter->port[i]->name;
2503 __set_bit(i, &adapter->registered_device_map);
2506 if (!adapter->registered_device_map) {
2507 dev_err(&pdev->dev, "could not register any net devices\n");
2508 goto out_free_dev;
2511 /* Driver's ready. Reflect it on LEDs */
2512 t3_led_ready(adapter);
2514 if (is_offload(adapter)) {
2515 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
2516 cxgb3_adapter_ofld(adapter);
2519 /* See what interrupts we'll be using */
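/* msi > 1 tries MSI-X first, msi > 0 falls back to MSI, otherwise legacy
 * INTx interrupts are used. */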
2520 if (msi > 1 && cxgb_enable_msix(adapter) == 0)
2521 adapter->flags |= USING_MSIX;
2522 else if (msi > 0 && pci_enable_msi(pdev) == 0)
2523 adapter->flags |= USING_MSI;
2525 err = sysfs_create_group(&adapter->port[0]->dev.kobj,
2526 &cxgb3_attr_group);
2528 print_port_info(adapter, ai);
2529 return 0;
2531 out_free_dev:
2532 iounmap(adapter->regs);
2533 for (i = ai->nports - 1; i >= 0; --i)
2534 if (adapter->port[i])
2535 free_netdev(adapter->port[i]);
2537 out_free_adapter:
2538 kfree(adapter);
2540 out_disable_device:
2541 pci_disable_device(pdev);
2542 out_release_regions:
2543 pci_release_regions(pdev);
2544 pci_set_drvdata(pdev, NULL);
2545 return err;
2548 static void __devexit remove_one(struct pci_dev *pdev)
2550 struct net_device *dev = pci_get_drvdata(pdev);
2552 if (dev) {
2553 int i;
2554 struct adapter *adapter = dev->priv;
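/* Quiesce the SGE first so no DMA is in flight while the net devices are
 * unregistered and resources are torn down. */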
2556 t3_sge_stop(adapter);
2557 sysfs_remove_group(&adapter->port[0]->dev.kobj,
2558 &cxgb3_attr_group);
2560 for_each_port(adapter, i)
2561 if (test_bit(i, &adapter->registered_device_map))
2562 unregister_netdev(adapter->port[i]);
2564 if (is_offload(adapter)) {
2565 cxgb3_adapter_unofld(adapter);
2566 if (test_bit(OFFLOAD_DEVMAP_BIT,
2567 &adapter->open_device_map))
2568 offload_close(&adapter->tdev);
2571 t3_free_sge_resources(adapter);
2572 cxgb_disable_msi(adapter);
2574 for (i = 0; i < ARRAY_SIZE(adapter->dummy_netdev); i++)
2575 if (adapter->dummy_netdev[i]) {
2576 free_netdev(adapter->dummy_netdev[i]);
2577 adapter->dummy_netdev[i] = NULL;
2580 for_each_port(adapter, i)
2581 if (adapter->port[i])
2582 free_netdev(adapter->port[i]);
2584 iounmap(adapter->regs);
2585 kfree(adapter);
2586 pci_release_regions(pdev);
2587 pci_disable_device(pdev);
2588 pci_set_drvdata(pdev, NULL);
2592 static struct pci_driver driver = {
2593 .name = DRV_NAME,
2594 .id_table = cxgb3_pci_tbl,
2595 .probe = init_one,
2596 .remove = __devexit_p(remove_one),
2599 static int __init cxgb3_init_module(void)
2601 int ret;
2603 cxgb3_offload_init();
2605 ret = pci_register_driver(&driver);
2606 return ret;
2609 static void __exit cxgb3_cleanup_module(void)
2611 pci_unregister_driver(&driver);
2612 if (cxgb3_wq)
2613 destroy_workqueue(cxgb3_wq);
2616 module_init(cxgb3_init_module);
2617 module_exit(cxgb3_cleanup_module);