/*
 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"
enum {
	MAX_TXQ_ENTRIES = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES = 16384,
	MAX_RX_BUFFERS = 16384,
	MAX_RX_JUMBO_BUFFERS = 16384,
	MIN_TXQ_ENTRIES = 4,
	MIN_CTRL_TXQ_ENTRIES = 4,
	MIN_RSPQ_ENTRIES = 32,
	MIN_FL_ENTRIES = 32
};
#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, idx) \
	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
static const struct pci_device_id cxgb3_pci_tbl[] = {
	CH_DEVICE(0x20, 0),	/* PE9000 */
	CH_DEVICE(0x21, 1),	/* T302E */
	CH_DEVICE(0x22, 2),	/* T310E */
	CH_DEVICE(0x23, 3),	/* T320X */
	CH_DEVICE(0x24, 1),	/* T302X */
	CH_DEVICE(0x25, 3),	/* T320E */
	CH_DEVICE(0x26, 2),	/* T310X */
	CH_DEVICE(0x30, 2),	/* T3B10 */
	CH_DEVICE(0x31, 3),	/* T3B20 */
	CH_DEVICE(0x32, 1),	/* T3B02 */
	{0,}
};
MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */
static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
static struct workqueue_struct *cxgb3_wq;
/**
 *	link_report - show link status and link speed/duplex
 *	@dev: the port whose settings are to be reported
 *
 *	Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		printk(KERN_INFO "%s: link down\n", dev->name);
	else {
		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_config.speed) {
		case SPEED_10000:
			s = "10Gbps";
			break;
		case SPEED_1000:
			s = "1000Mbps";
			break;
		case SPEED_100:
			s = "100Mbps";
			break;
		}

		printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
		       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
	}
}
/**
 *	t3_os_link_changed - handle link status changes
 *	@adapter: the adapter associated with the link change
 *	@port_id: the port index whose link status has changed
 *	@link_stat: the new status of the link
 *	@speed: the new speed setting
 *	@duplex: the new duplex setting
 *	@pause: the new flow-control setting
 *
 *	This is the OS-dependent handler for link status changes.  The OS
 *	neutral handler takes care of most of the processing for these events,
 *	then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
			int speed, int duplex, int pause)
{
	struct net_device *dev = adapter->port[port_id];
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	/* Skip changes from disabled ports. */
	if (!netif_running(dev))
		return;

	if (link_stat != netif_carrier_ok(dev)) {
		if (link_stat) {
			t3_mac_enable(mac, MAC_DIRECTION_RX);
			netif_carrier_on(dev);
		} else {
			netif_carrier_off(dev);
			pi->phy.ops->power_down(&pi->phy, 1);
			t3_mac_disable(mac, MAC_DIRECTION_RX);
			t3_link_start(&pi->phy, mac, &pi->link_config);
		}

		link_report(dev);
	}
}
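/*
 * Propagate a net device's current unicast/multicast filter settings to its
 * MAC.  Called whenever the rx mode of an interface changes.
 */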
static void cxgb_set_rxmode(struct net_device *dev)
{
	struct t3_rx_mode rm;
	struct port_info *pi = netdev_priv(dev);

	init_rx_mode(&rm, dev, dev->mc_list);
	t3_mac_set_rx_mode(&pi->mac, &rm);
}
/**
 *	link_start - enable a port
 *	@dev: the device to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
	struct t3_rx_mode rm;
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	init_rx_mode(&rm, dev, dev->mc_list);
	t3_mac_reset(mac);
	t3_mac_set_mtu(mac, dev->mtu);
	t3_mac_set_address(mac, 0, dev->dev_addr);
	t3_mac_set_rx_mode(mac, &rm);
	t3_link_start(&pi->phy, mac, &pi->link_config);
	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}
static inline void cxgb_disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}
/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
	t3_slow_intr_handler(cookie);
	return IRQ_HANDLED;
}
/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

	snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
	adap->msix_info[0].desc[n] = 0;

	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++) {
			snprintf(adap->msix_info[msi_idx].desc, n,
				 "%s (queue %d)", d->name, i);
			adap->msix_info[msi_idx].desc[n] = 0;
		}
	}
}
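/*
 * Request the data-path MSI-X interrupts, one vector per SGE queue set.
 * Vector 0 is reserved for slow-path (asynchronous) events, so queue set i
 * uses vector i + 1.  On failure the vectors acquired so far are released.
 */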
static int request_msix_data_irqs(struct adapter *adap)
{
	int i, j, err, qidx = 0;

	for_each_port(adap, i) {
		int nqsets = adap2pinfo(adap, i)->nqsets;

		for (j = 0; j < nqsets; ++j) {
			err = request_irq(adap->msix_info[qidx + 1].vec,
					  t3_intr_handler(adap,
							  adap->sge.qs[qidx].
							  rspq.polling), 0,
					  adap->msix_info[qidx + 1].desc,
					  &adap->sge.qs[qidx]);
			if (err) {
				while (--qidx >= 0)
					free_irq(adap->msix_info[qidx + 1].vec,
						 &adap->sge.qs[qidx]);
				return err;
			}
			qidx++;
		}
	}
	return 0;
}
/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS to distribute packets to multiple receive queues.  We
 *	configure the RSS CPU lookup table to distribute to the number of HW
 *	receive queues, and the response queue lookup table to narrow that
 *	down to the response queues actually configured for each port.
 *	We always configure the RSS mapping for two ports since the mapping
 *	table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
	int i;
	unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
	unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
	u8 cpus[SGE_QSETS + 1];
	u16 rspq_map[RSS_TABLE_SIZE];

	for (i = 0; i < SGE_QSETS; ++i)
		cpus[i] = i;
	cpus[SGE_QSETS] = 0xff;	/* terminator */

	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
		rspq_map[i] = i % nq0;
		rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
	}

	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
		      V_RRCPLCPUSIZE(6), cpus, rspq_map);
}
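/*
 * Register a NAPI handler for each queue set that has been assigned a
 * netdevice, using a NAPI weight of 64.
 */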
static void init_napi(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++) {
		struct sge_qset *qs = &adap->sge.qs[i];

		if (qs->adap)
			netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
				       64);
	}
}
/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_disable(&adap->sge.qs[i].napi);
}
static void enable_all_napi(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_enable(&adap->sge.qs[i].napi);
}
/**
 *	setup_sge_qsets - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
	int i, j, err, irq_idx = 0, qset_idx = 0;
	unsigned int ntxq = SGE_TXQ_PER_SET;

	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
		irq_idx = -1;

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);

		pi->qs = &adap->sge.qs[pi->first_qset];
		for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
			err = t3_sge_alloc_qset(adap, qset_idx, 1,
				(adap->flags & USING_MSIX) ? qset_idx + 1 :
							     irq_idx,
				&adap->params.sge.qset[qset_idx], ntxq, dev);
			if (err) {
				t3_free_sge_resources(adap);
				return err;
			}
		}
	}

	return 0;
}
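/*
 * Generic sysfs show/store helpers.  attr_show formats a per-device value
 * under the rtnl lock; attr_store checks CAP_NET_ADMIN and a [min_val,
 * max_val] range before invoking the supplied setter, also under rtnl.
 */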
static ssize_t attr_show(struct device *d, struct device_attribute *attr,
			 char *buf,
			 ssize_t(*format) (struct net_device *, char *))
{
	ssize_t len;

	/* Synchronize with ioctls that may shut down the device */
	rtnl_lock();
	len = (*format) (to_net_dev(d), buf);
	rtnl_unlock();
	return len;
}
static ssize_t attr_store(struct device *d, struct device_attribute *attr,
			  const char *buf, size_t len,
			  ssize_t(*set) (struct net_device *, unsigned int),
			  unsigned int min_val, unsigned int max_val)
{
	char *endp;
	ssize_t ret;
	unsigned int val;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val < min_val || val > max_val)
		return -EINVAL;

	rtnl_lock();
	ret = (*set) (to_net_dev(d), val);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}
#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
	struct port_info *pi = netdev_priv(dev); \
	struct adapter *adap = pi->adapter; \
	return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return attr_show(d, attr, buf, format_##name); \
}
static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val && adap->params.rev == 0)
		return -EINVAL;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
	    min_tids)
		return -EINVAL;
	adap->params.mc5.nfilters = val;
	return 0;
}
static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, attr, buf, len, set_nfilters, 0, ~0);
}
static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
	    MC5_MIN_TIDS)
		return -EINVAL;
	adap->params.mc5.nservers = val;
	return 0;
}
static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, attr, buf, len, set_nservers, 0, ~0);
}
#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
	&dev_attr_cam_size.attr,
	&dev_attr_nfilters.attr,
	&dev_attr_nservers.attr,
	NULL
};

static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
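/*
 * Show/store helpers for the Tx scheduler rate attributes (sched0..sched7).
 * The rate is derived from the bytes-per-tick (bpt) and clocks-per-tick
 * (cpt) fields of the scheduler's rate-limit register using the core clock
 * from the VPD; a zero cpt means the scheduler is disabled.
 */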
static ssize_t tm_attr_show(struct device *d, struct device_attribute *attr,
			    char *buf, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int v, addr, bpt, cpt;
	ssize_t len;

	addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
	rtnl_lock();
	t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v >>= 16;
	bpt = (v >> 8) & 0xff;
	cpt = v & 0xff;
	if (!cpt)
		len = sprintf(buf, "disabled\n");
	else {
		v = (adap->params.vpd.cclk * 1000) / cpt;
		len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
	}
	rtnl_unlock();
	return len;
}
static ssize_t tm_attr_store(struct device *d, struct device_attribute *attr,
			     const char *buf, size_t len, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int val;
	char *endp;
	ssize_t ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val > 10000000)
		return -EINVAL;

	rtnl_lock();
	ret = t3_config_sched(adap, val, sched);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}
#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return tm_attr_show(d, attr, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
			    const char *buf, size_t len) \
{ \
	return tm_attr_store(d, attr, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);
static struct attribute *offload_attrs[] = {
	&dev_attr_sched0.attr,
	&dev_attr_sched1.attr,
	&dev_attr_sched2.attr,
	&dev_attr_sched3.attr,
	&dev_attr_sched4.attr,
	&dev_attr_sched5.attr,
	&dev_attr_sched6.attr,
	&dev_attr_sched7.attr,
	NULL
};

static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = t3_offload_tx(tdev, skb);
	local_bh_enable();
	return ret;
}
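/*
 * Populate an entry of the source MAC table (SMT) with the MAC address of
 * the given port by sending a CPL_SMT_WRITE_REQ on the offload queue.
 */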
static int write_smt_entry(struct adapter *adapter, int idx)
{
	struct cpl_smt_write_req *req;
	struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;

	req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
	req->mtu_idx = NMTUS - 1;	/* should be 0 but there's a T3 bug */
	req->iff = idx;
	memset(req->src_mac1, 0, sizeof(req->src_mac1));
	memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
	skb->priority = 1;
	offload_tx(&adapter->tdev, skb);
	return 0;
}
static int init_smt(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i)
	    write_smt_entry(adapter, i);
	return 0;
}
static void init_port_mtus(struct adapter *adapter)
{
	unsigned int mtus = adapter->port[0]->mtu;

	if (adapter->port[1])
		mtus |= adapter->port[1]->mtu << 16;
	t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}
static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
			      int hi, int port)
{
	struct sk_buff *skb;
	struct mngt_pktsched_wr *req;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
	req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
	req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
	req->sched = sched;
	req->idx = qidx;
	req->min = lo;
	req->max = hi;
	req->binding = port;
	t3_mgmt_tx(adap, skb);
}
static void bind_qsets(struct adapter *adap)
{
	int i, j;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		for (j = 0; j < pi->nqsets; ++j)
			send_pktsched_cmd(adap, 1, pi->first_qset + j, -1,
					  -1, i);
	}
}
#define FW_FNAME "t3fw-%d.%d.%d.bin"
#define TPSRAM_NAME "t3%c_protocol_sram-%d.%d.%d.bin"
static int upgrade_fw(struct adapter *adap)
{
	int ret;
	char buf[64];
	const struct firmware *fw;
	struct device *dev = &adap->pdev->dev;

	snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
		 FW_VERSION_MINOR, FW_VERSION_MICRO);
	ret = request_firmware(&fw, buf, dev);
	if (ret < 0) {
		dev_err(dev, "could not upgrade firmware: unable to load %s\n",
			buf);
		return ret;
	}
	ret = t3_load_fw(adap, fw->data, fw->size);
	release_firmware(fw);

	if (ret == 0)
		dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
			 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
	else
		dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
			FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

	return ret;
}
static inline char t3rev2char(struct adapter *adapter)
{
	char rev = 0;

	switch(adapter->params.rev) {
	case T3_REV_A:
		rev = 'a';
		break;
	case T3_REV_B:
	case T3_REV_B2:
		rev = 'b';
		break;
	}
	return rev;
}
static int update_tpsram(struct adapter *adap)
{
	const struct firmware *tpsram;
	char buf[64];
	struct device *dev = &adap->pdev->dev;
	int ret;
	char rev;

	rev = t3rev2char(adap);
	if (!rev)
		return 0;

	snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
		 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);

	ret = request_firmware(&tpsram, buf, dev);
	if (ret < 0) {
		dev_err(dev, "could not load TP SRAM: unable to load %s\n",
			buf);
		return ret;
	}

	ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
	if (ret)
		goto release_tpsram;

	ret = t3_set_proto_sram(adap, tpsram->data);
	if (ret == 0)
		dev_info(dev,
			 "successful update of protocol engine "
			 "to %d.%d.%d\n",
			 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	else
		dev_err(dev, "failed update of protocol engine %d.%d.%d\n",
			TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	if (ret)
		dev_err(dev, "loading protocol SRAM failed\n");

release_tpsram:
	release_firmware(tpsram);

	return ret;
}
/**
 *	cxgb_up - enable the adapter
 *	@adapter: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err = 0;
	int must_load;

	if (!(adap->flags & FULL_INIT_DONE)) {
		err = t3_check_fw_version(adap, &must_load);
		if (err == -EINVAL) {
			err = upgrade_fw(adap);
			if (err && must_load)
				goto out;
		}

		err = t3_check_tpsram_version(adap, &must_load);
		if (err == -EINVAL) {
			err = update_tpsram(adap);
			if (err && must_load)
				goto out;
		}

		err = t3_init_hw(adap, 0);
		if (err)
			goto out;

		t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

		err = setup_sge_qsets(adap);
		if (err)
			goto out;

		setup_rss(adap);
		init_napi(adap);
		adap->flags |= FULL_INIT_DONE;
	}

	t3_intr_clear(adap);

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec,
				  t3_async_intr_handler, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		err = request_msix_data_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else if ((err = request_irq(adap->pdev->irq,
				      t3_intr_handler(adap,
						      adap->sge.qs[0].rspq.
						      polling),
				      (adap->flags & USING_MSI) ?
				      0 : IRQF_SHARED,
				      adap->name, adap)))
		goto irq_err;

	enable_all_napi(adap);
	t3_sge_start(adap);
	t3_intr_enable(adap);

	if ((adap->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
		bind_qsets(adap);
	adap->flags |= QUEUES_BOUND;

out:
	return err;
irq_err:
	CH_ERR(adap, "request_irq failed, err %d\n", err);
	goto out;
}
/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
	t3_sge_stop(adapter);
	spin_lock_irq(&adapter->work_lock);	/* sync with PHY intr task */
	t3_intr_disable(adapter);
	spin_unlock_irq(&adapter->work_lock);

	if (adapter->flags & USING_MSIX) {
		int i, n = 0;

		free_irq(adapter->msix_info[0].vec, adapter);
		for_each_port(adapter, i)
		    n += adap2pinfo(adapter, i)->nqsets;

		for (i = 0; i < n; ++i)
			free_irq(adapter->msix_info[i + 1].vec,
				 &adapter->sge.qs[i]);
	} else
		free_irq(adapter->pdev->irq, adapter);

	flush_workqueue(cxgb3_wq);	/* wait for external IRQ handler */
	quiesce_rx(adapter);
}
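/*
 * Schedule the periodic adapter check task.  The interval follows the link
 * polling period when the PHY needs polling, otherwise the statistics
 * update period.
 */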
static void schedule_chk_task(struct adapter *adap)
{
	unsigned int timeo;

	timeo = adap->params.linkpoll_period ?
	    (HZ * adap->params.linkpoll_period) / 10 :
	    adap->params.stats_update_period * HZ;
	if (timeo)
		queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}
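/*
 * Bring up the offload capabilities of the adapter: enable offload mode,
 * activate the offload module, program the port MTU and congestion tables,
 * initialize the SMT, and notify registered ULP clients.
 */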
static int offload_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct t3cdev *tdev = dev2t3cdev(dev);
	int adap_up = adapter->open_device_map & PORT_MASK;
	int err = 0;

	if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	if (!adap_up && (err = cxgb_up(adapter)) < 0)
		return err;

	t3_tp_set_offload_mode(adapter, 1);
	tdev->lldev = adapter->port[0];
	err = cxgb3_offload_activate(adapter);
	if (err)
		goto out;

	init_port_mtus(adapter);
	t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
		     adapter->params.b_wnd,
		     adapter->params.rev == 0 ?
		     adapter->port[0]->mtu : 0xffff);
	init_smt(adapter);

	/* Never mind if the next step fails */
	sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group);

	/* Call back all registered clients */
	cxgb3_add_clients(tdev);

out:
	/* restore them in case the offload module has changed them */
	if (err) {
		t3_tp_set_offload_mode(adapter, 0);
		clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
		cxgb3_set_dummy_ops(tdev);
	}
	return err;
}
static int offload_close(struct t3cdev *tdev)
{
	struct adapter *adapter = tdev2adap(tdev);

	if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	/* Call back all registered clients */
	cxgb3_remove_clients(tdev);

	sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

	tdev->lldev = NULL;
	cxgb3_set_dummy_ops(tdev);
	t3_tp_set_offload_mode(adapter, 0);
	clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

	if (!adapter->open_device_map)
		cxgb_down(adapter);

	cxgb3_offload_deactivate(adapter);
	return 0;
}
static int cxgb_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int other_ports = adapter->open_device_map & PORT_MASK;
	int err;

	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) {
		quiesce_rx(adapter);
		return err;
	}

	set_bit(pi->port_id, &adapter->open_device_map);
	if (is_offload(adapter) && !ofld_disable) {
		err = offload_open(dev);
		if (err)
			printk(KERN_WARNING
			       "Could not initialize offload capabilities\n");
	}

	link_start(dev);
	t3_port_intr_enable(adapter, pi->port_id);
	netif_start_queue(dev);
	if (!other_ports)
		schedule_chk_task(adapter);

	return 0;
}
static int cxgb_close(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	t3_port_intr_disable(adapter, pi->port_id);
	netif_stop_queue(dev);
	pi->phy.ops->power_down(&pi->phy, 1);
	netif_carrier_off(dev);
	t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

	spin_lock(&adapter->work_lock);	/* sync with update task */
	clear_bit(pi->port_id, &adapter->open_device_map);
	spin_unlock(&adapter->work_lock);

	if (!(adapter->open_device_map & PORT_MASK))
		cancel_rearming_delayed_workqueue(cxgb3_wq,
						  &adapter->adap_check_task);

	if (!adapter->open_device_map)
		cxgb_down(adapter);

	return 0;
}
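/*
 * Fill in the standard net_device_stats from the accumulated MAC
 * statistics, which are updated under stats_lock to synchronize with the
 * periodic accumulation task.
 */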
static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct net_device_stats *ns = &pi->netstats;
	const struct mac_stats *pstats;

	spin_lock(&adapter->stats_lock);
	pstats = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes = pstats->tx_octets;
	ns->tx_packets = pstats->tx_frames;
	ns->rx_bytes = pstats->rx_octets;
	ns->rx_packets = pstats->rx_frames;
	ns->multicast = pstats->rx_mcast_frames;

	ns->tx_errors = pstats->tx_underrun;
	ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
	    pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
	    pstats->rx_fifo_ovfl;

	/* detailed rx_errors */
	ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
	ns->rx_over_errors = 0;
	ns->rx_crc_errors = pstats->rx_fcs_errs;
	ns->rx_frame_errors = pstats->rx_symbol_errs;
	ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
	ns->rx_missed_errors = pstats->rx_cong_drops;

	/* detailed tx_errors */
	ns->tx_aborted_errors = 0;
	ns->tx_carrier_errors = 0;
	ns->tx_fifo_errors = pstats->tx_underrun;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors = 0;
	return ns;
}
static u32 get_msglevel(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	return adapter->msg_enable;
}
static void set_msglevel(struct net_device *dev, u32 val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	adapter->msg_enable = val;
}
static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK         ",
	"TxFramesOK         ",
	"TxMulticastFramesOK",
	"TxBroadcastFramesOK",
	"TxPauseFrames      ",
	"TxUnderrun         ",
	"TxExtUnderrun      ",

	"TxFrames64         ",
	"TxFrames65To127    ",
	"TxFrames128To255   ",
	"TxFrames256To511   ",
	"TxFrames512To1023  ",
	"TxFrames1024To1518 ",
	"TxFrames1519ToMax  ",

	"RxOctetsOK         ",
	"RxFramesOK         ",
	"RxMulticastFramesOK",
	"RxBroadcastFramesOK",
	"RxPauseFrames      ",
	"RxFCSErrors        ",
	"RxSymbolErrors     ",
	"RxShortErrors      ",
	"RxJabberErrors     ",
	"RxLengthErrors     ",
	"RxFIFOoverflow     ",

	"RxFrames64         ",
	"RxFrames65To127    ",
	"RxFrames128To255   ",
	"RxFrames256To511   ",
	"RxFrames512To1023  ",
	"RxFrames1024To1518 ",
	"RxFrames1519ToMax  ",

	"PhyFIFOErrors      ",
	"TSO                ",
	"VLANextractions    ",
	"VLANinsertions     ",
	"TxCsumOffload      ",
	"RxCsumGood         ",
	"RxDrops            ",

	"CheckTXEnToggled   ",
	"CheckResets        ",
};
static int get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(stats_strings);
	default:
		return -EOPNOTSUPP;
	}
}
#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
	return T3_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
	return EEPROMSIZE;
}
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 fw_vers = 0;
	u32 tp_vers = 0;

	t3_get_fw_version(adapter, &fw_vers);
	t3_get_tp_version(adapter, &tp_vers);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(adapter->pdev));
	if (!fw_vers)
		strcpy(info->fw_version, "N/A");
	else {
		snprintf(info->fw_version, sizeof(info->fw_version),
			 "%s %u.%u.%u TP %u.%u.%u",
			 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
			 G_FW_VERSION_MAJOR(fw_vers),
			 G_FW_VERSION_MINOR(fw_vers),
			 G_FW_VERSION_MICRO(fw_vers),
			 G_TP_VERSION_MAJOR(tp_vers),
			 G_TP_VERSION_MINOR(tp_vers),
			 G_TP_VERSION_MICRO(tp_vers));
	}
}
static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}
static unsigned long collect_sge_port_stats(struct adapter *adapter,
					    struct port_info *p, int idx)
{
	int i;
	unsigned long tot = 0;

	for (i = 0; i < p->nqsets; ++i)
		tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
	return tot;
}
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	const struct mac_stats *s;

	spin_lock(&adapter->stats_lock);
	s = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	*data++ = s->tx_octets;
	*data++ = s->tx_frames;
	*data++ = s->tx_mcast_frames;
	*data++ = s->tx_bcast_frames;
	*data++ = s->tx_pause;
	*data++ = s->tx_underrun;
	*data++ = s->tx_fifo_urun;

	*data++ = s->tx_frames_64;
	*data++ = s->tx_frames_65_127;
	*data++ = s->tx_frames_128_255;
	*data++ = s->tx_frames_256_511;
	*data++ = s->tx_frames_512_1023;
	*data++ = s->tx_frames_1024_1518;
	*data++ = s->tx_frames_1519_max;

	*data++ = s->rx_octets;
	*data++ = s->rx_frames;
	*data++ = s->rx_mcast_frames;
	*data++ = s->rx_bcast_frames;
	*data++ = s->rx_pause;
	*data++ = s->rx_fcs_errs;
	*data++ = s->rx_symbol_errs;
	*data++ = s->rx_short;
	*data++ = s->rx_jabber;
	*data++ = s->rx_too_long;
	*data++ = s->rx_fifo_ovfl;

	*data++ = s->rx_frames_64;
	*data++ = s->rx_frames_65_127;
	*data++ = s->rx_frames_128_255;
	*data++ = s->rx_frames_256_511;
	*data++ = s->rx_frames_512_1023;
	*data++ = s->rx_frames_1024_1518;
	*data++ = s->rx_frames_1519_max;

	*data++ = pi->phy.fifo_errors;

	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
	*data++ = s->rx_cong_drops;

	*data++ = s->num_toggled;
	*data++ = s->num_resets;
}
static inline void reg_block_dump(struct adapter *ap, void *buf,
				  unsigned int start, unsigned int end)
{
	u32 *p = buf + start;

	for (; start <= end; start += sizeof(u32))
		*p++ = t3_read_reg(ap, start);
}
static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *ap = pi->adapter;

	/*
	 * Version scheme:
	 * bits 0..9: chip version
	 * bits 10..15: chip revision
	 * bit 31: set for PCIe cards
	 */
	regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);

	/*
	 * We skip the MAC statistics registers because they are clear-on-read.
	 * Also reading multi-register stats would need to synchronize with the
	 * periodic mac stats accumulation.  Hard to justify the complexity.
	 */
	memset(buf, 0, T3_REGMAP_SIZE);
	reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
	reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
	reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
	reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
	reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
	reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
		       XGM_REG(A_XGM_SERDES_STAT3, 1));
	reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
		       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}
static int restart_autoneg(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;
	if (p->link_config.autoneg != AUTONEG_ENABLE)
		return -EINVAL;
	p->phy.ops->autoneg_restart(&p->phy);
	return 0;
}
static int cxgb3_phys_id(struct net_device *dev, u32 data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int i;

	if (data == 0)
		data = 2;

	for (i = 0; i < data * 2; i++) {
		t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
				 (i & 1) ? F_GPIO0_OUT_VAL : 0);
		if (msleep_interruptible(500))
			break;
	}
	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
			 F_GPIO0_OUT_VAL);
	return 0;
}
static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port_info *p = netdev_priv(dev);

	cmd->supported = p->link_config.supported;
	cmd->advertising = p->link_config.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = p->link_config.speed;
		cmd->duplex = p->link_config.duplex;
	} else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}

	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
	cmd->phy_address = p->phy.addr;
	cmd->transceiver = XCVR_EXTERNAL;
	cmd->autoneg = p->link_config.autoneg;
	return 0;
}
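/*
 * Translate an ethtool speed/duplex pair into the corresponding
 * SUPPORTED_* capability bit, or 0 if the combination is not supported.
 */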
static int speed_duplex_to_caps(int speed, int duplex)
{
	int cap = 0;

	switch (speed) {
	case SPEED_10:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10baseT_Full;
		else
			cap = SUPPORTED_10baseT_Half;
		break;
	case SPEED_100:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_100baseT_Full;
		else
			cap = SUPPORTED_100baseT_Half;
		break;
	case SPEED_1000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_1000baseT_Full;
		else
			cap = SUPPORTED_1000baseT_Half;
		break;
	case SPEED_10000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10000baseT_Full;
	}
	return cap;
}
#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
		      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
		      ADVERTISED_10000baseT_Full)
static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (!(lc->supported & SUPPORTED_Autoneg))
		return -EOPNOTSUPP;	/* can't change speed/duplex */

	if (cmd->autoneg == AUTONEG_DISABLE) {
		int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

		if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
			return -EINVAL;
		lc->requested_speed = cmd->speed;
		lc->requested_duplex = cmd->duplex;
		lc->advertising = 0;
	} else {
		cmd->advertising &= ADVERTISED_MASK;
		cmd->advertising &= lc->supported;
		if (!cmd->advertising)
			return -EINVAL;
		lc->requested_speed = SPEED_INVALID;
		lc->requested_duplex = DUPLEX_INVALID;
		lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
	}
	lc->autoneg = cmd->autoneg;
	if (netif_running(dev))
		t3_link_start(&p->phy, &p->mac, lc);
	return 0;
}
static void get_pauseparam(struct net_device *dev,
			   struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);

	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}
static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & SUPPORTED_Autoneg)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (lc->autoneg == AUTONEG_ENABLE) {
		if (netif_running(dev))
			t3_link_start(&p->phy, &p->mac, lc);
	} else {
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
		if (netif_running(dev))
			t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
	}
	return 0;
}
static u32 get_rx_csum(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	return p->rx_csum_offload;
}
static int set_rx_csum(struct net_device *dev, u32 data)
{
	struct port_info *p = netdev_priv(dev);

	p->rx_csum_offload = data;
	return 0;
}
static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_mini_max_pending = 0;
	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
	e->tx_max_pending = MAX_TXQ_ENTRIES;

	e->rx_pending = q->fl_size;
	e->rx_mini_pending = q->rspq_size;
	e->rx_jumbo_pending = q->jumbo_size;
	e->tx_pending = q->txq_size[0];
}
static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *q;
	int i;

	if (e->rx_pending > MAX_RX_BUFFERS ||
	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
	    e->tx_pending > MAX_TXQ_ENTRIES ||
	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES ||
	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
	    e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	q = &adapter->params.sge.qset[pi->first_qset];
	for (i = 0; i < pi->nqsets; ++i, ++q) {
		q->rspq_size = e->rx_mini_pending;
		q->fl_size = e->rx_pending;
		q->jumbo_size = e->rx_jumbo_pending;
		q->txq_size[0] = e->tx_pending;
		q->txq_size[1] = e->tx_pending;
		q->txq_size[2] = e->tx_pending;
	}
	return 0;
}
static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *qsp = &adapter->params.sge.qset[0];
	struct sge_qset *qs = &adapter->sge.qs[0];

	if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
		return -EINVAL;

	qsp->coalesce_usecs = c->rx_coalesce_usecs;
	t3_update_qset_coalesce(qs, qsp);
	return 0;
}
static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *q = adapter->params.sge.qset;

	c->rx_coalesce_usecs = q->coalesce_usecs;
	return 0;
}
static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int i, err = 0;

	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	e->magic = EEPROM_MAGIC;
	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
		err = t3_seeprom_read(adapter, i, (u32 *) & buf[i]);

	if (!err)
		memcpy(data, buf + e->offset, e->len);
	kfree(buf);
	return err;
}
static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		      u8 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 aligned_offset, aligned_len, *p;
	u8 *buf;
	int err = 0;

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
		buf = kmalloc(aligned_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		err = t3_seeprom_read(adapter, aligned_offset, (u32 *) buf);
		if (!err && aligned_len > 4)
			err = t3_seeprom_read(adapter,
					      aligned_offset + aligned_len - 4,
					      (u32 *) & buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
	} else
		buf = data;

	err = t3_seeprom_wp(adapter, 0);
	if (err)
		goto out;

	for (p = (u32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = t3_seeprom_write(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t3_seeprom_wp(adapter, 1);
out:
	if (buf != data)
		kfree(buf);
	return err;
}
static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	wol->supported = 0;
	wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}
static const struct ethtool_ops cxgb_ethtool_ops = {
	.get_settings = get_settings,
	.set_settings = set_settings,
	.get_drvinfo = get_drvinfo,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_ringparam = get_sge_param,
	.set_ringparam = set_sge_param,
	.get_coalesce = get_coalesce,
	.set_coalesce = set_coalesce,
	.get_eeprom_len = get_eeprom_len,
	.get_eeprom = get_eeprom,
	.set_eeprom = set_eeprom,
	.get_pauseparam = get_pauseparam,
	.set_pauseparam = set_pauseparam,
	.get_rx_csum = get_rx_csum,
	.set_rx_csum = set_rx_csum,
	.set_tx_csum = ethtool_op_set_tx_csum,
	.set_sg = ethtool_op_set_sg,
	.get_link = ethtool_op_get_link,
	.get_strings = get_strings,
	.phys_id = cxgb3_phys_id,
	.nway_reset = restart_autoneg,
	.get_sset_count = get_sset_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len = get_regs_len,
	.get_regs = get_regs,
	.get_wol = get_wol,
	.set_tso = ethtool_op_set_tso,
};
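/*
 * Range check for the extension ioctls below: negative values mean "leave
 * the current setting unchanged" and are accepted, otherwise the value
 * must lie within [lo, hi].
 */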
static int in_range(int val, int lo, int hi)
{
	return val < 0 || (val <= hi && val >= lo);
}
static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 cmd;
	int ret;

	if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
		return -EFAULT;

	switch (cmd) {
:{
1675 struct qset_params
*q
;
1676 struct ch_qset_params t
;
1678 if (!capable(CAP_NET_ADMIN
))
1680 if (copy_from_user(&t
, useraddr
, sizeof(t
)))
1682 if (t
.qset_idx
>= SGE_QSETS
)
1684 if (!in_range(t
.intr_lat
, 0, M_NEWTIMER
) ||
1685 !in_range(t
.cong_thres
, 0, 255) ||
1686 !in_range(t
.txq_size
[0], MIN_TXQ_ENTRIES
,
1688 !in_range(t
.txq_size
[1], MIN_TXQ_ENTRIES
,
1690 !in_range(t
.txq_size
[2], MIN_CTRL_TXQ_ENTRIES
,
1691 MAX_CTRL_TXQ_ENTRIES
) ||
1692 !in_range(t
.fl_size
[0], MIN_FL_ENTRIES
,
1694 || !in_range(t
.fl_size
[1], MIN_FL_ENTRIES
,
1695 MAX_RX_JUMBO_BUFFERS
)
1696 || !in_range(t
.rspq_size
, MIN_RSPQ_ENTRIES
,
1699 if ((adapter
->flags
& FULL_INIT_DONE
) &&
1700 (t
.rspq_size
>= 0 || t
.fl_size
[0] >= 0 ||
1701 t
.fl_size
[1] >= 0 || t
.txq_size
[0] >= 0 ||
1702 t
.txq_size
[1] >= 0 || t
.txq_size
[2] >= 0 ||
1703 t
.polling
>= 0 || t
.cong_thres
>= 0))
1706 q
= &adapter
->params
.sge
.qset
[t
.qset_idx
];
1708 if (t
.rspq_size
>= 0)
1709 q
->rspq_size
= t
.rspq_size
;
1710 if (t
.fl_size
[0] >= 0)
1711 q
->fl_size
= t
.fl_size
[0];
1712 if (t
.fl_size
[1] >= 0)
1713 q
->jumbo_size
= t
.fl_size
[1];
1714 if (t
.txq_size
[0] >= 0)
1715 q
->txq_size
[0] = t
.txq_size
[0];
1716 if (t
.txq_size
[1] >= 0)
1717 q
->txq_size
[1] = t
.txq_size
[1];
1718 if (t
.txq_size
[2] >= 0)
1719 q
->txq_size
[2] = t
.txq_size
[2];
1720 if (t
.cong_thres
>= 0)
1721 q
->cong_thres
= t
.cong_thres
;
1722 if (t
.intr_lat
>= 0) {
1723 struct sge_qset
*qs
=
1724 &adapter
->sge
.qs
[t
.qset_idx
];
1726 q
->coalesce_usecs
= t
.intr_lat
;
1727 t3_update_qset_coalesce(qs
, q
);
1729 if (t
.polling
>= 0) {
1730 if (adapter
->flags
& USING_MSIX
)
1731 q
->polling
= t
.polling
;
1733 /* No polling with INTx for T3A */
1734 if (adapter
->params
.rev
== 0 &&
1735 !(adapter
->flags
& USING_MSI
))
1738 for (i
= 0; i
< SGE_QSETS
; i
++) {
1739 q
= &adapter
->params
.sge
.
1741 q
->polling
= t
.polling
;
	case CHELSIO_GET_QSET_PARAMS:{
		struct qset_params *q;
		struct ch_qset_params t;

		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if (t.qset_idx >= SGE_QSETS)
			return -EINVAL;

		q = &adapter->params.sge.qset[t.qset_idx];
		t.rspq_size = q->rspq_size;
		t.txq_size[0] = q->txq_size[0];
		t.txq_size[1] = q->txq_size[1];
		t.txq_size[2] = q->txq_size[2];
		t.fl_size[0] = q->fl_size;
		t.fl_size[1] = q->jumbo_size;
		t.polling = q->polling;
		t.intr_lat = q->coalesce_usecs;
		t.cong_thres = q->cong_thres;

		if (copy_to_user(useraddr, &t, sizeof(t)))
			return -EFAULT;
		break;
	}
	case CHELSIO_SET_QSET_NUM:{
		struct ch_reg edata;
		unsigned int i, first_qset = 0, other_qsets = 0;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (adapter->flags & FULL_INIT_DONE)
			return -EBUSY;
		if (copy_from_user(&edata, useraddr, sizeof(edata)))
			return -EFAULT;
		if (edata.val < 1 ||
		    (edata.val > 1 && !(adapter->flags & USING_MSIX)))
			return -EINVAL;

		for_each_port(adapter, i)
			if (adapter->port[i] && adapter->port[i] != dev)
				other_qsets += adap2pinfo(adapter, i)->nqsets;

		if (edata.val + other_qsets > SGE_QSETS)
			return -EINVAL;

		pi->nqsets = edata.val;

		for_each_port(adapter, i)
			if (adapter->port[i]) {
				pi = adap2pinfo(adapter, i);
				pi->first_qset = first_qset;
				first_qset += pi->nqsets;
			}
		break;
	}
	case CHELSIO_GET_QSET_NUM:{
		struct ch_reg edata;

		edata.cmd = CHELSIO_GET_QSET_NUM;
		edata.val = pi->nqsets;
		if (copy_to_user(useraddr, &edata, sizeof(edata)))
			return -EFAULT;
		break;
	}
	case CHELSIO_LOAD_FW:{
		u8 *fw_data;
		struct ch_mem_range t;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;

		fw_data = kmalloc(t.len, GFP_KERNEL);
		if (!fw_data)
			return -ENOMEM;

		if (copy_from_user
		    (fw_data, useraddr + sizeof(t), t.len)) {
			kfree(fw_data);
			return -EFAULT;
		}

		ret = t3_load_fw(adapter, fw_data, t.len);
		kfree(fw_data);
		if (ret)
			return ret;
		break;
	}
	case CHELSIO_SETMTUTAB:{
		struct ch_mtus m;
		int i;

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (offload_running(adapter))
			return -EBUSY;
		if (copy_from_user(&m, useraddr, sizeof(m)))
			return -EFAULT;
		if (m.nmtus != NMTUS)
			return -EINVAL;
		if (m.mtus[0] < 81)	/* accommodate SACK */
			return -EINVAL;

		/* MTUs must be in ascending order */
		for (i = 1; i < NMTUS; ++i)
			if (m.mtus[i] < m.mtus[i - 1])
				return -EINVAL;

		memcpy(adapter->params.mtus, m.mtus,
		       sizeof(adapter->params.mtus));
		break;
	}
	case CHELSIO_GET_PM:{
		struct tp_params *p = &adapter->params.tp;
		struct ch_pm m = {.cmd = CHELSIO_GET_PM };

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		m.tx_pg_sz = p->tx_pg_size;
		m.tx_num_pg = p->tx_num_pgs;
		m.rx_pg_sz = p->rx_pg_size;
		m.rx_num_pg = p->rx_num_pgs;
		m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
		if (copy_to_user(useraddr, &m, sizeof(m)))
			return -EFAULT;
		break;
	}
	case CHELSIO_SET_PM:{
		struct ch_pm m;
		struct tp_params *p = &adapter->params.tp;

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (adapter->flags & FULL_INIT_DONE)
			return -EBUSY;
		if (copy_from_user(&m, useraddr, sizeof(m)))
			return -EFAULT;
		if (!is_power_of_2(m.rx_pg_sz) ||
		    !is_power_of_2(m.tx_pg_sz))
			return -EINVAL;	/* not power of 2 */
		if (!(m.rx_pg_sz & 0x14000))
			return -EINVAL;	/* not 16KB or 64KB */
		if (!(m.tx_pg_sz & 0x1554000))
			return -EINVAL;
		if (m.tx_num_pg == -1)
			m.tx_num_pg = p->tx_num_pgs;
		if (m.rx_num_pg == -1)
			m.rx_num_pg = p->rx_num_pgs;
		if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
			return -EINVAL;
		if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
		    m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
			return -EINVAL;
		p->rx_pg_size = m.rx_pg_sz;
		p->tx_pg_size = m.tx_pg_sz;
		p->rx_num_pgs = m.rx_num_pg;
		p->tx_num_pgs = m.tx_num_pg;
		break;
	}
	case CHELSIO_GET_MEM:{
		struct ch_mem_range t;
		struct mc7 *mem;
		u64 buf[32];

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!(adapter->flags & FULL_INIT_DONE))
			return -EIO;	/* need the memory controllers */
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if ((t.addr & 7) || (t.len & 7))
			return -EINVAL;
		if (t.mem_id == MEM_CM)
			mem = &adapter->cm;
		else if (t.mem_id == MEM_PMRX)
			mem = &adapter->pmrx;
		else if (t.mem_id == MEM_PMTX)
			mem = &adapter->pmtx;
		else
			return -EINVAL;

		/*
		 * Version scheme:
		 * bits 0..9: chip version
		 * bits 10..15: chip revision
		 */
		t.version = 3 | (adapter->params.rev << 10);
		if (copy_to_user(useraddr, &t, sizeof(t)))
			return -EFAULT;

		/*
		 * Read 256 bytes at a time as len can be large and we don't
		 * want to use huge intermediate buffers.
		 */
		useraddr += sizeof(t);	/* advance to start of buffer */
		while (t.len) {
			unsigned int chunk =
			    min_t(unsigned int, t.len, sizeof(buf));

			ret =
			    t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
					   buf);
			if (ret)
				return ret;
			if (copy_to_user(useraddr, buf, chunk))
				return -EFAULT;
			useraddr += chunk;
			t.addr += chunk;
			t.len -= chunk;
		}
		break;
	}
	case CHELSIO_SET_TRACE_FILTER:{
		struct ch_trace t;
		const struct trace_params *tp;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (!offload_running(adapter))
			return -EAGAIN;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;

		tp = (const struct trace_params *)&t.sip;
		if (t.config_tx)
			t3_config_trace_filter(adapter, tp, 0,
					       t.invert_match,
					       t.trace_tx);
		if (t.config_rx)
			t3_config_trace_filter(adapter, tp, 1,
					       t.invert_match,
					       t.trace_rx);
		break;
	}
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct mii_ioctl_data *data = if_mii(req);
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret, mmd;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = pi->phy.addr;
		/* FALLTHRU */
	case SIOCGMIIREG:{
		u32 val;
		struct cphy *phy = &pi->phy;

		if (!phy->mdio_read)
			return -EOPNOTSUPP;
		if (is_10G(adapter)) {
			mmd = data->phy_id >> 8;
			if (!mmd)
				mmd = MDIO_DEV_PCS;
			else if (mmd > MDIO_DEV_XGXS)
				return -EINVAL;

			ret =
			    phy->mdio_read(adapter, data->phy_id & 0x1f,
					   mmd, data->reg_num, &val);
		} else
			ret =
			    phy->mdio_read(adapter, data->phy_id & 0x1f,
					   0, data->reg_num & 0x1f,
					   &val);
		if (!ret)
			data->val_out = val;
		break;
	}
	case SIOCSMIIREG:{
		struct cphy *phy = &pi->phy;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (!phy->mdio_write)
			return -EOPNOTSUPP;
		if (is_10G(adapter)) {
			mmd = data->phy_id >> 8;
			if (!mmd)
				mmd = MDIO_DEV_PCS;
			else if (mmd > MDIO_DEV_XGXS)
				return -EINVAL;

			ret =
			    phy->mdio_write(adapter,
					    data->phy_id & 0x1f, mmd,
					    data->reg_num,
					    data->val_in);
		} else
			ret =
			    phy->mdio_write(adapter,
					    data->phy_id & 0x1f, 0,
					    data->reg_num & 0x1f,
					    data->val_in);
		break;
	}
	case SIOCCHIOCTL:
		return cxgb_extension_ioctl(dev, req->ifr_data);
	default:
		return -EOPNOTSUPP;
	}
	return ret;
}
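/*
 * Change the MTU of a port.  The MAC MTU is reprogrammed and the per-port
 * hardware MTU table rewritten; on rev 0 parts the offload MTU table is
 * reloaded as well, since it is capped by the port MTU there.
 */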
static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;

	if (new_mtu < 81)	/* accommodate SACK */
		return -EINVAL;
	if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
		return ret;
	dev->mtu = new_mtu;
	init_port_mtus(adapter);
	if (adapter->params.rev == 0 && offload_running(adapter))
		t3_load_mtus(adapter, adapter->params.mtus,
			     adapter->params.a_wnd, adapter->params.b_wnd,
			     adapter->port[0]->mtu);
	return 0;
}
static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
	if (offload_running(adapter))
		write_smt_entry(adapter, pi->port_id);
	return 0;
}
/**
 * t3_synchronize_rx - wait for current Rx processing on a port to complete
 * @adap: the adapter
 * @p: the port
 *
 * Ensures that current Rx processing on any of the queues associated with
 * the given port completes before returning.  We do this by acquiring and
 * releasing the locks of the response queues associated with the port.
 */
static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
{
	int i;

	for (i = 0; i < p->nqsets; i++) {
		struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;

		spin_lock_irq(&q->lock);
		spin_unlock_irq(&q->lock);
	}
}
static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	pi->vlan_grp = grp;
	if (adapter->params.rev > 0)
		t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
	else {
		/* single control for all ports */
		unsigned int i, have_vlans = 0;
		for_each_port(adapter, i)
		    have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;

		t3_set_vlan_accel(adapter, 1, have_vlans);
	}
	t3_synchronize_rx(adapter, pi);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int qidx;

	for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
		struct sge_qset *qs = &adapter->sge.qs[qidx];
		void *source;

		if (adapter->flags & USING_MSIX)
			source = qs;
		else
			source = adapter;

		t3_intr_handler(adapter, qs->rspq.polling) (0, source);
	}
}
#endif
/*
 * Periodic accumulation of MAC statistics.
 */
static void mac_stats_update(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);

		if (netif_running(dev)) {
			spin_lock(&adapter->stats_lock);
			t3_mac_update_stats(&p->mac);
			spin_unlock(&adapter->stats_lock);
		}
	}
}
static void check_link_status(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);

		if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev))
			t3_link_changed(adapter, i);
	}
}
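/*
 * T3B2 MAC watchdog: periodically check each running port's MAC and, if
 * the watchdog reports a stuck state, either count the TX-enable toggle it
 * performed or fully reinitialize the MAC.  Runs under rtnl to synchronize
 * with ifdown.
 */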
static void check_t3b2_mac(struct adapter *adapter)
{
	int i;

	if (!rtnl_trylock())	/* synchronize with ifdown */
		return;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);
		int status;

		if (!netif_running(dev))
			continue;

		status = 0;
		if (netif_running(dev) && netif_carrier_ok(dev))
			status = t3b2_mac_watchdog_task(&p->mac);
		if (status == 1)
			p->mac.stats.num_toggled++;
		else if (status == 2) {
			struct cmac *mac = &p->mac;

			t3_mac_set_mtu(mac, dev->mtu);
			t3_mac_set_address(mac, 0, dev->dev_addr);
			cxgb_set_rxmode(dev);
			t3_link_start(&p->phy, mac, &p->link_config);
			t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
			t3_port_intr_enable(adapter, p->port_id);
			p->mac.stats.num_resets++;
		}
	}
	rtnl_unlock();
}
static void t3_adap_check_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       adap_check_task.work);
	const struct adapter_params *p = &adapter->params;

	adapter->check_task_cnt++;

	/* Check link status for PHYs without interrupts */
	if (p->linkpoll_period)
		check_link_status(adapter);

	/* Accumulate MAC stats if needed */
	if (!p->linkpoll_period ||
	    (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
	    p->stats_update_period) {
		mac_stats_update(adapter);
		adapter->check_task_cnt = 0;
	}

	if (p->rev == T3_REV_B2)
		check_t3b2_mac(adapter);

	/* Schedule the next check update if any port is active. */
	spin_lock(&adapter->work_lock);
	if (adapter->open_device_map & PORT_MASK)
		schedule_chk_task(adapter);
	spin_unlock(&adapter->work_lock);
}
/*
 * Processes external (PHY) interrupts in process context.
 */
static void ext_intr_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       ext_intr_handler_task);

	t3_phy_intr_handler(adapter);

	/* Now reenable external interrupts */
	spin_lock_irq(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask |= F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
	}
	spin_unlock_irq(&adapter->work_lock);
}
/*
 * Interrupt-context handler for external (PHY) interrupts.
 */
void t3_os_ext_intr_handler(struct adapter *adapter)
{
	/*
	 * Schedule a task to handle external interrupts as they may be slow
	 * and we use a mutex to protect MDIO registers.  We disable PHY
	 * interrupts in the meantime and let the task reenable them when
	 * it's done.
	 */
	spin_lock(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask &= ~F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
		queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
	}
	spin_unlock(&adapter->work_lock);
}
void t3_fatal_err(struct adapter *adapter)
{
	unsigned int fw_status[4];

	if (adapter->flags & FULL_INIT_DONE) {
		t3_sge_stop(adapter);
		t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
		t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
		t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
		t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
		t3_intr_disable(adapter);
	}
	CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
	if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
		CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
			 fw_status[0], fw_status[1],
			 fw_status[2], fw_status[3]);
}
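/*
 * Try to switch the adapter to MSI-X, requesting one vector per queue set
 * plus one for slow-path events.  A positive return from pci_enable_msix
 * means too few vectors were available and MSI-X is not used.
 */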
static int __devinit cxgb_enable_msix(struct adapter *adap)
{
	struct msix_entry entries[SGE_QSETS + 1];
	int i, err;

	for (i = 0; i < ARRAY_SIZE(entries); ++i)
		entries[i].entry = i;

	err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
	if (!err) {
		for (i = 0; i < ARRAY_SIZE(entries); ++i)
			adap->msix_info[i].vec = entries[i].vector;
	} else if (err > 0)
		dev_info(&adap->pdev->dev,
			 "only %d MSI-X vectors left, not using MSI-X\n", err);
	return err;
}
static void __devinit print_port_info(struct adapter *adap,
				      const struct adapter_info *ai)
{
	static const char *pci_variant[] = {
		"PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
	};

	int i;
	char buf[80];

	if (is_pcie(adap))
		snprintf(buf, sizeof(buf), "%s x%d",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.width);
	else
		snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.speed, adap->params.pci.width);

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		const struct port_info *pi = netdev_priv(dev);

		if (!test_bit(i, &adap->registered_device_map))
			continue;
		printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
		       dev->name, ai->desc, pi->port_type->desc,
		       is_offload(adap) ? "R" : "", adap->params.rev, buf,
		       (adap->flags & USING_MSIX) ? " MSI-X" :
		       (adap->flags & USING_MSI) ? " MSI" : "");
		if (adap->name == dev->name && adap->params.vpd.mclk)
			printk(KERN_INFO
			       "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
			       adap->name, t3_mc7_size(&adap->cm) >> 20,
			       t3_mc7_size(&adap->pmtx) >> 20,
			       t3_mc7_size(&adap->pmrx) >> 20,
			       adap->params.vpd.sn);
	}
}
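
/*
 * PCI probe routine: brings up one adapter end to end, covering BAR 0
 * mapping, DMA configuration, per-port net_device setup, hardware
 * preparation, netdev registration, and interrupt-mode selection.
 */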
static int __devinit init_one(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	static int version_printed;

	int i, err, pci_using_dac = 0;
	unsigned long mmio_start, mmio_len;
	const struct adapter_info *ai;
	struct adapter *adapter = NULL;
	struct port_info *pi;

	if (!version_printed) {
		printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
		++version_printed;
	}

	if (!cxgb3_wq) {
		cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
		if (!cxgb3_wq) {
			printk(KERN_ERR DRV_NAME
			       ": cannot initialize work queue\n");
			return -ENOMEM;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}
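
	/*
	 * Prefer 64-bit DMA and remember the choice in pci_using_dac so the
	 * ports can advertise NETIF_F_HIGHDMA below; otherwise fall back to
	 * a 32-bit mask, and fail the probe if even that is unavailable.
	 */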
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_disable_device;
		}
	} else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
		dev_err(&pdev->dev, "no usable DMA configuration\n");
		goto out_disable_device;
	}

	pci_set_master(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	ai = t3_get_adapter_info(ent->driver_data);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_disable_device;
	}

	adapter->regs = ioremap_nocache(mmio_start, mmio_len);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->pdev = pdev;
	adapter->name = pci_name(pdev);
	adapter->msg_enable = dflt_msg_enable;
	adapter->mmio_len = mmio_len;

	mutex_init(&adapter->mdio_lock);
	spin_lock_init(&adapter->work_lock);
	spin_lock_init(&adapter->stats_lock);

	INIT_LIST_HEAD(&adapter->adapter_list);
	INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
	INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
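
	/*
	 * Allocate a net_device for each port and give it its share of the
	 * shared MMIO range, its feature flags, and its entry points before
	 * the hardware itself is prepared.
	 */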
	for (i = 0; i < ai->nports; ++i) {
		struct net_device *netdev;

		netdev = alloc_etherdev(sizeof(struct port_info));
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->rx_csum_offload = 1;
		pi->nqsets = 1;
		pi->first_qset = i;
		pi->activity = 0;
		pi->port_id = i;
		netif_carrier_off(netdev);
		netdev->irq = pdev->irq;
		netdev->mem_start = mmio_start;
		netdev->mem_end = mmio_start + mmio_len - 1;
		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
		netdev->features |= NETIF_F_LLTX;
		if (pci_using_dac)
			netdev->features |= NETIF_F_HIGHDMA;

		netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
		netdev->vlan_rx_register = vlan_rx_register;

		netdev->open = cxgb_open;
		netdev->stop = cxgb_close;
		netdev->hard_start_xmit = t3_eth_xmit;
		netdev->get_stats = cxgb_get_stats;
		netdev->set_multicast_list = cxgb_set_rxmode;
		netdev->do_ioctl = cxgb_ioctl;
		netdev->change_mtu = cxgb_change_mtu;
		netdev->set_mac_address = cxgb_set_mac_addr;
#ifdef CONFIG_NET_POLL_CONTROLLER
		netdev->poll_controller = cxgb_netpoll;
#endif

		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
	}

	pci_set_drvdata(pdev, adapter);
	if (t3_prep_adapter(adapter, ai, 1) < 0) {
		err = -ENODEV;
		goto out_free_dev;
	}

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		err = register_netdev(adapter->port[i]);
		if (err)
			dev_warn(&pdev->dev,
				 "cannot register net device %s, skipping\n",
				 adapter->port[i]->name);
		else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i]->name;

			__set_bit(i, &adapter->registered_device_map);
		}
	}
	if (!adapter->registered_device_map) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}

	/* Driver's ready. Reflect it on LEDs */
	t3_led_ready(adapter);

	if (is_offload(adapter)) {
		__set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
		cxgb3_adapter_ofld(adapter);
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && cxgb_enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	err = sysfs_create_group(&adapter->port[0]->dev.kobj,
				 &cxgb3_attr_group);

	print_port_info(adapter, ai);
	return 0;

out_free_dev:
	iounmap(adapter->regs);
	for (i = ai->nports - 1; i >= 0; --i)
		if (adapter->port[i])
			free_netdev(adapter->port[i]);

out_free_adapter:
	kfree(adapter);

out_disable_device:
	pci_disable_device(pdev);
out_release_regions:
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
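
/*
 * PCI remove: tear things down in roughly the reverse order of init_one.
 * Stop the SGE, unregister the net devices, detach offload state, then
 * release interrupts, the register mapping, and the PCI resources.
 */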
static void __devexit remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (adapter) {
		int i;

		t3_sge_stop(adapter);
		sysfs_remove_group(&adapter->port[0]->dev.kobj,
				   &cxgb3_attr_group);

		for_each_port(adapter, i)
			if (test_bit(i, &adapter->registered_device_map))
				unregister_netdev(adapter->port[i]);

		if (is_offload(adapter)) {
			cxgb3_adapter_unofld(adapter);
			if (test_bit(OFFLOAD_DEVMAP_BIT,
				     &adapter->open_device_map))
				offload_close(&adapter->tdev);
		}

		t3_free_sge_resources(adapter);
		cxgb_disable_msi(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i])
				free_netdev(adapter->port[i]);

		iounmap(adapter->regs);
		kfree(adapter);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

static struct pci_driver driver = {
	.name = DRV_NAME,
	.id_table = cxgb3_pci_tbl,
	.probe = init_one,
	.remove = __devexit_p(remove_one),
};
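
/*
 * Module entry points.  Offload state is initialized before the PCI
 * driver registers, presumably so probe can attach offload-capable
 * adapters immediately; the shared workqueue is torn down only after
 * the driver, and with it every adapter, has been unregistered.
 */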
static int __init cxgb3_init_module(void)
{
	int ret;

	cxgb3_offload_init();

	ret = pci_register_driver(&driver);
	return ret;
}

static void __exit cxgb3_cleanup_module(void)
{
	pci_unregister_driver(&driver);
	if (cxgb3_wq)
		destroy_workqueue(cxgb3_wq);
}

module_init(cxgb3_init_module);
module_exit(cxgb3_cleanup_module);