/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <net/bonding.h>
#include <linux/uaccess.h>
#include <linux/crash_dump.h>
#include <net/udp_tunnel.h>
#include "cxgb4_filter.h"
#include "t4_values.h"
#include "t4fw_version.h"
#include "cxgb4_dcb.h"
#include "cxgb4_debugfs.h"
#include "cxgb4_tc_u32.h"
#include "cxgb4_tc_flower.h"
#include "cxgb4_ptp.h"
#include "cxgb4_cudbg.h"
char cxgb4_driver_name[] = KBUILD_MODNAME;

#define DRV_VERSION "2.0.0-ko"
const char cxgb4_driver_version[] = DRV_VERSION;
#define DRV_DESC "Chelsio T4/T5/T6 Network Driver"

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
/* Macros needed to support the PCI Device ID Table ...
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
	static const struct pci_device_id cxgb4_pci_tbl[] = {
#define CXGB4_UNIFIED_PF 0x4

#define CH_PCI_DEVICE_ID_FUNCTION CXGB4_UNIFIED_PF

/* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
 * called for both.
 */
#define CH_PCI_DEVICE_ID_FUNCTION2 0x0

#define CH_PCI_ID_TABLE_ENTRY(devid) \
		{PCI_VDEVICE(CHELSIO, (devid)), CXGB4_UNIFIED_PF}

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
		{ 0, } \
	}

#include "t4_pci_id_tbl.h"
#define FW4_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW6_FNAME "cxgb4/t6fw.bin"
#define FW4_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"
#define FW6_CFNAME "cxgb4/t6-config.txt"
#define PHY_AQ1202_FIRMWARE "cxgb4/aq1202_fw.cld"
#define PHY_BCM84834_FIRMWARE "cxgb4/bcm8483.bin"
#define PHY_AQ1202_DEVICEID 0x4409
#define PHY_BCM84834_DEVICEID 0x4486
MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);
MODULE_FIRMWARE(FW6_FNAME);
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries.  This is a requirement for many architectures which will throw
 * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary.  And it's a major performance issue
 * even on some architectures which allow it like some implementations of the
 * x86 ISA.  However, some architectures don't mind this and for some very
 * edge-case performance sensitive applications (like forwarding large volumes
 * of small packets), setting this DMA offset to 0 will decrease the number of
 * PCI-E Bus transfers enough to measurably affect performance.
 */
static int rx_dma_offset = 2;
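
/* Worked example of the alignment math above: with the default
 * rx_dma_offset of 2, a frame DMA'd to a 4-byte-aligned buffer starts
 * its 14-byte Ethernet header at offset 2, so the IP header begins at
 * offset 2 + 14 = 16, a 4-byte boundary.  With rx_dma_offset = 0 the
 * IP header would start at offset 14 and be misaligned on
 * strict-alignment architectures.
 */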
/* TX Queue select used to determine what algorithm to use for selecting TX
 * queue.  Selects between the kernel-provided function (select_queue=0) and
 * the driver's cxgb_select_queue function (select_queue=1).
 *
 * Default: select_queue=0
 */
static int select_queue;
module_param(select_queue, int, 0644);
MODULE_PARM_DESC(select_queue,
		 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");
static struct dentry *cxgb4_debugfs_root;

LIST_HEAD(adapter_list);
DEFINE_MUTEX(uld_mutex);
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		netdev_info(dev, "link down\n");
	else {
		static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

		const char *s;
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_cfg.speed) {
		default:
			pr_info("%s: unsupported speed: %d\n",
				dev->name, p->link_cfg.speed);
			return;
		}

		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
			    fc[p->link_cfg.fc]);
	}
}
#ifdef CONFIG_CHELSIO_T4_DCB
/* Set up/tear down Data Center Bridging Priority mapping for a net device. */
static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
	int i;

	/* We use a simple mapping of Port TX Queue Index to DCB
	 * Priority when we're enabling DCB.
	 */
	for (i = 0; i < pi->nqsets; i++, txq++) {
		u32 name, value;
		int err;

		name = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			FW_PARAMS_PARAM_X_V(
				FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
			FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
		value = enable ? i : 0xffffffff;

		/* Since we can be called while atomic (from "interrupt
		 * level") we need to issue the Set Parameters Command
		 * without sleeping (timeout < 0).
		 */
		err = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
					    &name, &value,
					    -FW_CMD_MAX_TIMEOUT);
		if (err)
			dev_err(adap->pdev_dev,
				"Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
				enable ? "set" : "unset", pi->port_id, i, -err);
		else
			txq->dcb_prio = enable ? value : 0;
	}
}
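
/* Example of the mapping above: on a port with nqsets = 4, enabling DCB
 * binds TX queues 0..3 to DCB Priorities 0..3 respectively; on disable,
 * every queue's priority is programmed to 0xffffffff (no priority).
 */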
int cxgb4_dcb_enabled(const struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);

	if (!pi->dcb.enabled)
		return 0;

	return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
		(pi->dcb.state == CXGB4_DCB_STATE_HOST));
}
#endif /* CONFIG_CHELSIO_T4_DCB */
void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
	struct net_device *dev = adapter->port[port_id];

	/* Skip changes from disabled ports. */
	if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
		if (link_stat)
			netif_carrier_on(dev);
		else {
#ifdef CONFIG_CHELSIO_T4_DCB
			if (cxgb4_dcb_enabled(dev)) {
				cxgb4_dcb_reset(dev);
				dcb_tx_queue_prio_enable(dev, false);
			}
#endif /* CONFIG_CHELSIO_T4_DCB */
			netif_carrier_off(dev);
		}

		link_report(dev);
	}
}
void t4_os_portmod_changed(struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
	};

	struct net_device *dev = adap->port[port_id];
	struct port_info *pi = netdev_priv(dev);

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		netdev_info(dev, "port module unplugged\n");
	else if (pi->mod_type < ARRAY_SIZE(mod_str))
		netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
		netdev_info(dev, "%s: unsupported port module inserted\n",
			    dev->name);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
		netdev_info(dev, "%s: unknown port module inserted\n",
			    dev->name);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
		netdev_info(dev, "%s: transceiver module error\n", dev->name);
	else
		netdev_info(dev, "%s: unknown module type %d inserted\n",
			    dev->name, pi->mod_type);

	/* If the interface is running, then we'll need any "sticky" Link
	 * Parameters redone with a new Transceiver Module.
	 */
	pi->link_cfg.redo_l1cfg = netif_running(dev);
}
int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");

/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
		 "usecs to sleep while draining the dbfifo");
static inline int cxgb4_set_addr_hash(struct port_info *pi)
{
	struct adapter *adap = pi->adapter;
	u64 vec = 0;
	bool ucast = false;
	struct hash_mac_addr *entry;

	/* Calculate the hash vector for the updated list and program it */
	list_for_each_entry(entry, &adap->mac_hlist, list) {
		ucast |= is_unicast_ether_addr(entry->addr);
		vec |= (1ULL << hash_mac_addr(entry->addr));
	}
	return t4_set_addr_hash(adap, adap->mbox, pi->viid, ucast,
				vec, false);
}
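
/* The hash vector above is a 64-bit bitmap: hash_mac_addr() (see
 * cxgb4.h) folds a 48-bit MAC address down to a 6-bit index, and the
 * corresponding bit in @vec is set.  For example, two addresses hashing
 * to 3 and 17 yield vec = (1ULL << 3) | (1ULL << 17); any incoming
 * address whose hash hits a set bit is accepted, so hash collisions can
 * admit some unwanted addresses -- the usual trade-off of imperfect
 * (hashed) MAC filtering.
 */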
static int cxgb4_mac_sync(struct net_device *netdev, const u8 *mac_addr)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adap = pi->adapter;
	int ret;
	u64 mhash = 0;
	u64 uhash = 0;
	bool free = false;
	bool ucast = is_unicast_ether_addr(mac_addr);
	const u8 *maclist[1] = {mac_addr};
	struct hash_mac_addr *new_entry;

	ret = t4_alloc_mac_filt(adap, adap->mbox, pi->viid, free, 1, maclist,
				NULL, ucast ? &uhash : &mhash, false);
	if (ret < 0)
		goto out;
	/* if hash != 0, then add the addr to hash addr list
	 * so on the end we will calculate the hash for the
	 * list and program it
	 */
	if (uhash || mhash) {
		new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
		if (!new_entry)
			return -ENOMEM;
		ether_addr_copy(new_entry->addr, mac_addr);
		list_add_tail(&new_entry->list, &adap->mac_hlist);
		ret = cxgb4_set_addr_hash(pi);
	}
out:
	return ret < 0 ? ret : 0;
}
static int cxgb4_mac_unsync(struct net_device *netdev, const u8 *mac_addr)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adap = pi->adapter;
	int ret;
	const u8 *maclist[1] = {mac_addr};
	struct hash_mac_addr *entry, *tmp;

	/* If the MAC address to be removed is in the hash addr
	 * list, delete it from the list and update hash vector
	 */
	list_for_each_entry_safe(entry, tmp, &adap->mac_hlist, list) {
		if (ether_addr_equal(entry->addr, mac_addr)) {
			list_del(&entry->list);
			kfree(entry);
			return cxgb4_set_addr_hash(pi);
		}
	}

	ret = t4_free_mac_filt(adap, adap->mbox, pi->viid, 1, maclist, false);
	return ret < 0 ? -EINVAL : 0;
}
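
/* Taken together, cxgb4_mac_sync() and cxgb4_mac_unsync() implement a
 * two-tier filtering scheme: each address is first offered to the
 * exact-match filter table via t4_alloc_mac_filt(); when the firmware
 * reports that it fell back to hash filtering (non-zero uhash/mhash),
 * the address is remembered on adap->mac_hlist so the hash vector can
 * be recomputed, both here on removal and in cxgb4_set_addr_hash().
 */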
/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	__dev_uc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
	__dev_mc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);

	return t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu,
			     (dev->flags & IFF_PROMISC) ? 1 : 0,
			     (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
			     sleep_ok);
}
/**
 *	link_start - enable a port
 *	@dev: the port to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->pf;

	/*
	 * We do not set address filters and promiscuity here, the stack does
	 * that step explicitly.
	 */
	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
			    !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (ret == 0) {
		ret = t4_change_mac(pi->adapter, mb, pi->viid,
				    pi->xact_addr_filt, dev->dev_addr, true,
				    true);
		if (ret >= 0) {
			pi->xact_addr_filt = ret;
			ret = 0;
		}
	}
	if (ret == 0)
		ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan,
				    &pi->link_cfg);
	if (ret == 0)
		ret = t4_enable_pi_params(pi->adapter, mb, pi, true,
					  true, CXGB4_DCB_ENABLED);

	return ret;
}
#ifdef CONFIG_CHELSIO_T4_DCB
/* Handle a Data Center Bridging update message from the firmware. */
static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
{
	int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid));
	struct net_device *dev = adap->port[adap->chan_map[port]];
	int old_dcb_enabled = cxgb4_dcb_enabled(dev);
	int new_dcb_enabled;

	cxgb4_dcb_handle_fw_update(adap, pcmd);
	new_dcb_enabled = cxgb4_dcb_enabled(dev);

	/* If the DCB has become enabled or disabled on the port then we're
	 * going to need to set up/tear down DCB Priority parameters for the
	 * TX Queues associated with the port.
	 */
	if (new_dcb_enabled != old_dcb_enabled)
		dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
}
#endif /* CONFIG_CHELSIO_T4_DCB */
/* Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
			  const struct pkt_gl *gl)
{
	u8 opcode = ((const struct rss_header *)rsp)->opcode;

	rsp++;                                          /* skip RSS header */

	/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
	 */
	if (unlikely(opcode == CPL_FW4_MSG &&
		     ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
		rsp++;
		opcode = ((const struct rss_header *)rsp)->opcode;
		rsp++;
		if (opcode != CPL_SGE_EGR_UPDATE) {
			dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"
				, opcode);
			goto out;
		}
	}

	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
		const struct cpl_sge_egr_update *p = (void *)rsp;
		unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid));
		struct sge_txq *txq;

		txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
		txq->restarts++;
		if (txq->q_type == CXGB4_TXQ_ETH) {
			struct sge_eth_txq *eq;

			eq = container_of(txq, struct sge_eth_txq, q);
			netif_tx_wake_queue(eq->txq);
		} else {
			struct sge_uld_txq *oq;

			oq = container_of(txq, struct sge_uld_txq, q);
			tasklet_schedule(&oq->qresume_tsk);
		}
	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
		const struct cpl_fw6_msg *p = (void *)rsp;

#ifdef CONFIG_CHELSIO_T4_DCB
		const struct fw_port_cmd *pcmd = (const void *)p->data;
		unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid));
		unsigned int action =
			FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16));

		if (cmd == FW_PORT_CMD &&
		    (action == FW_PORT_ACTION_GET_PORT_INFO ||
		     action == FW_PORT_ACTION_GET_PORT_INFO32)) {
			int port = FW_PORT_CMD_PORTID_G(
					be32_to_cpu(pcmd->op_to_portid));
			struct net_device *dev;
			int dcbxdis, state_input;

			dev = q->adap->port[q->adap->chan_map[port]];
			dcbxdis = (action == FW_PORT_ACTION_GET_PORT_INFO
				   ? !!(pcmd->u.info.dcbxdis_pkd & FW_PORT_CMD_DCBXDIS_F)
				   : !!(be32_to_cpu(pcmd->u.info32.lstatus32_to_cbllen32)
					& FW_PORT_CMD_DCBXDIS32_F));
			state_input = (dcbxdis
				       ? CXGB4_DCB_INPUT_FW_DISABLED
				       : CXGB4_DCB_INPUT_FW_ENABLED);

			cxgb4_dcb_state_fsm(dev, state_input);
		}

		if (cmd == FW_PORT_CMD &&
		    action == FW_PORT_ACTION_L2_DCB_CFG)
			dcb_rpl(q->adap, pcmd);
		else
#endif
			t4_handle_fw_rpl(q->adap, p->data);
	} else if (opcode == CPL_L2T_WRITE_RPL) {
		const struct cpl_l2t_write_rpl *p = (void *)rsp;

		do_l2t_write_rpl(q->adap, p);
	} else if (opcode == CPL_SMT_WRITE_RPL) {
		const struct cpl_smt_write_rpl *p = (void *)rsp;

		do_smt_write_rpl(q->adap, p);
	} else if (opcode == CPL_SET_TCB_RPL) {
		const struct cpl_set_tcb_rpl *p = (void *)rsp;

		filter_rpl(q->adap, p);
	} else if (opcode == CPL_ACT_OPEN_RPL) {
		const struct cpl_act_open_rpl *p = (void *)rsp;

		hash_filter_rpl(q->adap, p);
	} else if (opcode == CPL_ABORT_RPL_RSS) {
		const struct cpl_abort_rpl_rss *p = (void *)rsp;

		hash_del_filter_rpl(q->adap, p);
	} else if (opcode == CPL_SRQ_TABLE_RPL) {
		const struct cpl_srq_table_rpl *p = (void *)rsp;

		do_srq_table_rpl(q->adap, p);
	} else
		dev_err(q->adap->pdev_dev,
			"unexpected CPL %#x on FW event queue\n", opcode);
out:
	return 0;
}
static void disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}
/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
	struct adapter *adap = cookie;
	u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A));

	if (v & PFSW_F) {
		adap->swintr = 1;
		t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
	}
	if (adap->flags & MASTER_PF)
		t4_slow_intr_handler(adap);
	return IRQ_HANDLED;
}
/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);

	/* non-data interrupts */
	snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);

	/* FW events */
	snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
		 adap->port[0]->name);

	/* Ethernet queues */
	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++)
			snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
				 d->name, i);
	}
}
static int request_msix_queue_irqs(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int err, ethqidx;
	int msi_index = 2;

	err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
			  adap->msix_info[1].desc, &s->fw_evtq);
	if (err)
		return err;

	for_each_ethrxq(s, ethqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->ethrxq[ethqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	return 0;

unwind:
	while (--ethqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->ethrxq[ethqidx].rspq);
	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	return err;
}
static void free_msix_queue_irqs(struct adapter *adap)
{
	int i, msi_index = 2;
	struct sge *s = &adap->sge;

	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	for_each_ethrxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
}
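
/* MSI-X vector layout assumed by the two helpers above and by
 * name_msix_vecs(): vector 0 carries the non-data (slow path)
 * interrupts, vector 1 serves the firmware event queue, and vectors 2
 * and up are handed out one per Ethernet Rx queue in port order.
 */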
/**
 *	cxgb4_write_rss - write the RSS table for a given port
 *	@pi: the port
 *	@queues: array of queue indices for RSS
 *
 *	Sets up the portion of the HW RSS table for the port's VI to distribute
 *	packets to the Rx queues in @queues.
 *	Should never be called before setting up sge eth rx queues
 */
int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
{
	u16 *rss;
	int i, err;
	struct adapter *adapter = pi->adapter;
	const struct sge_eth_rxq *rxq;

	rxq = &adapter->sge.ethrxq[pi->first_qset];
	rss = kmalloc_array(pi->rss_size, sizeof(u16), GFP_KERNEL);
	if (!rss)
		return -ENOMEM;

	/* map the queue indices to queue ids */
	for (i = 0; i < pi->rss_size; i++, queues++)
		rss[i] = rxq[*queues].rspq.abs_id;

	err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0,
				  pi->rss_size, rss, pi->rss_size);
	/* If Tunnel All Lookup isn't specified in the global RSS
	 * Configuration, then we need to specify a default Ingress
	 * Queue for any ingress packets which aren't hashed.  We'll
	 * use our first ingress queue ...
	 */
	if (!err)
		err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,
				       FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_UDPEN_F,
				       rss[0]);
	kfree(rss);
	return err;
}
/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{
	int i, j, err;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		/* Fill default values with equal distribution */
		for (j = 0; j < pi->rss_size; j++)
			pi->rss[j] = j % pi->nqsets;

		err = cxgb4_write_rss(pi, pi->rss);
		if (err)
			return err;
	}
	return 0;
}
/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
	qid -= p->ingr_start;
	return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}
/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < adap->sge.ingr_sz; i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (q && q->handler)
			napi_disable(&q->napi);
	}
}
/* Disable interrupt and napi handler */
static void disable_interrupts(struct adapter *adap)
{
	if (adap->flags & FULL_INIT_DONE) {
		t4_intr_disable(adap);
		if (adap->flags & USING_MSIX) {
			free_msix_queue_irqs(adap);
			free_irq(adap->msix_info[0].vec, adap);
		} else {
			free_irq(adap->pdev->irq, adap);
		}
		quiesce_rx(adap);
	}
}
/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < adap->sge.ingr_sz; i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (!q)
			continue;
		if (q->handler)
			napi_enable(&q->napi);

		/* 0-increment GTS to start the timer and enable interrupts */
		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
			     SEINTARM_V(q->intr_params) |
			     INGRESSQID_V(q->cntxt_id));
	}
}
static int setup_fw_sge_queues(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int err = 0;

	bitmap_zero(s->starving_fl, s->egr_sz);
	bitmap_zero(s->txq_maperr, s->egr_sz);

	if (adap->flags & USING_MSIX)
		adap->msi_idx = 1;         /* vector 0 is for non-queue interrupts */
	else {
		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
				       NULL, NULL, NULL, -1);
		if (err)
			return err;
		adap->msi_idx = -((int)s->intrq.abs_id + 1);
	}

	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
			       adap->msi_idx, NULL, fwevtq_handler, NULL, -1);
	if (err)
		t4_free_sge_resources(adap);
	return err;
}
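
/* When MSI-X is not in use, adap->msi_idx is set above to the negative
 * of (intrq abs_id + 1).  t4_sge_alloc_rxq() treats a negative index as
 * "no dedicated vector": the queue's interrupts are instead forwarded
 * through s->intrq, which was allocated just above for that purpose.
 */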
/**
 *	setup_sge_queues - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
	int err, i, j;
	struct sge *s = &adap->sge;
	struct sge_uld_rxq_info *rxq_info = NULL;
	unsigned int cmplqid = 0;

	if (is_uld(adap))
		rxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

		for (j = 0; j < pi->nqsets; j++, q++) {
			if (adap->msi_idx > 0)
				adap->msi_idx++;
			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
					       adap->msi_idx, &q->fl,
					       t4_ethrx_handler, NULL,
					       t4_get_tp_ch_map(adap,
								pi->tx_chan));
			if (err)
				goto freeout;
			memset(&q->stats, 0, sizeof(q->stats));
		}
		for (j = 0; j < pi->nqsets; j++, t++) {
			err = t4_sge_alloc_eth_txq(adap, t, dev,
					netdev_get_tx_queue(dev, j),
					s->fw_evtq.cntxt_id);
			if (err)
				goto freeout;
		}
	}

	for_each_port(adap, i) {
		/* Note that cmplqid below is 0 if we don't
		 * have RDMA queues, and that's the right value.
		 */
		if (rxq_info)
			cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;

		err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
					    s->fw_evtq.cntxt_id, cmplqid);
		if (err)
			goto freeout;
	}

	if (!is_t4(adap->params.chip)) {
		err = t4_sge_alloc_eth_txq(adap, &s->ptptxq, adap->port[0],
					   netdev_get_tx_queue(adap->port[0], 0)
					   , s->fw_evtq.cntxt_id);
		if (err)
			goto freeout;
	}

	t4_write_reg(adap, is_t4(adap->params.chip) ?
				MPS_TRC_RSS_CONTROL_A :
				MPS_T5_TRC_RSS_CONTROL_A,
		     RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) |
		     QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
	return 0;
freeout:
	dev_err(adap->pdev_dev, "Can't allocate queues, err=%d\n", -err);
	t4_free_sge_resources(adap);
	return err;
}
static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
			     struct net_device *sb_dev,
			     select_queue_fallback_t fallback)
{
	int txq;

#ifdef CONFIG_CHELSIO_T4_DCB
	/* If Data Center Bridging has been successfully negotiated on this
	 * link then we'll use the skb's priority to map it to a TX Queue.
	 * The skb's priority is determined via the VLAN Tag Priority Code
	 * Point field.
	 */
	if (cxgb4_dcb_enabled(dev) && !is_kdump_kernel()) {
		u16 vlan_tci;
		int err;

		err = vlan_get_tag(skb, &vlan_tci);
		if (unlikely(err)) {
			if (net_ratelimit())
				netdev_warn(dev,
					    "TX Packet without VLAN Tag on DCB Link\n");
			txq = 0;
		} else {
			txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
#ifdef CONFIG_CHELSIO_T4_FCOE
			if (skb->protocol == htons(ETH_P_FCOE))
				txq = skb->priority & 0x7;
#endif /* CONFIG_CHELSIO_T4_FCOE */
		}
		return txq;
	}
#endif /* CONFIG_CHELSIO_T4_DCB */

	if (select_queue) {
		txq = (skb_rx_queue_recorded(skb)
			? skb_get_rx_queue(skb)
			: smp_processor_id());

		while (unlikely(txq >= dev->real_num_tx_queues))
			txq -= dev->real_num_tx_queues;

		return txq;
	}

	return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
}
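
/* Worked example of the DCB mapping above: the 3-bit Priority Code
 * Point lives in bits 15:13 of the VLAN TCI, so a TCI of 0xa005 maps to
 * (0xa005 & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT = 0xa000 >> 13 = 5,
 * i.e. TX queue 5.
 */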
static int closest_timer(const struct sge *s, int time)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
		delta = time - s->timer_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

static int closest_thres(const struct sge *s, int thres)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
		delta = thres - s->counter_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}
/**
 *	cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters
 *	@q: the Rx queue
 *	@us: the hold-off time in us, or 0 to disable timer
 *	@cnt: the hold-off packet count, or 0 to disable counter
 *
 *	Sets an Rx queue's interrupt hold-off time and packet count.  At least
 *	one of the two needs to be enabled for the queue to generate interrupts.
 */
int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
			       unsigned int us, unsigned int cnt)
{
	struct adapter *adap = q->adap;

	if ((us | cnt) == 0)
		cnt = 1;

	if (cnt) {
		int err;
		u32 v, new_idx;

		new_idx = closest_thres(&adap->sge, cnt);
		if (q->desc && q->pktcnt_idx != new_idx) {
			/* the queue has already been created, update it */
			v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			    FW_PARAMS_PARAM_X_V(
					FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
			    FW_PARAMS_PARAM_YZ_V(q->cntxt_id);
			err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
					    &v, &new_idx);
			if (err)
				return err;
		}
		q->pktcnt_idx = new_idx;
	}

	us = us == 0 ? 6 : closest_timer(&adap->sge, us);
	q->intr_params = QINTR_TIMER_IDX_V(us) | QINTR_CNT_EN_V(cnt > 0);
	return 0;
}
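
/* Usage example: cxgb4_set_rspq_intr_params(q, 5, 8) picks the timer
 * value closest to 5 us and the packet-count threshold closest to 8
 * from the adapter's SGE tables.  Passing us = 0 and cnt = 0 does not
 * disable interrupts entirely: cnt is forced to 1 above, i.e. one
 * interrupt per received packet.
 */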
static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
	const struct port_info *pi = netdev_priv(dev);
	netdev_features_t changed = dev->features ^ features;
	int err;

	if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
		return 0;

	err = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, -1,
			    -1, -1, -1,
			    !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (unlikely(err))
		dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
	return err;
}
static int setup_debugfs(struct adapter *adap)
{
	if (IS_ERR_OR_NULL(adap->debugfs_root))
		return -1;

#ifdef CONFIG_DEBUG_FS
	t4_setup_debugfs(adap);
#endif
	return 0;
}
1084 * Allocate an active-open TID and set it to the supplied value.
1086 int cxgb4_alloc_atid(struct tid_info
*t
, void *data
)
1090 spin_lock_bh(&t
->atid_lock
);
1092 union aopen_entry
*p
= t
->afree
;
1094 atid
= (p
- t
->atid_tab
) + t
->atid_base
;
1099 spin_unlock_bh(&t
->atid_lock
);
1102 EXPORT_SYMBOL(cxgb4_alloc_atid
);
/*
 * Release an active-open TID.
 */
void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
{
	union aopen_entry *p = &t->atid_tab[atid - t->atid_base];

	spin_lock_bh(&t->atid_lock);
	p->next = t->afree;
	t->afree = p;
	t->atids_in_use--;
	spin_unlock_bh(&t->atid_lock);
}
EXPORT_SYMBOL(cxgb4_free_atid);
/*
 * Allocate a server TID and set it to the supplied value.
 */
int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		stid = find_first_zero_bit(t->stid_bmap, t->nstids);
		if (stid < t->nstids)
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 1);
		if (stid < 0)
			stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		stid += t->stid_base;
		/* IPv6 requires max of 520 bits or 16 cells in TCAM
		 * This is equivalent to 4 TIDs. With CLIP enabled it
		 * needs 2 TIDs.
		 */
		if (family == PF_INET6) {
			t->stids_in_use += 2;
			t->v6_stids_in_use += 2;
		} else {
			t->stids_in_use++;
		}
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_stid);
/* Allocate a server filter TID and set it to the supplied value.
 */
int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		stid = find_next_zero_bit(t->stid_bmap,
					  t->nstids + t->nsftids, t->nstids);
		if (stid < (t->nstids + t->nsftids))
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		stid -= t->nstids;
		stid += t->sftid_base;
		t->sftids_in_use++;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_sftid);
/* Release a server TID.
 */
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
{
	/* Is it a server filter TID? */
	if (t->nsftids && (stid >= t->sftid_base)) {
		stid -= t->sftid_base;
		stid += t->nstids;
	} else {
		stid -= t->stid_base;
	}

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET)
		__clear_bit(stid, t->stid_bmap);
	else
		bitmap_release_region(t->stid_bmap, stid, 1);
	t->stid_tab[stid].data = NULL;
	if (stid < t->nstids) {
		if (family == PF_INET6) {
			t->stids_in_use -= 2;
			t->v6_stids_in_use -= 2;
		} else {
			t->stids_in_use--;
		}
	} else {
		t->sftids_in_use--;
	}
	spin_unlock_bh(&t->stid_lock);
}
EXPORT_SYMBOL(cxgb4_free_stid);
/*
 * Populate a TID_RELEASE WR.  Caller must properly size the skb.
 */
static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
			   unsigned int tid)
{
	struct cpl_tid_release *req;

	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
	req = __skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}
/*
 * Queue a TID release request and if necessary schedule a work queue to
 * process it.
 */
static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
				    unsigned int tid)
{
	void **p = &t->tid_tab[tid];
	struct adapter *adap = container_of(t, struct adapter, tids);

	spin_lock_bh(&adap->tid_release_lock);
	*p = adap->tid_release_head;
	/* Low 2 bits encode the Tx channel number */
	adap->tid_release_head = (void **)((uintptr_t)p | chan);
	if (!adap->tid_release_task_busy) {
		adap->tid_release_task_busy = true;
		queue_work(adap->workq, &adap->tid_release_task);
	}
	spin_unlock_bh(&adap->tid_release_lock);
}
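
/* The deferral list above needs no extra memory: the now-free
 * tid_tab[tid] slot itself becomes the list link.  Since those pointers
 * are at least 4-byte aligned, the two low bits of the head pointer are
 * free and are used to smuggle the Tx channel number along with each
 * entry, to be unpacked again in process_tid_release_list().
 */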
/*
 * Process the list of pending TID release requests.
 */
static void process_tid_release_list(struct work_struct *work)
{
	struct sk_buff *skb;
	struct adapter *adap;

	adap = container_of(work, struct adapter, tid_release_task);

	spin_lock_bh(&adap->tid_release_lock);
	while (adap->tid_release_head) {
		void **p = adap->tid_release_head;
		unsigned int chan = (uintptr_t)p & 3;
		p = (void *)p - chan;

		adap->tid_release_head = *p;
		*p = NULL;
		spin_unlock_bh(&adap->tid_release_lock);

		while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
					 GFP_KERNEL)))
			schedule_timeout_uninterruptible(1);

		mk_tid_release(skb, chan, p - adap->tids.tid_tab);
		t4_ofld_send(adap, skb);
		spin_lock_bh(&adap->tid_release_lock);
	}
	adap->tid_release_task_busy = false;
	spin_unlock_bh(&adap->tid_release_lock);
}
/*
 * Release a TID and inform HW.  If we are unable to allocate the release
 * message we defer to a work queue.
 */
void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
		      unsigned short family)
{
	struct sk_buff *skb;
	struct adapter *adap = container_of(t, struct adapter, tids);

	WARN_ON(tid >= t->ntids);

	if (t->tid_tab[tid]) {
		t->tid_tab[tid] = NULL;
		atomic_dec(&t->conns_in_use);
		if (t->hash_base && (tid >= t->hash_base)) {
			if (family == AF_INET6)
				atomic_sub(2, &t->hash_tids_in_use);
			else
				atomic_dec(&t->hash_tids_in_use);
		} else {
			if (family == AF_INET6)
				atomic_sub(2, &t->tids_in_use);
			else
				atomic_dec(&t->tids_in_use);
		}
	}

	skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
	if (likely(skb)) {
		mk_tid_release(skb, chan, tid);
		t4_ofld_send(adap, skb);
	} else
		cxgb4_queue_tid_release(t, chan, tid);
}
EXPORT_SYMBOL(cxgb4_remove_tid);
/*
 * Allocate and initialize the TID tables.  Returns 0 on success.
 */
static int tid_init(struct tid_info *t)
{
	struct adapter *adap = container_of(t, struct adapter, tids);
	unsigned int max_ftids = t->nftids + t->nsftids;
	unsigned int natids = t->natids;
	unsigned int stid_bmap_size;
	unsigned int ftid_bmap_size;
	size_t size;

	stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
	ftid_bmap_size = BITS_TO_LONGS(t->nftids);
	size = t->ntids * sizeof(*t->tid_tab) +
	       natids * sizeof(*t->atid_tab) +
	       t->nstids * sizeof(*t->stid_tab) +
	       t->nsftids * sizeof(*t->stid_tab) +
	       stid_bmap_size * sizeof(long) +
	       max_ftids * sizeof(*t->ftid_tab) +
	       ftid_bmap_size * sizeof(long);

	t->tid_tab = kvzalloc(size, GFP_KERNEL);
	if (!t->tid_tab)
		return -ENOMEM;

	t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
	t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
	t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
	t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
	t->ftid_bmap = (unsigned long *)&t->ftid_tab[max_ftids];
	spin_lock_init(&t->stid_lock);
	spin_lock_init(&t->atid_lock);
	spin_lock_init(&t->ftid_lock);

	t->stids_in_use = 0;
	t->v6_stids_in_use = 0;
	t->sftids_in_use = 0;
	t->afree = NULL;
	t->atids_in_use = 0;
	atomic_set(&t->tids_in_use, 0);
	atomic_set(&t->conns_in_use, 0);
	atomic_set(&t->hash_tids_in_use, 0);

	/* Setup the free list for atid_tab and clear the stid bitmap. */
	if (natids) {
		while (--natids)
			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
		t->afree = t->atid_tab;
	}

	if (is_offload(adap)) {
		bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
		/* Reserve stid 0 for T4/T5 adapters */
		if (!t->stid_base &&
		    CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
			__set_bit(0, t->stid_bmap);
	}

	bitmap_zero(t->ftid_bmap, t->nftids);
	return 0;
}
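
/* All of the tables above live in one kvzalloc() block: tid_tab is
 * followed in memory by atid_tab, stid_tab, the stid bitmap, ftid_tab
 * and the ftid bitmap.  That is why a single allocation-failure check
 * suffices here and a single free of t->tid_tab releases the lot.
 */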
/**
 *	cxgb4_create_server - create an IP server
 *	@dev: the device
 *	@stid: the server TID
 *	@sip: local IP address to bind server to
 *	@sport: the server's TCP port
 *	@queue: queue to direct messages from this server to
 *
 *	Create an IP server for the given port and address.
 *	Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
			__be32 sip, __be16 sport, __be16 vlan,
			unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = __skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
	req->local_port = sport;
	req->peer_port = htons(0);
	req->local_ip = sip;
	req->peer_ip = htonl(0);
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server);
/*	cxgb4_create_server6 - create an IPv6 server
 *	@dev: the device
 *	@stid: the server TID
 *	@sip: local IPv6 address to bind server to
 *	@sport: the server's TCP port
 *	@queue: queue to direct messages from this server to
 *
 *	Create an IPv6 server for the given port and address.
 *	Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
			 const struct in6_addr *sip, __be16 sport,
			 unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req6 *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = __skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
	req->local_port = sport;
	req->peer_port = htons(0);
	req->local_ip_hi = *(__be64 *)(sip->s6_addr);
	req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
	req->peer_ip_hi = cpu_to_be64(0);
	req->peer_ip_lo = cpu_to_be64(0);
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server6);
int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
			unsigned int queue, bool ipv6)
{
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_close_listsvr_req *req;
	int ret;

	adap = netdev2adap(dev);

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	req = __skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
	req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) :
				LISTSVR_IPV6_V(0)) | QUEUENO_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_remove_server);
/**
 *	cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
 *	@mtus: the HW MTU table
 *	@mtu: the target MTU
 *	@idx: index of selected entry in the MTU table
 *
 *	Returns the index and the value in the HW MTU table that is closest to
 *	but does not exceed @mtu, unless @mtu is smaller than any value in the
 *	table, in which case that smallest available value is selected.
 */
unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
			    unsigned int *idx)
{
	unsigned int i = 0;

	while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
		++i;
	if (idx)
		*idx = i;
	return mtus[i];
}
EXPORT_SYMBOL(cxgb4_best_mtu);
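
/* Worked example: assuming a typical MTU table along the lines of
 * { ..., 1024, 1280, 1488, 1500, 2002, ... }, cxgb4_best_mtu(mtus,
 * 1400, &idx) stops at 1280, the largest entry that does not exceed
 * 1400, and returns it with *idx pointing at that slot.
 */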
/**
 *	cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
 *	@mtus: the HW MTU table
 *	@header_size: Header Size
 *	@data_size_max: maximum Data Segment Size
 *	@data_size_align: desired Data Segment Size Alignment (2^N)
 *	@mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
 *
 *	Similar to cxgb4_best_mtu() but instead of searching the Hardware
 *	MTU Table based solely on a Maximum MTU parameter, we break that
 *	parameter up into a Header Size and Maximum Data Segment Size, and
 *	provide a desired Data Segment Size Alignment.  If we find an MTU in
 *	the Hardware MTU Table which will result in a Data Segment Size with
 *	the requested alignment _and_ that MTU isn't "too far" from the
 *	closest MTU, then we'll return that rather than the closest MTU.
 */
unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
				    unsigned short header_size,
				    unsigned short data_size_max,
				    unsigned short data_size_align,
				    unsigned int *mtu_idxp)
{
	unsigned short max_mtu = header_size + data_size_max;
	unsigned short data_size_align_mask = data_size_align - 1;
	int mtu_idx, aligned_mtu_idx;

	/* Scan the MTU Table till we find an MTU which is larger than our
	 * Maximum MTU or we reach the end of the table.  Along the way,
	 * record the last MTU found, if any, which will result in a Data
	 * Segment Length matching the requested alignment.
	 */
	for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
		unsigned short data_size = mtus[mtu_idx] - header_size;

		/* If this MTU minus the Header Size would result in a
		 * Data Segment Size of the desired alignment, remember it.
		 */
		if ((data_size & data_size_align_mask) == 0)
			aligned_mtu_idx = mtu_idx;

		/* If we're not at the end of the Hardware MTU Table and the
		 * next element is larger than our Maximum MTU, drop out of
		 * the loop.
		 */
		if (mtu_idx + 1 < NMTUS && mtus[mtu_idx + 1] > max_mtu)
			break;
	}

	/* If we fell out of the loop because we ran to the end of the table,
	 * then we just have to use the last [largest] entry.
	 */
	if (mtu_idx == NMTUS)
		mtu_idx--;

	/* If we found an MTU which resulted in the requested Data Segment
	 * Length alignment and that's "not far" from the largest MTU which is
	 * less than or equal to the maximum MTU, then use that.
	 */
	if (aligned_mtu_idx >= 0 &&
	    mtu_idx - aligned_mtu_idx <= 1)
		mtu_idx = aligned_mtu_idx;

	/* If the caller has passed in an MTU Index pointer, pass the
	 * MTU Index back.  Return the MTU value.
	 */
	if (mtu_idxp)
		*mtu_idxp = mtu_idx;
	return mtus[mtu_idx];
}
EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
/**
 *	cxgb4_tp_smt_idx - Get the Source Mac Table index for this VI
 *	@chip: chip type
 *	@viid: VI id of the given port
 *
 *	Return the SMT index for this VI.
 */
unsigned int cxgb4_tp_smt_idx(enum chip_type chip, unsigned int viid)
{
	/* In T4/T5, SMT contains 256 SMAC entries organized in
	 * 128 rows of 2 entries each.
	 * In T6, SMT contains 256 SMAC entries in 256 rows.
	 * TODO: The below code needs to be updated when we add support
	 * for 256 VFs.
	 */
	if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
		return ((viid & 0x7f) << 1);
	else
		return (viid & 0x7f);
}
EXPORT_SYMBOL(cxgb4_tp_smt_idx);
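
/* Example: for viid 0x85, viid & 0x7f = 0x05, so the SMT index is 0x0a
 * (0x05 << 1) on T4/T5, where each of the 128 rows holds two entries,
 * and simply 0x05 on T6, which has one entry per row.
 */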
/**
 *	cxgb4_port_chan - get the HW channel of a port
 *	@dev: the net device for the port
 *
 *	Return the HW Tx channel of the given port.
 */
unsigned int cxgb4_port_chan(const struct net_device *dev)
{
	return netdev2pinfo(dev)->tx_chan;
}
EXPORT_SYMBOL(cxgb4_port_chan);
unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
{
	struct adapter *adap = netdev2adap(dev);
	u32 v1, v2, lp_count, hp_count;

	v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
	v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
	if (is_t4(adap->params.chip)) {
		lp_count = LP_COUNT_G(v1);
		hp_count = HP_COUNT_G(v1);
	} else {
		lp_count = LP_COUNT_T5_G(v1);
		hp_count = HP_COUNT_T5_G(v2);
	}
	return lpfifo ? lp_count : hp_count;
}
EXPORT_SYMBOL(cxgb4_dbfifo_count);
/**
 *	cxgb4_port_viid - get the VI id of a port
 *	@dev: the net device for the port
 *
 *	Return the VI id of the given port.
 */
unsigned int cxgb4_port_viid(const struct net_device *dev)
{
	return netdev2pinfo(dev)->viid;
}
EXPORT_SYMBOL(cxgb4_port_viid);

/**
 *	cxgb4_port_idx - get the index of a port
 *	@dev: the net device for the port
 *
 *	Return the index of the given port.
 */
unsigned int cxgb4_port_idx(const struct net_device *dev)
{
	return netdev2pinfo(dev)->port_id;
}
EXPORT_SYMBOL(cxgb4_port_idx);
void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6)
{
	struct adapter *adap = pci_get_drvdata(pdev);

	spin_lock(&adap->stats_lock);
	t4_tp_get_tcp_stats(adap, v4, v6, false);
	spin_unlock(&adap->stats_lock);
}
EXPORT_SYMBOL(cxgb4_get_tcp_stats);
void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
		      const unsigned int *pgsz_order)
{
	struct adapter *adap = netdev2adap(dev);

	t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask);
	t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) |
		     HPZ1_V(pgsz_order[1]) | HPZ2_V(pgsz_order[2]) |
		     HPZ3_V(pgsz_order[3]));
}
EXPORT_SYMBOL(cxgb4_iscsi_init);
int cxgb4_flush_eq_cache(struct net_device *dev)
{
	struct adapter *adap = netdev2adap(dev);

	return t4_sge_ctxt_flush(adap, adap->mbox, CTXT_EGRESS);
}
EXPORT_SYMBOL(cxgb4_flush_eq_cache);
static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
{
	u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8;
	__be64 indices;
	int ret;

	spin_lock(&adap->win0_lock);
	ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
			   sizeof(indices), (__be32 *)&indices,
			   T4_MEMORY_READ);
	spin_unlock(&adap->win0_lock);
	if (!ret) {
		*cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
		*pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
	}
	return ret;
}
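
/* Layout assumed above: egress queue contexts are 24 bytes apiece in
 * EDC0 starting at SGE_DBQ_CTXT_BADDR, and the 64-bit word at offset 8
 * of a context packs cidx into bits 40:25 and pidx into bits 24:9,
 * which is what the two shift-and-mask extractions decode.
 */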
int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
			u16 size)
{
	struct adapter *adap = netdev2adap(dev);
	u16 hw_pidx, hw_cidx;
	int ret;

	ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
	if (ret)
		return ret;

	if (pidx != hw_pidx) {
		u16 delta;
		u32 val;

		if (pidx >= hw_pidx)
			delta = pidx - hw_pidx;
		else
			delta = size - hw_pidx + pidx;

		if (is_t4(adap->params.chip))
			val = PIDX_V(delta);
		else
			val = PIDX_T5_V(delta);
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(qid) | val);
	}
	return 0;
}
EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
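
/* Example of the wrap-around delta above: for a 1024-entry queue with
 * hw_pidx = 1000 and a desired pidx of 8, the producer index has
 * wrapped, so delta = 1024 - 1000 + 8 = 32 doorbell increments are
 * issued.
 */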
int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
{
	u32 edc0_size, edc1_size, mc0_size, mc1_size, size;
	u32 edc0_end, edc1_end, mc0_end, mc1_end;
	u32 offset, memtype, memaddr;
	struct adapter *adap;
	u32 hma_size = 0;
	int ret;

	adap = netdev2adap(dev);

	offset = ((stag >> 8) * 32) + adap->vres.stag.start;

	/* Figure out where the offset lands in the Memory Type/Address scheme.
	 * This code assumes that the memory is laid out starting at offset 0
	 * with no breaks as: EDC0, EDC1, MC0, MC1. All cards have both EDC0
	 * and EDC1.  Some cards will have neither MC0 nor MC1, most cards have
	 * MC0, and some have both MC0 and MC1.
	 */
	size = t4_read_reg(adap, MA_EDRAM0_BAR_A);
	edc0_size = EDRAM0_SIZE_G(size) << 20;
	size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
	edc1_size = EDRAM1_SIZE_G(size) << 20;
	size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
	mc0_size = EXT_MEM0_SIZE_G(size) << 20;

	if (t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A) & HMA_MUX_F) {
		size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
		hma_size = EXT_MEM1_SIZE_G(size) << 20;
	}
	edc0_end = edc0_size;
	edc1_end = edc0_end + edc1_size;
	mc0_end = edc1_end + mc0_size;

	if (offset < edc0_end) {
		memtype = MEM_EDC0;
		memaddr = offset;
	} else if (offset < edc1_end) {
		memtype = MEM_EDC1;
		memaddr = offset - edc0_end;
	} else {
		if (hma_size && (offset < (edc1_end + hma_size))) {
			memtype = MEM_HMA;
			memaddr = offset - edc1_end;
		} else if (offset < mc0_end) {
			memtype = MEM_MC0;
			memaddr = offset - edc1_end;
		} else if (is_t5(adap->params.chip)) {
			size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
			mc1_size = EXT_MEM1_SIZE_G(size) << 20;
			mc1_end = mc0_end + mc1_size;
			if (offset < mc1_end) {
				memtype = MEM_MC1;
				memaddr = offset - mc0_end;
			} else {
				/* offset beyond the end of any memory */
				goto err;
			}
		} else {
			/* T4/T6 only has a single memory channel */
			goto err;
		}
	}

	spin_lock(&adap->win0_lock);
	ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
	spin_unlock(&adap->win0_lock);
	return ret;

err:
	dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
		stag, offset);
	return -EINVAL;
}
EXPORT_SYMBOL(cxgb4_read_tpte);
u64 cxgb4_read_sge_timestamp(struct net_device *dev)
{
	u32 lo, hi;
	struct adapter *adap;

	adap = netdev2adap(dev);
	lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A);
	hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A));

	return ((u64)hi << 32) | (u64)lo;
}
EXPORT_SYMBOL(cxgb4_read_sge_timestamp);
int cxgb4_bar2_sge_qregs(struct net_device *dev,
			 unsigned int qid,
			 enum cxgb4_bar2_qtype qtype,
			 int user,
			 u64 *pbar2_qoffset,
			 unsigned int *pbar2_qid)
{
	return t4_bar2_sge_qregs(netdev2adap(dev),
				 qid,
				 (qtype == CXGB4_BAR2_QTYPE_EGRESS
				  ? T4_BAR2_QTYPE_EGRESS
				  : T4_BAR2_QTYPE_INGRESS),
				 user,
				 pbar2_qoffset,
				 pbar2_qid);
}
EXPORT_SYMBOL(cxgb4_bar2_sge_qregs);
static struct pci_driver cxgb4_driver;

static void check_neigh_update(struct neighbour *neigh)
{
	const struct device *parent;
	const struct net_device *netdev = neigh->dev;

	if (is_vlan_dev(netdev))
		netdev = vlan_dev_real_dev(netdev);
	parent = netdev->dev.parent;
	if (parent && parent->driver == &cxgb4_driver.driver)
		t4_l2t_update(dev_get_drvdata(parent), neigh);
}
static int netevent_cb(struct notifier_block *nb, unsigned long event,
		       void *data)
{
	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		check_neigh_update(data);
		break;
	case NETEVENT_REDIRECT:
	default:
		break;
	}
	return 0;
}

static bool netevent_registered;
static struct notifier_block cxgb4_netevent_nb = {
	.notifier_call = netevent_cb
};
static void drain_db_fifo(struct adapter *adap, int usecs)
{
	u32 v1, v2, lp_count, hp_count;

	do {
		v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
		v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
		if (is_t4(adap->params.chip)) {
			lp_count = LP_COUNT_G(v1);
			hp_count = HP_COUNT_G(v1);
		} else {
			lp_count = LP_COUNT_T5_G(v1);
			hp_count = HP_COUNT_T5_G(v2);
		}

		if (lp_count == 0 && hp_count == 0)
			break;
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(usecs));
	} while (1);
}
static void disable_txq_db(struct sge_txq *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->db_lock, flags);
	q->db_disabled = 1;
	spin_unlock_irqrestore(&q->db_lock, flags);
}

static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
{
	spin_lock_irq(&q->db_lock);
	if (q->db_pidx_inc) {
		/* Make sure that all writes to the TX descriptors
		 * are committed before we tell HW about them.
		 */
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc));
		q->db_pidx_inc = 0;
	}
	q->db_disabled = 0;
	spin_unlock_irq(&q->db_lock);
}
static void disable_dbs(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		disable_txq_db(&adap->sge.ethtxq[i].q);
	if (is_offload(adap)) {
		struct sge_uld_txq_info *txq_info =
			adap->sge.uld_txq_info[CXGB4_TX_OFLD];

		if (txq_info) {
			for_each_ofldtxq(&adap->sge, i) {
				struct sge_uld_txq *txq = &txq_info->uldtxq[i];

				disable_txq_db(&txq->q);
			}
		}
	}
	for_each_port(adap, i)
		disable_txq_db(&adap->sge.ctrlq[i].q);
}

static void enable_dbs(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		enable_txq_db(adap, &adap->sge.ethtxq[i].q);
	if (is_offload(adap)) {
		struct sge_uld_txq_info *txq_info =
			adap->sge.uld_txq_info[CXGB4_TX_OFLD];

		if (txq_info) {
			for_each_ofldtxq(&adap->sge, i) {
				struct sge_uld_txq *txq = &txq_info->uldtxq[i];

				enable_txq_db(adap, &txq->q);
			}
		}
	}
	for_each_port(adap, i)
		enable_txq_db(adap, &adap->sge.ctrlq[i].q);
}
static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
{
	enum cxgb4_uld type = CXGB4_ULD_RDMA;

	if (adap->uld && adap->uld[type].handle)
		adap->uld[type].control(adap->uld[type].handle, cmd);
}

static void process_db_full(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, db_full_task);

	drain_db_fifo(adap, dbfifo_drain_delay);
	enable_dbs(adap);
	notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
	else
		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
				 DBFIFO_LP_INT_F, DBFIFO_LP_INT_F);
}
static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
{
	u16 hw_pidx, hw_cidx;
	int ret;

	spin_lock_irq(&q->db_lock);
	ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
	if (ret)
		goto out;
	if (q->db_pidx != hw_pidx) {
		u16 delta;
		u32 val;

		if (q->db_pidx >= hw_pidx)
			delta = q->db_pidx - hw_pidx;
		else
			delta = q->size - hw_pidx + q->db_pidx;

		if (is_t4(adap->params.chip))
			val = PIDX_V(delta);
		else
			val = PIDX_T5_V(delta);
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(q->cntxt_id) | val);
	}
out:
	q->db_disabled = 0;
	q->db_pidx_inc = 0;
	spin_unlock_irq(&q->db_lock);
	if (ret)
		CH_WARN(adap, "DB drop recovery failed.\n");
}
static void recover_all_queues(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
	if (is_offload(adap)) {
		struct sge_uld_txq_info *txq_info =
			adap->sge.uld_txq_info[CXGB4_TX_OFLD];

		if (txq_info) {
			for_each_ofldtxq(&adap->sge, i) {
				struct sge_uld_txq *txq = &txq_info->uldtxq[i];

				sync_txq_pidx(adap, &txq->q);
			}
		}
	}
	for_each_port(adap, i)
		sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
}
static void process_db_drop(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, db_drop_task);

	if (is_t4(adap->params.chip)) {
		drain_db_fifo(adap, dbfifo_drain_delay);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
		drain_db_fifo(adap, dbfifo_drain_delay);
		recover_all_queues(adap);
		drain_db_fifo(adap, dbfifo_drain_delay);
		enable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
	} else if (is_t5(adap->params.chip)) {
		u32 dropped_db = t4_read_reg(adap, 0x010ac);
		u16 qid = (dropped_db >> 15) & 0x1ffff;
		u16 pidx_inc = dropped_db & 0x1fff;
		u64 bar2_qoffset;
		unsigned int bar2_qid;
		int ret;

		ret = t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
					0, &bar2_qoffset, &bar2_qid);
		if (ret)
			dev_err(adap->pdev_dev, "doorbell drop recovery: "
				"qid=%d, pidx_inc=%d\n", qid, pidx_inc);
		else
			writel(PIDX_T5_V(pidx_inc) | QID_V(bar2_qid),
			       adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL);

		/* Re-enable BAR2 WC */
		t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
	}

	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
		t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
}
void t4_db_full(struct adapter *adap)
{
	if (is_t4(adap->params.chip)) {
		disable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, 0);
		queue_work(adap->workq, &adap->db_full_task);
	}
}

void t4_db_dropped(struct adapter *adap)
{
	if (is_t4(adap->params.chip)) {
		disable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
	}
	queue_work(adap->workq, &adap->db_drop_task);
}
void t4_register_netevent_notifier(void)
{
	if (!netevent_registered) {
		register_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = true;
	}
}
static void detach_ulds(struct adapter *adap)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	list_del(&adap->list_node);

	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (adap->uld && adap->uld[i].handle)
			adap->uld[i].state_change(adap->uld[i].handle,
						  CXGB4_STATE_DETACH);

	if (netevent_registered && list_empty(&adapter_list)) {
		unregister_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = false;
	}
	mutex_unlock(&uld_mutex);
}
static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (adap->uld && adap->uld[i].handle)
			adap->uld[i].state_change(adap->uld[i].handle,
						  new_state);
	mutex_unlock(&uld_mutex);
}
#if IS_ENABLED(CONFIG_IPV6)
static int cxgb4_inet6addr_handler(struct notifier_block *this,
				   unsigned long event, void *data)
{
	struct inet6_ifaddr *ifa = data;
	struct net_device *event_dev = ifa->idev->dev;
	const struct device *parent = NULL;
#if IS_ENABLED(CONFIG_BONDING)
	struct adapter *adap;
#endif
	if (is_vlan_dev(event_dev))
		event_dev = vlan_dev_real_dev(event_dev);
#if IS_ENABLED(CONFIG_BONDING)
	if (event_dev->flags & IFF_MASTER) {
		list_for_each_entry(adap, &adapter_list, list_node) {
			switch (event) {
			case NETDEV_UP:
				cxgb4_clip_get(adap->port[0],
					       (const u32 *)ifa, 1);
				break;
			case NETDEV_DOWN:
				cxgb4_clip_release(adap->port[0],
						   (const u32 *)ifa, 1);
				break;
			default:
				break;
			}
		}
		return NOTIFY_OK;
	}
#endif

	if (event_dev)
		parent = event_dev->dev.parent;

	if (parent && parent->driver == &cxgb4_driver.driver) {
		switch (event) {
		case NETDEV_UP:
			cxgb4_clip_get(event_dev, (const u32 *)ifa, 1);
			break;
		case NETDEV_DOWN:
			cxgb4_clip_release(event_dev, (const u32 *)ifa, 1);
			break;
		default:
			break;
		}
	}
	return NOTIFY_OK;
}
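
/* The handler above keeps the adapter's CLIP (Compressed Local IP) table in
 * sync with the host's IPv6 addresses, so the hardware can match offloaded
 * connections against local addresses, including addresses configured on
 * bonding masters and VLANs stacked on top of our ports.
 */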
static bool inet6addr_registered;
static struct notifier_block cxgb4_inet6addr_notifier = {
	.notifier_call = cxgb4_inet6addr_handler
};

static void update_clip(const struct adapter *adap)
{
	int i;
	struct net_device *dev;
	int ret;

	rcu_read_lock();

	for (i = 0; i < MAX_NPORTS; i++) {
		dev = adap->port[i];
		ret = 0;

		if (dev)
			ret = cxgb4_update_root_dev_clip(dev);

		if (ret < 0)
			break;
	}
	rcu_read_unlock();
}
#endif /* IS_ENABLED(CONFIG_IPV6) */
/*
 *	cxgb_up - enable the adapter
 *	@adap: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err;

	mutex_lock(&uld_mutex);
	err = setup_sge_queues(adap);
	if (err)
		goto rel_lock;
	err = setup_rss(adap);
	if (err)
		goto freeq;

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;
		err = request_msix_queue_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else {
		err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
				  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
				  adap->port[0]->name, adap);
		if (err)
			goto irq_err;
	}

	enable_rx(adap);
	t4_sge_start(adap);
	t4_intr_enable(adap);
	adap->flags |= FULL_INIT_DONE;
	mutex_unlock(&uld_mutex);

	notify_ulds(adap, CXGB4_STATE_UP);
#if IS_ENABLED(CONFIG_IPV6)
	update_clip(adap);
#endif
	/* Initialize hash mac addr list */
	INIT_LIST_HEAD(&adap->mac_hlist);
	return 0;

 irq_err:
	dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
 freeq:
	t4_free_sge_resources(adap);
 rel_lock:
	mutex_unlock(&uld_mutex);
	return err;
}
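
/* cxgb_up() is invoked lazily from cxgb_open() on the first port open (see
 * below), so a freshly probed adapter only allocates its SGE queues and IRQs
 * once an interface is actually brought up.
 */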
static void cxgb_down(struct adapter *adapter)
{
	cancel_work_sync(&adapter->tid_release_task);
	cancel_work_sync(&adapter->db_full_task);
	cancel_work_sync(&adapter->db_drop_task);
	adapter->tid_release_task_busy = false;
	adapter->tid_release_head = NULL;

	t4_sge_stop(adapter);
	t4_free_sge_resources(adapter);
	adapter->flags &= ~FULL_INIT_DONE;
}
/*
 * net_device operations
 */
static int cxgb_open(struct net_device *dev)
{
	int err;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	netif_carrier_off(dev);

	if (!(adapter->flags & FULL_INIT_DONE)) {
		err = cxgb_up(adapter);
		if (err < 0)
			return err;
	}

	/* It's possible that the basic port information could have
	 * changed since we first read it.
	 */
	err = t4_update_port_info(pi);
	if (err < 0)
		return err;

	err = link_start(dev);
	if (!err)
		netif_tx_start_all_queues(dev);
	return err;
}
static int cxgb_close(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;

	netif_tx_stop_all_queues(dev);
	netif_carrier_off(dev);
	ret = t4_enable_pi_params(adapter, adapter->pf, pi,
				  false, false, false);
#ifdef CONFIG_CHELSIO_T4_DCB
	cxgb4_dcb_reset(dev);
	dcb_tx_queue_prio_enable(dev, false);
#endif
	return ret;
}
int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
			       __be32 sip, __be16 sport, __be16 vlan,
			       unsigned int queue, unsigned char port,
			       unsigned char mask)
{
	int ret;
	struct filter_entry *f;
	struct adapter *adap;
	int i;
	u8 *val;

	adap = netdev2adap(dev);

	/* Adjust stid to correct filter index */
	stid -= adap->tids.sftid_base;
	stid += adap->tids.nftids;

	/* Check to make sure the filter requested is writable ... */
	f = &adap->tids.ftid_tab[stid];
	ret = writable_filter(f);
	if (ret)
		return ret;

	/* Clear out any old resources being used by the filter before
	 * we start constructing the new filter.
	 */
	if (f->valid)
		clear_filter(adap, f);

	/* Clear out filter specifications */
	memset(&f->fs, 0, sizeof(struct ch_filter_specification));
	f->fs.val.lport = cpu_to_be16(sport);
	f->fs.mask.lport = ~0;
	val = (u8 *)&sip;
	if ((val[0] | val[1] | val[2] | val[3]) != 0) {
		for (i = 0; i < 4; i++) {
			f->fs.val.lip[i] = val[i];
			f->fs.mask.lip[i] = ~0;
		}
		if (adap->params.tp.vlan_pri_map & PORT_F) {
			f->fs.val.iport = port;
			f->fs.mask.iport = mask;
		}
	}

	if (adap->params.tp.vlan_pri_map & PROTOCOL_F) {
		f->fs.val.proto = IPPROTO_TCP;
		f->fs.mask.proto = ~0;
	}

	f->fs.dirsteer = 1;
	f->fs.iq = queue;
	/* Mark filter as locked */
	f->locked = 1;
	f->fs.rpttid = 1;

	/* Save the actual tid. We need this to get the corresponding
	 * filter entry structure in filter_rpl.
	 */
	f->tid = stid + adap->tids.ftid_base;
	ret = set_filter_wr(adap, stid);
	if (ret) {
		clear_filter(adap, f);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(cxgb4_create_server_filter);
int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
			       unsigned int queue, bool ipv6)
{
	struct filter_entry *f;
	struct adapter *adap;

	adap = netdev2adap(dev);

	/* Adjust stid to correct filter index */
	stid -= adap->tids.sftid_base;
	stid += adap->tids.nftids;

	f = &adap->tids.ftid_tab[stid];
	/* Unlock the filter */
	f->locked = 0;

	return delete_filter(adap, stid);
}
EXPORT_SYMBOL(cxgb4_remove_server_filter);
static void cxgb_get_stats(struct net_device *dev,
			   struct rtnl_link_stats64 *ns)
{
	struct port_stats stats;
	struct port_info *p = netdev_priv(dev);
	struct adapter *adapter = p->adapter;

	/* Block retrieving statistics during EEH error
	 * recovery. Otherwise, the recovery might fail
	 * and the PCI device will be removed permanently.
	 */
	spin_lock(&adapter->stats_lock);
	if (!netif_device_present(dev)) {
		spin_unlock(&adapter->stats_lock);
		return;
	}
	t4_get_port_stats_offset(adapter, p->tx_chan, &stats,
				 &p->stats_base);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes   = stats.tx_octets;
	ns->tx_packets = stats.tx_frames;
	ns->rx_bytes   = stats.rx_octets;
	ns->rx_packets = stats.rx_frames;
	ns->multicast  = stats.rx_mcast_frames;

	/* detailed rx_errors */
	ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
			       stats.rx_runt;
	ns->rx_over_errors   = 0;
	ns->rx_crc_errors    = stats.rx_fcs_err;
	ns->rx_frame_errors  = stats.rx_symbol_err;
	ns->rx_dropped	     = stats.rx_ovflow0 + stats.rx_ovflow1 +
			       stats.rx_ovflow2 + stats.rx_ovflow3 +
			       stats.rx_trunc0 + stats.rx_trunc1 +
			       stats.rx_trunc2 + stats.rx_trunc3;
	ns->rx_missed_errors = 0;

	/* detailed tx_errors */
	ns->tx_aborted_errors   = 0;
	ns->tx_carrier_errors   = 0;
	ns->tx_fifo_errors      = 0;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors    = 0;

	ns->tx_errors = stats.tx_error_frames;
	ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
		ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
}
static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	unsigned int mbox;
	int ret = 0, prtad, devad;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;

	switch (cmd) {
	case SIOCGMIIPHY:
		if (pi->mdio_addr < 0)
			return -EOPNOTSUPP;
		data->phy_id = pi->mdio_addr;
		break;
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (mdio_phy_id_is_c45(data->phy_id)) {
			prtad = mdio_phy_id_prtad(data->phy_id);
			devad = mdio_phy_id_devad(data->phy_id);
		} else if (data->phy_id < 32) {
			prtad = data->phy_id;
			devad = 0;
			data->reg_num &= 0x1f;
		} else
			return -EINVAL;

		mbox = pi->adapter->pf;
		if (cmd == SIOCGMIIREG)
			ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
					 data->reg_num, &data->val_out);
		else
			ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
					 data->reg_num, data->val_in);
		break;
	case SIOCGHWTSTAMP:
		return copy_to_user(req->ifr_data, &pi->tstamp_config,
				    sizeof(pi->tstamp_config)) ?
			-EFAULT : 0;
	case SIOCSHWTSTAMP:
		if (copy_from_user(&pi->tstamp_config, req->ifr_data,
				   sizeof(pi->tstamp_config)))
			return -EFAULT;

		if (!is_t4(adapter->params.chip)) {
			switch (pi->tstamp_config.tx_type) {
			case HWTSTAMP_TX_OFF:
			case HWTSTAMP_TX_ON:
				break;
			default:
				return -ERANGE;
			}

			switch (pi->tstamp_config.rx_filter) {
			case HWTSTAMP_FILTER_NONE:
				pi->rxtstamp = false;
				break;
			case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
				cxgb4_ptprx_timestamping(pi, pi->port_id,
							 PTP_TS_L4);
				break;
			case HWTSTAMP_FILTER_PTP_V2_EVENT:
				cxgb4_ptprx_timestamping(pi, pi->port_id,
							 PTP_TS_L2_L4);
				break;
			case HWTSTAMP_FILTER_ALL:
			case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
				pi->rxtstamp = true;
				break;
			default:
				pi->tstamp_config.rx_filter =
					HWTSTAMP_FILTER_NONE;
				return -ERANGE;
			}

			if ((pi->tstamp_config.tx_type == HWTSTAMP_TX_OFF) &&
			    (pi->tstamp_config.rx_filter ==
				HWTSTAMP_FILTER_NONE)) {
				if (cxgb4_ptp_txtype(adapter, pi->port_id) >= 0)
					pi->ptp_enable = false;
			}

			if (pi->tstamp_config.rx_filter !=
				HWTSTAMP_FILTER_NONE) {
				if (cxgb4_ptp_redirect_rx_packet(adapter,
								 pi) >= 0)
					pi->ptp_enable = true;
			}
		} else {
			/* For T4 Adapters */
			switch (pi->tstamp_config.rx_filter) {
			case HWTSTAMP_FILTER_NONE:
				pi->rxtstamp = false;
				break;
			case HWTSTAMP_FILTER_ALL:
				pi->rxtstamp = true;
				break;
			default:
				pi->tstamp_config.rx_filter =
					HWTSTAMP_FILTER_NONE;
				return -ERANGE;
			}
		}
		return copy_to_user(req->ifr_data, &pi->tstamp_config,
				    sizeof(pi->tstamp_config)) ?
			-EFAULT : 0;
	default:
		return -EOPNOTSUPP;
	}
	return ret;
}
static void cxgb_set_rxmode(struct net_device *dev)
{
	/* unfortunately we can't return errors to the stack */
	set_rxmode(dev, -1, false);
}

static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, new_mtu, -1,
			    -1, -1, -1, true);
	if (!ret)
		dev->mtu = new_mtu;
	return ret;
}
#ifdef CONFIG_PCI_IOV
static int cxgb4_mgmt_open(struct net_device *dev)
{
	/* Turn carrier off since we don't have to transmit anything on this
	 * interface.
	 */
	netif_carrier_off(dev);
	return 0;
}

/* Fill MAC address that will be assigned by the FW */
static void cxgb4_mgmt_fill_vf_station_mac_addr(struct adapter *adap)
{
	u8 hw_addr[ETH_ALEN], macaddr[ETH_ALEN];
	unsigned int i, vf, nvfs;
	u16 a, b;
	int err;
	u8 *na;

	adap->params.pci.vpd_cap_addr = pci_find_capability(adap->pdev,
							    PCI_CAP_ID_VPD);
	err = t4_get_raw_vpd_params(adap, &adap->params.vpd);
	if (err)
		return;

	na = adap->params.vpd.na;
	for (i = 0; i < ETH_ALEN; i++)
		hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
			      hex2val(na[2 * i + 1]));

	a = (hw_addr[0] << 8) | hw_addr[1];
	b = (hw_addr[1] << 8) | hw_addr[2];
	a ^= b;
	a |= 0x0200;	/* locally assigned Ethernet MAC address */
	a &= ~0x0100;	/* not a multicast Ethernet MAC address */
	macaddr[0] = a >> 8;
	macaddr[1] = a & 0xff;

	for (i = 2; i < 5; i++)
		macaddr[i] = hw_addr[i + 1];

	for (vf = 0, nvfs = pci_sriov_get_totalvfs(adap->pdev);
		vf < nvfs; vf++) {
		macaddr[5] = adap->pf * 16 + vf;
		ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, macaddr);
	}
}
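
/* Illustration (hypothetical VPD contents): if the "na" field reads
 * "0007430ABCDE", the hex digits above decode to 00:07:43:0a:bc:de;
 * xor-folding the leading bytes, setting the locally-administered bit and
 * clearing the multicast bit yields a base address, and each VF then gets a
 * unique last byte of pf * 16 + vf.
 */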
static int cxgb4_mgmt_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	int ret;

	/* verify MAC addr is valid */
	if (!is_valid_ether_addr(mac)) {
		dev_err(pi->adapter->pdev_dev,
			"Invalid Ethernet address %pM for VF %d\n",
			mac, vf);
		return -EINVAL;
	}

	dev_info(pi->adapter->pdev_dev,
		 "Setting MAC %pM on VF %d\n", mac, vf);
	ret = t4_set_vf_mac_acl(adap, vf + 1, 1, mac);
	if (!ret)
		ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, mac);
	return ret;
}
static int cxgb4_mgmt_get_vf_config(struct net_device *dev,
				    int vf, struct ifla_vf_info *ivi)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct vf_info *vfinfo;

	if (vf >= adap->num_vfs)
		return -EINVAL;
	vfinfo = &adap->vfinfo[vf];

	ivi->vf = vf;
	ivi->max_tx_rate = vfinfo->tx_rate;
	ivi->min_tx_rate = 0;
	ether_addr_copy(ivi->mac, vfinfo->vf_mac_addr);
	ivi->vlan = vfinfo->vlan;
	return 0;
}

static int cxgb4_mgmt_get_phys_port_id(struct net_device *dev,
				       struct netdev_phys_item_id *ppid)
{
	struct port_info *pi = netdev_priv(dev);
	unsigned int phy_port_id;

	phy_port_id = pi->adapter->adap_idx * 10 + pi->port_id;
	ppid->id_len = sizeof(phy_port_id);
	memcpy(ppid->id, &phy_port_id, ppid->id_len);
	return 0;
}
static int cxgb4_mgmt_set_vf_rate(struct net_device *dev, int vf,
				  int min_tx_rate, int max_tx_rate)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	unsigned int link_ok, speed, mtu;
	u32 fw_pfvf, fw_class;
	int class_id = vf;
	int ret;
	u16 pktsize;

	if (vf >= adap->num_vfs)
		return -EINVAL;

	if (min_tx_rate) {
		dev_err(adap->pdev_dev,
			"Min tx rate (%d) (> 0) for VF %d is Invalid.\n",
			min_tx_rate, vf);
		return -EINVAL;
	}

	ret = t4_get_link_params(pi, &link_ok, &speed, &mtu);
	if (ret != FW_SUCCESS) {
		dev_err(adap->pdev_dev,
			"Failed to get link information for VF %d\n", vf);
		return -EINVAL;
	}

	if (!link_ok) {
		dev_err(adap->pdev_dev, "Link down for VF %d\n", vf);
		return -EINVAL;
	}

	if (max_tx_rate > speed) {
		dev_err(adap->pdev_dev,
			"Max tx rate %d for VF %d can't be > link-speed %u",
			max_tx_rate, vf, speed);
		return -EINVAL;
	}

	pktsize = mtu;
	/* subtract ethhdr size and 4 bytes crc since, f/w appends it */
	pktsize = pktsize - sizeof(struct ethhdr) - 4;
	/* subtract ipv4 hdr size, tcp hdr size to get typical IPv4 MSS size */
	pktsize = pktsize - sizeof(struct iphdr) - sizeof(struct tcphdr);
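	/* Worked example: with an MTU of 1500 this leaves
	 * 1500 - 14 (ethhdr) - 4 (CRC) - 20 (iphdr) - 20 (tcphdr) = 1442
	 * bytes as the MSS handed to the scheduler below.
	 */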
	/* configure Traffic Class for rate-limiting */
	ret = t4_sched_params(adap, SCHED_CLASS_TYPE_PACKET,
			      SCHED_CLASS_LEVEL_CL_RL,
			      SCHED_CLASS_MODE_CLASS,
			      SCHED_CLASS_RATEUNIT_BITS,
			      SCHED_CLASS_RATEMODE_ABS,
			      pi->tx_chan, class_id, 0,
			      max_tx_rate * 1000, 0, pktsize);
	if (ret) {
		dev_err(adap->pdev_dev, "Err %d for Traffic Class config\n",
			ret);
		return -EINVAL;
	}
	dev_info(adap->pdev_dev,
		 "Class %d with MSS %u configured with rate %u\n",
		 class_id, pktsize, max_tx_rate);

	/* bind VF to configured Traffic Class */
	fw_pfvf = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
		   FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH));
	fw_class = class_id;
	ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1, &fw_pfvf,
			    &fw_class);
	if (ret) {
		dev_err(adap->pdev_dev,
			"Err %d in binding VF %d to Traffic Class %d\n",
			ret, vf, class_id);
		return -EINVAL;
	}
	dev_info(adap->pdev_dev, "PF %d VF %d is bound to Class %d\n",
		 adap->pf, vf, class_id);
	adap->vfinfo[vf].tx_rate = max_tx_rate;
	return 0;
}
static int cxgb4_mgmt_set_vf_vlan(struct net_device *dev, int vf,
				  u16 vlan, u8 qos, __be16 vlan_proto)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	int ret;

	if (vf >= adap->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan_proto != htons(ETH_P_8021Q) || qos != 0)
		return -EPROTONOSUPPORT;

	ret = t4_set_vlan_acl(adap, adap->mbox, vf + 1, vlan);
	if (!ret) {
		adap->vfinfo[vf].vlan = vlan;
		return 0;
	}

	dev_err(adap->pdev_dev, "Err %d %s VLAN ACL for PF/VF %d/%d\n",
		ret, (vlan ? "setting" : "clearing"), adap->pf, vf);
	return ret;
}
#endif /* CONFIG_PCI_IOV */
static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
	int ret;
	struct sockaddr *addr = p;
	struct port_info *pi = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = t4_change_mac(pi->adapter, pi->adapter->pf, pi->viid,
			    pi->xact_addr_filt, addr->sa_data, true, true);
	if (ret < 0)
		return ret;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	pi->xact_addr_filt = ret;
	return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & USING_MSIX) {
		int i;
		struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];

		for (i = pi->nqsets; i; i--, rx++)
			t4_sge_intr_msix(0, &rx->rspq);
	} else
		t4_intr_handler(adap)(0, adap);
}
#endif
static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sched_class *e;
	struct ch_sched_params p;
	struct ch_sched_queue qe;
	u32 req_rate;
	int err = 0;

	if (!can_sched(dev))
		return -ENOTSUPP;

	if (index < 0 || index > pi->nqsets - 1)
		return -EINVAL;

	if (!(adap->flags & FULL_INIT_DONE)) {
		dev_err(adap->pdev_dev,
			"Failed to rate limit on queue %d. Link Down?\n",
			index);
		return -EINVAL;
	}

	/* Convert from Mbps to Kbps */
	req_rate = rate * 1000;
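	/* e.g. a request of 2500 (Mbps) becomes req_rate = 2,500,000 Kbps,
	 * which is then checked against the 100 Gbps scheduler ceiling below.
	 */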
	/* Max rate is 100 Gbps */
	if (req_rate > SCHED_MAX_RATE_KBPS) {
		dev_err(adap->pdev_dev,
			"Invalid rate %u Mbps, Max rate is %u Mbps\n",
			rate, SCHED_MAX_RATE_KBPS / 1000);
		return -ERANGE;
	}

	/* First unbind the queue from any existing class */
	memset(&qe, 0, sizeof(qe));
	qe.queue = index;
	qe.class = SCHED_CLS_NONE;

	err = cxgb4_sched_class_unbind(dev, (void *)(&qe), SCHED_QUEUE);
	if (err) {
		dev_err(adap->pdev_dev,
			"Unbinding Queue %d on port %d fail. Err: %d\n",
			index, pi->port_id, err);
		return err;
	}

	/* Queue already unbound */
	if (!req_rate)
		return 0;

	/* Fetch any available unused or matching scheduling class */
	memset(&p, 0, sizeof(p));
	p.type = SCHED_CLASS_TYPE_PACKET;
	p.u.params.level    = SCHED_CLASS_LEVEL_CL_RL;
	p.u.params.mode     = SCHED_CLASS_MODE_CLASS;
	p.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS;
	p.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS;
	p.u.params.channel  = pi->tx_chan;
	p.u.params.class    = SCHED_CLS_NONE;
	p.u.params.minrate  = 0;
	p.u.params.maxrate  = req_rate;
	p.u.params.weight   = 0;
	p.u.params.pktsize  = dev->mtu;

	e = cxgb4_sched_class_alloc(dev, &p);
	if (!e)
		return -ENOMEM;

	/* Bind the queue to a scheduling class */
	memset(&qe, 0, sizeof(qe));
	qe.queue = index;
	qe.class = e->idx;

	err = cxgb4_sched_class_bind(dev, (void *)(&qe), SCHED_QUEUE);
	if (err)
		dev_err(adap->pdev_dev,
			"Queue rate limiting failed. Err: %d\n", err);
	return err;
}
static int cxgb_setup_tc_flower(struct net_device *dev,
				struct tc_cls_flower_offload *cls_flower)
{
	switch (cls_flower->command) {
	case TC_CLSFLOWER_REPLACE:
		return cxgb4_tc_flower_replace(dev, cls_flower);
	case TC_CLSFLOWER_DESTROY:
		return cxgb4_tc_flower_destroy(dev, cls_flower);
	case TC_CLSFLOWER_STATS:
		return cxgb4_tc_flower_stats(dev, cls_flower);
	default:
		return -EOPNOTSUPP;
	}
}
static int cxgb_setup_tc_cls_u32(struct net_device *dev,
				 struct tc_cls_u32_offload *cls_u32)
{
	switch (cls_u32->command) {
	case TC_CLSU32_NEW_KNODE:
	case TC_CLSU32_REPLACE_KNODE:
		return cxgb4_config_knode(dev, cls_u32);
	case TC_CLSU32_DELETE_KNODE:
		return cxgb4_delete_knode(dev, cls_u32);
	default:
		return -EOPNOTSUPP;
	}
}
static int cxgb_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				  void *cb_priv)
{
	struct net_device *dev = cb_priv;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);

	if (!(adap->flags & FULL_INIT_DONE)) {
		dev_err(adap->pdev_dev,
			"Failed to setup tc on port %d. Link Down?\n",
			pi->port_id);
		return -EINVAL;
	}

	if (!tc_cls_can_offload_and_chain0(dev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSU32:
		return cxgb_setup_tc_cls_u32(dev, type_data);
	case TC_SETUP_CLSFLOWER:
		return cxgb_setup_tc_flower(dev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
static int cxgb_setup_tc_block(struct net_device *dev,
			       struct tc_block_offload *f)
{
	struct port_info *pi = netdev2pinfo(dev);

	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		return tcf_block_cb_register(f->block, cxgb_setup_tc_block_cb,
					     pi, dev, f->extack);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block, cxgb_setup_tc_block_cb, pi);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type,
			 void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return cxgb_setup_tc_block(dev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
static void cxgb_del_udp_tunnel(struct net_device *netdev,
				struct udp_tunnel_info *ti)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adapter = pi->adapter;
	unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
	u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
	int ret = 0, i;

	if (chip_ver < CHELSIO_T6)
		return;

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		if (!adapter->vxlan_port_cnt ||
		    adapter->vxlan_port != ti->port)
			return; /* Invalid VxLAN destination port */

		adapter->vxlan_port_cnt--;
		if (adapter->vxlan_port_cnt)
			return;

		adapter->vxlan_port = 0;
		t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A, 0);
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (!adapter->geneve_port_cnt ||
		    adapter->geneve_port != ti->port)
			return; /* Invalid GENEVE destination port */

		adapter->geneve_port_cnt--;
		if (adapter->geneve_port_cnt)
			return;

		adapter->geneve_port = 0;
		t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A, 0);
		break;
	default:
		return;
	}

	/* Matchall mac entries can be deleted only after all tunnel ports
	 * are brought down or removed.
	 */
	if (!adapter->rawf_cnt)
		return;
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		ret = t4_free_raw_mac_filt(adapter, pi->viid,
					   match_all_mac, match_all_mac,
					   adapter->rawf_start +
					   pi->port_id,
					   1, pi->port_id, false);
		if (ret < 0) {
			netdev_info(netdev, "Failed to free mac filter entry, for port %d\n",
				    i);
			return;
		}
		atomic_dec(&adapter->mps_encap[adapter->rawf_start +
			   pi->port_id].refcnt);
	}
}
static void cxgb_add_udp_tunnel(struct net_device *netdev,
				struct udp_tunnel_info *ti)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adapter = pi->adapter;
	unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
	u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
	int i, ret;

	if (chip_ver < CHELSIO_T6 || !adapter->rawf_cnt)
		return;

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		/* Callback for adding vxlan port can be called with the same
		 * port for both IPv4 and IPv6. We should not disable the
		 * offloading when the same port for both protocols is added
		 * and later one of them is removed.
		 */
		if (adapter->vxlan_port_cnt &&
		    adapter->vxlan_port == ti->port) {
			adapter->vxlan_port_cnt++;
			return;
		}

		/* We will support only one VxLAN port */
		if (adapter->vxlan_port_cnt) {
			netdev_info(netdev, "UDP port %d already offloaded, not adding port %d\n",
				    be16_to_cpu(adapter->vxlan_port),
				    be16_to_cpu(ti->port));
			return;
		}

		adapter->vxlan_port = ti->port;
		adapter->vxlan_port_cnt = 1;

		t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A,
			     VXLAN_V(be16_to_cpu(ti->port)) | VXLAN_EN_F);
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (adapter->geneve_port_cnt &&
		    adapter->geneve_port == ti->port) {
			adapter->geneve_port_cnt++;
			return;
		}

		/* We will support only one GENEVE port */
		if (adapter->geneve_port_cnt) {
			netdev_info(netdev, "UDP port %d already offloaded, not adding port %d\n",
				    be16_to_cpu(adapter->geneve_port),
				    be16_to_cpu(ti->port));
			return;
		}

		adapter->geneve_port = ti->port;
		adapter->geneve_port_cnt = 1;

		t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A,
			     GENEVE_V(be16_to_cpu(ti->port)) | GENEVE_EN_F);
		break;
	default:
		return;
	}

	/* Create a 'match all' mac filter entry for inner mac,
	 * if raw mac interface is supported. Once the linux kernel provides
	 * driver entry points for adding/deleting the inner mac addresses,
	 * we will remove this 'match all' entry and fallback to adding
	 * exact match filters.
	 */
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);

		ret = t4_alloc_raw_mac_filt(adapter, pi->viid,
					    match_all_mac,
					    match_all_mac,
					    adapter->rawf_start +
					    pi->port_id,
					    1, pi->port_id, false);
		if (ret < 0) {
			netdev_info(netdev, "Failed to allocate a mac filter entry, not adding port %d\n",
				    be16_to_cpu(ti->port));
			cxgb_del_udp_tunnel(netdev, ti);
			return;
		}
		atomic_inc(&adapter->mps_encap[ret].refcnt);
	}
}
static netdev_features_t cxgb_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	if (CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
		return features;

	/* Check if hw supports offload for this packet */
	if (!skb->encapsulation || cxgb_encap_offload_supported(skb))
		return features;

	/* Offload is not supported for this encapsulated packet */
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
static netdev_features_t cxgb_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	/* Disable GRO, if RX_CSUM is disabled */
	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_GRO;

	return features;
}
static const struct net_device_ops cxgb4_netdev_ops = {
	.ndo_open             = cxgb_open,
	.ndo_stop             = cxgb_close,
	.ndo_start_xmit       = t4_start_xmit,
	.ndo_select_queue     = cxgb_select_queue,
	.ndo_get_stats64      = cxgb_get_stats,
	.ndo_set_rx_mode      = cxgb_set_rxmode,
	.ndo_set_mac_address  = cxgb_set_mac_addr,
	.ndo_set_features     = cxgb_set_features,
	.ndo_validate_addr    = eth_validate_addr,
	.ndo_do_ioctl         = cxgb_ioctl,
	.ndo_change_mtu       = cxgb_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller  = cxgb_netpoll,
#endif
#ifdef CONFIG_CHELSIO_T4_FCOE
	.ndo_fcoe_enable      = cxgb_fcoe_enable,
	.ndo_fcoe_disable     = cxgb_fcoe_disable,
#endif /* CONFIG_CHELSIO_T4_FCOE */
	.ndo_set_tx_maxrate   = cxgb_set_tx_maxrate,
	.ndo_setup_tc         = cxgb_setup_tc,
	.ndo_udp_tunnel_add   = cxgb_add_udp_tunnel,
	.ndo_udp_tunnel_del   = cxgb_del_udp_tunnel,
	.ndo_features_check   = cxgb_features_check,
	.ndo_fix_features     = cxgb_fix_features,
};
#ifdef CONFIG_PCI_IOV
static const struct net_device_ops cxgb4_mgmt_netdev_ops = {
	.ndo_open             = cxgb4_mgmt_open,
	.ndo_set_vf_mac       = cxgb4_mgmt_set_vf_mac,
	.ndo_get_vf_config    = cxgb4_mgmt_get_vf_config,
	.ndo_set_vf_rate      = cxgb4_mgmt_set_vf_rate,
	.ndo_get_phys_port_id = cxgb4_mgmt_get_phys_port_id,
	.ndo_set_vf_vlan      = cxgb4_mgmt_set_vf_vlan,
};
#endif
static void cxgb4_mgmt_get_drvinfo(struct net_device *dev,
				   struct ethtool_drvinfo *info)
{
	struct adapter *adapter = netdev2adap(dev);

	strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
	strlcpy(info->version, cxgb4_driver_version,
		sizeof(info->version));
	strlcpy(info->bus_info, pci_name(adapter->pdev),
		sizeof(info->bus_info));
}

static const struct ethtool_ops cxgb4_mgmt_ethtool_ops = {
	.get_drvinfo       = cxgb4_mgmt_get_drvinfo,
};
static void notify_fatal_err(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, fatal_err_notify_task);
	notify_ulds(adap, CXGB4_STATE_FATAL_ERROR);
}
void t4_fatal_err(struct adapter *adap)
{
	int port;

	if (pci_channel_offline(adap->pdev))
		return;

	/* Disable the SGE since ULDs are going to free resources that
	 * could be exposed to the adapter.  RDMA MWs for example...
	 */
	t4_shutdown_adapter(adap);
	for_each_port(adap, port) {
		struct net_device *dev = adap->port[port];

		/* If we get here in very early initialization the network
		 * devices may not have been set up yet.
		 */
		if (!dev)
			continue;

		netif_tx_stop_all_queues(dev);
		netif_carrier_off(dev);
	}
	dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
	queue_work(adap->workq, &adap->fatal_err_notify_task);
}
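
/* The ULD notification is deferred to the adapter workqueue because
 * notify_ulds() takes uld_mutex and may sleep, while t4_fatal_err() can
 * plausibly be reached from error-interrupt paths where sleeping is not
 * allowed.
 */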
static void setup_memwin(struct adapter *adap)
{
	u32 nic_win_base = t4_get_util_window(adap);

	t4_setup_memwin(adap, nic_win_base, MEMWIN_NIC);
}
static void setup_memwin_rdma(struct adapter *adap)
{
	if (adap->vres.ocq.size) {
		u32 start;
		unsigned int sz_kb;

		start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2);
		start &= PCI_BASE_ADDRESS_MEM_MASK;
		start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
		sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
		t4_write_reg(adap,
			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 3),
			     start | BIR_V(1) | WINDOW_V(ilog2(sz_kb)));
		t4_write_reg(adap,
			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3),
			     adap->vres.ocq.start);
		t4_read_reg(adap,
			    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3));
	}
}
/* HMA Definitions */

/* The maximum number of addresses that can be sent in a single FW cmd */
#define HMA_MAX_ADDR_IN_CMD	5

#define HMA_PAGE_SIZE		PAGE_SIZE

#define HMA_MAX_NO_FW_ADDRESS	(16 << 10)  /* FW supports 16K addresses */

#define HMA_PAGE_ORDER					\
	((HMA_PAGE_SIZE < HMA_MAX_NO_FW_ADDRESS) ?	\
	ilog2(HMA_MAX_NO_FW_ADDRESS / HMA_PAGE_SIZE) : 0)

/* The minimum and maximum possible HMA sizes that can be specified in the FW
 * configuration (in units of MB).
 */
#define HMA_MIN_TOTAL_SIZE	1
#define HMA_MAX_TOTAL_SIZE				\
	(((HMA_PAGE_SIZE << HMA_PAGE_ORDER) *		\
	  HMA_MAX_NO_FW_ADDRESS) >> 20)
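
/* Worked example, assuming a 4 KB PAGE_SIZE: HMA_PAGE_ORDER =
 * ilog2(16384 / 4096) = 2, so each allocation spans 16 KB, and
 * HMA_MAX_TOTAL_SIZE = ((4096 << 2) * 16384) >> 20 = 256 MB.
 */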
static void adap_free_hma_mem(struct adapter *adapter)
{
	struct scatterlist *iter;
	struct page *page = NULL;
	int i;

	if (!adapter->hma.sgt)
		return;

	if (adapter->hma.flags & HMA_DMA_MAPPED_FLAG) {
		dma_unmap_sg(adapter->pdev_dev, adapter->hma.sgt->sgl,
			     adapter->hma.sgt->nents, PCI_DMA_BIDIRECTIONAL);
		adapter->hma.flags &= ~HMA_DMA_MAPPED_FLAG;
	}

	for_each_sg(adapter->hma.sgt->sgl, iter,
		    adapter->hma.sgt->orig_nents, i) {
		page = sg_page(iter);
		if (page)
			__free_pages(page, HMA_PAGE_ORDER);
	}

	kfree(adapter->hma.phy_addr);
	sg_free_table(adapter->hma.sgt);
	kfree(adapter->hma.sgt);
	adapter->hma.sgt = NULL;
}
static int adap_config_hma(struct adapter *adapter)
{
	struct scatterlist *sgl, *iter;
	struct sg_table *sgt;
	struct page *newpage;
	unsigned int i, j, k;
	u32 param, hma_size;
	unsigned int ncmds;
	size_t page_size, page_order;
	int node, ret;

	/* HMA is supported only for T6+ cards.
	 * Avoid initializing HMA in kdump kernels.
	 */
	if (is_kdump_kernel() ||
	    CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
		return 0;

	/* Get the HMA region size required by fw */
	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_HMA_SIZE));
	ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
			      1, &param, &hma_size);
	/* An error means card has its own memory or HMA is not supported by
	 * the firmware. Return without any errors.
	 */
	if (ret || !hma_size)
		return 0;

	if (hma_size < HMA_MIN_TOTAL_SIZE ||
	    hma_size > HMA_MAX_TOTAL_SIZE) {
		dev_err(adapter->pdev_dev,
			"HMA size %uMB beyond bounds(%u-%lu)MB\n",
			hma_size, HMA_MIN_TOTAL_SIZE, HMA_MAX_TOTAL_SIZE);
		return -EINVAL;
	}

	page_size = HMA_PAGE_SIZE;
	page_order = HMA_PAGE_ORDER;
	adapter->hma.sgt = kzalloc(sizeof(*adapter->hma.sgt), GFP_KERNEL);
	if (unlikely(!adapter->hma.sgt)) {
		dev_err(adapter->pdev_dev, "HMA SG table allocation failed\n");
		return -ENOMEM;
	}
	sgt = adapter->hma.sgt;
	/* FW returned value will be in MB's */
	sgt->orig_nents = (hma_size << 20) / (page_size << page_order);
	if (sg_alloc_table(sgt, sgt->orig_nents, GFP_KERNEL)) {
		dev_err(adapter->pdev_dev, "HMA SGL allocation failed\n");
		kfree(adapter->hma.sgt);
		adapter->hma.sgt = NULL;
		return -ENOMEM;
	}

	sgl = adapter->hma.sgt->sgl;
	node = dev_to_node(adapter->pdev_dev);
	for_each_sg(sgl, iter, sgt->orig_nents, i) {
		newpage = alloc_pages_node(node, __GFP_NOWARN | GFP_KERNEL |
					   __GFP_ZERO, page_order);
		if (!newpage) {
			dev_err(adapter->pdev_dev,
				"Not enough memory for HMA page allocation\n");
			ret = -ENOMEM;
			goto free_hma;
		}
		sg_set_page(iter, newpage, page_size << page_order, 0);
	}

	sgt->nents = dma_map_sg(adapter->pdev_dev, sgl, sgt->orig_nents,
				PCI_DMA_BIDIRECTIONAL);
	if (!sgt->nents) {
		dev_err(adapter->pdev_dev,
			"Not enough memory for HMA DMA mapping");
		ret = -ENOMEM;
		goto free_hma;
	}
	adapter->hma.flags |= HMA_DMA_MAPPED_FLAG;

	adapter->hma.phy_addr = kcalloc(sgt->nents, sizeof(dma_addr_t),
					GFP_KERNEL);
	if (unlikely(!adapter->hma.phy_addr))
		goto free_hma;

	for_each_sg(sgl, iter, sgt->nents, i) {
		newpage = sg_page(iter);
		adapter->hma.phy_addr[i] = sg_dma_address(iter);
	}

	ncmds = DIV_ROUND_UP(sgt->nents, HMA_MAX_ADDR_IN_CMD);
	/* Pass on the addresses to firmware */
	for (i = 0, k = 0; i < ncmds; i++, k += HMA_MAX_ADDR_IN_CMD) {
		struct fw_hma_cmd hma_cmd;
		u8 naddr = HMA_MAX_ADDR_IN_CMD;
		u8 soc = 0, eoc = 0;
		u8 hma_mode = 1; /* Presently we support only Page table mode */

		soc = (i == 0) ? 1 : 0;
		eoc = (i == ncmds - 1) ? 1 : 0;

		/* For last cmd, set naddr corresponding to remaining
		 * addresses
		 */
		if (i == ncmds - 1) {
			naddr = sgt->nents % HMA_MAX_ADDR_IN_CMD;
			naddr = naddr ? naddr : HMA_MAX_ADDR_IN_CMD;
		}
		memset(&hma_cmd, 0, sizeof(hma_cmd));
		hma_cmd.op_pkd = htonl(FW_CMD_OP_V(FW_HMA_CMD) |
				       FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
		hma_cmd.retval_len16 = htonl(FW_LEN16(hma_cmd));

		hma_cmd.mode_to_pcie_params =
			htonl(FW_HMA_CMD_MODE_V(hma_mode) |
			      FW_HMA_CMD_SOC_V(soc) | FW_HMA_CMD_EOC_V(eoc));

		/* HMA cmd size specified in MB's */
		hma_cmd.naddr_size =
			htonl(FW_HMA_CMD_SIZE_V(hma_size) |
			      FW_HMA_CMD_NADDR_V(naddr));

		/* Total Page size specified in units of 4K */
		hma_cmd.addr_size_pkd =
			htonl(FW_HMA_CMD_ADDR_SIZE_V
				((page_size << page_order) >> 12));

		/* Fill the 5 addresses */
		for (j = 0; j < naddr; j++) {
			hma_cmd.phy_address[j] =
				cpu_to_be64(adapter->hma.phy_addr[j + k]);
		}
		ret = t4_wr_mbox(adapter, adapter->mbox, &hma_cmd,
				 sizeof(hma_cmd), &hma_cmd);
		if (ret) {
			dev_err(adapter->pdev_dev,
				"HMA FW command failed with err %d\n", ret);
			goto free_hma;
		}
	}

	if (!ret)
		dev_info(adapter->pdev_dev,
			 "Reserved %uMB host memory for HMA\n", hma_size);
	return ret;

free_hma:
	adap_free_hma_mem(adapter);
	return ret;
}
static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
{
	u32 v;
	int ret;

	/* Now that we've successfully configured and initialized the adapter
	 * we can ask the Firmware what resources it has provisioned for us.
	 */
	ret = t4_get_pfres(adap);
	if (ret) {
		dev_err(adap->pdev_dev,
			"Unable to retrieve resource provisioning information\n");
		return ret;
	}

	/* get device capabilities */
	memset(c, 0, sizeof(*c));
	c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
			       FW_CMD_REQUEST_F | FW_CMD_READ_F);
	c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
	ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), c);
	if (ret < 0)
		return ret;

	c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
			       FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
	ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL);
	if (ret < 0)
		return ret;

	ret = t4_config_glbl_rss(adap, adap->pf,
				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
				 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
				 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F);
	if (ret < 0)
		return ret;

	ret = t4_cfg_pfvf(adap, adap->mbox, adap->pf, 0, adap->sge.egr_sz, 64,
			  MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF,
			  FW_CMD_CAP_PF);
	if (ret < 0)
		return ret;

	/* tweak some settings */
	t4_write_reg(adap, TP_SHIFT_CNT_A, 0x64f8849);
	t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12));
	t4_write_reg(adap, TP_PIO_ADDR_A, TP_INGRESS_CONFIG_A);
	v = t4_read_reg(adap, TP_PIO_DATA_A);
	t4_write_reg(adap, TP_PIO_DATA_A, v & ~CSUM_HAS_PSEUDO_HDR_F);

	/* first 4 Tx modulation queues point to consecutive Tx channels */
	adap->params.tp.tx_modq_map = 0xE4;
	t4_write_reg(adap, TP_TX_MOD_QUEUE_REQ_MAP_A,
		     TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map));

	/* associate each Tx modulation queue with consecutive Tx channels */
	v = 0x84218421;
	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
			  &v, 1, TP_TX_SCHED_HDR_A);
	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
			  &v, 1, TP_TX_SCHED_FIFO_A);
	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
			  &v, 1, TP_TX_SCHED_PCMD_A);

#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
	if (is_offload(adap)) {
		t4_write_reg(adap, TP_TX_MOD_QUEUE_WEIGHT0_A,
			     TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
		t4_write_reg(adap, TP_TX_MOD_CHANNEL_WEIGHT_A,
			     TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
	}

	/* get basic stuff going */
	return t4_early_init(adap, adap->pf);
}
/*
 * Max # of ATIDs.  The absolute HW max is 16K but we keep it lower.
 */
#define MAX_ATIDS 8192U

/*
 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
 *
 * If the firmware we're dealing with has Configuration File support, then
 * we use that to perform all configuration.
 */

/*
 * Tweak configuration based on module parameters, etc.  Most of these have
 * defaults assigned to them by Firmware Configuration Files (if we're using
 * them) but need to be explicitly set if we're using hard-coded
 * initialization.  But even in the case of using Firmware Configuration
 * Files, we'd like to expose the ability to change these via module
 * parameters so these are essentially common tweaks/settings for
 * Configuration Files and hard-coded initialization ...
 */
static int adap_init0_tweaks(struct adapter *adapter)
{
	/*
	 * Fix up various Host-Dependent Parameters like Page Size, Cache
	 * Line Size, etc.  The firmware default is for a 4KB Page Size and
	 * 64B Cache Line Size ...
	 */
	t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);

	/*
	 * Process module parameters which affect early initialization.
	 */
	if (rx_dma_offset != 2 && rx_dma_offset != 0) {
		dev_err(&adapter->pdev->dev,
			"Ignoring illegal rx_dma_offset=%d, using 2\n",
			rx_dma_offset);
		rx_dma_offset = 2;
	}
	t4_set_reg_field(adapter, SGE_CONTROL_A,
			 PKTSHIFT_V(PKTSHIFT_M),
			 PKTSHIFT_V(rx_dma_offset));

	/*
	 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
	 * adds the pseudo header itself.
	 */
	t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG_A,
			       CSUM_HAS_PSEUDO_HDR_F, 0);

	return 0;
}
/* 10Gb/s-BT PHY Support. chip-external 10Gb/s-BT PHYs are complex chips
 * unto themselves and they contain their own firmware to perform their
 * tasks ...
 */
static int phy_aq1202_version(const u8 *phy_fw_data,
			      size_t phy_fw_size)
{
	int offset;

	/* At offset 0x8 you're looking for the primary image's
	 * starting offset which is 3 Bytes wide
	 *
	 * At offset 0xa of the primary image, you look for the offset
	 * of the DRAM segment which is 3 Bytes wide.
	 *
	 * The FW version is at offset 0x27e of the DRAM and is 2 Bytes
	 * wide
	 */
	#define be16(__p) (((__p)[0] << 8) | (__p)[1])
	#define le16(__p) ((__p)[0] | ((__p)[1] << 8))
	#define le24(__p) (le16(__p) | ((__p)[2] << 16))

	offset = le24(phy_fw_data + 0x8) << 12;
	offset = le24(phy_fw_data + offset + 0xa);
	return be16(phy_fw_data + offset + 0x27e);

	#undef be16
	#undef le16
	#undef le24
}
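
/* Illustrative walk-through (hypothetical image bytes): if the three bytes at
 * 0x8 are 02 00 00, le24 yields 0x2, so the primary image starts at
 * 0x2 << 12 = 0x2000; the 3-byte little-endian value at 0x2000 + 0xa then
 * locates the DRAM segment, and the big-endian halfword at +0x27e of that
 * segment is returned as the firmware version.
 */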
static struct info_10gbt_phy_fw {
	unsigned int phy_fw_id;		/* PCI Device ID */
	char *phy_fw_file;		/* /lib/firmware/ PHY Firmware file */
	int (*phy_fw_version)(const u8 *phy_fw_data, size_t phy_fw_size);
	int phy_flash;			/* Has FLASH for PHY Firmware */
} phy_info_array[] = {
	{
		PHY_AQ1202_DEVICEID,
		PHY_AQ1202_FIRMWARE,
		phy_aq1202_version,
		1,
	},
	{
		PHY_BCM84834_DEVICEID,
		PHY_BCM84834_FIRMWARE,
		NULL,
		0,
	},
};

static struct info_10gbt_phy_fw *find_phy_info(int devid)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(phy_info_array); i++) {
		if (phy_info_array[i].phy_fw_id == devid)
			return &phy_info_array[i];
	}
	return NULL;
}
/* Handle updating of chip-external 10Gb/s-BT PHY firmware.  This needs to
 * happen after the FW_RESET_CMD but before the FW_INITIALIZE_CMD.  On error
 * we return a negative error number.  If we transfer new firmware we return 1
 * (from t4_load_phy_fw()).  If we don't do anything we return 0.
 */
static int adap_init0_phy(struct adapter *adap)
{
	const struct firmware *phyf;
	int ret;
	struct info_10gbt_phy_fw *phy_info;

	/* Use the device ID to determine which PHY file to flash.
	 */
	phy_info = find_phy_info(adap->pdev->device);
	if (!phy_info) {
		dev_warn(adap->pdev_dev,
			 "No PHY Firmware file found for this PHY\n");
		return -EOPNOTSUPP;
	}

	/* If we have a T4 PHY firmware file under /lib/firmware/cxgb4/, then
	 * use that. The adapter firmware provides us with a memory buffer
	 * where we can load a PHY firmware file from the host if we want to
	 * override the PHY firmware File in flash.
	 */
	ret = request_firmware_direct(&phyf, phy_info->phy_fw_file,
				      adap->pdev_dev);
	if (ret < 0) {
		/* For adapters without FLASH attached to PHY for their
		 * firmware, it's obviously a fatal error if we can't get the
		 * firmware to the adapter.  For adapters with PHY firmware
		 * FLASH storage, it's worth a warning if we can't find the
		 * PHY Firmware but we'll neuter the error ...
		 */
		dev_err(adap->pdev_dev, "unable to find PHY Firmware image "
			"/lib/firmware/%s, error %d\n",
			phy_info->phy_fw_file, -ret);
		if (phy_info->phy_flash) {
			int cur_phy_fw_ver = 0;

			t4_phy_fw_ver(adap, &cur_phy_fw_ver);
			dev_warn(adap->pdev_dev, "continuing with, on-adapter "
				 "FLASH copy, version %#x\n", cur_phy_fw_ver);
			ret = 0;
		}

		return ret;
	}

	/* Load PHY Firmware onto adapter.
	 */
	ret = t4_load_phy_fw(adap, MEMWIN_NIC, &adap->win0_lock,
			     phy_info->phy_fw_version,
			     (u8 *)phyf->data, phyf->size);
	if (ret < 0)
		dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n",
			ret);
	else if (ret > 0) {
		int new_phy_fw_ver = 0;

		if (phy_info->phy_fw_version)
			new_phy_fw_ver = phy_info->phy_fw_version(phyf->data,
								  phyf->size);
		dev_info(adap->pdev_dev, "Successfully transferred PHY "
			 "Firmware /lib/firmware/%s, version %#x\n",
			 phy_info->phy_fw_file, new_phy_fw_ver);
	}

	release_firmware(phyf);

	return ret;
}
/*
 * Attempt to initialize the adapter via a Firmware Configuration File.
 */
static int adap_init0_config(struct adapter *adapter, int reset)
{
	struct fw_caps_config_cmd caps_cmd;
	const struct firmware *cf;
	unsigned long mtype = 0, maddr = 0;
	u32 finiver, finicsum, cfcsum;
	int ret;
	int config_issued = 0;
	char *fw_config_file, fw_config_file_path[256];
	char *config_name = NULL;

	/*
	 * Reset device if necessary.
	 */
	if (reset) {
		ret = t4_fw_reset(adapter, adapter->mbox,
				  PIORSTMODE_F | PIORST_F);
		if (ret < 0)
			goto bye;
	}

	/* If this is a 10Gb/s-BT adapter make sure the chip-external
	 * 10Gb/s-BT PHYs have up-to-date firmware.  Note that this step needs
	 * to be performed after any global adapter RESET above since some
	 * PHYs only have local RAM copies of the PHY firmware.
	 */
	if (is_10gbt_device(adapter->pdev->device)) {
		ret = adap_init0_phy(adapter);
		if (ret < 0)
			goto bye;
	}
	/*
	 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
	 * then use that.  Otherwise, use the configuration file stored
	 * in the adapter flash ...
	 */
	switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
	case CHELSIO_T4:
		fw_config_file = FW4_CFNAME;
		break;
	case CHELSIO_T5:
		fw_config_file = FW5_CFNAME;
		break;
	case CHELSIO_T6:
		fw_config_file = FW6_CFNAME;
		break;
	default:
		dev_err(adapter->pdev_dev, "Device %d is not supported\n",
			adapter->pdev->device);
		ret = -EINVAL;
		goto bye;
	}

	ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
	if (ret < 0) {
		config_name = "On FLASH";
		mtype = FW_MEMTYPE_CF_FLASH;
		maddr = t4_flash_cfg_addr(adapter);
	} else {
		u32 params[7], val[7];

		sprintf(fw_config_file_path,
			"/lib/firmware/%s", fw_config_file);
		config_name = fw_config_file_path;

		if (cf->size >= FLASH_CFG_MAX_SIZE)
			ret = -ENOMEM;
		else {
			params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
			     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
			ret = t4_query_params(adapter, adapter->mbox,
					      adapter->pf, 0, 1, params, val);
			if (ret == 0) {
				/*
				 * For t4_memory_rw() below addresses and
				 * sizes have to be in terms of multiples of 4
				 * bytes.  So, if the Configuration File isn't
				 * a multiple of 4 bytes in length we'll have
				 * to write that out separately since we can't
				 * guarantee that the bytes following the
				 * residual byte in the buffer returned by
				 * request_firmware() are zeroed out ...
				 */
				size_t resid = cf->size & 0x3;
				size_t size = cf->size & ~0x3;
				__be32 *data = (__be32 *)cf->data;

				mtype = FW_PARAMS_PARAM_Y_G(val[0]);
				maddr = FW_PARAMS_PARAM_Z_G(val[0]) << 16;

				spin_lock(&adapter->win0_lock);
				ret = t4_memory_rw(adapter, 0, mtype, maddr,
						   size, data, T4_MEMORY_WRITE);
				if (ret == 0 && resid != 0) {
					union {
						__be32 word;
						char buf[4];
					} last;
					int i;

					last.word = data[size >> 2];
					for (i = resid; i < 4; i++)
						last.buf[i] = 0;
					ret = t4_memory_rw(adapter, 0, mtype,
							   maddr + size,
							   4, &last.word,
							   T4_MEMORY_WRITE);
				}
				spin_unlock(&adapter->win0_lock);
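				/* Example of the residual handling above: a
				 * 1027-byte Configuration File gives
				 * size = 1024 and resid = 3, so the first
				 * write covers 1024 bytes and the final word
				 * is zero-padded before the 4-byte tail
				 * write.
				 */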
			}
		}

		release_firmware(cf);
		if (ret)
			goto bye;
	}

	/*
	 * Issue a Capability Configuration command to the firmware to get it
	 * to parse the Configuration File.  We don't use t4_fw_config_file()
	 * because we want the ability to modify various features after we've
	 * processed the configuration file ...
	 */
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write =
		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST_F |
		      FW_CMD_READ_F);
	caps_cmd.cfvalid_to_len16 =
		htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
		      FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
		      FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
		      FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);

	/* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
	 * Configuration File in FLASH), our last gasp effort is to use the
	 * Firmware Configuration File which is embedded in the firmware.  A
	 * very few early versions of the firmware didn't have one embedded
	 * but we can ignore those.
	 */
	if (ret == -ENOENT) {
		memset(&caps_cmd, 0, sizeof(caps_cmd));
		caps_cmd.op_to_write =
			htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
			      FW_CMD_REQUEST_F |
			      FW_CMD_READ_F);
		caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
		ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
				 sizeof(caps_cmd), &caps_cmd);
		config_name = "Firmware Default";
	}

	config_issued = 1;
	if (ret < 0)
		goto bye;

	finiver = ntohl(caps_cmd.finiver);
	finicsum = ntohl(caps_cmd.finicsum);
	cfcsum = ntohl(caps_cmd.cfcsum);
	if (finicsum != cfcsum)
		dev_warn(adapter->pdev_dev, "Configuration File checksum "\
			 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
			 finicsum, cfcsum);

	/*
	 * And now tell the firmware to use the configuration we just loaded.
	 */
	caps_cmd.op_to_write =
		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST_F |
		      FW_CMD_WRITE_F);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 NULL);
	if (ret < 0)
		goto bye;

	/*
	 * Tweak configuration based on system architecture, module
	 * parameters, etc.
	 */
	ret = adap_init0_tweaks(adapter);
	if (ret < 0)
		goto bye;

	/* We will proceed even if HMA init fails. */
	ret = adap_config_hma(adapter);
	if (ret)
		dev_err(adapter->pdev_dev,
			"HMA configuration failed with error %d\n", ret);

	/*
	 * And finally tell the firmware to initialize itself using the
	 * parameters from the Configuration File.
	 */
	ret = t4_fw_initialize(adapter, adapter->mbox);
	if (ret < 0)
		goto bye;

	/* Emit Firmware Configuration File information and return
	 * successfully.
	 */
	dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
		 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
		 config_name, finiver, cfcsum);
	return 0;

	/*
	 * Something bad happened.  Return the error ...  (If the "error"
	 * is that there's no Configuration File on the adapter we don't
	 * want to issue a warning since this is fairly common.)
	 */
bye:
	if (config_issued && ret != -ENOENT)
		dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
			 config_name, -ret);
	return ret;
}
static struct fw_info fw_info_array[] = {
	{
		.chip = CHELSIO_T4,
		.fs_name = FW4_CFNAME,
		.fw_mod_name = FW4_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T4,
			.fw_ver = __cpu_to_be32(FW_VERSION(T4)),
			.intfver_nic = FW_INTFVER(T4, NIC),
			.intfver_vnic = FW_INTFVER(T4, VNIC),
			.intfver_ri = FW_INTFVER(T4, RI),
			.intfver_iscsi = FW_INTFVER(T4, ISCSI),
			.intfver_fcoe = FW_INTFVER(T4, FCOE),
		},
	}, {
		.chip = CHELSIO_T5,
		.fs_name = FW5_CFNAME,
		.fw_mod_name = FW5_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T5,
			.fw_ver = __cpu_to_be32(FW_VERSION(T5)),
			.intfver_nic = FW_INTFVER(T5, NIC),
			.intfver_vnic = FW_INTFVER(T5, VNIC),
			.intfver_ri = FW_INTFVER(T5, RI),
			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
			.intfver_fcoe = FW_INTFVER(T5, FCOE),
		},
	}, {
		.chip = CHELSIO_T6,
		.fs_name = FW6_CFNAME,
		.fw_mod_name = FW6_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T6,
			.fw_ver = __cpu_to_be32(FW_VERSION(T6)),
			.intfver_nic = FW_INTFVER(T6, NIC),
			.intfver_vnic = FW_INTFVER(T6, VNIC),
			.intfver_ofld = FW_INTFVER(T6, OFLD),
			.intfver_ri = FW_INTFVER(T6, RI),
			.intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU),
			.intfver_iscsi = FW_INTFVER(T6, ISCSI),
			.intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
			.intfver_fcoe = FW_INTFVER(T6, FCOE),
		},
	}
};
static struct fw_info *find_fw_info(int chip)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
		if (fw_info_array[i].chip == chip)
			return &fw_info_array[i];
	}
	return NULL;
}
/*
 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
 */
static int adap_init0(struct adapter *adap)
{
	int ret;
	u32 v, port_vec;
	enum dev_state state;
	u32 params[7], val[7];
	struct fw_caps_config_cmd caps_cmd;
	int reset = 1;

	/* Grab Firmware Device Log parameters as early as possible so we have
	 * access to it for debugging, etc.
	 */
	ret = t4_init_devlog_params(adap);
	if (ret < 0)
		return ret;

	/* Contact FW, advertising Master capability */
	ret = t4_fw_hello(adap, adap->mbox, adap->mbox,
			  is_kdump_kernel() ? MASTER_MUST : MASTER_MAY, &state);
	if (ret < 0) {
		dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
			ret);
		return ret;
	}
	if (ret == adap->mbox)
		adap->flags |= MASTER_PF;

	/*
	 * If we're the Master PF Driver and the device is uninitialized,
	 * then let's consider upgrading the firmware ...  (We always want
	 * to check the firmware version number in order to A. get it for
	 * later reporting and B. to warn if the currently loaded firmware
	 * is excessively mismatched relative to the driver.)
	 */

	t4_get_version_info(adap);
	ret = t4_check_fw_version(adap);
	/* If firmware is too old (not supported by driver) force an update. */
	if (ret)
		state = DEV_STATE_UNINIT;
	if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
		struct fw_info *fw_info;
		struct fw_hdr *card_fw;
		const struct firmware *fw;
		const u8 *fw_data = NULL;
		unsigned int fw_size = 0;

		/* This is the firmware whose headers the driver was compiled
		 * against
		 */
		fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
		if (fw_info == NULL) {
			dev_err(adap->pdev_dev,
				"unable to get firmware info for chip %d.\n",
				CHELSIO_CHIP_VERSION(adap->params.chip));
			return -EINVAL;
		}

		/* allocate memory to read the header of the firmware on the
		 * card
		 */
		card_fw = kvzalloc(sizeof(*card_fw), GFP_KERNEL);
		if (!card_fw) {
			ret = -ENOMEM;
			goto bye;
		}

		/* Get FW from /lib/firmware/ */
		ret = request_firmware(&fw, fw_info->fw_mod_name,
				       adap->pdev_dev);
		if (ret < 0) {
			dev_err(adap->pdev_dev,
				"unable to load firmware image %s, error %d\n",
				fw_info->fw_mod_name, ret);
		} else {
			fw_data = fw->data;
			fw_size = fw->size;
		}

		/* upgrade FW logic */
		ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
				 state, &reset);

		/* Cleaning up */
		release_firmware(fw);
		kvfree(card_fw);

		if (ret < 0)
			goto bye;
	}

	/* If the firmware is initialized already, emit a simple note to that
	 * effect. Otherwise, it's time to try initializing the adapter.
	 */
	if (state == DEV_STATE_INIT) {
		ret = adap_config_hma(adap);
		if (ret)
			dev_err(adap->pdev_dev,
				"HMA configuration failed with error %d\n",
				ret);
		dev_info(adap->pdev_dev, "Coming up as %s: "\
			 "Adapter already initialized\n",
			 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
	} else {
		dev_info(adap->pdev_dev, "Coming up as MASTER: "\
			 "Initializing adapter\n");

		/* Find out whether we're dealing with a version of the
		 * firmware which has configuration file support.
		 */
		params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
			     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
				      params, val);

		/* If the firmware doesn't support Configuration Files,
		 * return an error.
		 */
		if (ret < 0) {
			dev_err(adap->pdev_dev, "firmware doesn't support "
				"Firmware Configuration Files\n");
			goto bye;
		}

		/* The firmware provides us with a memory buffer where we can
		 * load a Configuration File from the host if we want to
		 * override the Configuration File in flash.
		 */
		ret = adap_init0_config(adap, reset);
		if (ret == -ENOENT) {
			dev_err(adap->pdev_dev, "no Configuration File "
				"present on adapter.\n");
			goto bye;
		}
		if (ret < 0) {
			dev_err(adap->pdev_dev, "could not initialize "
				"adapter, error %d\n", -ret);
			goto bye;
		}
	}

	/* Now that we've successfully configured and initialized the adapter
	 * (or found it already initialized), we can ask the Firmware what
	 * resources it has provisioned for us.
	 */
	ret = t4_get_pfres(adap);
	if (ret) {
		dev_err(adap->pdev_dev,
			"Unable to retrieve resource provisioning information\n");
		goto bye;
	}

	/* Grab VPD parameters.  This should be done after we establish a
	 * connection to the firmware since some of the VPD parameters
	 * (notably the Core Clock frequency) are retrieved via requests to
	 * the firmware.  On the other hand, we need these fairly early on
	 * so we do this right after getting ahold of the firmware.
	 *
	 * We need to do this after initializing the adapter because someone
	 * could have FLASHed a new VPD which won't be read by the firmware
	 * until we do the RESET ...
	 */
	ret = t4_get_vpd_params(adap, &adap->params.vpd);
	if (ret < 0)
		goto bye;

	/* Find out what ports are available to us.  Note that we need to do
	 * this before calling adap_init0_no_config() since it needs nports
	 * and portvec ...
	 */
	v =
	    FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
	    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
	if (ret < 0)
		goto bye;

	adap->params.nports = hweight32(port_vec);
	adap->params.portvec = port_vec;

	/* Give the SGE code a chance to pull in anything that it needs ...
	 * Note that this must be called after we retrieve our VPD parameters
	 * in order to know how to convert core ticks to seconds, etc.
	 */
	ret = t4_sge_init(adap);
	if (ret < 0)
		goto bye;

	if (is_bypass_device(adap->pdev->device))
		adap->params.bypass = 1;

	/*
	 * Grab some of our basic fundamental operating parameters.
	 */
#define FW_PARAM_DEV(param) \
	(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \
	FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param))

#define FW_PARAM_PFVF(param) \
	FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
	FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param)| \
	FW_PARAMS_PARAM_Y_V(0) | \
	FW_PARAMS_PARAM_Z_V(0)
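
/* These helpers just compose a 32-bit firmware parameter ID: a mnemonic
 * (device-global DEV vs. per-function PFVF), the parameter index, and, for
 * PFVF parameters, zeroed Y/Z sub-indices. FW_PARAM_PFVF(EQ_START) below,
 * for instance, expands to the PFVF mnemonic with the PFVF_EQ_START index.
 */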
4299 params
[0] = FW_PARAM_PFVF(EQ_START
);
4300 params
[1] = FW_PARAM_PFVF(L2T_START
);
4301 params
[2] = FW_PARAM_PFVF(L2T_END
);
4302 params
[3] = FW_PARAM_PFVF(FILTER_START
);
4303 params
[4] = FW_PARAM_PFVF(FILTER_END
);
4304 params
[5] = FW_PARAM_PFVF(IQFLINT_START
);
4305 ret
= t4_query_params(adap
, adap
->mbox
, adap
->pf
, 0, 6, params
, val
);
4308 adap
->sge
.egr_start
= val
[0];
4309 adap
->l2t_start
= val
[1];
4310 adap
->l2t_end
= val
[2];
4311 adap
->tids
.ftid_base
= val
[3];
4312 adap
->tids
.nftids
= val
[4] - val
[3] + 1;
4313 adap
->sge
.ingr_start
= val
[5];

	if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
		/* Read the raw mps entries. In T6, the last 2 tcam entries
		 * are reserved for raw mac addresses (rawf = 2, one per port).
		 */
		params[0] = FW_PARAM_PFVF(RAWF_START);
		params[1] = FW_PARAM_PFVF(RAWF_END);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
				      params, val);
		if (ret == 0) {
			adap->rawf_start = val[0];
			adap->rawf_cnt = val[1] - val[0] + 1;
		}
	}

	/* qids (ingress/egress) returned from firmware can be anywhere
	 * in the range from EQ(IQFLINT)_START to EQ(IQFLINT)_END.
	 * Hence driver needs to allocate memory for this range to
	 * store the queue info. Get the highest IQFLINT/EQ index returned
	 * in FW_EQ_*_CMD.alloc command.
	 */
	params[0] = FW_PARAM_PFVF(EQ_END);
	params[1] = FW_PARAM_PFVF(IQFLINT_END);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
	if (ret < 0)
		goto bye;
	adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1;
	adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1;

	adap->sge.egr_map = kcalloc(adap->sge.egr_sz,
				    sizeof(*adap->sge.egr_map), GFP_KERNEL);
	if (!adap->sge.egr_map) {
		ret = -ENOMEM;
		goto bye;
	}

	adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz,
				     sizeof(*adap->sge.ingr_map), GFP_KERNEL);
	if (!adap->sge.ingr_map) {
		ret = -ENOMEM;
		goto bye;
	}

	/* Allocate the memory for the various egress queue bitmaps
	 * ie starving_fl, txq_maperr and blocked_fl.
	 */
	adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
					sizeof(long), GFP_KERNEL);
	if (!adap->sge.starving_fl) {
		ret = -ENOMEM;
		goto bye;
	}

	adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
				       sizeof(long), GFP_KERNEL);
	if (!adap->sge.txq_maperr) {
		ret = -ENOMEM;
		goto bye;
	}

#ifdef CONFIG_DEBUG_FS
	adap->sge.blocked_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
				       sizeof(long), GFP_KERNEL);
	if (!adap->sge.blocked_fl) {
		ret = -ENOMEM;
		goto bye;
	}
#endif
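	/* A quick sizing example: BITS_TO_LONGS() rounds each bitmap up to
	 * whole longs, so with an illustrative egr_sz of 1024 egress queue
	 * IDs, each bitmap above occupies 1024 / 64 == 16 longs (128 bytes)
	 * on a 64-bit kernel.
	 */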
	params[0] = FW_PARAM_PFVF(CLIP_START);
	params[1] = FW_PARAM_PFVF(CLIP_END);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
	if (ret < 0)
		goto bye;
	adap->clipt_start = val[0];
	adap->clipt_end = val[1];

	/* We don't yet have a PARAMs calls to retrieve the number of Traffic
	 * Classes supported by the hardware/firmware so we hard code it here
	 * for now.
	 */
	adap->params.nsched_cls = is_t4(adap->params.chip) ? 15 : 16;

	/* query params related to active filter region */
	params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
	params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
	/* If Active filter size is set we enable establishing
	 * offload connection through firmware work request
	 */
	if ((val[0] != val[1]) && (ret >= 0)) {
		adap->flags |= FW_OFLD_CONN;
		adap->tids.aftid_base = val[0];
		adap->tids.aftid_end = val[1];
	}

	/* If we're running on newer firmware, let it know that we're
	 * prepared to deal with encapsulated CPL messages.  Older
	 * firmware won't understand this and we'll just get
	 * unencapsulated messages ...
	 */
	params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
	val[0] = 1;
	(void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val);

	/*
	 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
	 * capability.  Earlier versions of the firmware didn't have the
	 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
	 * permission to use ULPTX MEMWRITE DSGL.
	 */
	if (is_t4(adap->params.chip)) {
		adap->params.ulptx_memwrite_dsgl = false;
	} else {
		params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
				      1, params, val);
		adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
	}

	/* See if FW supports FW_RI_FR_NSMR_TPTE_WR work request */
	params[0] = FW_PARAM_DEV(RI_FR_NSMR_TPTE_WR);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
			      1, params, val);
	adap->params.fr_nsmr_tpte_wr_support = (ret == 0 && val[0] != 0);

	/* See if FW supports FW_FILTER2 work request */
	if (is_t4(adap->params.chip)) {
		adap->params.filter2_wr_support = 0;
	} else {
		params[0] = FW_PARAM_DEV(FILTER2_WR);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
				      1, params, val);
		adap->params.filter2_wr_support = (ret == 0 && val[0] != 0);
	}

	/*
	 * Get device capabilities so we can determine what resources we need
	 * to manage.
	 */
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
				     FW_CMD_REQUEST_F | FW_CMD_READ_F);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);
	if (ret < 0)
		goto bye;

	if (caps_cmd.ofldcaps ||
	    (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER))) {
		/* query offload-related parameters */
		params[0] = FW_PARAM_DEV(NTID);
		params[1] = FW_PARAM_PFVF(SERVER_START);
		params[2] = FW_PARAM_PFVF(SERVER_END);
		params[3] = FW_PARAM_PFVF(TDDP_START);
		params[4] = FW_PARAM_PFVF(TDDP_END);
		params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->tids.ntids = val[0];
		adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
		adap->tids.stid_base = val[1];
		adap->tids.nstids = val[2] - val[1] + 1;
		/*
		 * Setup server filter region. Divide the available filter
		 * region into two parts. Regular filters get 1/3rd and server
		 * filters get 2/3rd part. This is only enabled if the
		 * workaround path is enabled.
		 * 1. For regular filters.
		 * 2. Server filters: these are special filters which are used
		 *    to redirect SYN packets to the offload queue.
		 */
		if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
			adap->tids.sftid_base = adap->tids.ftid_base +
						DIV_ROUND_UP(adap->tids.nftids, 3);
			adap->tids.nsftids = adap->tids.nftids -
					     DIV_ROUND_UP(adap->tids.nftids, 3);
			adap->tids.nftids = adap->tids.sftid_base -
					    adap->tids.ftid_base;
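			/* Worked example: with nftids == 100 on entry,
			 * DIV_ROUND_UP(100, 3) == 34, so regular filters keep
			 * indexes [ftid_base, ftid_base + 33] (nftids becomes
			 * 34) and server filters take the remaining 66
			 * entries starting at sftid_base.
			 */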
		}
		adap->vres.ddp.start = val[3];
		adap->vres.ddp.size = val[4] - val[3] + 1;
		adap->params.ofldq_wr_cred = val[5];

		if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER)) {
			ret = init_hash_filter(adap);
			if (ret < 0)
				goto bye;
		} else {
			adap->params.offload = 1;
			adap->num_ofld_uld += 1;
		}
	}
	if (caps_cmd.rdmacaps) {
		params[0] = FW_PARAM_PFVF(STAG_START);
		params[1] = FW_PARAM_PFVF(STAG_END);
		params[2] = FW_PARAM_PFVF(RQ_START);
		params[3] = FW_PARAM_PFVF(RQ_END);
		params[4] = FW_PARAM_PFVF(PBL_START);
		params[5] = FW_PARAM_PFVF(PBL_END);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->vres.stag.start = val[0];
		adap->vres.stag.size = val[1] - val[0] + 1;
		adap->vres.rq.start = val[2];
		adap->vres.rq.size = val[3] - val[2] + 1;
		adap->vres.pbl.start = val[4];
		adap->vres.pbl.size = val[5] - val[4] + 1;

		params[0] = FW_PARAM_PFVF(SRQ_START);
		params[1] = FW_PARAM_PFVF(SRQ_END);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
				      params, val);
		if (!ret) {
			adap->vres.srq.start = val[0];
			adap->vres.srq.size = val[1] - val[0] + 1;
		}
		if (adap->vres.srq.size) {
			adap->srq = t4_init_srq(adap->vres.srq.size);
			if (!adap->srq)
				dev_warn(&adap->pdev->dev, "could not allocate SRQ, continuing\n");
		}

		params[0] = FW_PARAM_PFVF(SQRQ_START);
		params[1] = FW_PARAM_PFVF(SQRQ_END);
		params[2] = FW_PARAM_PFVF(CQ_START);
		params[3] = FW_PARAM_PFVF(CQ_END);
		params[4] = FW_PARAM_PFVF(OCQ_START);
		params[5] = FW_PARAM_PFVF(OCQ_END);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params,
				      val);
		if (ret < 0)
			goto bye;
		adap->vres.qp.start = val[0];
		adap->vres.qp.size = val[1] - val[0] + 1;
		adap->vres.cq.start = val[2];
		adap->vres.cq.size = val[3] - val[2] + 1;
		adap->vres.ocq.start = val[4];
		adap->vres.ocq.size = val[5] - val[4] + 1;

		params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
		params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params,
				      val);
		if (ret < 0) {
			adap->params.max_ordird_qp = 8;
			adap->params.max_ird_adapter = 32 * adap->tids.ntids;
			ret = 0;
		} else {
			adap->params.max_ordird_qp = val[0];
			adap->params.max_ird_adapter = val[1];
		}
		dev_info(adap->pdev_dev,
			 "max_ordird_qp %d max_ird_adapter %d\n",
			 adap->params.max_ordird_qp,
			 adap->params.max_ird_adapter);

		/* Enable write_with_immediate if FW supports it */
		params[0] = FW_PARAM_DEV(RDMA_WRITE_WITH_IMM);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params,
				      val);
		adap->params.write_w_imm_support = (ret == 0 && val[0] != 0);

		/* Enable write_cmpl if FW supports it */
		params[0] = FW_PARAM_DEV(RI_WRITE_CMPL_WR);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params,
				      val);
		adap->params.write_cmpl_support = (ret == 0 && val[0] != 0);
		adap->num_ofld_uld += 2;
	}
	if (caps_cmd.iscsicaps) {
		params[0] = FW_PARAM_PFVF(ISCSI_START);
		params[1] = FW_PARAM_PFVF(ISCSI_END);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->vres.iscsi.start = val[0];
		adap->vres.iscsi.size = val[1] - val[0] + 1;
		/* LIO target and cxgb4i initiator */
		adap->num_ofld_uld += 2;
	}
	if (caps_cmd.cryptocaps) {
		if (ntohs(caps_cmd.cryptocaps) &
		    FW_CAPS_CONFIG_CRYPTO_LOOKASIDE) {
			params[0] = FW_PARAM_PFVF(NCRYPTO_LOOKASIDE);
			ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
					      2, params, val);
			if (ret < 0) {
				if (ret != -EINVAL)
					goto bye;
			} else {
				adap->vres.ncrypto_fc = val[0];
			}
			adap->num_ofld_uld += 1;
		}
		if (ntohs(caps_cmd.cryptocaps) &
		    FW_CAPS_CONFIG_TLS_INLINE) {
			params[0] = FW_PARAM_PFVF(TLS_START);
			params[1] = FW_PARAM_PFVF(TLS_END);
			ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
					      2, params, val);
			if (ret < 0)
				goto bye;
			adap->vres.key.start = val[0];
			adap->vres.key.size = val[1] - val[0] + 1;
			adap->num_uld += 1;
		}
		adap->params.crypto = ntohs(caps_cmd.cryptocaps);
	}
#undef FW_PARAM_PFVF
#undef FW_PARAM_DEV

	/* The MTU/MSS Table is initialized by now, so load their values.  If
	 * we're initializing the adapter, then we'll make any modifications
	 * we want to the MTU/MSS Table and also initialize the congestion
	 * parameters.
	 */
	t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
	if (state != DEV_STATE_INIT) {
		int i;

		/* The default MTU Table contains values 1492 and 1500.
		 * However, for TCP, it's better to have two values which are
		 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
		 * This allows us to have a TCP Data Payload which is a
		 * multiple of 8 regardless of what combination of TCP Options
		 * are in use (always a multiple of 4 bytes) which is
		 * important for performance reasons.  For instance, if no
		 * options are in use, then we have a 20-byte IP header and a
		 * 20-byte TCP header.  In this case, a 1500-byte MSS would
		 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
		 * which is not a multiple of 8.  So using an MSS of 1488 in
		 * this case results in a TCP Data Payload of 1448 bytes which
		 * is a multiple of 8.  On the other hand, if 12-byte TCP Time
		 * Stamps have been negotiated, then an MTU of 1500 bytes
		 * results in a TCP Data Payload of 1448 bytes which, as
		 * above, is a multiple of 8 bytes ...
		 */
		for (i = 0; i < NMTUS; i++)
			if (adap->params.mtus[i] == 1492) {
				adap->params.mtus[i] = 1488;
				break;
			}

		t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
			     adap->params.b_wnd);
	}
	t4_init_sge_params(adap);
	adap->flags |= FW_OK;
	t4_init_tp_params(adap, true);
	return 0;

	/*
	 * Something bad happened.  If a command timed out or failed with EIO
	 * FW does not operate within its spec or something catastrophic
	 * happened to HW/FW, stop issuing commands.
	 */
bye:
	adap_free_hma_mem(adap);
	kfree(adap->sge.egr_map);
	kfree(adap->sge.ingr_map);
	kfree(adap->sge.starving_fl);
	kfree(adap->sge.txq_maperr);
#ifdef CONFIG_DEBUG_FS
	kfree(adap->sge.blocked_fl);
#endif
	if (ret != -ETIMEDOUT && ret != -EIO)
		t4_fw_bye(adap, adap->mbox);
	return ret;
}
/* EEH callbacks */

static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
					 pci_channel_state_t state)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		goto out;

	rtnl_lock();
	adap->flags &= ~FW_OK;
	notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
	spin_lock(&adap->stats_lock);
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		if (dev) {
			netif_device_detach(dev);
			netif_carrier_off(dev);
		}
	}
	spin_unlock(&adap->stats_lock);
	disable_interrupts(adap);
	if (adap->flags & FULL_INIT_DONE)
		cxgb_down(adap);
	rtnl_unlock();
	if ((adap->flags & DEV_ENABLED)) {
		pci_disable_device(pdev);
		adap->flags &= ~DEV_ENABLED;
	}
out:	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}
static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
{
	int i, ret;
	struct fw_caps_config_cmd c;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap) {
		pci_restore_state(pdev);
		pci_save_state(pdev);
		return PCI_ERS_RESULT_RECOVERED;
	}

	if (!(adap->flags & DEV_ENABLED)) {
		if (pci_enable_device(pdev)) {
			dev_err(&pdev->dev, "Cannot reenable PCI "
					    "device after reset\n");
			return PCI_ERS_RESULT_DISCONNECT;
		}
		adap->flags |= DEV_ENABLED;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);
	pci_cleanup_aer_uncorrect_error_status(pdev);

	if (t4_wait_dev_ready(adap->regs) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	adap->flags |= FW_OK;
	if (adap_init1(adap, &c))
		return PCI_ERS_RESULT_DISCONNECT;

	for_each_port(adap, i) {
		struct port_info *p = adap2pinfo(adap, i);

		ret = t4_alloc_vi(adap, adap->mbox, p->tx_chan, adap->pf, 0, 1,
				  NULL, NULL);
		if (ret < 0)
			return PCI_ERS_RESULT_DISCONNECT;
		p->viid = ret;
		p->xact_addr_filt = -1;
	}

	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
		     adap->params.b_wnd);
	setup_memwin(adap);
	if (cxgb_up(adap))
		return PCI_ERS_RESULT_DISCONNECT;
	return PCI_ERS_RESULT_RECOVERED;
}
static void eeh_resume(struct pci_dev *pdev)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		return;

	rtnl_lock();
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		if (dev) {
			if (netif_running(dev)) {
				link_start(dev);
				cxgb_set_rxmode(dev);
			}
			netif_device_attach(dev);
		}
	}
	rtnl_unlock();
}

static const struct pci_error_handlers cxgb4_eeh = {
	.error_detected = eeh_err_detected,
	.slot_reset     = eeh_slot_reset,
	.resume         = eeh_resume,
};
/* Return true if the Link Configuration supports "High Speeds" (those greater
 * than 1Gb/s).
 */
static inline bool is_x_10g_port(const struct link_config *lc)
{
	unsigned int speeds, high_speeds;

	speeds = FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_G(lc->pcaps));
	high_speeds = speeds &
		      ~(FW_PORT_CAP32_SPEED_100M | FW_PORT_CAP32_SPEED_1G);

	return high_speeds != 0;
}
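/* For example, a port whose pcaps advertise 100M|1G|10G yields
 * high_speeds == 10G after the masking above, so it counts as a
 * "10G or better" port; a 100M|1G-only port yields 0 and does not.
 */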
/*
 * Perform default configuration of DMA queues depending on the number and type
 * of ports we found and the number of available CPUs.  Most settings can be
 * modified by the admin prior to actual use.
 */
static int cfg_queues(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int i, n10g = 0, qidx = 0;
	int niqflint, neq, avail_eth_qsets;
	int max_eth_qsets = 32;
#ifndef CONFIG_CHELSIO_T4_DCB
	int q10g = 0;
#endif

	/* Reduce memory usage in kdump environment, disable all offload.
	 */
	if (is_kdump_kernel() || (is_uld(adap) && t4_uld_mem_alloc(adap))) {
		adap->params.offload = 0;
		adap->params.crypto = 0;
	}

	/* Calculate the number of Ethernet Queue Sets available based on
	 * resources provisioned for us.  We always have an Asynchronous
	 * Firmware Event Ingress Queue.  If we're operating in MSI or Legacy
	 * IRQ Pin Interrupt mode, then we'll also have a Forwarded Interrupt
	 * Ingress Queue.  Meanwhile, we need two Egress Queues for each
	 * Queue Set: one for the Free List and one for the Ethernet TX Queue.
	 *
	 * Note that we should also take into account all of the various
	 * Offload Queues.  But, in any situation where we're operating in
	 * a Resource Constrained Provisioning environment, doing any Offload
	 * at all is problematic ...
	 */
	niqflint = adap->params.pfres.niqflint - 1;
	if (!(adap->flags & USING_MSIX))
		niqflint--;
	neq = adap->params.pfres.neq / 2;
	avail_eth_qsets = min(niqflint, neq);

	if (avail_eth_qsets > max_eth_qsets)
		avail_eth_qsets = max_eth_qsets;

	if (avail_eth_qsets < adap->params.nports) {
		dev_err(adap->pdev_dev, "avail_eth_qsets=%d < nports=%d\n",
			avail_eth_qsets, adap->params.nports);
		return -ENOMEM;
	}

	/* Count the number of 10Gb/s or better ports */
	for_each_port(adap, i)
		n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);

#ifdef CONFIG_CHELSIO_T4_DCB
	/* For Data Center Bridging support we need to be able to support up
	 * to 8 Traffic Priorities; each of which will be assigned to its
	 * own TX Queue in order to prevent Head-Of-Line Blocking.
	 */
	if (adap->params.nports * 8 > avail_eth_qsets) {
		dev_err(adap->pdev_dev, "DCB avail_eth_qsets=%d < %d!\n",
			avail_eth_qsets, adap->params.nports * 8);
		return -ENOMEM;
	}

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = qidx;
		pi->nqsets = is_kdump_kernel() ? 1 : 8;
		qidx += pi->nqsets;
	}
#else /* !CONFIG_CHELSIO_T4_DCB */
	/*
	 * We default to 1 queue per non-10G port and up to # of cores queues
	 * per 10G port.
	 */
	if (n10g)
		q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g;
	if (q10g > netif_get_num_default_rss_queues())
		q10g = netif_get_num_default_rss_queues();

	if (is_kdump_kernel())
		q10g = 1;

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = qidx;
		pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
		qidx += pi->nqsets;
	}
#endif /* !CONFIG_CHELSIO_T4_DCB */

	s->ethqsets = qidx;
	s->max_ethqsets = qidx;   /* MSI-X may lower it later */

	if (is_uld(adap)) {
		/*
		 * For offload we use 1 queue/channel if all ports are up to 1G,
		 * otherwise we divide all available queues amongst the channels
		 * capped by the number of available cores.
		 */
		if (n10g) {
			i = min_t(int, MAX_OFLD_QSETS, num_online_cpus());
			s->ofldqsets = roundup(i, adap->params.nports);
		} else {
			s->ofldqsets = adap->params.nports;
		}
	}

	for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
		struct sge_eth_rxq *r = &s->ethrxq[i];

		init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
		r->fl.size = 72;
	}

	for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
		s->ethtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
		s->ctrlq[i].q.size = 512;

	if (!is_t4(adap->params.chip))
		s->ptptxq.q.size = 8;

	init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
	init_rspq(adap, &s->intrq, 0, 1, 512, 64);

	return 0;
}
/*
 * Reduce the number of Ethernet queues across all ports to at most n.
 * n provides at least one queue per port.
 */
static void reduce_ethqs(struct adapter *adap, int n)
{
	int i;
	struct port_info *pi;

	while (n < adap->sge.ethqsets)
		for_each_port(adap, i) {
			pi = adap2pinfo(adap, i);
			if (pi->nqsets > 1) {
				pi->nqsets--;
				adap->sge.ethqsets--;
				if (adap->sge.ethqsets <= n)
					break;
			}
		}

	n = 0;
	for_each_port(adap, i) {
		pi = adap2pinfo(adap, i);
		pi->first_qset = n;
		n += pi->nqsets;
	}
}
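/* For instance, shrinking 4 ports x 8 queue sets (ethqsets == 32) down to
 * n == 16 peels one queue set from each port per round-robin pass, ending
 * with nqsets == 4 on every port and first_qset re-packed contiguously
 * (0, 4, 8, 12).
 */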
static int get_msix_info(struct adapter *adap)
{
	struct uld_msix_info *msix_info;
	unsigned int max_ingq = 0;

	if (is_offload(adap))
		max_ingq += MAX_OFLD_QSETS * adap->num_ofld_uld;
	if (is_pci_uld(adap))
		max_ingq += MAX_OFLD_QSETS * adap->num_uld;

	if (!max_ingq)
		goto out;

	msix_info = kcalloc(max_ingq, sizeof(*msix_info), GFP_KERNEL);
	if (!msix_info)
		return -ENOMEM;

	adap->msix_bmap_ulds.msix_bmap = kcalloc(BITS_TO_LONGS(max_ingq),
						 sizeof(long), GFP_KERNEL);
	if (!adap->msix_bmap_ulds.msix_bmap) {
		kfree(msix_info);
		return -ENOMEM;
	}
	spin_lock_init(&adap->msix_bmap_ulds.lock);
	adap->msix_info_ulds = msix_info;
out:
	return 0;
}

static void free_msix_info(struct adapter *adap)
{
	if (!(adap->num_uld && adap->num_ofld_uld))
		return;

	kfree(adap->msix_info_ulds);
	kfree(adap->msix_bmap_ulds.msix_bmap);
}
/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
#define EXTRA_VECS 2

static int enable_msix(struct adapter *adap)
{
	int ofld_need = 0, uld_need = 0;
	int i, j, want, need, allocated;
	struct sge *s = &adap->sge;
	unsigned int nchan = adap->params.nports;
	struct msix_entry *entries;
	int max_ingq = MAX_INGQ;

	if (is_pci_uld(adap))
		max_ingq += (MAX_OFLD_QSETS * adap->num_uld);
	if (is_offload(adap))
		max_ingq += (MAX_OFLD_QSETS * adap->num_ofld_uld);
	entries = kmalloc_array(max_ingq + 1, sizeof(*entries),
				GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	/* map for msix */
	if (get_msix_info(adap)) {
		adap->params.offload = 0;
		adap->params.crypto = 0;
	}

	for (i = 0; i < max_ingq + 1; ++i)
		entries[i].entry = i;

	want = s->max_ethqsets + EXTRA_VECS;
	if (is_offload(adap)) {
		want += adap->num_ofld_uld * s->ofldqsets;
		ofld_need = adap->num_ofld_uld * nchan;
	}
	if (is_pci_uld(adap)) {
		want += adap->num_uld * s->ofldqsets;
		uld_need = adap->num_uld * nchan;
	}
#ifdef CONFIG_CHELSIO_T4_DCB
	/* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
	 * each port.
	 */
	need = 8 * adap->params.nports + EXTRA_VECS + ofld_need + uld_need;
#else
	need = adap->params.nports + EXTRA_VECS + ofld_need + uld_need;
#endif
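	/* Rough arithmetic for illustration, assuming a 2-port NIC with 6
	 * offload ULDs and 2 PCI ULDs: want covers every queue set we would
	 * like (max_ethqsets + 2 + 6*ofldqsets + 2*ofldqsets), while the
	 * non-DCB floor is need = 2 + 2 + 6*2 + 2*2 == 20 vectors;
	 * pci_enable_msix_range() may grant anything in [need, want].
	 */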
	allocated = pci_enable_msix_range(adap->pdev, entries, need, want);
	if (allocated < 0) {
		dev_info(adap->pdev_dev, "not enough MSI-X vectors left,"
			 " not using MSI-X\n");
		kfree(entries);
		return allocated;
	}

	/* Distribute available vectors to the various queue groups.
	 * Every group gets its minimum requirement and NIC gets top
	 * priority for leftovers.
	 */
	i = allocated - EXTRA_VECS - ofld_need - uld_need;
	if (i < s->max_ethqsets) {
		s->max_ethqsets = i;
		if (i < s->ethqsets)
			reduce_ethqs(adap, i);
	}
	if (is_uld(adap)) {
		if (allocated < want)
			s->nqs_per_uld = nchan;
		else
			s->nqs_per_uld = s->ofldqsets;
	}

	for (i = 0; i < (s->max_ethqsets + EXTRA_VECS); ++i)
		adap->msix_info[i].vec = entries[i].vector;
	if (is_uld(adap)) {
		for (j = 0 ; i < allocated; ++i, j++) {
			adap->msix_info_ulds[j].vec = entries[i].vector;
			adap->msix_info_ulds[j].idx = i;
		}
		adap->msix_bmap_ulds.mapsize = j;
	}
	dev_info(adap->pdev_dev, "%d MSI-X vectors allocated, "
		 "nic %d per uld %d\n",
		 allocated, s->max_ethqsets, s->nqs_per_uld);

	kfree(entries);
	return 0;
}
static int init_rss(struct adapter *adap)
{
	unsigned int i;
	int err;

	err = t4_init_rss_mode(adap, adap->mbox);
	if (err)
		return err;

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
		if (!pi->rss)
			return -ENOMEM;
	}
	return 0;
}
/* Dump basic information about the adapter */
static void print_adapter_info(struct adapter *adapter)
{
	/* Hardware/Firmware/etc. Version/Revision IDs */
	t4_dump_version_info(adapter);

	/* Software/Hardware configuration */
	dev_info(adapter->pdev_dev, "Configuration: %sNIC %s, %s capable\n",
		 is_offload(adapter) ? "R" : "",
		 ((adapter->flags & USING_MSIX) ? "MSI-X" :
		  (adapter->flags & USING_MSI) ? "MSI" : ""),
		 is_offload(adapter) ? "Offload" : "non-Offload");
}

static void print_port_info(const struct net_device *dev)
{
	char buf[80];
	char *bufp = buf;
	const struct port_info *pi = netdev_priv(dev);
	const struct adapter *adap = pi->adapter;

	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100M)
		bufp += sprintf(bufp, "100M/");
	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_1G)
		bufp += sprintf(bufp, "1G/");
	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_10G)
		bufp += sprintf(bufp, "10G/");
	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_25G)
		bufp += sprintf(bufp, "25G/");
	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_40G)
		bufp += sprintf(bufp, "40G/");
	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_50G)
		bufp += sprintf(bufp, "50G/");
	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100G)
		bufp += sprintf(bufp, "100G/");
	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_200G)
		bufp += sprintf(bufp, "200G/");
	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_400G)
		bufp += sprintf(bufp, "400G/");
	if (bufp != buf)
		--bufp;
	sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));

	netdev_info(dev, "%s: Chelsio %s (%s) %s\n",
		    dev->name, adap->params.vpd.id, adap->name, buf);
}
/*
 * Free the following resources:
 * - memory used for tables
 * - MSI/MSI-X
 * - net devices
 * - resources FW is holding for us
 */
static void free_some_resources(struct adapter *adapter)
{
	unsigned int i;

	kvfree(adapter->mps_encap);
	kvfree(adapter->smt);
	kvfree(adapter->l2t);
	kvfree(adapter->srq);
	t4_cleanup_sched(adapter);
	kvfree(adapter->tids.tid_tab);
	cxgb4_cleanup_tc_flower(adapter);
	cxgb4_cleanup_tc_u32(adapter);
	kfree(adapter->sge.egr_map);
	kfree(adapter->sge.ingr_map);
	kfree(adapter->sge.starving_fl);
	kfree(adapter->sge.txq_maperr);
#ifdef CONFIG_DEBUG_FS
	kfree(adapter->sge.blocked_fl);
#endif
	disable_msi(adapter);

	for_each_port(adapter, i)
		if (adapter->port[i]) {
			struct port_info *pi = adap2pinfo(adapter, i);

			if (pi->viid != 0)
				t4_free_vi(adapter, adapter->mbox, adapter->pf,
					   0, pi->viid);
			kfree(adap2pinfo(adapter, i)->rss);
			free_netdev(adapter->port[i]);
		}
	if (adapter->flags & FW_OK)
		t4_fw_bye(adapter, adapter->pf);
}

#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
#define SEGMENT_SIZE 128
static int t4_get_chip_type(struct adapter *adap, int ver)
{
	u32 pl_rev = REV_G(t4_read_reg(adap, PL_REV_A));

	switch (ver) {
	case CHELSIO_T4:
		return CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
	case CHELSIO_T5:
		return CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
	case CHELSIO_T6:
		return CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
	default:
		break;
	}
	return -EINVAL;
}
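/* The "ver" argument is the version nibble that CHELSIO_PCI_ID_VER()
 * extracts from the PCI device ID; e.g. a 0x5xxx device ID identifies a
 * T5 and a 0x6xxx device ID a T6, with pl_rev supplying the silicon
 * revision.
 */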

#ifdef CONFIG_PCI_IOV
static void cxgb4_mgmt_setup(struct net_device *dev)
{
	dev->type = ARPHRD_NONE;
	dev->mtu = 0;
	dev->hard_header_len = 0;
	dev->addr_len = 0;
	dev->tx_queue_len = 0;
	dev->flags |= IFF_NOARP;
	dev->priv_flags |= IFF_NO_QUEUE;

	/* Initialize the device structure. */
	dev->netdev_ops = &cxgb4_mgmt_netdev_ops;
	dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops;
}

static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct adapter *adap = pci_get_drvdata(pdev);
	int err = 0;
	int current_vfs = pci_num_vf(pdev);
	u32 pcie_fw;

	pcie_fw = readl(adap->regs + PCIE_FW_A);
	/* Check if fw is initialized */
	if (!(pcie_fw & PCIE_FW_INIT_F)) {
		dev_warn(&pdev->dev, "Device not initialized\n");
		return -EOPNOTSUPP;
	}

	/* If any of the VF's is already assigned to Guest OS, then
	 * SRIOV for the same cannot be modified
	 */
	if (current_vfs && pci_vfs_assigned(pdev)) {
		dev_err(&pdev->dev,
			"Cannot modify SR-IOV while VFs are assigned\n");
		return current_vfs;
	}
	/* Note that the upper-level code ensures that we're never called with
	 * a non-zero "num_vfs" when we already have VFs instantiated.  But
	 * it never hurts to code defensively.
	 */
	if (num_vfs != 0 && current_vfs != 0)
		return -EBUSY;

	/* Nothing to do for no change. */
	if (num_vfs == current_vfs)
		return num_vfs;

	/* Disable SRIOV when zero is passed. */
	if (!num_vfs) {
		pci_disable_sriov(pdev);
		/* free VF Management Interface */
		unregister_netdev(adap->port[0]);
		free_netdev(adap->port[0]);
		adap->port[0] = NULL;

		/* free VF resources */
		adap->num_vfs = 0;
		kfree(adap->vfinfo);
		adap->vfinfo = NULL;
		return 0;
	}

	if (!current_vfs) {
		struct fw_pfvf_cmd port_cmd, port_rpl;
		struct net_device *netdev;
		unsigned int pmask, port;
		struct pci_dev *pbridge;
		struct port_info *pi;
		char name[IFNAMSIZ];
		u32 devcap2;
		u16 flags;
		int pos;

		/* If we want to instantiate Virtual Functions, then our
		 * parent bridge's PCI-E needs to support Alternative Routing
		 * ID (ARI) because our VFs will show up at function offset 8
		 * and above.
		 */
		pbridge = pdev->bus->self;
		pos = pci_find_capability(pbridge, PCI_CAP_ID_EXP);
		pci_read_config_word(pbridge, pos + PCI_EXP_FLAGS, &flags);
		pci_read_config_dword(pbridge, pos + PCI_EXP_DEVCAP2, &devcap2);

		if ((flags & PCI_EXP_FLAGS_VERS) < 2 ||
		    !(devcap2 & PCI_EXP_DEVCAP2_ARI)) {
			/* Our parent bridge does not support ARI so issue a
			 * warning and skip instantiating the VFs.  They
			 * won't be reachable.
			 */
			dev_warn(&pdev->dev, "Parent bridge %02x:%02x.%x doesn't support ARI; can't instantiate Virtual Functions\n",
				 pbridge->bus->number, PCI_SLOT(pbridge->devfn),
				 PCI_FUNC(pbridge->devfn));
			return -ENOTSUPP;
		}
		memset(&port_cmd, 0, sizeof(port_cmd));
		port_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) |
						 FW_CMD_REQUEST_F |
						 FW_CMD_READ_F |
						 FW_PFVF_CMD_PFN_V(adap->pf) |
						 FW_PFVF_CMD_VFN_V(0));
		port_cmd.retval_len16 = cpu_to_be32(FW_LEN16(port_cmd));
		err = t4_wr_mbox(adap, adap->mbox, &port_cmd, sizeof(port_cmd),
				 &port_rpl);
		if (err)
			return err;
		pmask = FW_PFVF_CMD_PMASK_G(be32_to_cpu(port_rpl.type_to_neq));
		port = ffs(pmask) - 1;
		/* Allocate VF Management Interface. */
		snprintf(name, IFNAMSIZ, "mgmtpf%d,%d", adap->adap_idx,
			 adap->pf);
		netdev = alloc_netdev(sizeof(struct port_info),
				      name, NET_NAME_UNKNOWN, cxgb4_mgmt_setup);
		if (!netdev)
			return -ENOMEM;

		pi = netdev_priv(netdev);
		pi->adapter = adap;
		pi->lport = port;
		pi->tx_chan = port;
		SET_NETDEV_DEV(netdev, &pdev->dev);

		adap->port[0] = netdev;
		pi->port_id = 0;

		err = register_netdev(adap->port[0]);
		if (err) {
			pr_info("Unable to register VF mgmt netdev %s\n", name);
			free_netdev(adap->port[0]);
			adap->port[0] = NULL;
			return err;
		}
		/* Allocate and set up VF Information. */
		adap->vfinfo = kcalloc(pci_sriov_get_totalvfs(pdev),
				       sizeof(struct vf_info), GFP_KERNEL);
		if (!adap->vfinfo) {
			unregister_netdev(adap->port[0]);
			free_netdev(adap->port[0]);
			adap->port[0] = NULL;
			return -ENOMEM;
		}
		cxgb4_mgmt_fill_vf_station_mac_addr(adap);
	}
	/* Instantiate the requested number of VFs. */
	err = pci_enable_sriov(pdev, num_vfs);
	if (err) {
		pr_info("Unable to instantiate %d VFs\n", num_vfs);
		if (!current_vfs) {
			unregister_netdev(adap->port[0]);
			free_netdev(adap->port[0]);
			adap->port[0] = NULL;
			kfree(adap->vfinfo);
			adap->vfinfo = NULL;
		}
		return err;
	}

	adap->num_vfs = num_vfs;
	return num_vfs;
}
#endif /* CONFIG_PCI_IOV */
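/* A typical invocation path for the above: writing to the standard SR-IOV
 * sysfs attribute, e.g. "echo 4 > /sys/bus/pci/devices/<BDF>/sriov_numvfs",
 * reaches cxgb4_iov_configure() via .sriov_configure with num_vfs == 4;
 * writing 0 tears the VFs and the mgmtpf interface back down.
 */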
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct adapter *adapter;
	static int adap_idx = 1;
	int s_qpp, qpp, num_seg;
	struct port_info *pi;
	bool highdma = false;
	enum chip_type chip;
	void __iomem *regs;
	int func, chip_ver;
	u16 device_id;
	int i, err;
	u32 whoami;

	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);

	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	regs = pci_ioremap_bar(pdev, 0);
	if (!regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_disable_device;
	}

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_unmap_bar0;
	}

	adapter->regs = regs;
	err = t4_wait_dev_ready(regs);
	if (err < 0)
		goto out_free_adapter;

	/* We control everything through one PF */
	whoami = t4_read_reg(adapter, PL_WHOAMI_A);
	pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
	chip = t4_get_chip_type(adapter, CHELSIO_PCI_ID_VER(device_id));
	if (chip < 0) {
		dev_err(&pdev->dev, "Device %d is not supported\n", device_id);
		err = chip;
		goto out_free_adapter;
	}
	chip_ver = CHELSIO_CHIP_VERSION(chip);
	func = chip_ver <= CHELSIO_T5 ?
	       SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);

	adapter->pdev = pdev;
	adapter->pdev_dev = &pdev->dev;
	adapter->name = pci_name(pdev);
	adapter->mbox = func;
	adapter->pf = func;
	adapter->params.chip = chip;
	adapter->adap_idx = adap_idx;
	adapter->msg_enable = DFLT_MSG_ENABLE;
	adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
				    (sizeof(struct mbox_cmd) *
				     T4_OS_LOG_MBOX_CMDS),
				    GFP_KERNEL);
	if (!adapter->mbox_log) {
		err = -ENOMEM;
		goto out_free_adapter;
	}
	spin_lock_init(&adapter->mbox_lock);
	INIT_LIST_HEAD(&adapter->mlist.list);
	adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS;
	pci_set_drvdata(pdev, adapter);

	if (func != ent->driver_data) {
		pci_disable_device(pdev);
		pci_save_state(pdev);        /* to restore SR-IOV later */
		return 0;
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		highdma = true;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_free_adapter;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			goto out_free_adapter;
		}
	}

	pci_enable_pcie_error_reporting(pdev);
	pci_set_master(pdev);
	pci_save_state(pdev);
	adap_idx++;
	adapter->workq = create_singlethread_workqueue("cxgb4");
	if (!adapter->workq) {
		err = -ENOMEM;
		goto out_free_adapter;
	}

	/* PCI device has been enabled */
	adapter->flags |= DEV_ENABLED;
	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));

	/* If possible, we use PCIe Relaxed Ordering Attribute to deliver
	 * Ingress Packet Data to Free List Buffers in order to allow for
	 * chipset performance optimizations between the Root Complex and
	 * Memory Controllers.  (Messages to the associated Ingress Queue
	 * notifying new Packet Placement in the Free Lists Buffers will be
	 * sent without the Relaxed Ordering Attribute, thus guaranteeing that
	 * all preceding PCIe Transaction Layer Packets will be processed
	 * first.)  But some Root Complexes have various issues with Upstream
	 * Transaction Layer Packets with the Relaxed Ordering Attribute set.
	 * Devices below such Root Complexes get the Relaxed Ordering bit
	 * cleared in their configuration space, so we check our own PCIe
	 * configuration space to see if it's flagged with advice against
	 * using Relaxed Ordering.
	 */
	if (!pcie_relaxed_ordering_enabled(pdev))
		adapter->flags |= ROOT_NO_RELAXED_ORDERING;

	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->tid_release_lock);
	spin_lock_init(&adapter->win0_lock);

	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
	INIT_WORK(&adapter->db_full_task, process_db_full);
	INIT_WORK(&adapter->db_drop_task, process_db_drop);
	INIT_WORK(&adapter->fatal_err_notify_task, notify_fatal_err);

	err = t4_prep_adapter(adapter);
	if (err)
		goto out_free_adapter;

	if (is_kdump_kernel()) {
		/* Collect hardware state and append to /proc/vmcore */
		err = cxgb4_cudbg_vmcore_add_dump(adapter);
		if (err) {
			dev_warn(adapter->pdev_dev,
				 "Fail collecting vmcore device dump, err: %d. Continuing\n",
				 err);
			err = 0;
		}
	}

	if (!is_t4(adapter->params.chip)) {
		s_qpp = (QUEUESPERPAGEPF0_S +
			(QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) *
			adapter->pf);
		qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter,
		      SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp);
		num_seg = PAGE_SIZE / SEGMENT_SIZE;

		/* Each segment size is 128B. Write coalescing is enabled only
		 * when the SGE_EGRESS_QUEUES_PER_PAGE_PF reg value for the
		 * queue is less than the number of segments that can be
		 * accommodated in a page size.
		 */
		if (qpp > num_seg) {
			dev_err(&pdev->dev,
				"Incorrect number of egress queues per page\n");
			err = -EINVAL;
			goto out_free_adapter;
		}
		adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
		pci_resource_len(pdev, 2));
		if (!adapter->bar2) {
			dev_err(&pdev->dev, "cannot map device bar2 region\n");
			err = -ENOMEM;
			goto out_free_adapter;
		}
	}

	setup_memwin(adapter);
	err = adap_init0(adapter);
#ifdef CONFIG_DEBUG_FS
	bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz);
#endif
	setup_memwin_rdma(adapter);
	if (err)
		goto out_unmap_bar;

	/* configure SGE_STAT_CFG_A to read WC stats */
	if (!is_t4(adapter->params.chip))
		t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7) |
			     (is_t5(adapter->params.chip) ? STATMODE_V(0) :
			      T6_STATMODE_V(0)));

	for_each_port(adapter, i) {
		netdev = alloc_etherdev_mq(sizeof(struct port_info),
					   MAX_ETH_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->xact_addr_filt = -1;
		pi->port_id = i;
		netdev->irq = pdev->irq;

		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_RXCSUM | NETIF_F_RXHASH |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
			NETIF_F_HW_TC;

		if (chip_ver > CHELSIO_T5) {
			netdev->hw_enc_features |= NETIF_F_IP_CSUM |
						   NETIF_F_IPV6_CSUM |
						   NETIF_F_RXCSUM |
						   NETIF_F_GSO_UDP_TUNNEL |
						   NETIF_F_TSO | NETIF_F_TSO6;

			netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
		}

		if (highdma)
			netdev->hw_features |= NETIF_F_HIGHDMA;
		netdev->features |= netdev->hw_features;
		netdev->vlan_features = netdev->features & VLAN_FEAT;

		netdev->priv_flags |= IFF_UNICAST_FLT;

		/* MTU range: 81 - 9600 */
		netdev->min_mtu = 81;              /* accommodate SACK */
		netdev->max_mtu = MAX_MTU;

		netdev->netdev_ops = &cxgb4_netdev_ops;
#ifdef CONFIG_CHELSIO_T4_DCB
		netdev->dcbnl_ops = &cxgb4_dcb_ops;
		cxgb4_dcb_state_init(netdev);
		cxgb4_dcb_version_init(netdev);
#endif
		cxgb4_set_ethtool_ops(netdev);
	}

	cxgb4_init_ethtool_dump(adapter);

	pci_set_drvdata(pdev, adapter);

	if (adapter->flags & FW_OK) {
		err = t4_port_init(adapter, func, func, 0);
		if (err)
			goto out_free_dev;
	} else if (adapter->params.nports == 1) {
		/* If we don't have a connection to the firmware -- possibly
		 * because of an error -- grab the raw VPD parameters so we
		 * can set the proper MAC Address on the debug network
		 * interface that we've created.
		 */
		u8 hw_addr[ETH_ALEN];
		u8 *na = adapter->params.vpd.na;

		err = t4_get_raw_vpd_params(adapter, &adapter->params.vpd);
		if (!err) {
			for (i = 0; i < ETH_ALEN; i++)
				hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
					      hex2val(na[2 * i + 1]));
			t4_set_hw_addr(adapter, 0, hw_addr);
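			/* The VPD "na" field holds the MAC as 12 ASCII hex
			 * digits; e.g. na == "0007430A1B2C" parses to
			 * 00:07:43:0a:1b:2c, each byte built from two
			 * hex2val() nibbles (example address only).
			 */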
		}
	}

	if (!(adapter->flags & FW_OK))
		goto fw_attach_fail;

	/* Configure queues and allocate tables now, they can be needed as
	 * soon as the first register_netdev completes.
	 */
	err = cfg_queues(adapter);
	if (err)
		goto out_free_dev;

	adapter->smt = t4_init_smt();
	if (!adapter->smt) {
		/* We tolerate a lack of SMT, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate SMT, continuing\n");
	}

	adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end);
	if (!adapter->l2t) {
		/* We tolerate a lack of L2T, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
		adapter->params.offload = 0;
	}

	adapter->mps_encap = kvcalloc(adapter->params.arch.mps_tcam_size,
				      sizeof(struct mps_encap_entry),
				      GFP_KERNEL);
	if (!adapter->mps_encap)
		dev_warn(&pdev->dev, "could not allocate MPS Encap entries, continuing\n");

#if IS_ENABLED(CONFIG_IPV6)
	if (chip_ver <= CHELSIO_T5 &&
	    (!(t4_read_reg(adapter, LE_DB_CONFIG_A) & ASLIPCOMPEN_F))) {
		/* CLIP functionality is not present in hardware,
		 * hence disable all offload features
		 */
		dev_warn(&pdev->dev,
			 "CLIP not enabled in hardware, continuing\n");
		adapter->params.offload = 0;
	} else {
		adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
						  adapter->clipt_end);
		if (!adapter->clipt) {
			/* We tolerate a lack of clip_table, giving up
			 * some functionality
			 */
			dev_warn(&pdev->dev,
				 "could not allocate Clip table, continuing\n");
			adapter->params.offload = 0;
		}
	}
#endif

	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		pi->sched_tbl = t4_init_sched(adapter->params.nsched_cls);
		if (!pi->sched_tbl)
			dev_warn(&pdev->dev,
				 "could not activate scheduling on port %d\n",
				 i);
	}

	if (tid_init(&adapter->tids) < 0) {
		dev_warn(&pdev->dev, "could not allocate TID table, "
			 "continuing\n");
		adapter->params.offload = 0;
	} else {
		adapter->tc_u32 = cxgb4_init_tc_u32(adapter);
		if (!adapter->tc_u32)
			dev_warn(&pdev->dev,
				 "could not offload tc u32, continuing\n");

		if (cxgb4_init_tc_flower(adapter))
			dev_warn(&pdev->dev,
				 "could not offload tc flower, continuing\n");
	}

	if (is_offload(adapter) || is_hashfilter(adapter)) {
		if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
			u32 hash_base, hash_reg;

			if (chip_ver <= CHELSIO_T5) {
				hash_reg = LE_DB_TID_HASHBASE_A;
				hash_base = t4_read_reg(adapter, hash_reg);
				adapter->tids.hash_base = hash_base / 4;
			} else {
				hash_reg = T6_LE_DB_HASH_TID_BASE_A;
				hash_base = t4_read_reg(adapter, hash_reg);
				adapter->tids.hash_base = hash_base;
			}
		}
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0) {
		adapter->flags |= USING_MSI;
		if (msi > 1)
			free_msix_info(adapter);
	}

	/* check for PCI Express bandwidth capabilities */
	pcie_print_link_status(pdev);

	err = init_rss(adapter);
	if (err)
		goto out_free_dev;

	err = setup_fw_sge_queues(adapter);
	if (err) {
		dev_err(adapter->pdev_dev,
			"FW sge queue allocation failed, err %d", err);
		goto out_free_dev;
	}

fw_attach_fail:
	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		adapter->port[i]->dev_port = pi->lport;
		netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
		netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);

		netif_carrier_off(adapter->port[i]);

		err = register_netdev(adapter->port[i]);
		if (err)
			break;
		adapter->chan_map[pi->tx_chan] = i;
		print_port_info(adapter->port[i]);
	}
	if (i == 0) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}
	if (err) {
		dev_warn(&pdev->dev, "only %d net devices registered\n", i);
		err = 0;
	}

	if (cxgb4_debugfs_root) {
		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
							   cxgb4_debugfs_root);
		setup_debugfs(adapter);
	}

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	pdev->needs_freset = 1;

	if (is_uld(adapter)) {
		mutex_lock(&uld_mutex);
		list_add_tail(&adapter->list_node, &adapter_list);
		mutex_unlock(&uld_mutex);
	}

	if (!is_t4(adapter->params.chip))
		cxgb4_ptp_init(adapter);

	print_adapter_info(adapter);
	return 0;

 out_free_dev:
	t4_free_sge_resources(adapter);
	free_some_resources(adapter);
	if (adapter->flags & USING_MSIX)
		free_msix_info(adapter);
	if (adapter->num_uld || adapter->num_ofld_uld)
		t4_uld_mem_free(adapter);
 out_unmap_bar:
	if (!is_t4(adapter->params.chip))
		iounmap(adapter->bar2);
 out_free_adapter:
	if (adapter->workq)
		destroy_workqueue(adapter->workq);

	kfree(adapter->mbox_log);
	kfree(adapter);
 out_unmap_bar0:
	iounmap(regs);
 out_disable_device:
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
 out_release_regions:
	pci_release_regions(pdev);
	return err;
}
static void remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter) {
		pci_release_regions(pdev);
		return;
	}

	adapter->flags |= SHUTTING_DOWN;

	if (adapter->pf == 4) {
		int i;

		/* Tear down per-adapter Work Queue first since it can contain
		 * references to our adapter data structure.
		 */
		destroy_workqueue(adapter->workq);

		if (is_uld(adapter)) {
			detach_ulds(adapter);
			t4_uld_clean_up(adapter);
		}

		adap_free_hma_mem(adapter);

		disable_interrupts(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
				unregister_netdev(adapter->port[i]);

		debugfs_remove_recursive(adapter->debugfs_root);

		if (!is_t4(adapter->params.chip))
			cxgb4_ptp_stop(adapter);

		/* If we allocated filters, free up state associated with any
		 * valid filters ...
		 */
		clear_all_filters(adapter);

		if (adapter->flags & FULL_INIT_DONE)
			cxgb_down(adapter);

		if (adapter->flags & USING_MSIX)
			free_msix_info(adapter);
		if (adapter->num_uld || adapter->num_ofld_uld)
			t4_uld_mem_free(adapter);
		free_some_resources(adapter);
#if IS_ENABLED(CONFIG_IPV6)
		t4_cleanup_clip_tbl(adapter);
#endif
		if (!is_t4(adapter->params.chip))
			iounmap(adapter->bar2);
	}
#ifdef CONFIG_PCI_IOV
	else {
		cxgb4_iov_configure(adapter->pdev, 0);
	}
#endif
	iounmap(adapter->regs);
	pci_disable_pcie_error_reporting(pdev);
	if ((adapter->flags & DEV_ENABLED)) {
		pci_disable_device(pdev);
		adapter->flags &= ~DEV_ENABLED;
	}
	pci_release_regions(pdev);
	kfree(adapter->mbox_log);
	kfree(adapter);
}
5949 /* "Shutdown" quiesces the device, stopping Ingress Packet and Interrupt
5950 * delivery. This is essentially a stripped down version of the PCI remove()
5951 * function where we do the minimal amount of work necessary to shutdown any
5954 static void shutdown_one(struct pci_dev
*pdev
)
5956 struct adapter
*adapter
= pci_get_drvdata(pdev
);
5958 /* As with remove_one() above (see extended comment), we only want do
5959 * do cleanup on PCI Devices which went all the way through init_one()
5963 pci_release_regions(pdev
);
5967 adapter
->flags
|= SHUTTING_DOWN
;
5969 if (adapter
->pf
== 4) {
5972 for_each_port(adapter
, i
)
5973 if (adapter
->port
[i
]->reg_state
== NETREG_REGISTERED
)
5974 cxgb_close(adapter
->port
[i
]);
5976 if (is_uld(adapter
)) {
5977 detach_ulds(adapter
);
5978 t4_uld_clean_up(adapter
);
5981 disable_interrupts(adapter
);
5982 disable_msi(adapter
);
5984 t4_sge_stop(adapter
);
5985 if (adapter
->flags
& FW_OK
)
5986 t4_fw_bye(adapter
, adapter
->mbox
);
static struct pci_driver cxgb4_driver = {
	.name     = KBUILD_MODNAME,
	.id_table = cxgb4_pci_tbl,
	.probe    = init_one,
	.remove   = remove_one,
	.shutdown = shutdown_one,
#ifdef CONFIG_PCI_IOV
	.sriov_configure = cxgb4_iov_configure,
#endif
	.err_handler = &cxgb4_eeh,
};
static int __init cxgb4_init_module(void)
{
	int ret;

	/* Debugfs support is optional, just warn if this fails */
	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (!cxgb4_debugfs_root)
		pr_warn("could not create debugfs entry, continuing\n");

	ret = pci_register_driver(&cxgb4_driver);
	if (ret < 0)
		debugfs_remove(cxgb4_debugfs_root);

#if IS_ENABLED(CONFIG_IPV6)
	if (!inet6addr_registered) {
		register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
		inet6addr_registered = true;
	}
#endif

	return ret;
}

static void __exit cxgb4_cleanup_module(void)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (inet6addr_registered) {
		unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
		inet6addr_registered = false;
	}
#endif
	pci_unregister_driver(&cxgb4_driver);
	debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
}

module_init(cxgb4_init_module);
module_exit(cxgb4_cleanup_module);