/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/log2.h>
#include <linux/prefetch.h>
#include <linux/irq.h>

#include "nic_reg.h"
#include "nic.h"
#include "nicvf_queues.h"
#include "thunder_bgx.h"
#define DRV_NAME	"thunder-nicvf"
#define DRV_VERSION	"1.0"
/* Supported devices */
static const struct pci_device_id nicvf_id_table[] = {
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
			 PCI_DEVICE_ID_THUNDER_NIC_VF,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_88XX_NIC_VF) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
			 PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_88XX_PASS1_NIC_VF) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
			 PCI_DEVICE_ID_THUNDER_NIC_VF,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_81XX_NIC_VF) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
			 PCI_DEVICE_ID_THUNDER_NIC_VF,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_83XX_NIC_VF) },
	{ 0, }	/* end of table */
};
MODULE_AUTHOR("Sunil Goutham");
MODULE_DESCRIPTION("Cavium Thunder NIC Virtual Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, nicvf_id_table);
static int debug = 0x00;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Debug message level bitmap");

static int cpi_alg = CPI_ALG_NONE;
module_param(cpi_alg, int, S_IRUGO);
MODULE_PARM_DESC(cpi_alg,
		 "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");
static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)
{
	if (nic->sqs_mode)
		return qidx + ((nic->sqs_id + 1) * MAX_CMP_QUEUES_PER_QS);
	else
		return qidx;
}
/* The Cavium ThunderX network controller can *only* be found in SoCs
 * containing the ThunderX ARM64 CPU implementation.  All accesses to the device
 * registers on this platform are implicitly strongly ordered with respect
 * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use
 * with no memory barriers in this driver.  The readq()/writeq() functions add
 * explicit ordering operations which in this case are redundant, and only
 * add overhead.
 */
/* Register read/write APIs */
void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val)
{
	writeq_relaxed(val, nic->reg_base + offset);
}

u64 nicvf_reg_read(struct nicvf *nic, u64 offset)
{
	return readq_relaxed(nic->reg_base + offset);
}
void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
			   u64 qidx, u64 val)
{
	void __iomem *addr = nic->reg_base + offset;

	writeq_relaxed(val, addr + (qidx << NIC_Q_NUM_SHIFT));
}

u64 nicvf_queue_reg_read(struct nicvf *nic, u64 offset, u64 qidx)
{
	void __iomem *addr = nic->reg_base + offset;

	return readq_relaxed(addr + (qidx << NIC_Q_NUM_SHIFT));
}
/* VF -> PF mailbox communication */
static void nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx)
{
	u64 *msg = (u64 *)mbx;

	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 0, msg[0]);
	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, msg[1]);
}
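/* Post a message to the PF and poll for the ACK/NACK that the mailbox
 * interrupt handler records in pf_acked/pf_nacked.
 */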
int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
{
	int timeout = NIC_MBOX_MSG_TIMEOUT;
	int sleep = 10;

	nic->pf_acked = false;
	nic->pf_nacked = false;

	nicvf_write_to_mbx(nic, mbx);

	/* Wait for previous message to be acked, timeout 2sec */
	while (!nic->pf_acked) {
		if (nic->pf_nacked) {
			netdev_err(nic->netdev,
				   "PF NACK to mbox msg 0x%02x from VF%d\n",
				   (mbx->msg.msg & 0xFF), nic->vf_id);
			return -EINVAL;
		}
		msleep(sleep);
		if (nic->pf_acked)
			break;
		timeout -= sleep;
		if (!timeout) {
			netdev_err(nic->netdev,
				   "PF didn't ACK to mbox msg 0x%02x from VF%d\n",
				   (mbx->msg.msg & 0xFF), nic->vf_id);
			return -EBUSY;
		}
	}
	return 0;
}
/* Checks if VF is able to communicate with PF
 * and also gets the VNIC number this VF is associated to.
 */
static int nicvf_check_pf_ready(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_READY;
	if (nicvf_send_msg_to_pf(nic, &mbx)) {
		netdev_err(nic->netdev,
			   "PF didn't respond to READY msg\n");
		return 0;
	}

	return 1;
}
static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx)
{
	if (bgx->rx)
		nic->bgx_stats.rx_stats[bgx->idx] = bgx->stats;
	else
		nic->bgx_stats.tx_stats[bgx->idx] = bgx->stats;
}
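/* Drain the mailbox registers into a local nic_mbx union and dispatch
 * on the message type; most handlers just record the PF's reply and
 * set pf_acked so a waiting nicvf_send_msg_to_pf() can proceed.
 */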
static void nicvf_handle_mbx_intr(struct nicvf *nic)
{
	union nic_mbx mbx = {};
	u64 *mbx_data;
	u64 mbx_addr;
	int i;

	mbx_addr = NIC_VF_PF_MAILBOX_0_1;
	mbx_data = (u64 *)&mbx;

	for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
		*mbx_data = nicvf_reg_read(nic, mbx_addr);
		mbx_data++;
		mbx_addr += sizeof(u64);
	}

	netdev_dbg(nic->netdev, "Mbox message: msg: 0x%x\n", mbx.msg.msg);
	switch (mbx.msg.msg) {
	case NIC_MBOX_MSG_READY:
		nic->pf_acked = true;
		nic->vf_id = mbx.nic_cfg.vf_id & 0x7F;
		nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
		nic->node = mbx.nic_cfg.node_id;
		if (!nic->set_mac_pending)
			ether_addr_copy(nic->netdev->dev_addr,
					mbx.nic_cfg.mac_addr);
		nic->sqs_mode = mbx.nic_cfg.sqs_mode;
		nic->loopback_supported = mbx.nic_cfg.loopback_supported;
		nic->link_up = false;
		nic->duplex = 0;
		nic->speed = 0;
		break;
	case NIC_MBOX_MSG_ACK:
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_NACK:
		nic->pf_nacked = true;
		break;
	case NIC_MBOX_MSG_RSS_SIZE:
		nic->rss_info.rss_size = mbx.rss_size.ind_tbl_size;
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_BGX_STATS:
		nicvf_read_bgx_stats(nic, &mbx.bgx_stats);
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_BGX_LINK_CHANGE:
		nic->pf_acked = true;
		nic->link_up = mbx.link_status.link_up;
		nic->duplex = mbx.link_status.duplex;
		nic->speed = mbx.link_status.speed;
		if (nic->link_up) {
			netdev_info(nic->netdev, "%s: Link is Up %d Mbps %s\n",
				    nic->netdev->name, nic->speed,
				    nic->duplex == DUPLEX_FULL ?
				    "Full duplex" : "Half duplex");
			netif_carrier_on(nic->netdev);
			netif_tx_start_all_queues(nic->netdev);
		} else {
			netdev_info(nic->netdev, "%s: Link is Down\n",
				    nic->netdev->name);
			netif_carrier_off(nic->netdev);
			netif_tx_stop_all_queues(nic->netdev);
		}
		break;
	case NIC_MBOX_MSG_ALLOC_SQS:
		nic->sqs_count = mbx.sqs_alloc.qs_count;
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_SNICVF_PTR:
		/* Primary VF: make note of secondary VF's pointer
		 * to be used while packet transmission.
		 */
		nic->snicvf[mbx.nicvf.sqs_id] =
			(struct nicvf *)mbx.nicvf.nicvf;
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_PNICVF_PTR:
		/* Secondary VF/Qset: make note of primary VF's pointer
		 * to be used while packet reception, to handover packet
		 * to primary VF's netdev.
		 */
		nic->pnicvf = (struct nicvf *)mbx.nicvf.nicvf;
		nic->pf_acked = true;
		break;
	default:
		netdev_err(nic->netdev,
			   "Invalid message from PF, msg 0x%x\n", mbx.msg.msg);
		break;
	}
	nicvf_clear_intr(nic, NICVF_INTR_MBOX, 0);
}
static int nicvf_hw_set_mac_addr(struct nicvf *nic, struct net_device *netdev)
{
	union nic_mbx mbx = {};

	mbx.mac.msg = NIC_MBOX_MSG_SET_MAC;
	mbx.mac.vf_id = nic->vf_id;
	ether_addr_copy(mbx.mac.mac_addr, netdev->dev_addr);

	return nicvf_send_msg_to_pf(nic, &mbx);
}
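/* Ask the PF to program the CPI (channel parse index) configuration
 * for this VF; cpi_alg selects how received traffic is spread over the
 * Rx queues (none, VLAN, VLAN16 or IP Diffserv based), per the module
 * parameter description above.
 */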
static void nicvf_config_cpi(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.cpi_cfg.msg = NIC_MBOX_MSG_CPI_CFG;
	mbx.cpi_cfg.vf_id = nic->vf_id;
	mbx.cpi_cfg.cpi_alg = nic->cpi_alg;
	mbx.cpi_cfg.rq_cnt = nic->qs->rq_cnt;

	nicvf_send_msg_to_pf(nic, &mbx);
}
static void nicvf_get_rss_size(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
	mbx.rss_size.vf_id = nic->vf_id;
	nicvf_send_msg_to_pf(nic, &mbx);
}
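/* Push the RSS indirection table to the PF. A single mailbox message
 * carries at most RSS_IND_TBL_LEN_PER_MBX_MSG entries, so the table is
 * sent in chunks: the first chunk as RSS_CFG and the rest as
 * RSS_CFG_CONT, distinguished by a non-zero table offset.
 */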
void nicvf_config_rss(struct nicvf *nic)
{
	union nic_mbx mbx = {};
	struct nicvf_rss_info *rss = &nic->rss_info;
	int ind_tbl_len = rss->rss_size;
	int i, nextq = 0;

	mbx.rss_cfg.vf_id = nic->vf_id;
	mbx.rss_cfg.hash_bits = rss->hash_bits;
	while (ind_tbl_len) {
		mbx.rss_cfg.tbl_offset = nextq;
		mbx.rss_cfg.tbl_len = min(ind_tbl_len,
					  RSS_IND_TBL_LEN_PER_MBX_MSG);
		mbx.rss_cfg.msg = mbx.rss_cfg.tbl_offset ?
				  NIC_MBOX_MSG_RSS_CFG_CONT : NIC_MBOX_MSG_RSS_CFG;

		for (i = 0; i < mbx.rss_cfg.tbl_len; i++)
			mbx.rss_cfg.ind_tbl[i] = rss->ind_tbl[nextq++];

		nicvf_send_msg_to_pf(nic, &mbx);

		ind_tbl_len -= mbx.rss_cfg.tbl_len;
	}
}
void nicvf_set_rss_key(struct nicvf *nic)
{
	struct nicvf_rss_info *rss = &nic->rss_info;
	u64 key_addr = NIC_VNIC_RSS_KEY_0_4;
	int idx;

	for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
		nicvf_reg_write(nic, key_addr, rss->key[idx]);
		key_addr += sizeof(u64);
	}
}
static int nicvf_rss_init(struct nicvf *nic)
{
	struct nicvf_rss_info *rss = &nic->rss_info;
	int idx;

	nicvf_get_rss_size(nic);

	if (cpi_alg != CPI_ALG_NONE) {
		rss->enable = false;
		rss->hash_bits = 0;
		return 0;
	}

	rss->enable = true;

	netdev_rss_key_fill(rss->key, RSS_HASH_KEY_SIZE * sizeof(u64));
	nicvf_set_rss_key(nic);

	rss->cfg = RSS_IP_HASH_ENA | RSS_TCP_HASH_ENA | RSS_UDP_HASH_ENA;
	nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss->cfg);

	rss->hash_bits = ilog2(rounddown_pow_of_two(rss->rss_size));

	for (idx = 0; idx < rss->rss_size; idx++)
		rss->ind_tbl[idx] = ethtool_rxfh_indir_default(idx,
							       nic->rx_queues);
	nicvf_config_rss(nic);
	return 1;
}
/* Request PF to allocate additional Qsets */
static void nicvf_request_sqs(struct nicvf *nic)
{
	union nic_mbx mbx = {};
	int sqs;
	int sqs_count = nic->sqs_count;
	int rx_queues = 0, tx_queues = 0;

	/* Only primary VF should request */
	if (nic->sqs_mode || !nic->sqs_count)
		return;

	mbx.sqs_alloc.msg = NIC_MBOX_MSG_ALLOC_SQS;
	mbx.sqs_alloc.vf_id = nic->vf_id;
	mbx.sqs_alloc.qs_count = nic->sqs_count;
	if (nicvf_send_msg_to_pf(nic, &mbx)) {
		/* No response from PF */
		nic->sqs_count = 0;
		return;
	}

	/* Return if no Secondary Qsets available */
	if (!nic->sqs_count)
		return;

	if (nic->rx_queues > MAX_RCV_QUEUES_PER_QS)
		rx_queues = nic->rx_queues - MAX_RCV_QUEUES_PER_QS;
	if (nic->tx_queues > MAX_SND_QUEUES_PER_QS)
		tx_queues = nic->tx_queues - MAX_SND_QUEUES_PER_QS;

	/* Set no of Rx/Tx queues in each of the SQsets */
	for (sqs = 0; sqs < nic->sqs_count; sqs++) {
		mbx.nicvf.msg = NIC_MBOX_MSG_SNICVF_PTR;
		mbx.nicvf.vf_id = nic->vf_id;
		mbx.nicvf.sqs_id = sqs;
		nicvf_send_msg_to_pf(nic, &mbx);

		nic->snicvf[sqs]->sqs_id = sqs;
		if (rx_queues > MAX_RCV_QUEUES_PER_QS) {
			nic->snicvf[sqs]->qs->rq_cnt = MAX_RCV_QUEUES_PER_QS;
			rx_queues -= MAX_RCV_QUEUES_PER_QS;
		} else {
			nic->snicvf[sqs]->qs->rq_cnt = rx_queues;
			rx_queues = 0;
		}

		if (tx_queues > MAX_SND_QUEUES_PER_QS) {
			nic->snicvf[sqs]->qs->sq_cnt = MAX_SND_QUEUES_PER_QS;
			tx_queues -= MAX_SND_QUEUES_PER_QS;
		} else {
			nic->snicvf[sqs]->qs->sq_cnt = tx_queues;
			tx_queues = 0;
		}

		nic->snicvf[sqs]->qs->cq_cnt =
		max(nic->snicvf[sqs]->qs->rq_cnt, nic->snicvf[sqs]->qs->sq_cnt);

		/* Initialize secondary Qset's queues and its interrupts */
		nicvf_open(nic->snicvf[sqs]->netdev);
	}

	/* Update stack with actual Rx/Tx queue count allocated */
	if (sqs_count != nic->sqs_count)
		nicvf_set_real_num_queues(nic->netdev,
					  nic->tx_queues, nic->rx_queues);
}
/* Send this Qset's nicvf pointer to PF.
 * PF in turn sends primary VF's nicvf struct to secondary Qsets/VFs
 * so that packets received by these Qsets can use primary VF's netdev.
 */
static void nicvf_send_vf_struct(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.nicvf.msg = NIC_MBOX_MSG_NICVF_PTR;
	mbx.nicvf.sqs_mode = nic->sqs_mode;
	mbx.nicvf.nicvf = (u64)nic;
	nicvf_send_msg_to_pf(nic, &mbx);
}
static void nicvf_get_primary_vf_struct(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.nicvf.msg = NIC_MBOX_MSG_PNICVF_PTR;
	nicvf_send_msg_to_pf(nic, &mbx);
}
int nicvf_set_real_num_queues(struct net_device *netdev,
			      int tx_queues, int rx_queues)
{
	int err = 0;

	err = netif_set_real_num_tx_queues(netdev, tx_queues);
	if (err) {
		netdev_err(netdev,
			   "Failed to set no of Tx queues: %d\n", tx_queues);
		return err;
	}

	err = netif_set_real_num_rx_queues(netdev, rx_queues);
	if (err)
		netdev_err(netdev,
			   "Failed to set no of Rx queues: %d\n", rx_queues);
	return err;
}
static int nicvf_init_resources(struct nicvf *nic)
{
	int err;
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;

	/* Enable Qset */
	nicvf_qset_config(nic, true);

	/* Initialize queues and HW for data transfer */
	err = nicvf_config_data_transfer(nic, true);
	if (err) {
		netdev_err(nic->netdev,
			   "Failed to alloc/config VF's QSet resources\n");
		return err;
	}

	/* Send VF config done msg to PF */
	nicvf_write_to_mbx(nic, &mbx);

	return 0;
}
static void nicvf_snd_pkt_handler(struct net_device *netdev,
				  struct cqe_send_t *cqe_tx,
				  int cqe_type, int budget,
				  unsigned int *tx_pkts, unsigned int *tx_bytes)
{
	struct sk_buff *skb = NULL;
	struct nicvf *nic = netdev_priv(netdev);
	struct snd_queue *sq;
	struct sq_hdr_subdesc *hdr;
	struct sq_hdr_subdesc *tso_sqe;

	sq = &nic->qs->sq[cqe_tx->sq_idx];

	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr);
	if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER)
		return;

	netdev_dbg(nic->netdev,
		   "%s Qset #%d SQ #%d SQ ptr #%d subdesc count %d\n",
		   __func__, cqe_tx->sq_qs, cqe_tx->sq_idx,
		   cqe_tx->sqe_ptr, hdr->subdesc_cnt);

	nicvf_check_cqe_tx_errs(nic, cqe_tx);
	skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];
	if (skb) {
		/* Check for dummy descriptor used for HW TSO offload on 88xx */
		if (hdr->dont_send) {
			/* Get actual TSO descriptors and free them */
			tso_sqe =
			 (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
			nicvf_put_sq_desc(sq, tso_sqe->subdesc_cnt + 1);
		}
		nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
		(*tx_pkts)++;
		*tx_bytes += skb->len;
		napi_consume_skb(skb, budget);
		sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;
	} else {
		/* In case of SW TSO on 88xx, only last segment will have
		 * a SKB attached, so just free SQEs here.
		 */
		if (!nic->hw_tso)
			nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
	}
}
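/* Propagate the hardware RSS hash to the stack so flows can be steered
 * without recomputing it in software; the reported hash type depends on
 * which headers the RSS algorithm covered.
 */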
static inline void nicvf_set_rxhash(struct net_device *netdev,
				    struct cqe_rx_t *cqe_rx,
				    struct sk_buff *skb)
{
	u8 hash_type;
	u32 hash;

	if (!(netdev->features & NETIF_F_RXHASH))
		return;

	switch (cqe_rx->rss_alg) {
	case RSS_ALG_TCP_IP:
	case RSS_ALG_UDP_IP:
		hash_type = PKT_HASH_TYPE_L4;
		hash = cqe_rx->rss_tag;
		break;
	case RSS_ALG_IP:
		hash_type = PKT_HASH_TYPE_L3;
		hash = cqe_rx->rss_tag;
		break;
	default:
		hash_type = PKT_HASH_TYPE_NONE;
		hash = 0;
	}

	skb_set_hash(skb, hash, hash_type);
}
static void nicvf_rcv_pkt_handler(struct net_device *netdev,
				  struct napi_struct *napi,
				  struct cqe_rx_t *cqe_rx)
{
	struct sk_buff *skb;
	struct nicvf *nic = netdev_priv(netdev);
	int err = 0;
	int rq_idx;

	rq_idx = nicvf_netdev_qidx(nic, cqe_rx->rq_idx);

	if (nic->sqs_mode) {
		/* Use primary VF's 'nicvf' struct */
		nic = nic->pnicvf;
		netdev = nic->netdev;
	}

	/* Check for errors */
	err = nicvf_check_cqe_rx_errs(nic, cqe_rx);
	if (err && !cqe_rx->rb_cnt)
		return;

	skb = nicvf_get_rcv_skb(nic, cqe_rx);
	if (!skb) {
		netdev_dbg(nic->netdev, "Packet not received\n");
		return;
	}

	if (netif_msg_pktdata(nic)) {
		netdev_info(nic->netdev, "%s: skb 0x%p, len=%d\n", netdev->name,
			    skb, skb->len);
		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1,
			       skb->data, skb->len, true);
	}

	/* If error packet, drop it here */
	if (err) {
		dev_kfree_skb_any(skb);
		return;
	}

	nicvf_set_rxhash(netdev, cqe_rx, skb);

	skb_record_rx_queue(skb, rq_idx);
	if (netdev->hw_features & NETIF_F_RXCSUM) {
		/* HW by default verifies TCP/UDP/SCTP checksums */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		skb_checksum_none_assert(skb);
	}

	skb->protocol = eth_type_trans(skb, netdev);

	/* Check for stripped VLAN */
	if (cqe_rx->vlan_found && cqe_rx->vlan_stripped)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       ntohs((__force __be16)cqe_rx->vlan_tci));

	if (napi && (netdev->features & NETIF_F_GRO))
		napi_gro_receive(napi, skb);
	else
		netif_receive_skb(skb);
}
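/* Drain valid CQEs from a completion queue. Rx and Tx completions share
 * one CQ; Tx completions are always consumed so the send queue can be
 * replenished, while Rx work stops once the NAPI budget is exhausted.
 * The doorbell write hands the processed CQEs back to hardware.
 */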
static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
				 struct napi_struct *napi, int budget)
{
	int processed_cqe, work_done = 0, tx_done = 0;
	int cqe_count, cqe_head;
	struct nicvf *nic = netdev_priv(netdev);
	struct queue_set *qs = nic->qs;
	struct cmp_queue *cq = &qs->cq[cq_idx];
	struct cqe_rx_t *cq_desc;
	struct netdev_queue *txq;
	unsigned int tx_pkts = 0, tx_bytes = 0;

	spin_lock_bh(&cq->lock);
loop:
	processed_cqe = 0;
	/* Get no of valid CQ entries to process */
	cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_idx);
	cqe_count &= CQ_CQE_COUNT;
	if (!cqe_count)
		goto done;

	/* Get head of the valid CQ entries */
	cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
	cqe_head &= 0xFFFF;

	netdev_dbg(nic->netdev, "%s CQ%d cqe_count %d cqe_head %d\n",
		   __func__, cq_idx, cqe_count, cqe_head);
	while (processed_cqe < cqe_count) {
		/* Get the CQ descriptor */
		cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
		cqe_head++;
		cqe_head &= (cq->dmem.q_len - 1);
		/* Initiate prefetch for next descriptor */
		prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head));

		if ((work_done >= budget) && napi &&
		    (cq_desc->cqe_type != CQE_TYPE_SEND)) {
			break;
		}

		netdev_dbg(nic->netdev, "CQ%d cq_desc->cqe_type %d\n",
			   cq_idx, cq_desc->cqe_type);
		switch (cq_desc->cqe_type) {
		case CQE_TYPE_RX:
			nicvf_rcv_pkt_handler(netdev, napi, cq_desc);
			work_done++;
			break;
		case CQE_TYPE_SEND:
			nicvf_snd_pkt_handler(netdev,
					      (void *)cq_desc, CQE_TYPE_SEND,
					      budget, &tx_pkts, &tx_bytes);
			tx_done++;
			break;
		case CQE_TYPE_INVALID:
		case CQE_TYPE_RX_SPLIT:
		case CQE_TYPE_RX_TCP:
		case CQE_TYPE_SEND_PTP:
			/* Ignore for now */
			break;
		}
		processed_cqe++;
	}
	netdev_dbg(nic->netdev,
		   "%s CQ%d processed_cqe %d work_done %d budget %d\n",
		   __func__, cq_idx, processed_cqe, work_done, budget);

	/* Ring doorbell to inform H/W to reuse processed CQEs */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR,
			      cq_idx, processed_cqe);

	if ((work_done < budget) && napi)
		goto loop;

done:
	/* Wakeup TXQ if its stopped earlier due to SQ full */
	if (tx_done) {
		netdev = nic->pnicvf->netdev;
		txq = netdev_get_tx_queue(netdev,
					  nicvf_netdev_qidx(nic, cq_idx));
		nic = nic->pnicvf;
		netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);

		if (netif_tx_queue_stopped(txq) && netif_carrier_ok(netdev)) {
			netif_tx_start_queue(txq);
			this_cpu_inc(nic->drv_stats->txq_wake);
			if (netif_msg_tx_err(nic))
				netdev_warn(netdev,
					    "%s: Transmit queue wakeup SQ%d\n",
					    netdev->name, cq_idx);
		}
	}

	spin_unlock_bh(&cq->lock);
	return work_done;
}
static int nicvf_poll(struct napi_struct *napi, int budget)
{
	u64 cq_head;
	int work_done = 0;
	struct net_device *netdev = napi->dev;
	struct nicvf *nic = netdev_priv(netdev);
	struct nicvf_cq_poll *cq;

	cq = container_of(napi, struct nicvf_cq_poll, napi);
	work_done = nicvf_cq_intr_handler(netdev, cq->cq_idx, napi, budget);

	if (work_done < budget) {
		/* Slow packet rate, exit polling */
		napi_complete(napi);
		/* Re-enable interrupts */
		cq_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD,
					       cq->cq_idx);
		nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
		nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_HEAD,
				      cq->cq_idx, cq_head);
		nicvf_enable_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
	}
	return work_done;
}
/* Qset error interrupt handler
 *
 * As of now only CQ errors are handled
 */
static void nicvf_handle_qs_err(unsigned long data)
{
	struct nicvf *nic = (struct nicvf *)data;
	struct queue_set *qs = nic->qs;
	int qidx;
	u64 status;

	netif_tx_disable(nic->netdev);

	/* Check if it is CQ err */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS,
					      qidx);
		if (!(status & CQ_ERR_MASK))
			continue;

		/* Process already queued CQEs and reconfig CQ */
		nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
		nicvf_sq_disable(nic, qidx);
		nicvf_cq_intr_handler(nic->netdev, qidx, NULL, 0);
		nicvf_cmp_queue_config(nic, qs, qidx, true);
		nicvf_sq_free_used_descs(nic->netdev, &qs->sq[qidx], qidx);
		nicvf_sq_enable(nic, &qs->sq[qidx], qidx);

		nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
	}

	netif_tx_start_all_queues(nic->netdev);
	/* Re-enable Qset error interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
}
static void nicvf_dump_intr_status(struct nicvf *nic)
{
	if (netif_msg_intr(nic))
		netdev_info(nic->netdev, "%s: interrupt status 0x%llx\n",
			    nic->netdev->name, nicvf_reg_read(nic, NIC_VF_INT));
}
static irqreturn_t nicvf_misc_intr_handler(int irq, void *nicvf_irq)
{
	struct nicvf *nic = (struct nicvf *)nicvf_irq;
	u64 intr;

	nicvf_dump_intr_status(nic);

	intr = nicvf_reg_read(nic, NIC_VF_INT);
	/* Check for spurious interrupt */
	if (!(intr & NICVF_INTR_MBOX_MASK))
		return IRQ_HANDLED;

	nicvf_handle_mbx_intr(nic);

	return IRQ_HANDLED;
}
static irqreturn_t nicvf_intr_handler(int irq, void *cq_irq)
{
	struct nicvf_cq_poll *cq_poll = (struct nicvf_cq_poll *)cq_irq;
	struct nicvf *nic = cq_poll->nicvf;
	int qidx = cq_poll->cq_idx;

	nicvf_dump_intr_status(nic);

	/* Disable interrupts */
	nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);

	/* Schedule NAPI */
	napi_schedule_irqoff(&cq_poll->napi);

	/* Clear interrupt */
	nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);

	return IRQ_HANDLED;
}
static irqreturn_t nicvf_rbdr_intr_handler(int irq, void *nicvf_irq)
{
	struct nicvf *nic = (struct nicvf *)nicvf_irq;
	u8 qidx;

	nicvf_dump_intr_status(nic);

	/* Disable RBDR interrupt and schedule softirq */
	for (qidx = 0; qidx < nic->qs->rbdr_cnt; qidx++) {
		if (!nicvf_is_intr_enabled(nic, NICVF_INTR_RBDR, qidx))
			continue;
		nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
		tasklet_hi_schedule(&nic->rbdr_task);
		/* Clear interrupt */
		nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
	}

	return IRQ_HANDLED;
}
static irqreturn_t nicvf_qs_err_intr_handler(int irq, void *nicvf_irq)
{
	struct nicvf *nic = (struct nicvf *)nicvf_irq;

	nicvf_dump_intr_status(nic);

	/* Disable Qset err interrupt and schedule softirq */
	nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
	tasklet_hi_schedule(&nic->qs_err_task);
	nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);

	return IRQ_HANDLED;
}
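/* MSI-X layout: one vector per CQ, SQ and RBDR, plus one for the PF
 * mailbox (misc) and one for Qset errors; the NICVF_INTR_ID_* offsets
 * used in the handlers above and below encode this layout.
 */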
static int nicvf_enable_msix(struct nicvf *nic)
{
	int ret, vec;

	nic->num_vec = NIC_VF_MSIX_VECTORS;

	for (vec = 0; vec < nic->num_vec; vec++)
		nic->msix_entries[vec].entry = vec;

	ret = pci_enable_msix(nic->pdev, nic->msix_entries, nic->num_vec);
	if (ret) {
		netdev_err(nic->netdev,
			   "Req for #%d msix vectors failed\n", nic->num_vec);
		return 0;
	}
	nic->msix_enabled = 1;
	return 1;
}
static void nicvf_disable_msix(struct nicvf *nic)
{
	if (nic->msix_enabled) {
		pci_disable_msix(nic->pdev);
		nic->msix_enabled = 0;
		nic->num_vec = 0;
	}
}
static void nicvf_set_irq_affinity(struct nicvf *nic)
{
	int vec, cpu;
	int irqnum;

	for (vec = 0; vec < nic->num_vec; vec++) {
		if (!nic->irq_allocated[vec])
			continue;

		if (!zalloc_cpumask_var(&nic->affinity_mask[vec], GFP_KERNEL))
			return;
		/* CQ interrupts */
		if (vec < NICVF_INTR_ID_SQ)
			/* Leave CPU0 for RBDR and other interrupts */
			cpu = nicvf_netdev_qidx(nic, vec) + 1;
		else
			cpu = 0;

		cpumask_set_cpu(cpumask_local_spread(cpu, nic->node),
				nic->affinity_mask[vec]);
		irqnum = nic->msix_entries[vec].vector;
		irq_set_affinity_hint(irqnum, nic->affinity_mask[vec]);
	}
}
static int nicvf_register_interrupts(struct nicvf *nic)
{
	int irq, ret = 0;
	int vector;

	for_each_cq_irq(irq)
		sprintf(nic->irq_name[irq], "%s-rxtx-%d",
			nic->pnicvf->netdev->name,
			nicvf_netdev_qidx(nic, irq));

	for_each_sq_irq(irq)
		sprintf(nic->irq_name[irq], "%s-sq-%d",
			nic->pnicvf->netdev->name,
			nicvf_netdev_qidx(nic, irq - NICVF_INTR_ID_SQ));

	for_each_rbdr_irq(irq)
		sprintf(nic->irq_name[irq], "%s-rbdr-%d",
			nic->pnicvf->netdev->name,
			nic->sqs_mode ? (nic->sqs_id + 1) : 0);

	/* Register CQ interrupts */
	for (irq = 0; irq < nic->qs->cq_cnt; irq++) {
		vector = nic->msix_entries[irq].vector;
		ret = request_irq(vector, nicvf_intr_handler,
				  0, nic->irq_name[irq], nic->napi[irq]);
		if (ret)
			goto err;
		nic->irq_allocated[irq] = true;
	}

	/* Register RBDR interrupt */
	for (irq = NICVF_INTR_ID_RBDR;
	     irq < (NICVF_INTR_ID_RBDR + nic->qs->rbdr_cnt); irq++) {
		vector = nic->msix_entries[irq].vector;
		ret = request_irq(vector, nicvf_rbdr_intr_handler,
				  0, nic->irq_name[irq], nic);
		if (ret)
			goto err;
		nic->irq_allocated[irq] = true;
	}

	/* Register QS error interrupt */
	sprintf(nic->irq_name[NICVF_INTR_ID_QS_ERR], "%s-qset-err-%d",
		nic->pnicvf->netdev->name,
		nic->sqs_mode ? (nic->sqs_id + 1) : 0);
	irq = NICVF_INTR_ID_QS_ERR;
	ret = request_irq(nic->msix_entries[irq].vector,
			  nicvf_qs_err_intr_handler,
			  0, nic->irq_name[irq], nic);
	if (ret)
		goto err;

	nic->irq_allocated[irq] = true;

	/* Set IRQ affinities */
	nicvf_set_irq_affinity(nic);

err:
	if (ret)
		netdev_err(nic->netdev, "request_irq failed, vector %d\n", irq);

	return ret;
}
static void nicvf_unregister_interrupts(struct nicvf *nic)
{
	int irq;

	/* Free registered interrupts */
	for (irq = 0; irq < nic->num_vec; irq++) {
		if (!nic->irq_allocated[irq])
			continue;

		irq_set_affinity_hint(nic->msix_entries[irq].vector, NULL);
		free_cpumask_var(nic->affinity_mask[irq]);

		if (irq < NICVF_INTR_ID_SQ)
			free_irq(nic->msix_entries[irq].vector, nic->napi[irq]);
		else
			free_irq(nic->msix_entries[irq].vector, nic);

		nic->irq_allocated[irq] = false;
	}

	/* Disable MSI-X */
	nicvf_disable_msix(nic);
}
/* Initialize MSIX vectors and register MISC interrupt.
 * Send READY message to PF to check if it's alive.
 */
static int nicvf_register_misc_interrupt(struct nicvf *nic)
{
	int ret = 0;
	int irq = NICVF_INTR_ID_MISC;

	/* Return if mailbox interrupt is already registered */
	if (nic->msix_enabled)
		return 0;

	/* Enable MSI-X */
	if (!nicvf_enable_msix(nic))
		return 1;

	sprintf(nic->irq_name[irq], "%s Mbox", "NICVF");
	/* Register Misc interrupt */
	ret = request_irq(nic->msix_entries[irq].vector,
			  nicvf_misc_intr_handler, 0, nic->irq_name[irq], nic);

	if (ret)
		return ret;
	nic->irq_allocated[irq] = true;

	/* Enable mailbox interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_MBOX, 0);

	/* Check if VF is able to communicate with PF */
	if (!nicvf_check_pf_ready(nic)) {
		nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
		nicvf_unregister_interrupts(nic);
		return 1;
	}

	return 0;
}
static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct nicvf *nic = netdev_priv(netdev);
	int qid = skb_get_queue_mapping(skb);
	struct netdev_queue *txq = netdev_get_tx_queue(netdev, qid);

	/* Check for minimum packet length */
	if (skb->len <= ETH_HLEN) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (!netif_tx_queue_stopped(txq) && !nicvf_sq_append_skb(nic, skb)) {
		netif_tx_stop_queue(txq);
		this_cpu_inc(nic->drv_stats->txq_stop);
		if (netif_msg_tx_err(nic))
			netdev_warn(netdev,
				    "%s: Transmit ring full, stopping SQ%d\n",
				    netdev->name, qid);
		return NETDEV_TX_BUSY;
	}

	return NETDEV_TX_OK;
}
static inline void nicvf_free_cq_poll(struct nicvf *nic)
{
	struct nicvf_cq_poll *cq_poll;
	int qidx;

	for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) {
		cq_poll = nic->napi[qidx];
		if (!cq_poll)
			continue;
		nic->napi[qidx] = NULL;
		kfree(cq_poll);
	}
}
int nicvf_stop(struct net_device *netdev)
{
	int irq, qidx;
	struct nicvf *nic = netdev_priv(netdev);
	struct queue_set *qs = nic->qs;
	struct nicvf_cq_poll *cq_poll = NULL;
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
	nicvf_send_msg_to_pf(nic, &mbx);

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(nic->netdev);
	nic->link_up = false;

	/* Teardown secondary qsets first */
	if (!nic->sqs_mode) {
		for (qidx = 0; qidx < nic->sqs_count; qidx++) {
			if (!nic->snicvf[qidx])
				continue;
			nicvf_stop(nic->snicvf[qidx]->netdev);
			nic->snicvf[qidx] = NULL;
		}
	}

	/* Disable RBDR & QS error interrupts */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
		nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
		nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
	}
	nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
	nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);

	/* Wait for pending IRQ handlers to finish */
	for (irq = 0; irq < nic->num_vec; irq++)
		synchronize_irq(nic->msix_entries[irq].vector);

	tasklet_kill(&nic->rbdr_task);
	tasklet_kill(&nic->qs_err_task);
	if (nic->rb_work_scheduled)
		cancel_delayed_work_sync(&nic->rbdr_work);

	for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) {
		cq_poll = nic->napi[qidx];
		if (!cq_poll)
			continue;
		napi_synchronize(&cq_poll->napi);
		/* CQ intr is enabled while napi_complete,
		 * so disable it now
		 */
		nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
		nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);
		napi_disable(&cq_poll->napi);
		netif_napi_del(&cq_poll->napi);
	}

	netif_tx_disable(netdev);

	for (qidx = 0; qidx < netdev->num_tx_queues; qidx++)
		netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx));

	/* Free resources */
	nicvf_config_data_transfer(nic, false);

	/* Disable HW Qset */
	nicvf_qset_config(nic, false);

	/* disable mailbox interrupt */
	nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);

	nicvf_unregister_interrupts(nic);

	nicvf_free_cq_poll(nic);

	/* Clear multiqset info */
	nic->pnicvf = nic;

	return 0;
}
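/* Inform the PF of the maximum frame size this VF expects to receive,
 * so the PF can program the corresponding hardware limit.
 */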
static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
{
	union nic_mbx mbx = {};

	mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
	mbx.frs.max_frs = mtu;
	mbx.frs.vf_id = nic->vf_id;

	return nicvf_send_msg_to_pf(nic, &mbx);
}
int nicvf_open(struct net_device *netdev)
{
	int cpu, err, qidx;
	struct nicvf *nic = netdev_priv(netdev);
	struct queue_set *qs = nic->qs;
	struct nicvf_cq_poll *cq_poll = NULL;

	netif_carrier_off(netdev);

	err = nicvf_register_misc_interrupt(nic);
	if (err)
		return err;

	/* Register NAPI handler for processing CQEs */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		cq_poll = kzalloc(sizeof(*cq_poll), GFP_KERNEL);
		if (!cq_poll) {
			err = -ENOMEM;
			goto napi_del;
		}
		cq_poll->cq_idx = qidx;
		cq_poll->nicvf = nic;
		netif_napi_add(netdev, &cq_poll->napi, nicvf_poll,
			       NAPI_POLL_WEIGHT);
		napi_enable(&cq_poll->napi);
		nic->napi[qidx] = cq_poll;
	}

	/* Check if we got MAC address from PF or else generate a random MAC */
	if (!nic->sqs_mode && is_zero_ether_addr(netdev->dev_addr)) {
		eth_hw_addr_random(netdev);
		nicvf_hw_set_mac_addr(nic, netdev);
	}

	if (nic->set_mac_pending) {
		nic->set_mac_pending = false;
		nicvf_hw_set_mac_addr(nic, netdev);
	}

	/* Init tasklet for handling Qset err interrupt */
	tasklet_init(&nic->qs_err_task, nicvf_handle_qs_err,
		     (unsigned long)nic);

	/* Init RBDR tasklet which will refill RBDR */
	tasklet_init(&nic->rbdr_task, nicvf_rbdr_task,
		     (unsigned long)nic);
	INIT_DELAYED_WORK(&nic->rbdr_work, nicvf_rbdr_work);

	/* Configure CPI algorithm */
	nic->cpi_alg = cpi_alg;
	if (!nic->sqs_mode)
		nicvf_config_cpi(nic);

	nicvf_request_sqs(nic);
	if (nic->sqs_mode)
		nicvf_get_primary_vf_struct(nic);

	/* Configure receive side scaling and MTU */
	if (!nic->sqs_mode) {
		nicvf_rss_init(nic);
		if (nicvf_update_hw_max_frs(nic, netdev->mtu))
			goto cleanup;

		/* Clear percpu stats */
		for_each_possible_cpu(cpu)
			memset(per_cpu_ptr(nic->drv_stats, cpu), 0,
			       sizeof(struct nicvf_drv_stats));
	}

	err = nicvf_register_interrupts(nic);
	if (err)
		goto cleanup;

	/* Initialize the queues */
	err = nicvf_init_resources(nic);
	if (err)
		goto cleanup;

	/* Make sure queue initialization is written */
	wmb();

	nicvf_reg_write(nic, NIC_VF_INT, -1);
	/* Enable Qset err interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);

	/* Enable completion queue interrupt */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++)
		nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);

	/* Enable RBDR threshold interrupt */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
		nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);

	return 0;
cleanup:
	nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
	nicvf_unregister_interrupts(nic);
	tasklet_kill(&nic->qs_err_task);
	tasklet_kill(&nic->rbdr_task);
napi_del:
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		cq_poll = nic->napi[qidx];
		if (!cq_poll)
			continue;
		napi_disable(&cq_poll->napi);
		netif_napi_del(&cq_poll->napi);
	}
	nicvf_free_cq_poll(nic);
	return err;
}
static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct nicvf *nic = netdev_priv(netdev);

	if (new_mtu > NIC_HW_MAX_FRS)
		return -EINVAL;

	if (new_mtu < NIC_HW_MIN_FRS)
		return -EINVAL;

	netdev->mtu = new_mtu;

	if (!netif_running(netdev))
		return 0;

	if (nicvf_update_hw_max_frs(nic, new_mtu))
		return -EINVAL;

	return 0;
}
static int nicvf_set_mac_address(struct net_device *netdev, void *p)
{
	struct sockaddr *addr = p;
	struct nicvf *nic = netdev_priv(netdev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	if (nic->msix_enabled) {
		if (nicvf_hw_set_mac_addr(nic, netdev))
			return -EBUSY;
	} else {
		nic->set_mac_pending = true;
	}

	return 0;
}
void nicvf_update_lmac_stats(struct nicvf *nic)
{
	int stat = 0;
	union nic_mbx mbx = {};

	if (!netif_running(nic->netdev))
		return;

	mbx.bgx_stats.msg = NIC_MBOX_MSG_BGX_STATS;
	mbx.bgx_stats.vf_id = nic->vf_id;
	/* Rx stats */
	mbx.bgx_stats.rx = 1;
	while (stat < BGX_RX_STATS_COUNT) {
		mbx.bgx_stats.idx = stat;
		if (nicvf_send_msg_to_pf(nic, &mbx))
			return;
		stat++;
	}

	stat = 0;

	/* Tx stats */
	mbx.bgx_stats.rx = 0;
	while (stat < BGX_TX_STATS_COUNT) {
		mbx.bgx_stats.idx = stat;
		if (nicvf_send_msg_to_pf(nic, &mbx))
			return;
		stat++;
	}
}
void nicvf_update_stats(struct nicvf *nic)
{
	int qidx, cpu;
	u64 tmp_stats = 0;
	struct nicvf_hw_stats *stats = &nic->hw_stats;
	struct nicvf_drv_stats *drv_stats;
	struct queue_set *qs = nic->qs;

#define GET_RX_STATS(reg) \
	nicvf_reg_read(nic, NIC_VNIC_RX_STAT_0_13 | (reg << 3))
#define GET_TX_STATS(reg) \
	nicvf_reg_read(nic, NIC_VNIC_TX_STAT_0_4 | (reg << 3))

	stats->rx_bytes = GET_RX_STATS(RX_OCTS);
	stats->rx_ucast_frames = GET_RX_STATS(RX_UCAST);
	stats->rx_bcast_frames = GET_RX_STATS(RX_BCAST);
	stats->rx_mcast_frames = GET_RX_STATS(RX_MCAST);
	stats->rx_fcs_errors = GET_RX_STATS(RX_FCS);
	stats->rx_l2_errors = GET_RX_STATS(RX_L2ERR);
	stats->rx_drop_red = GET_RX_STATS(RX_RED);
	stats->rx_drop_red_bytes = GET_RX_STATS(RX_RED_OCTS);
	stats->rx_drop_overrun = GET_RX_STATS(RX_ORUN);
	stats->rx_drop_overrun_bytes = GET_RX_STATS(RX_ORUN_OCTS);
	stats->rx_drop_bcast = GET_RX_STATS(RX_DRP_BCAST);
	stats->rx_drop_mcast = GET_RX_STATS(RX_DRP_MCAST);
	stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST);
	stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST);

	stats->tx_bytes = GET_TX_STATS(TX_OCTS);
	stats->tx_ucast_frames = GET_TX_STATS(TX_UCAST);
	stats->tx_bcast_frames = GET_TX_STATS(TX_BCAST);
	stats->tx_mcast_frames = GET_TX_STATS(TX_MCAST);
	stats->tx_drops = GET_TX_STATS(TX_DROP);

	/* On T88 pass 2.0, the dummy SQE added for TSO notification
	 * via CQE has 'dont_send' set. Hence HW drops the pkt pointed
	 * to by the dummy SQE and results in the tx_drops counter being
	 * incremented. Subtracting it from the tx_tso counter will give
	 * the exact tx_drops count.
	 */
	if (nic->t88 && nic->hw_tso) {
		for_each_possible_cpu(cpu) {
			drv_stats = per_cpu_ptr(nic->drv_stats, cpu);
			tmp_stats += drv_stats->tx_tso;
		}
		stats->tx_drops = tmp_stats - stats->tx_drops;
	}
	stats->tx_frames = stats->tx_ucast_frames +
			   stats->tx_bcast_frames +
			   stats->tx_mcast_frames;
	stats->rx_frames = stats->rx_ucast_frames +
			   stats->rx_bcast_frames +
			   stats->rx_mcast_frames;
	stats->rx_drops = stats->rx_drop_red +
			  stats->rx_drop_overrun;

	/* Update RQ and SQ stats */
	for (qidx = 0; qidx < qs->rq_cnt; qidx++)
		nicvf_update_rq_stats(nic, qidx);
	for (qidx = 0; qidx < qs->sq_cnt; qidx++)
		nicvf_update_sq_stats(nic, qidx);
}
static struct rtnl_link_stats64 *nicvf_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct nicvf *nic = netdev_priv(netdev);
	struct nicvf_hw_stats *hw_stats = &nic->hw_stats;

	nicvf_update_stats(nic);

	stats->rx_bytes = hw_stats->rx_bytes;
	stats->rx_packets = hw_stats->rx_frames;
	stats->rx_dropped = hw_stats->rx_drops;
	stats->multicast = hw_stats->rx_mcast_frames;

	stats->tx_bytes = hw_stats->tx_bytes;
	stats->tx_packets = hw_stats->tx_frames;
	stats->tx_dropped = hw_stats->tx_drops;

	return stats;
}
static void nicvf_tx_timeout(struct net_device *dev)
{
	struct nicvf *nic = netdev_priv(dev);

	if (netif_msg_tx_err(nic))
		netdev_warn(dev, "%s: Transmit timed out, resetting\n",
			    dev->name);

	this_cpu_inc(nic->drv_stats->tx_timeout);
	schedule_work(&nic->reset_task);
}
static void nicvf_reset_task(struct work_struct *work)
{
	struct nicvf *nic;

	nic = container_of(work, struct nicvf, reset_task);

	if (!netif_running(nic->netdev))
		return;

	nicvf_stop(nic->netdev);
	nicvf_open(nic->netdev);
	netif_trans_update(nic->netdev);
}
static int nicvf_config_loopback(struct nicvf *nic,
				 netdev_features_t features)
{
	union nic_mbx mbx = {};

	mbx.lbk.msg = NIC_MBOX_MSG_LOOPBACK;
	mbx.lbk.vf_id = nic->vf_id;
	mbx.lbk.enable = (features & NETIF_F_LOOPBACK) != 0;

	return nicvf_send_msg_to_pf(nic, &mbx);
}
static netdev_features_t nicvf_fix_features(struct net_device *netdev,
					    netdev_features_t features)
{
	struct nicvf *nic = netdev_priv(netdev);

	if ((features & NETIF_F_LOOPBACK) &&
	    netif_running(netdev) && !nic->loopback_supported)
		features &= ~NETIF_F_LOOPBACK;

	return features;
}
static int nicvf_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct nicvf *nic = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		nicvf_config_vlan_stripping(nic, features);

	if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev))
		return nicvf_config_loopback(nic, features);

	return 0;
}
static const struct net_device_ops nicvf_netdev_ops = {
	.ndo_open		= nicvf_open,
	.ndo_stop		= nicvf_stop,
	.ndo_start_xmit		= nicvf_xmit,
	.ndo_change_mtu		= nicvf_change_mtu,
	.ndo_set_mac_address	= nicvf_set_mac_address,
	.ndo_get_stats64	= nicvf_get_stats64,
	.ndo_tx_timeout		= nicvf_tx_timeout,
	.ndo_fix_features	= nicvf_fix_features,
	.ndo_set_features	= nicvf_set_features,
};
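/* Probe: enable the PCI function, map its configuration register BAR,
 * discover queue resources, perform the READY handshake with the PF,
 * and register the netdev. Secondary-Qset VFs stop before netdev
 * registration since they have no netdev of their own.
 */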
static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct net_device *netdev;
	struct nicvf *nic;
	int err, qcount;
	u16 sdevid;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get usable DMA configuration\n");
		goto err_release_regions;
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "unable to get 48-bit DMA for consistent allocations\n");
		goto err_release_regions;
	}

	qcount = netif_get_num_default_rss_queues();

	/* Restrict multiqset support only for host bound VFs */
	if (pdev->is_virtfn) {
		/* Set max number of queues per VF */
		qcount = min_t(int, num_online_cpus(),
			       (MAX_SQS_PER_VF + 1) * MAX_CMP_QUEUES_PER_QS);
	}

	netdev = alloc_etherdev_mqs(sizeof(struct nicvf), qcount, qcount);
	if (!netdev) {
		err = -ENOMEM;
		goto err_release_regions;
	}

	pci_set_drvdata(pdev, netdev);

	SET_NETDEV_DEV(netdev, &pdev->dev);

	nic = netdev_priv(netdev);
	nic->netdev = netdev;
	nic->pdev = pdev;
	nic->pnicvf = nic;
	nic->max_queues = qcount;

	/* MAP VF's configuration registers */
	nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!nic->reg_base) {
		dev_err(dev, "Cannot map config register space, aborting\n");
		err = -ENOMEM;
		goto err_free_netdev;
	}

	nic->drv_stats = netdev_alloc_pcpu_stats(struct nicvf_drv_stats);
	if (!nic->drv_stats) {
		err = -ENOMEM;
		goto err_free_netdev;
	}

	err = nicvf_set_qset_resources(nic);
	if (err)
		goto err_free_netdev;

	/* Check if PF is alive and get MAC address for this VF */
	err = nicvf_register_misc_interrupt(nic);
	if (err)
		goto err_free_netdev;

	nicvf_send_vf_struct(nic);

	if (!pass1_silicon(nic->pdev))
		nic->hw_tso = true;

	pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid);
	if (sdevid == 0xA134)
		nic->t88 = true;

	/* Check if this VF is in QS only mode */
	if (nic->sqs_mode)
		return 0;

	err = nicvf_set_real_num_queues(netdev, nic->tx_queues, nic->rx_queues);
	if (err)
		goto err_unregister_interrupts;

	netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG |
			       NETIF_F_TSO | NETIF_F_GRO |
			       NETIF_F_HW_VLAN_CTAG_RX);

	netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features;
	netdev->hw_features |= NETIF_F_LOOPBACK;

	netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;

	netdev->netdev_ops = &nicvf_netdev_ops;
	netdev->watchdog_timeo = NICVF_TX_TIMEOUT;

	INIT_WORK(&nic->reset_task, nicvf_reset_task);

	err = register_netdev(netdev);
	if (err) {
		dev_err(dev, "Failed to register netdevice\n");
		goto err_unregister_interrupts;
	}

	nic->msg_enable = debug;

	nicvf_set_ethtool_ops(netdev);

	return 0;

err_unregister_interrupts:
	nicvf_unregister_interrupts(nic);
err_free_netdev:
	pci_set_drvdata(pdev, NULL);
	if (nic->drv_stats)
		free_percpu(nic->drv_stats);
	free_netdev(netdev);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
	return err;
}
static void nicvf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nicvf *nic;
	struct net_device *pnetdev;

	if (!netdev)
		return;

	nic = netdev_priv(netdev);
	pnetdev = nic->pnicvf->netdev;

	/* Check if this Qset is assigned to different VF.
	 * If yes, clean primary and all secondary Qsets.
	 */
	if (pnetdev && (pnetdev->reg_state == NETREG_REGISTERED))
		unregister_netdev(pnetdev);
	nicvf_unregister_interrupts(nic);
	pci_set_drvdata(pdev, NULL);
	if (nic->drv_stats)
		free_percpu(nic->drv_stats);
	free_netdev(netdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
static void nicvf_shutdown(struct pci_dev *pdev)
{
	nicvf_remove(pdev);
}
static struct pci_driver nicvf_driver = {
	.name = DRV_NAME,
	.id_table = nicvf_id_table,
	.probe = nicvf_probe,
	.remove = nicvf_remove,
	.shutdown = nicvf_shutdown,
};
static int __init nicvf_init_module(void)
{
	pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);

	return pci_register_driver(&nicvf_driver);
}

static void __exit nicvf_cleanup_module(void)
{
	pci_unregister_driver(&nicvf_driver);
}

module_init(nicvf_init_module);
module_exit(nicvf_cleanup_module);