/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2011 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/pkt_sched.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#include <scsi/fc/fc_fcoe.h>

#include "ixgbe.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb_82599.h"
#include "ixgbe_sriov.h"
char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
			      "Intel(R) 10 Gigabit PCI Express Network Driver";

#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
	__stringify(BUILD) "-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
				"Copyright (c) 1999-2011 Intel Corporation.";
static const struct ixgbe_info *ixgbe_info_tbl[] = {
	[board_82598] = &ixgbe_82598_info,
	[board_82599] = &ixgbe_82599_info,
	[board_X540]  = &ixgbe_X540_info,
};
/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T),
	 board_X540 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS),
	 board_82599 },

	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);
#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
			    void *p);
static struct notifier_block dca_notifier = {
	.notifier_call = ixgbe_notify_dca,
	.next          = NULL,
	.priority      = 0
};
#endif

#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs,
		 "Maximum number of virtual functions to allocate per physical function");
#endif /* CONFIG_PCI_IOV */

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
#define DEFAULT_DEBUG_LEVEL_SHIFT 3
static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 gcr;
	u32 gpie;
	u32 vmdctl;

#ifdef CONFIG_PCI_IOV
	/* disable iov and allow time for transactions to clear */
	pci_disable_sriov(adapter->pdev);
#endif

	/* turn off device IOV mode */
	gcr = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	gcr &= ~(IXGBE_GCR_EXT_SRIOV);
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr);
	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
	gpie &= ~IXGBE_GPIE_VTMODE_MASK;
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* set default pool back to 0 */
	vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
	vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
	IXGBE_WRITE_FLUSH(hw);

	/* take a breather then clean up driver data */
	msleep(100);

	kfree(adapter->vfinfo);
	adapter->vfinfo = NULL;

	adapter->num_vfs = 0;
	adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
}
static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
{
	if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
	    !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state))
		schedule_work(&adapter->service_task);
}

static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
{
	BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_clear_bit();
	clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
}
struct ixgbe_reg_info {
	u32 ofs;
	char *name;
};

static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {

	/* General Registers */
	{IXGBE_CTRL, "CTRL"},
	{IXGBE_STATUS, "STATUS"},
	{IXGBE_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{IXGBE_EICR, "EICR"},

	/* RX Registers */
	{IXGBE_SRRCTL(0), "SRRCTL"},
	{IXGBE_DCA_RXCTRL(0), "DRXCTL"},
	{IXGBE_RDLEN(0), "RDLEN"},
	{IXGBE_RDH(0), "RDH"},
	{IXGBE_RDT(0), "RDT"},
	{IXGBE_RXDCTL(0), "RXDCTL"},
	{IXGBE_RDBAL(0), "RDBAL"},
	{IXGBE_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{IXGBE_TDBAL(0), "TDBAL"},
	{IXGBE_TDBAH(0), "TDBAH"},
	{IXGBE_TDLEN(0), "TDLEN"},
	{IXGBE_TDH(0), "TDH"},
	{IXGBE_TDT(0), "TDT"},
	{IXGBE_TXDCTL(0), "TXDCTL"},

	/* List Terminator */
	{}
};
/*
 * ixgbe_regdump - register printout routine
 */
static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
{
	int i = 0, j = 0;
	char rname[16];
	u32 regs[64];

	switch (reginfo->ofs) {
	case IXGBE_SRRCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
		break;
	case IXGBE_DCA_RXCTRL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		break;
	case IXGBE_RDLEN(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
		break;
	case IXGBE_RDH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
		break;
	case IXGBE_RDT(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
		break;
	case IXGBE_RXDCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
		break;
	case IXGBE_RDBAL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
		break;
	case IXGBE_RDBAH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
		break;
	case IXGBE_TDBAL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
		break;
	case IXGBE_TDBAH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
		break;
	case IXGBE_TDLEN(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
		break;
	case IXGBE_TDH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
		break;
	case IXGBE_TDT(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
		break;
	case IXGBE_TXDCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
		break;
	default:
		pr_info("%-15s %08x\n", reginfo->name,
			IXGBE_READ_REG(hw, reginfo->ofs));
		return;
	}

	for (i = 0; i < 8; i++) {
		snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7);
		pr_err("%-15s", rname);
		for (j = 0; j < 8; j++)
			pr_cont(" %08x", regs[i*8+j]);
		pr_cont("\n");
	}
}
/*
 * ixgbe_dump - Print registers, tx-rings and rx-rings
 */
static void ixgbe_dump(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_reg_info *reginfo;
	int n = 0;
	struct ixgbe_ring *tx_ring;
	struct ixgbe_tx_buffer *tx_buffer_info;
	union ixgbe_adv_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
	struct ixgbe_ring *rx_ring;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *rx_buffer_info;
	u32 staterr;
	int i = 0;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		pr_info("Device Name     state            "
			"trans_start      last_rx\n");
		pr_info("%-15s %016lX %016lX %016lX\n",
			netdev->name,
			netdev->state,
			netdev->trans_start,
			netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	pr_info(" Register Name   Value\n");
	for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
	     reginfo->name; reginfo++) {
		ixgbe_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		tx_buffer_info =
			&tx_ring->tx_buffer_info[tx_ring->next_to_clean];
		pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
			n, tx_ring->next_to_use, tx_ring->next_to_clean,
			(u64)tx_buffer_info->dma,
			tx_buffer_info->length,
			tx_buffer_info->next_to_watch,
			(u64)tx_buffer_info->time_stamp);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * Advanced Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 |  PAYLEN  | PORTS  | IDX | STA | DCMD  |DTYP |  RSV |  DTALEN  |
	 *   +--------------------------------------------------------------+
	 *   63       46 45    40 39 36 35 32 31   24 23  20 19             0
	 */

	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("T [desc]     [address 63:0  ] "
			"[PlPOIdStDDt Ln] [bi->dma       ] "
			"leng ntw timestamp bi->skb\n");

		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			pr_info("T [0x%03X]    %016llX %016llX %016llX"
				" %04X %p %016llX %p", i,
				le64_to_cpu(u0->a),
				le64_to_cpu(u0->b),
				(u64)tx_buffer_info->dma,
				tx_buffer_info->length,
				tx_buffer_info->next_to_watch,
				(u64)tx_buffer_info->time_stamp,
				tx_buffer_info->skb);
			if (i == tx_ring->next_to_use &&
			    i == tx_ring->next_to_clean)
				pr_cont(" NTC/U\n");
			else if (i == tx_ring->next_to_use)
				pr_cont(" NTU\n");
			else if (i == tx_ring->next_to_clean)
				pr_cont(" NTC\n");
			else
				pr_cont("\n");

			if (netif_msg_pktdata(adapter) &&
			    tx_buffer_info->dma != 0)
				print_hex_dump(KERN_INFO, "",
					DUMP_PREFIX_ADDRESS, 16, 1,
					phys_to_virt(tx_buffer_info->dma),
					tx_buffer_info->length, true);
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("%5d %5X %5X\n",
			n, rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Advanced Receive Descriptor (Read) Format
	 *    63                                           1        0
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 *
	 * Advanced Receive Descriptor (Write-Back) Format
	 *
	 *   63       48 47    32 31  30      21 20 16 15   4 3     0
	 *   +------------------------------------------------------+
	 * 0 | Packet   IP      |SPH| HDR_LEN  | RSV|Packet|  RSS   |
	 *   | Checksum   Ident |   |          |    | Type | Type   |
	 *   +------------------------------------------------------+
	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
	 *   +------------------------------------------------------+
	 *   63       48 47    32 31            20 19               0
	 */
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("R  [desc]      [ PktBuf     A0] "
			"[  HeadBuf   DD] [bi->dma       ] [bi->skb] "
			"<-- Adv Rx Read format\n");
		pr_info("RWB[desc]      [PcsmIpSHl PtRs] "
			"[vl er S cks ln] ---------------- [bi->skb] "
			"<-- Adv Rx Write-Back format\n");

		for (i = 0; i < rx_ring->count; i++) {
			rx_buffer_info = &rx_ring->rx_buffer_info[i];
			rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
			if (staterr & IXGBE_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("RWB[0x%03X]     %016llX "
					"%016llX ---------------- %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					rx_buffer_info->skb);
			} else {
				pr_info("R  [0x%03X]     %016llX "
					"%016llX %016llX %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)rx_buffer_info->dma,
					rx_buffer_info->skb);

				if (netif_msg_pktdata(adapter)) {
					print_hex_dump(KERN_INFO, "",
					   DUMP_PREFIX_ADDRESS, 16, 1,
					   phys_to_virt(rx_buffer_info->dma),
					   rx_ring->rx_buf_len, true);

					if (rx_ring->rx_buf_len
						< IXGBE_RXBUFFER_2048)
						print_hex_dump(KERN_INFO, "",
						  DUMP_PREFIX_ADDRESS, 16, 1,
						  phys_to_virt(
						    rx_buffer_info->page_dma +
						    rx_buffer_info->page_offset),
						  PAGE_SIZE/2, true);
				}
			}

			if (i == rx_ring->next_to_use)
				pr_cont(" NTU\n");
			else if (i == rx_ring->next_to_clean)
				pr_cont(" NTC\n");
			else
				pr_cont("\n");

		}
	}

exit:
	return;
}
static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
			ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
}

static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
			ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
}
/**
 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 *
 */
static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
			   u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		if (direction == -1)
			direction = 0;
		index = (((direction * 64) + queue) >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		ivar &= ~(0xFF << (8 * (queue & 0x3)));
		ivar |= (msix_vector << (8 * (queue & 0x3)));
		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		if (direction == -1) {
			/* other causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((queue & 1) * 8);
			ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
			break;
		} else {
			/* tx or rx causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((16 * (queue & 1)) + (8 * direction));
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
			break;
		}
	default:
		break;
	}
}
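
/*
 * Worked example for the 82599/X540 "tx or rx causes" branch above,
 * mapping Rx (direction 0) queue 5 to MSI-X vector 3: each IVAR
 * register packs four 8-bit entries covering the Rx and Tx causes of
 * two queues, so queue 5 lives in IVAR(5 >> 1) = IVAR(2) at bit offset
 * index = (16 * (5 & 1)) + (8 * 0) = 16. The byte at bits 23:16 is
 * cleared and rewritten with (3 | IXGBE_IVAR_ALLOC_VAL).
 */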
static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
					  u64 qmask)
{
	u32 mask;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		mask = (qmask & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
		mask = (qmask >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
		break;
	default:
		break;
	}
}
static inline void ixgbe_unmap_tx_resource(struct ixgbe_ring *ring,
					   struct ixgbe_tx_buffer *tx_buffer)
{
	if (tx_buffer->dma) {
		if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_MAPPED_AS_PAGE)
			dma_unmap_page(ring->dev,
				       tx_buffer->dma,
				       tx_buffer->length,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(ring->dev,
					 tx_buffer->dma,
					 tx_buffer->length,
					 DMA_TO_DEVICE);
	}
	tx_buffer->dma = 0;
}

void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring,
				      struct ixgbe_tx_buffer *tx_buffer_info)
{
	ixgbe_unmap_tx_resource(tx_ring, tx_buffer_info);
	if (tx_buffer_info->skb)
		dev_kfree_skb_any(tx_buffer_info->skb);
	tx_buffer_info->skb = NULL;
	/* tx_buffer_info must be completely set up in the transmit path */
}
static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *hwstats = &adapter->stats;
	u32 data = 0;
	u32 xoff[8] = {0};
	int i;

	if ((hw->fc.current_mode == ixgbe_fc_full) ||
	    (hw->fc.current_mode == ixgbe_fc_rx_pause)) {
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
			break;
		default:
			data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
		}
		hwstats->lxoffrxc += data;

		/* refill credits (no tx hang) if we received xoff */
		if (!data)
			return;

		for (i = 0; i < adapter->num_tx_queues; i++)
			clear_bit(__IXGBE_HANG_CHECK_ARMED,
				  &adapter->tx_ring[i]->state);
		return;
	} else if (!(adapter->dcb_cfg.pfc_mode_enable))
		return;

	/* update stats for each tc, only valid with PFC enabled */
	for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
			break;
		default:
			xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
		}
		hwstats->pxoffrxc[i] += xoff[i];
	}

	/* disarm tx queues that have received xoff frames */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
		u8 tc = tx_ring->dcb_tc;

		if (xoff[tc])
			clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
	}
}
static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
{
	return ring->tx_stats.completed;
}

static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
{
	struct ixgbe_adapter *adapter = netdev_priv(ring->netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	u32 head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
	u32 tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}
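
/*
 * Example of the head/tail arithmetic above: on a 512-entry ring with
 * head = 500 and tail = 10 the ring has wrapped, so the pending count
 * is tail + ring->count - head = 10 + 512 - 500 = 22 descriptors.
 */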
static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
{
	u32 tx_done = ixgbe_get_tx_completed(tx_ring);
	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
	u32 tx_pending = ixgbe_get_tx_pending(tx_ring);
	bool ret = false;

	clear_check_for_tx_hang(tx_ring);

	/*
	 * Check for a hung queue, but be thorough. This verifies
	 * that a transmit has been completed since the previous
	 * check AND there is at least one packet pending. The
	 * ARMED bit is set to indicate a potential hang. The
	 * bit is cleared if a pause frame is received to remove
	 * false hang detection due to PFC or 802.3x frames. By
	 * requiring this to fail twice we avoid races with
	 * pfc clearing the ARMED bit and conditions where we
	 * run the check_tx_hang logic with a transmit completion
	 * pending but without time to complete it yet.
	 */
	if ((tx_done_old == tx_done) && tx_pending) {
		/* make sure it is true for two checks in a row */
		ret = test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
				       &tx_ring->state);
	} else {
		/* update completed stats and continue */
		tx_ring->tx_stats.tx_done_old = tx_done;
		/* reset the countdown */
		clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
	}

	return ret;
}
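
/*
 * Timeline of the ARMED logic above: if check N sees tx_done unchanged
 * with packets still pending, test_and_set_bit() arms the bit and
 * returns false (no hang yet). Only if check N+1 observes the same
 * stall does test_and_set_bit() find the bit already set and report a
 * hang; any completion or received pause frame in between disarms it.
 */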
/**
 * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout
 * @adapter: driver private struct
 **/
static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
{

	/* Do the reset outside of interrupt context */
	if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
		adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
		ixgbe_service_event_schedule(adapter);
	}
}
/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 **/
static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
			       struct ixgbe_ring *tx_ring)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = q_vector->tx.work_limit;
	u16 i = tx_ring->next_to_clean;

	tx_buffer = &tx_ring->tx_buffer_info[i];
	tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);

	for (; budget; budget--) {
		union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			break;

		/* count the packet as being completed */
		tx_ring->tx_stats.completed++;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer->next_to_watch = NULL;

		/* prevent any other reads prior to eop_desc being verified */
		rmb();

		do {
			ixgbe_unmap_tx_resource(tx_ring, tx_buffer);
			tx_desc->wb.status = 0;
			if (likely(tx_desc == eop_desc)) {
				eop_desc = NULL;
				dev_kfree_skb_any(tx_buffer->skb);
				tx_buffer->skb = NULL;

				total_bytes += tx_buffer->bytecount;
				total_packets += tx_buffer->gso_segs;
			}

			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(i == tx_ring->count)) {
				i = 0;

				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IXGBE_TX_DESC_ADV(tx_ring, 0);
			}

		} while (eop_desc);
	}

	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
		/* schedule immediate reset if we believe we hung */
		struct ixgbe_hw *hw = &adapter->hw;
		tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
		e_err(drv, "Detected Tx Unit Hang\n"
			"  Tx Queue             <%d>\n"
			"  TDH, TDT             <%x>, <%x>\n"
			"  next_to_use          <%x>\n"
			"  next_to_clean        <%x>\n"
			"tx_buffer_info[next_to_clean]\n"
			"  time_stamp           <%lx>\n"
			"  jiffies              <%lx>\n",
			tx_ring->queue_index,
			IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
			IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
			tx_ring->next_to_use, i,
			tx_ring->tx_buffer_info[i].time_stamp, jiffies);

		netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

		e_info(probe,
		       "tx hang %d detected on queue %d, resetting adapter\n",
		       adapter->tx_timeout_count + 1, tx_ring->queue_index);

		/* schedule immediate reset if we believe we hung */
		ixgbe_tx_timeout_reset(adapter);

		/* the adapter is about to reset, no point in enabling stuff */
		return true;
	}

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) &&
		    !test_bit(__IXGBE_DOWN, &adapter->state)) {
			netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}
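
/*
 * Note on the wake check above: TX_WAKE_THRESHOLD is twice DESC_NEEDED,
 * so a stopped queue is only restarted once there is room for at least
 * two worst-case frames, which should keep the queue from immediately
 * stopping again right after it is woken.
 */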
#ifdef CONFIG_IXGBE_DCA
static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *rx_ring,
				int cpu)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rxctrl;
	u8 reg_idx = rx_ring->reg_idx;

	rxctrl = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(reg_idx));
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
		rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
		rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
			   IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
		break;
	default:
		break;
	}
	rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
	rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
	rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
	IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
}

static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *tx_ring,
				int cpu)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 txctrl;
	u8 reg_idx = tx_ring->reg_idx;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(reg_idx));
		txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
		txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(reg_idx), txctrl);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx));
		txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
		txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
			   IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
		txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx), txctrl);
		break;
	default:
		break;
	}
}

static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *ring;
	int cpu = get_cpu();

	if (q_vector->cpu == cpu)
		goto out_no_update;

	for (ring = q_vector->tx.ring; ring != NULL; ring = ring->next)
		ixgbe_update_tx_dca(adapter, ring, cpu);

	for (ring = q_vector->rx.ring; ring != NULL; ring = ring->next)
		ixgbe_update_rx_dca(adapter, ring, cpu);

	q_vector->cpu = cpu;
out_no_update:
	put_cpu();
}

static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
{
	int num_q_vectors;
	int i;

	if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
		return;

	/* always use CB2 mode, difference is masked in the CB driver */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
		num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	else
		num_q_vectors = 1;

	for (i = 0; i < num_q_vectors; i++) {
		adapter->q_vector[i]->cpu = -1;
		ixgbe_update_dca(adapter->q_vector[i]);
	}
}

static int __ixgbe_notify_dca(struct device *dev, void *data)
{
	struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
	unsigned long event = *(unsigned long *)data;

	if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE))
		return 0;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if we're already enabled, don't do it again */
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			break;
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
			ixgbe_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
			dca_remove_requester(dev);
			adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
		}
		break;
	}

	return 0;
}
#endif /* CONFIG_IXGBE_DCA */
static inline void ixgbe_rx_hash(union ixgbe_adv_rx_desc *rx_desc,
				 struct sk_buff *skb)
{
	skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
}
/**
 * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
 * @adapter: address of board private structure
 * @rx_desc: advanced rx descriptor
 *
 * Returns : true if it is FCoE pkt
 */
static inline bool ixgbe_rx_is_fcoe(struct ixgbe_adapter *adapter,
				    union ixgbe_adv_rx_desc *rx_desc)
{
	__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;

	return (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
	       ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) ==
		(cpu_to_le16(IXGBE_ETQF_FILTER_FCOE <<
			     IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT)));
}
/**
 * ixgbe_receive_skb - Send a completed packet up the stack
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @ring: rx descriptor ring (for a specific queue) to setup
 * @rx_desc: rx descriptor
 **/
static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
			      struct sk_buff *skb, u8 status,
			      struct ixgbe_ring *ring,
			      union ixgbe_adv_rx_desc *rx_desc)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct napi_struct *napi = &q_vector->napi;
	bool is_vlan = (status & IXGBE_RXD_STAT_VP);
	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

	if (is_vlan && (tag & VLAN_VID_MASK))
		__vlan_hwaccel_put_tag(skb, tag);

	if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
		napi_gro_receive(napi, skb);
	else
		netif_rx(skb);
}
/**
 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: address of board private structure
 * @rx_desc: advanced rx descriptor
 * @skb: skb currently being received and modified
 * @status_err: status error value of last descriptor in packet
 **/
static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
				     union ixgbe_adv_rx_desc *rx_desc,
				     struct sk_buff *skb,
				     u32 status_err)
{
	skb->ip_summed = CHECKSUM_NONE;

	/* Rx csum disabled */
	if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
		return;

	/* if IP and error */
	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
		adapter->hw_csum_rx_error++;
		return;
	}

	if (!(status_err & IXGBE_RXD_STAT_L4CS))
		return;

	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
		u16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;

		/*
		 * 82599 errata, UDP frames with a 0 checksum can be marked as
		 * checksum errors.
		 */
		if ((pkt_info & IXGBE_RXDADV_PKTTYPE_UDP) &&
		    (adapter->hw.mac.type == ixgbe_mac_82599EB))
			return;

		adapter->hw_csum_rx_error++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}
static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
{
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}
/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *bi;
	struct sk_buff *skb;
	u16 i = rx_ring->next_to_use;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev)
		return;

	while (cleaned_count--) {
		rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
		bi = &rx_ring->rx_buffer_info[i];
		skb = bi->skb;

		if (!skb) {
			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
							rx_ring->rx_buf_len);
			if (!skb) {
				rx_ring->rx_stats.alloc_rx_buff_failed++;
				goto no_buffers;
			}
			/* initialize queue mapping */
			skb_record_rx_queue(skb, rx_ring->queue_index);
			bi->skb = skb;
		}

		if (!bi->dma) {
			bi->dma = dma_map_single(rx_ring->dev,
						 skb->data,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
			if (dma_mapping_error(rx_ring->dev, bi->dma)) {
				rx_ring->rx_stats.alloc_rx_buff_failed++;
				bi->dma = 0;
				goto no_buffers;
			}
		}

		if (ring_is_ps_enabled(rx_ring)) {
			if (!bi->page) {
				bi->page = netdev_alloc_page(rx_ring->netdev);
				if (!bi->page) {
					rx_ring->rx_stats.alloc_rx_page_failed++;
					goto no_buffers;
				}
			}

			if (!bi->page_dma) {
				/* use a half page if we're re-using */
				bi->page_offset ^= PAGE_SIZE / 2;
				bi->page_dma = dma_map_page(rx_ring->dev,
							    bi->page,
							    bi->page_offset,
							    PAGE_SIZE / 2,
							    DMA_FROM_DEVICE);
				if (dma_mapping_error(rx_ring->dev,
						      bi->page_dma)) {
					rx_ring->rx_stats.alloc_rx_page_failed++;
					bi->page_dma = 0;
					goto no_buffers;
				}
			}

			/* Refresh the desc even if buffer_addrs didn't change
			 * because each write-back erases this info. */
			rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
		} else {
			rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
			rx_desc->read.hdr_addr = 0;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		ixgbe_release_rx_desc(rx_ring, i);
	}
}
static inline u16 ixgbe_get_hlen(union ixgbe_adv_rx_desc *rx_desc)
{
	/* HW will not DMA in data larger than the given buffer, even if it
	 * parses the (NFS, of course) header to be larger.  In that case, it
	 * fills the header buffer and spills the rest into the page.
	 */
	u16 hdr_info = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info);
	u16 hlen = (hdr_info &  IXGBE_RXDADV_HDRBUFLEN_MASK) >>
		    IXGBE_RXDADV_HDRBUFLEN_SHIFT;
	if (hlen > IXGBE_RX_HDR_SIZE)
		hlen = IXGBE_RX_HDR_SIZE;
	return hlen;
}
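
/*
 * The mask/shift pair above is the usual bitfield extraction pattern:
 * (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) isolates the HDRBUFLEN bits
 * of the write-back descriptor and the shift right-aligns them. The
 * clamp to IXGBE_RX_HDR_SIZE guards against the hardware reporting a
 * parsed header larger than the header buffer that was actually mapped.
 */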
/**
 * ixgbe_transform_rsc_queue - change rsc queue into a full packet
 * @skb: pointer to the last skb in the rsc queue
 *
 * This function changes a queue full of hw rsc buffers into a completed
 * packet.  It uses the ->prev pointers to find the first packet and then
 * turns it into the frag list owner.
 **/
static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb)
{
	unsigned int frag_list_size = 0;
	unsigned int skb_cnt = 1;

	while (skb->prev) {
		struct sk_buff *prev = skb->prev;
		frag_list_size += skb->len;
		skb->prev = NULL;
		skb = prev;
		skb_cnt++;
	}

	skb_shinfo(skb)->frag_list = skb->next;
	skb->next = NULL;
	skb->len += frag_list_size;
	skb->data_len += frag_list_size;
	skb->truesize += frag_list_size;
	IXGBE_RSC_CB(skb)->skb_cnt = skb_cnt;

	return skb;
}
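
/*
 * Chain example for the transform above: three RSC buffers arrive as
 * skb C with C->prev = B and B->prev = A. The loop walks C, then B,
 * then A, accumulating their lengths; A (the header owner) then takes
 * B and C as its frag_list via its ->next chain, and skb_cnt = 3 is
 * recorded for the rsc_count statistics.
 */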
static inline bool ixgbe_get_rsc_state(union ixgbe_adv_rx_desc *rx_desc)
{
	return !!(le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
		IXGBE_RXDADV_RSCCNT_MASK);
}
static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
			       struct ixgbe_ring *rx_ring,
			       int budget)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
	struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
	struct sk_buff *skb;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	const int current_node = numa_node_id();
#ifdef IXGBE_FCOE
	int ddp_bytes = 0;
#endif /* IXGBE_FCOE */
	u32 staterr;
	u16 i;
	u16 cleaned_count = 0;
	bool pkt_is_rsc = false;

	i = rx_ring->next_to_clean;
	rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

	while (staterr & IXGBE_RXD_STAT_DD) {
		u32 upper_len = 0;

		rmb(); /* read descriptor and rx_buffer_info after status DD */

		rx_buffer_info = &rx_ring->rx_buffer_info[i];

		skb = rx_buffer_info->skb;
		rx_buffer_info->skb = NULL;
		prefetch(skb->data);

		if (ring_is_rsc_enabled(rx_ring))
			pkt_is_rsc = ixgbe_get_rsc_state(rx_desc);

		/* if this is a skb from previous receive DMA will be 0 */
		if (rx_buffer_info->dma) {
			u16 hlen;
			if (pkt_is_rsc &&
			    !(staterr & IXGBE_RXD_STAT_EOP) &&
			    !skb->prev) {
				/*
				 * When HWRSC is enabled, delay unmapping
				 * of the first packet. It carries the
				 * header information, HW may still
				 * access the header after the writeback.
				 * Only unmap it when EOP is reached
				 */
				IXGBE_RSC_CB(skb)->delay_unmap = true;
				IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
			} else {
				dma_unmap_single(rx_ring->dev,
						 rx_buffer_info->dma,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
			}
			rx_buffer_info->dma = 0;

			if (ring_is_ps_enabled(rx_ring)) {
				hlen = ixgbe_get_hlen(rx_desc);
				upper_len = le16_to_cpu(rx_desc->wb.upper.length);
			} else {
				hlen = le16_to_cpu(rx_desc->wb.upper.length);
			}

			skb_put(skb, hlen);
		} else {
			/* assume packet split since header is unmapped */
			upper_len = le16_to_cpu(rx_desc->wb.upper.length);
		}

		if (upper_len) {
			dma_unmap_page(rx_ring->dev,
				       rx_buffer_info->page_dma,
				       PAGE_SIZE / 2,
				       DMA_FROM_DEVICE);
			rx_buffer_info->page_dma = 0;
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   rx_buffer_info->page,
					   rx_buffer_info->page_offset,
					   upper_len);

			if ((page_count(rx_buffer_info->page) == 1) &&
			    (page_to_nid(rx_buffer_info->page) == current_node))
				get_page(rx_buffer_info->page);
			else
				rx_buffer_info->page = NULL;

			skb->len += upper_len;
			skb->data_len += upper_len;
			skb->truesize += upper_len;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = IXGBE_RX_DESC_ADV(rx_ring, i);
		prefetch(next_rxd);
		cleaned_count++;

		if (pkt_is_rsc) {
			u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
				     IXGBE_RXDADV_NEXTP_SHIFT;
			next_buffer = &rx_ring->rx_buffer_info[nextp];
		} else {
			next_buffer = &rx_ring->rx_buffer_info[i];
		}

		if (!(staterr & IXGBE_RXD_STAT_EOP)) {
			if (ring_is_ps_enabled(rx_ring)) {
				rx_buffer_info->skb = next_buffer->skb;
				rx_buffer_info->dma = next_buffer->dma;
				next_buffer->skb = skb;
				next_buffer->dma = 0;
			} else {
				skb->next = next_buffer->skb;
				skb->next->prev = skb;
			}
			rx_ring->rx_stats.non_eop_descs++;
			goto next_desc;
		}

		if (skb->prev) {
			skb = ixgbe_transform_rsc_queue(skb);
			/* if we got here without RSC the packet is invalid */
			if (!pkt_is_rsc) {
				__pskb_trim(skb, 0);
				rx_buffer_info->skb = skb;
				goto next_desc;
			}
		}

		if (ring_is_rsc_enabled(rx_ring)) {
			if (IXGBE_RSC_CB(skb)->delay_unmap) {
				dma_unmap_single(rx_ring->dev,
						 IXGBE_RSC_CB(skb)->dma,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
				IXGBE_RSC_CB(skb)->dma = 0;
				IXGBE_RSC_CB(skb)->delay_unmap = false;
			}
		}
		if (pkt_is_rsc) {
			if (ring_is_ps_enabled(rx_ring))
				rx_ring->rx_stats.rsc_count +=
					skb_shinfo(skb)->nr_frags;
			else
				rx_ring->rx_stats.rsc_count +=
					IXGBE_RSC_CB(skb)->skb_cnt;
			rx_ring->rx_stats.rsc_flush++;
		}

		/* ERR_MASK will only have valid bits if EOP set */
		if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
			dev_kfree_skb_any(skb);
			goto next_desc;
		}

		ixgbe_rx_checksum(adapter, rx_desc, skb, staterr);
		if (adapter->netdev->features & NETIF_F_RXHASH)
			ixgbe_rx_hash(rx_desc, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
#ifdef IXGBE_FCOE
		/* if ddp, not passing to ULD unless for FCP_RSP or error */
		if (ixgbe_rx_is_fcoe(adapter, rx_desc)) {
			ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb,
						   staterr);
			if (!ddp_bytes) {
				dev_kfree_skb_any(skb);
				goto next_desc;
			}
		}
#endif /* IXGBE_FCOE */
		ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);

		budget--;
next_desc:
		rx_desc->wb.upper.status_error = 0;

		if (!budget)
			break;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
			ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = ixgbe_desc_unused(rx_ring);

	if (cleaned_count)
		ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);

#ifdef IXGBE_FCOE
	/* include DDPed FCoE data */
	if (ddp_bytes > 0) {
		unsigned int mss;

		mss = rx_ring->netdev->mtu - sizeof(struct fcoe_hdr) -
			sizeof(struct fc_frame_header) -
			sizeof(struct fcoe_crc_eof);
		if (mss > 512)
			mss &= ~511;
		total_rx_bytes += ddp_bytes;
		total_rx_packets += DIV_ROUND_UP(ddp_bytes, mss);
	}
#endif /* IXGBE_FCOE */

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_rx_packets;
	q_vector->rx.total_bytes += total_rx_bytes;

	return !!budget;
}
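
/*
 * The DDP accounting above can only estimate packet counts, since
 * direct-placed FCoE payloads never produce skbs of their own: for
 * example, 6000 DDPed bytes against a computed per-frame mss of 2112
 * count as DIV_ROUND_UP(6000, 2112) = 3 packets toward the stats.
 */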
/**
 * ixgbe_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
{
	struct ixgbe_q_vector *q_vector;
	int q_vectors, v_idx;
	u32 mask;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/*
	 * Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		struct ixgbe_ring *ring;
		q_vector = adapter->q_vector[v_idx];

		for (ring = q_vector->rx.ring; ring != NULL; ring = ring->next)
			ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx);

		for (ring = q_vector->tx.ring; ring != NULL; ring = ring->next)
			ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx);

		if (q_vector->tx.ring && !q_vector->rx.ring)
			/* tx only */
			q_vector->eitr = adapter->tx_eitr_param;
		else if (q_vector->rx.ring)
			/* rx or mixed */
			q_vector->eitr = adapter->rx_eitr_param;

		ixgbe_write_eitr(q_vector);
		/* If ATR is enabled, set interrupt affinity */
		if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
			/*
			 * Allocate the affinity_hint cpumask, assign the mask
			 * for this vector, and set our affinity_hint for
			 * this irq.
			 */
			if (!alloc_cpumask_var(&q_vector->affinity_mask,
					       GFP_KERNEL))
				return;
			cpumask_set_cpu(v_idx, q_vector->affinity_mask);
			irq_set_affinity_hint(adapter->msix_entries[v_idx].vector,
					      q_vector->affinity_mask);
		}
	}

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
			       v_idx);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		ixgbe_set_ivar(adapter, -1, 1, v_idx);
		break;
	default:
		break;
	}
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);

	/* set up to autoclear timer, and the vectors */
	mask = IXGBE_EIMS_ENABLE_MASK;
	if (adapter->num_vfs)
		mask &= ~(IXGBE_EIMS_OTHER |
			  IXGBE_EIMS_MAILBOX |
			  IXGBE_EIMS_LSC);
	else
		mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
}
enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};
/**
 * ixgbe_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 *      Stores a new ITR value based on packets and byte
 *      counts during the last interrupt.  The advantage of per interrupt
 *      computation is faster updates and more accurate ITR for the current
 *      traffic pattern.  Constants in this function were computed
 *      based on theoretical maximum wire speed and thresholds were set based
 *      on testing data as well as attempting to minimize response time
 *      while increasing bulk throughput.
 *      this functionality is controlled by the InterruptThrottleRate module
 *      parameter (see ixgbe_param.c)
 **/
static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
			     struct ixgbe_ring_container *ring_container)
{
	u64 bytes_perint;
	struct ixgbe_adapter *adapter = q_vector->adapter;
	int bytes = ring_container->total_bytes;
	int packets = ring_container->total_packets;
	u32 timepassed_us;
	u8 itr_setting = ring_container->itr;

	if (packets == 0)
		return;

	/* simple throttlerate management
	 *    0-20MB/s lowest (100000 ints/s)
	 *   20-100MB/s low   (20000 ints/s)
	 *  100-1249MB/s bulk (8000 ints/s)
	 */
	/* what was last interrupt timeslice? */
	timepassed_us = 1000000/q_vector->eitr;
	bytes_perint = bytes / timepassed_us; /* bytes/usec */

	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > adapter->eitr_low)
			itr_setting = low_latency;
		break;
	case low_latency:
		if (bytes_perint > adapter->eitr_high)
			itr_setting = bulk_latency;
		else if (bytes_perint <= adapter->eitr_low)
			itr_setting = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= adapter->eitr_high)
			itr_setting = low_latency;
		break;
	}

	/* clear work counters since we have the values we need */
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;

	/* write updated itr to ring container */
	ring_container->itr = itr_setting;
}
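
/*
 * Arithmetic example for the throttle above: with q_vector->eitr at
 * 8000 ints/s the timeslice is 1000000/8000 = 125 us, so 300000 bytes
 * received in one interval gives bytes_perint = 2400 bytes/us, which
 * is well past any reasonable eitr_high threshold and pushes the ring
 * toward the bulk_latency setting.
 */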
/**
 * ixgbe_write_eitr - write EITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 *
 * This function is made to be called by ethtool and by the driver
 * when it needs to update EITR registers at runtime.  Hardware
 * specific quirks/differences are taken care of here.
 */
void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	int v_idx = q_vector->v_idx;
	u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr);

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		/* must write high and low 16 bits to reset counter */
		itr_reg |= (itr_reg << 16);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		/*
		 * 82599 and X540 can support a value of zero, so allow it for
		 * max interrupt rate, but there is an errata where it can
		 * not be zero with RSC
		 */
		if (itr_reg == 8 &&
		    !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
			itr_reg = 0;

		/*
		 * set the WDIS bit to not clear the timer bits and cause an
		 * immediate assertion of the interrupt
		 */
		itr_reg |= IXGBE_EITR_CNT_WDIS;
		break;
	default:
		break;
	}
	IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
}
static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector)
{
	u32 new_itr = q_vector->eitr;
	u8 current_itr;

	ixgbe_update_itr(q_vector, &q_vector->tx);
	ixgbe_update_itr(q_vector, &q_vector->rx);

	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 100000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
		new_itr = 8000;
		break;
	default:
		break;
	}

	if (new_itr != q_vector->eitr) {
		/* do an exponential smoothing */
		new_itr = ((q_vector->eitr * 9) + new_itr)/10;

		/* save the algorithm value here */
		q_vector->eitr = new_itr;

		ixgbe_write_eitr(q_vector);
	}
}
/**
 * ixgbe_check_overtemp_subtask - check for over temperature
 * @adapter: pointer to adapter
 **/
static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr = adapter->interrupt_event;

	if (test_bit(__IXGBE_DOWN, &adapter->state))
		return;

	if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
	    !(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT))
		return;

	adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT;

	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_T3_LOM:
		/*
		 * Since the warning interrupt is for both ports
		 * we don't have to check if:
		 *  - This interrupt wasn't for our port.
		 *  - We may have missed the interrupt so always have to
		 *    check if we got a LSC
		 */
		if (!(eicr & IXGBE_EICR_GPI_SDP0) &&
		    !(eicr & IXGBE_EICR_LSC))
			return;

		if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) {
			u32 autoneg;
			bool link_up = false;

			hw->mac.ops.check_link(hw, &autoneg, &link_up, false);

			if (link_up)
				return;
		}

		/* Check if this is not due to overtemp */
		if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP)
			return;

		break;
	default:
		if (!(eicr & IXGBE_EICR_GPI_SDP0))
			return;
		break;
	}
	e_crit(drv,
	       "Network adapter has been stopped because it has overheated. "
	       "Restart the computer. If the problem persists, "
	       "power off the system and replace the adapter\n");

	adapter->interrupt_event = 0;
}
static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
{
	struct ixgbe_hw *hw = &adapter->hw;

	if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
	    (eicr & IXGBE_EICR_GPI_SDP1)) {
		e_crit(probe, "Fan has stopped, replace the adapter\n");
		/* write to clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
	}
}
static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
{
	struct ixgbe_hw *hw = &adapter->hw;

	if (eicr & IXGBE_EICR_GPI_SDP2) {
		/* Clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
		if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
			adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
			ixgbe_service_event_schedule(adapter);
		}
	}

	if (eicr & IXGBE_EICR_GPI_SDP1) {
		/* Clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
		if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
			adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
			ixgbe_service_event_schedule(adapter);
		}
	}
}
static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->lsc_int++;
	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
	adapter->link_check_timeout = jiffies;
	if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		IXGBE_WRITE_FLUSH(hw);
		ixgbe_service_event_schedule(adapter);
	}
}
static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
{
	struct ixgbe_adapter *adapter = data;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr;
	int i;

	/*
	 * Workaround for Silicon errata.  Use clear-by-write instead
	 * of clear-by-read.  Reading with EICS will return the
	 * interrupt causes without clearing, which is later done
	 * with the write to EICR.
	 */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);

	if (eicr & IXGBE_EICR_LSC)
		ixgbe_check_lsc(adapter);

	if (eicr & IXGBE_EICR_MAILBOX)
		ixgbe_msg_task(adapter);

	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		/* Handle Flow Director Full threshold interrupt */
		if (eicr & IXGBE_EICR_FLOW_DIR) {
			int reinit_count = 0;

			for (i = 0; i < adapter->num_tx_queues; i++) {
				struct ixgbe_ring *ring = adapter->tx_ring[i];
				if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
						       &ring->state))
					reinit_count++;
			}
			if (reinit_count) {
				/* no more flow director interrupts until after init */
				IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
				eicr &= ~IXGBE_EICR_FLOW_DIR;
				adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
				ixgbe_service_event_schedule(adapter);
			}
		}
		ixgbe_check_sfp_event(adapter, eicr);
		if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
		    ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
			if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
				adapter->interrupt_event = eicr;
				adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
				ixgbe_service_event_schedule(adapter);
			}
		}
		break;
	default:
		break;
	}

	ixgbe_check_fan_failure(adapter, eicr);

	/* re-enable the original interrupt state, no lsc, no queues */
	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, eicr &
				~(IXGBE_EIMS_LSC | IXGBE_EIMS_RTX_QUEUE));

	return IRQ_HANDLED;
}
static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
					   u64 qmask)
{
	u32 mask;
	struct ixgbe_hw *hw = &adapter->hw;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		mask = (qmask & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
		mask = (qmask >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
		break;
	default:
		break;
	}
	/* skip the flush */
}
static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
					    u64 qmask)
{
	u32 mask;
	struct ixgbe_hw *hw = &adapter->hw;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		mask = (qmask & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
		mask = (qmask >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
		break;
	default:
		break;
	}
	/* skip the flush */
}
static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data)
{
	struct ixgbe_q_vector *q_vector = data;

	/* EIAM disabled interrupts (on this vector) for us */

	if (q_vector->rx.ring || q_vector->tx.ring)
		napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}
static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
				     int r_idx)
{
	struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
	struct ixgbe_ring *rx_ring = a->rx_ring[r_idx];

	rx_ring->q_vector = q_vector;
	rx_ring->next = q_vector->rx.ring;
	q_vector->rx.ring = rx_ring;
	q_vector->rx.count++;
}

static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
				     int t_idx)
{
	struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
	struct ixgbe_ring *tx_ring = a->tx_ring[t_idx];

	tx_ring->q_vector = q_vector;
	tx_ring->next = q_vector->tx.ring;
	q_vector->tx.ring = tx_ring;
	q_vector->tx.count++;
	q_vector->tx.work_limit = a->tx_work_limit;
}
/**
 * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter)
{
	int q_vectors;
	int v_start = 0;
	int rxr_idx = 0, txr_idx = 0;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int i, j;
	int rqpv, tqpv;
	int err = 0;

	/* No mapping required if MSI-X is disabled. */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		goto out;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/*
	 * The ideal configuration...
	 * We have enough vectors to map one per queue.
	 */
	if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
			map_vector_to_rxq(adapter, v_start, rxr_idx);

		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
			map_vector_to_txq(adapter, v_start, txr_idx);

		goto out;
	}

	/*
	 * If we don't have enough vectors for a 1-to-1
	 * mapping, we'll have to group them so there are
	 * multiple queues per vector.
	 */
	/* Re-adjusting *qpv takes care of the remainder. */
	for (i = v_start; i < q_vectors; i++) {
		rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
		for (j = 0; j < rqpv; j++) {
			map_vector_to_rxq(adapter, i, rxr_idx);
			rxr_idx++;
			rxr_remaining--;
		}
		tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
		for (j = 0; j < tqpv; j++) {
			map_vector_to_txq(adapter, i, txr_idx);
			txr_idx++;
			txr_remaining--;
		}
	}
out:
	return err;
}
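
/*
 * Grouping example for the loop above: 16 Rx and 16 Tx rings on an
 * 8-vector budget. At i = 0, rqpv = DIV_ROUND_UP(16, 8) = 2, so vector
 * 0 takes two Rx rings; at i = 1, rqpv = DIV_ROUND_UP(14, 7) = 2, and
 * so on. Recomputing against the shrinking remainder spreads the rings
 * evenly even when the counts don't divide exactly.
 */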
/**
 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i, vector, q_vectors, err;
	int ri = 0, ti = 0;

	/* Decrement for Other and TCP Timer vectors */
	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	err = ixgbe_map_rings_to_vectors(adapter);
	if (err)
		return err;

	for (vector = 0; vector < q_vectors; vector++) {
		struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "TxRx", ri++);
			ti++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "rx", ri++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "tx", ti++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(adapter->msix_entries[vector].vector,
				  &ixgbe_msix_clean_rings, 0, q_vector->name,
				  q_vector);
		if (err) {
			e_err(probe, "request_irq failed for MSIX interrupt "
			      "Error: %d\n", err);
			goto free_queue_irqs;
		}
	}

	sprintf(adapter->lsc_int_name, "%s:lsc", netdev->name);
	err = request_irq(adapter->msix_entries[vector].vector,
			  ixgbe_msix_lsc, 0, adapter->lsc_int_name, adapter);
	if (err) {
		e_err(probe, "request_irq for msix_lsc failed: %d\n", err);
		goto free_queue_irqs;
	}

	return 0;

free_queue_irqs:
	for (i = vector - 1; i >= 0; i--)
		free_irq(adapter->msix_entries[--vector].vector,
			 adapter->q_vector[i]);
	adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
	return err;
}
/**
 * ixgbe_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
				    bool flush)
{
	u32 mask;

	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
	if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
		mask |= IXGBE_EIMS_GPI_SDP0;
	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
		mask |= IXGBE_EIMS_GPI_SDP1;
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		mask |= IXGBE_EIMS_ECC;
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
		if (adapter->num_vfs)
			mask |= IXGBE_EIMS_MAILBOX;
		break;
	default:
		break;
	}
	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
		mask |= IXGBE_EIMS_FLOW_DIR;

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
	if (queues)
		ixgbe_irq_enable_queues(adapter, ~0);
	if (flush)
		IXGBE_WRITE_FLUSH(&adapter->hw);

	if (adapter->num_vfs > 32) {
		u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
	}
}
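/*
 * Added commentary: callers pick the (queues, flush) pair to match their
 * context -- the shared-interrupt bail-out below re-arms everything with
 * ixgbe_irq_enable(adapter, true, true), while the normal handler exit
 * uses (false, false) and leaves the queue bits for ixgbe_poll to
 * restore once its budget is drained.
 */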
/**
 * ixgbe_intr - legacy mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t ixgbe_intr(int irq, void *data)
{
	struct ixgbe_adapter *adapter = data;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
	u32 eicr;

	/*
	 * Workaround for silicon errata on 82598.  Mask the interrupts
	 * before the read of EICR.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	/* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read
	 * therefore no explicit interrupt disable is necessary */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
	if (!eicr) {
		/*
		 * shared interrupt alert!
		 * make sure interrupts are enabled because the read will
		 * have disabled interrupts due to EIAM
		 * finish the workaround of silicon errata on 82598.  Unmask
		 * the interrupt that we masked before the EICR read.
		 */
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
			ixgbe_irq_enable(adapter, true, true);
		return IRQ_NONE;	/* Not our interrupt */
	}

	if (eicr & IXGBE_EICR_LSC)
		ixgbe_check_lsc(adapter);

	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
		ixgbe_check_sfp_event(adapter, eicr);
		if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
		    ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
			if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
				adapter->interrupt_event = eicr;
				adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
				ixgbe_service_event_schedule(adapter);
			}
		}
		break;
	default:
		break;
	}

	ixgbe_check_fan_failure(adapter, eicr);

	if (napi_schedule_prep(&(q_vector->napi))) {
		/* would disable interrupts here but EIAM disabled it */
		__napi_schedule(&(q_vector->napi));
	}

	/*
	 * re-enable link(maybe) and non-queue interrupts, no flush.
	 * ixgbe_poll will re-enable the queue interrupts
	 */
	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		ixgbe_irq_enable(adapter, false, false);

	return IRQ_HANDLED;
}
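/*
 * Added commentary: the legacy-interrupt flow above leans on EIAM
 * auto-masking -- the EICR read itself disables the tx/rx causes in
 * hardware, so the handler only re-enables the non-queue sources and
 * lets NAPI polling restore the queue bits.  An eicr of 0 means the
 * shared line fired for some other device, and the mask taken for the
 * 82598 errata workaround must be undone before returning IRQ_NONE.
 */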
static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
{
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	int i;

	/* legacy and MSI only use one vector */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		q_vectors = 1;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i]->q_vector = NULL;
		adapter->rx_ring[i]->next = NULL;
	}
	for (i = 0; i < adapter->num_tx_queues; i++) {
		adapter->tx_ring[i]->q_vector = NULL;
		adapter->tx_ring[i]->next = NULL;
	}

	for (i = 0; i < q_vectors; i++) {
		struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
		memset(&q_vector->rx, 0, sizeof(struct ixgbe_ring_container));
		memset(&q_vector->tx, 0, sizeof(struct ixgbe_ring_container));
	}
}
/**
 * ixgbe_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		err = ixgbe_request_msix_irqs(adapter);
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
				  netdev->name, adapter);
	} else {
		err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
				  netdev->name, adapter);
	}

	if (err)
		e_err(probe, "request_irq failed, Error %d\n", err);

	return err;
}
static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
{
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		int i, q_vectors;

		q_vectors = adapter->num_msix_vectors;
		i = q_vectors - 1;
		free_irq(adapter->msix_entries[i].vector, adapter);
		i--;

		for (; i >= 0; i--) {
			/* free only the irqs that were actually requested */
			if (!adapter->q_vector[i]->rx.ring &&
			    !adapter->q_vector[i]->tx.ring)
				continue;

			free_irq(adapter->msix_entries[i].vector,
				 adapter->q_vector[i]);
		}

		ixgbe_reset_q_vectors(adapter);
	} else {
		free_irq(adapter->pdev->irq, adapter);
	}
}
/**
 * ixgbe_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
{
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
		if (adapter->num_vfs > 32)
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
		break;
	default:
		break;
	}
	IXGBE_WRITE_FLUSH(&adapter->hw);
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		int i;
		for (i = 0; i < adapter->num_msix_vectors; i++)
			synchronize_irq(adapter->msix_entries[i].vector);
	} else {
		synchronize_irq(adapter->pdev->irq);
	}
}
/**
 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
 *
 **/
static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
			EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr_param));

	ixgbe_set_ivar(adapter, 0, 0, 0);
	ixgbe_set_ivar(adapter, 1, 0, 0);

	map_vector_to_rxq(adapter, 0, 0);
	map_vector_to_txq(adapter, 0, 0);

	e_info(hw, "Legacy interrupt IVAR setup done\n");
}
/**
 * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset
 * @adapter: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Tx descriptor ring after a reset.
 **/
void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
			     struct ixgbe_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64 tdba = ring->dma;
	int wait_loop = 10;
	u32 txdctl;
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
	IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx),
			txdctl & ~IXGBE_TXDCTL_ENABLE);
	IXGBE_WRITE_FLUSH(hw);

	IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx),
			(tdba & DMA_BIT_MASK(32)));
	IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32));
	IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx),
			ring->count * sizeof(union ixgbe_adv_tx_desc));
	IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
	IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
	ring->tail = hw->hw_addr + IXGBE_TDT(reg_idx);

	/* configure fetching thresholds */
	if (adapter->rx_itr_setting == 0) {
		/* cannot set wthresh when itr==0 */
		txdctl &= ~0x007F0000;
	} else {
		/* enable WTHRESH=8 descriptors, to encourage burst writeback */
		txdctl |= (8 << 16);
	}
	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
		/* PThresh workaround for Tx hang with DFP enabled. */
		txdctl |= 32;
	}

	/* reinitialize flowdirector state */
	if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
	    adapter->atr_sample_rate) {
		ring->atr_sample_rate = adapter->atr_sample_rate;
		ring->atr_count = 0;
		set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
	} else {
		ring->atr_sample_rate = 0;
	}

	clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);

	/* enable queue */
	txdctl |= IXGBE_TXDCTL_ENABLE;
	IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);

	/* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */
	if (hw->mac.type == ixgbe_mac_82598EB &&
	    !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
		return;

	/* poll to verify queue is enabled */
	do {
		usleep_range(1000, 2000);
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
	} while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
	if (!wait_loop)
		e_err(drv, "Could not enable Tx Queue %d\n", reg_idx);
}
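/*
 * Illustration (not from the original source): TXDCTL packs the
 * prefetch/host/writeback thresholds into bits 6:0, 14:8 and 22:16,
 * which is why the code above clears 0x007F0000 to zero WTHRESH and
 * writes (8 << 16) to request bursts of eight descriptor write-backs.
 */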
static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rttdcs;
	u32 reg;
	u8 tcs = netdev_get_num_tc(adapter->netdev);

	if (hw->mac.type == ixgbe_mac_82598EB)
		return;

	/* disable the arbiter while setting MTQC */
	rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
	rttdcs |= IXGBE_RTTDCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);

	/* set transmit pool layout */
	switch (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
	case (IXGBE_FLAG_SRIOV_ENABLED):
		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
				(IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF));
		break;
	default:
		if (!tcs)
			reg = IXGBE_MTQC_64Q_1PB;
		else if (tcs <= 4)
			reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
		else
			reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;

		IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);

		/* Enable Security TX Buffer IFG for multiple pb */
		if (tcs) {
			reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
			reg |= IXGBE_SECTX_DCB;
			IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
		}
		break;
	}

	/* re-enable the arbiter */
	rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
}
/**
 * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 dmatxctl;
	u32 i;

	ixgbe_setup_mtqc(adapter);

	if (hw->mac.type != ixgbe_mac_82598EB) {
		/* DMATXCTL.EN must be before Tx queues are enabled */
		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		dmatxctl |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
	}

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
}
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2

static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
				   struct ixgbe_ring *rx_ring)
{
	u32 srrctl;
	u8 reg_idx = rx_ring->reg_idx;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB: {
		struct ixgbe_ring_feature *feature = adapter->ring_feature;
		const int mask = feature[RING_F_RSS].mask;
		reg_idx = reg_idx & mask;
	}
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	default:
		break;
	}

	srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx));

	srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
	srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
	if (adapter->num_vfs)
		srrctl |= IXGBE_SRRCTL_DROP_EN;

	srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
		  IXGBE_SRRCTL_BSIZEHDR_MASK;

	if (ring_is_ps_enabled(rx_ring)) {
#if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER
		srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
#else
		srrctl |= (PAGE_SIZE / 2) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
#endif
		srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
	} else {
		srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
			  IXGBE_SRRCTL_BSIZEPKT_SHIFT;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
	}

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx), srrctl);
}
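/*
 * Illustration (not from the original source): BSIZEPKT is expressed in
 * 1KB units (IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10), so a 2048-byte
 * rx_buf_len in the one-buffer case above is programmed as
 * ALIGN(2048, 1024) >> 10 == 2.
 */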
static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
				      0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D,
				      0xB855AABE, 0x6A3E67EA, 0x14364D17,
				      0x3BED200D };
	u32 mrqc = 0, reta = 0;
	u32 rxcsum;
	int i, j;
	u8 tcs = netdev_get_num_tc(adapter->netdev);
	int maxq = adapter->ring_feature[RING_F_RSS].indices;

	if (tcs)
		maxq = min(maxq, adapter->num_tx_queues / tcs);

	/* Fill out hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);

	/* Fill out redirection table */
	for (i = 0, j = 0; i < 128; i++, j++) {
		if (j == maxq)
			j = 0;
		/* reta = 4-byte sliding window of
		 * 0x00..(indices-1)(indices-1)00..etc. */
		reta = (reta << 8) | (j * 0x11);
		if ((i & 3) == 3)
			IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
	}

	/* Disable indicating checksum in descriptor, enables RSS hash */
	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
	rxcsum |= IXGBE_RXCSUM_PCSD;
	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

	if (adapter->hw.mac.type == ixgbe_mac_82598EB &&
	    (adapter->flags & IXGBE_FLAG_RSS_ENABLED)) {
		mrqc = IXGBE_MRQC_RSSEN;
	} else {
		int mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED
					     | IXGBE_FLAG_SRIOV_ENABLED);

		switch (mask) {
		case (IXGBE_FLAG_RSS_ENABLED):
			if (!tcs)
				mrqc = IXGBE_MRQC_RSSEN;
			else if (tcs <= 4)
				mrqc = IXGBE_MRQC_RTRSS4TCEN;
			else
				mrqc = IXGBE_MRQC_RTRSS8TCEN;
			break;
		case (IXGBE_FLAG_SRIOV_ENABLED):
			mrqc = IXGBE_MRQC_VMDQEN;
			break;
		default:
			break;
		}
	}

	/* Perform hash on these packet types */
	mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
	      | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
	      | IXGBE_MRQC_RSS_FIELD_IPV6
	      | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;

	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
}
/**
 * ixgbe_configure_rscctl - enable RSC for the indicated ring
 * @adapter:    address of board private structure
 * @index:      index of ring to set
 **/
static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
				   struct ixgbe_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rscctrl;
	int rx_buf_len;
	u8 reg_idx = ring->reg_idx;

	if (!ring_is_rsc_enabled(ring))
		return;

	rx_buf_len = ring->rx_buf_len;
	rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
	rscctrl |= IXGBE_RSCCTL_RSCEN;
	/*
	 * we must limit the number of descriptors so that the
	 * total size of max desc * buf_len is not greater
	 * than 65536
	 */
	if (ring_is_ps_enabled(ring)) {
#if (MAX_SKB_FRAGS > 16)
		rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
#elif (MAX_SKB_FRAGS > 8)
		rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
#elif (MAX_SKB_FRAGS > 4)
		rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
#else
		rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
#endif
	} else {
		if (rx_buf_len < IXGBE_RXBUFFER_4096)
			rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
		else if (rx_buf_len < IXGBE_RXBUFFER_8192)
			rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
		else
			rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
	}
	IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
}
/**
 * ixgbe_set_uta - Set unicast filter table address
 * @adapter: board private structure
 *
 * The unicast table address is a register array of 32-bit registers.
 * The table is meant to be used in a way similar to how the MTA is used
 * however due to certain limitations in the hardware it is necessary to
 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
 * enable bit to allow vlan tag stripping when promiscuous mode is enabled
 **/
static void ixgbe_set_uta(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	/* The UTA table only exists on 82599 hardware and newer */
	if (hw->mac.type < ixgbe_mac_82599EB)
		return;

	/* we only need to do this if VMDq is enabled */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return;

	for (i = 0; i < 128; i++)
		IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
}
#define IXGBE_MAX_RX_DESC_POLL 10
static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
				       struct ixgbe_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int wait_loop = IXGBE_MAX_RX_DESC_POLL;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	/* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */
	if (hw->mac.type == ixgbe_mac_82598EB &&
	    !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
		return;

	do {
		usleep_range(1000, 2000);
		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
	} while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));

	if (!wait_loop) {
		e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within "
		      "the polling period\n", reg_idx);
	}
}
void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
			    struct ixgbe_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int wait_loop = IXGBE_MAX_RX_DESC_POLL;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
	rxdctl &= ~IXGBE_RXDCTL_ENABLE;

	/* write value back with RXDCTL.ENABLE bit cleared */
	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);

	if (hw->mac.type == ixgbe_mac_82598EB &&
	    !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
		return;

	/* the hardware may take up to 100us to really disable the rx queue */
	do {
		udelay(10);
		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
	} while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));

	if (!wait_loop) {
		e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within "
		      "the polling period\n", reg_idx);
	}
}
void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
			     struct ixgbe_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64 rdba = ring->dma;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
	ixgbe_disable_rx_queue(adapter, ring);

	IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
	IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
	IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx),
			ring->count * sizeof(union ixgbe_adv_rx_desc));
	IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
	IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
	ring->tail = hw->hw_addr + IXGBE_RDT(reg_idx);

	ixgbe_configure_srrctl(adapter, ring);
	ixgbe_configure_rscctl(adapter, ring);

	/* If operating in IOV mode set RLPML for X540 */
	if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
	    hw->mac.type == ixgbe_mac_X540) {
		rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
		rxdctl |= ((ring->netdev->mtu + ETH_HLEN +
			    ETH_FCS_LEN + VLAN_HLEN) | IXGBE_RXDCTL_RLPML_EN);
	}

	if (hw->mac.type == ixgbe_mac_82598EB) {
		/*
		 * enable cache line friendly hardware writes:
		 * PTHRESH=32 descriptors (half the internal cache),
		 * this also removes ugly rx_no_buffer_count increment
		 * HTHRESH=4 descriptors (to minimize latency on fetch)
		 * WTHRESH=8 burst writeback up to two cache lines
		 */
		rxdctl &= ~0x3FFFFF;
		rxdctl |=  0x080420;
	}

	/* enable receive descriptor ring */
	rxdctl |= IXGBE_RXDCTL_ENABLE;
	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);

	ixgbe_rx_desc_queue_enable(adapter, ring);
	ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
}
static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int p;

	/* PSRTYPE must be initialized in non 82598 adapters */
	u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
		      IXGBE_PSRTYPE_UDPHDR |
		      IXGBE_PSRTYPE_IPV4HDR |
		      IXGBE_PSRTYPE_L2HDR |
		      IXGBE_PSRTYPE_IPV6HDR;

	if (hw->mac.type == ixgbe_mac_82598EB)
		return;

	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED)
		psrtype |= (adapter->num_rx_queues_per_pool << 29);

	for (p = 0; p < adapter->num_rx_pools; p++)
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(adapter->num_vfs + p),
				psrtype);
}
static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 gcr_ext;
	u32 vt_reg_bits;
	u32 reg_offset, vf_shift;
	u32 vmdctl;

	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return;

	vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
	vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN | IXGBE_VT_CTL_REPLEN;
	vt_reg_bits |= (adapter->num_vfs << IXGBE_VT_CTL_POOL_SHIFT);
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits);

	vf_shift = adapter->num_vfs % 32;
	reg_offset = (adapter->num_vfs > 32) ? 1 : 0;

	/* Enable only the PF's pool for Tx/Rx */
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), 0);
	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);

	/* Map PF MAC address in RAR Entry 0 to first pool following VFs */
	hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs);

	/*
	 * Set up VF register offsets for selected VT Mode,
	 * i.e. 32 or 64 VFs for SR-IOV
	 */
	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
	gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);

	/* enable Tx loopback for VF/PF communication */
	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
	/* Enable MAC Anti-Spoofing */
	hw->mac.ops.set_mac_anti_spoofing(hw,
					  (adapter->antispoofing_enabled =
					   (adapter->num_vfs != 0)),
					  adapter->num_vfs);
}
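/*
 * Illustration (not from the original source): the PF owns the pool
 * directly after the VFs, so with num_vfs == 40 the code above computes
 * reg_offset == 1 and vf_shift == 8, enabling only bit 8 of VFRE[1] /
 * VFTE[1] (pool 40) and clearing the other register of each pair.
 */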
static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	int rx_buf_len;
	struct ixgbe_ring *rx_ring;
	int i;
	u32 mhadd, hlreg0;

	/* Decide whether to use packet split mode or not */
	/* On by default */
	adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;

	/* Do not use packet split if we're in SR-IOV Mode */
	if (adapter->num_vfs)
		adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;

	/* Disable packet split due to 82599 erratum #45 */
	if (hw->mac.type == ixgbe_mac_82599EB)
		adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;

	/* Set the RX buffer length according to the mode */
	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
		rx_buf_len = IXGBE_RX_HDR_SIZE;
	} else {
		if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
		    (netdev->mtu <= ETH_DATA_LEN))
			rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
		else
			rx_buf_len = ALIGN(max_frame + VLAN_HLEN, 1024);
	}

#ifdef IXGBE_FCOE
	/* adjust max frame to be able to do baby jumbo for FCoE */
	if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
	    (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
		max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;

#endif /* IXGBE_FCOE */
	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
	if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;

		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	/* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */
	hlreg0 |= IXGBE_HLREG0_JUMBOEN;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);

	/*
	 * Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		rx_ring = adapter->rx_ring[i];
		rx_ring->rx_buf_len = rx_buf_len;

		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)
			set_ring_ps_enabled(rx_ring);
		else
			clear_ring_ps_enabled(rx_ring);

		if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
			set_ring_rsc_enabled(rx_ring);
		else
			clear_ring_rsc_enabled(rx_ring);

#ifdef IXGBE_FCOE
		if (netdev->features & NETIF_F_FCOE_MTU) {
			struct ixgbe_ring_feature *f;
			f = &adapter->ring_feature[RING_F_FCOE];
			if ((i >= f->mask) && (i < f->mask + f->indices)) {
				clear_ring_ps_enabled(rx_ring);
				if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE)
					rx_ring->rx_buf_len =
						IXGBE_FCOE_JUMBO_FRAME_SIZE;
			} else if (!ring_is_rsc_enabled(rx_ring) &&
				   !ring_is_ps_enabled(rx_ring)) {
				rx_ring->rx_buf_len =
						IXGBE_FCOE_JUMBO_FRAME_SIZE;
			}
		}
#endif /* IXGBE_FCOE */
	}
}
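/*
 * Illustration (not from the original source): with packet split off,
 * RSC on and a standard 1500-byte MTU, max_frame is 1500 + 14 + 4 = 1518
 * and the buffer length above becomes ALIGN(1518 + 4, 1024) == 2048.
 */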
static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/*
		 * For VMDq support of different descriptor types or
		 * buffer sizes through the use of multiple SRRCTL
		 * registers, RDRXCTL.MVMEN must be set to 1
		 *
		 * also, the manual doesn't mention it clearly but DCA hints
		 * will only use queue 0's tags unless this bit is set.  Side
		 * effects of setting this bit are only that SRRCTL must be
		 * fully programmed [0..15]
		 */
		rdrxctl |= IXGBE_RDRXCTL_MVMEN;
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		/* Disable RSC for ACK packets */
		IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
		   (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
		rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
		/* hardware requires some bits to be set by default */
		rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX);
		rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
		break;
	default:
		/* We should do nothing since we don't know this hardware */
		return;
	}

	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
}
/**
 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;
	u32 rxctrl;

	/* disable receives while setting up the descriptors */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);

	ixgbe_setup_psrtype(adapter);
	ixgbe_setup_rdrxctl(adapter);

	/* Program registers for the distribution of queues */
	ixgbe_setup_mrqc(adapter);

	ixgbe_set_uta(adapter);

	/* set_rx_buffer_len must be called before ring initialization */
	ixgbe_set_rx_buffer_len(adapter);

	/*
	 * Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]);

	/* disable drop enable for 82598 parts */
	if (hw->mac.type == ixgbe_mac_82598EB)
		rxctrl |= IXGBE_RXCTRL_DMBYPS;

	/* enable all receives */
	rxctrl |= IXGBE_RXCTRL_RXEN;
	hw->mac.ops.enable_rx_dma(hw, rxctrl);
}
static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int pool_ndx = adapter->num_vfs;

	/* add VID to filter table */
	hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true);
	set_bit(vid, adapter->active_vlans);
}

static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int pool_ndx = adapter->num_vfs;

	/* remove VID from filter table */
	hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false);
	clear_bit(vid, adapter->active_vlans);
}

/**
 * ixgbe_vlan_filter_disable - helper to disable hw vlan filtering
 * @adapter: driver data
 */
static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 vlnctrl;

	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
}

/**
 * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering
 * @adapter: driver data
 */
static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 vlnctrl;

	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	vlnctrl |= IXGBE_VLNCTRL_VFE;
	vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
}

/**
 * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping
 * @adapter: driver data
 */
static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 vlnctrl;
	int i, j;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
		vlnctrl &= ~IXGBE_VLNCTRL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		for (i = 0; i < adapter->num_rx_queues; i++) {
			j = adapter->rx_ring[i]->reg_idx;
			vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
			vlnctrl &= ~IXGBE_RXDCTL_VME;
			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
		}
		break;
	default:
		break;
	}
}

/**
 * ixgbe_vlan_strip_enable - helper to enable hw vlan stripping
 * @adapter: driver data
 */
static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 vlnctrl;
	int i, j;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
		vlnctrl |= IXGBE_VLNCTRL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		for (i = 0; i < adapter->num_rx_queues; i++) {
			j = adapter->rx_ring[i]->reg_idx;
			vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
			vlnctrl |= IXGBE_RXDCTL_VME;
			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
		}
		break;
	default:
		break;
	}
}
static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
{
	u16 vid;

	ixgbe_vlan_rx_add_vid(adapter->netdev, 0);

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		ixgbe_vlan_rx_add_vid(adapter->netdev, vid);
}
/**
 * ixgbe_write_uc_addr_list - write unicast addresses to RAR table
 * @netdev: network interface device structure
 *
 * Writes unicast address list to the RAR table.
 * Returns: -ENOMEM on failure/insufficient address space
 *                0 on no addresses written
 *                X on writing X addresses to the RAR table
 **/
static int ixgbe_write_uc_addr_list(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	unsigned int vfn = adapter->num_vfs;
	unsigned int rar_entries = IXGBE_MAX_PF_MACVLANS;
	int count = 0;

	/* return ENOMEM indicating insufficient memory for addresses */
	if (netdev_uc_count(netdev) > rar_entries)
		return -ENOMEM;

	if (!netdev_uc_empty(netdev) && rar_entries) {
		struct netdev_hw_addr *ha;
		/* return error if we do not support writing to RAR table */
		if (!hw->mac.ops.set_rar)
			return -ENOMEM;

		netdev_for_each_uc_addr(ha, netdev) {
			if (!rar_entries)
				break;
			hw->mac.ops.set_rar(hw, rar_entries--, ha->addr,
					    vfn, IXGBE_RAH_AV);
			count++;
		}
	}
	/* write the addresses in reverse order to avoid write combining */
	for (; rar_entries > 0 ; rar_entries--)
		hw->mac.ops.clear_rar(hw, rar_entries);

	return count;
}
/**
 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_rx_method entry point is called whenever the unicast/multicast
 * address list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper unicast, multicast and
 * promiscuous mode.
 **/
void ixgbe_set_rx_mode(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
	int count;

	/* Check for Promiscuous and All Multicast modes */

	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);

	/* set all bits that we expect to always be set */
	fctrl |= IXGBE_FCTRL_BAM;
	fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
	fctrl |= IXGBE_FCTRL_PMCF;

	/* clear the bits we are changing the status of */
	fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);

	if (netdev->flags & IFF_PROMISC) {
		hw->addr_ctrl.user_set_promisc = true;
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
		/* don't hardware filter vlans in promisc mode */
		ixgbe_vlan_filter_disable(adapter);
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			fctrl |= IXGBE_FCTRL_MPE;
			vmolr |= IXGBE_VMOLR_MPE;
		} else {
			/*
			 * Write addresses to the MTA, if the attempt fails
			 * then we should just turn on promiscuous mode so
			 * that we can at least receive multicast traffic
			 */
			hw->mac.ops.update_mc_addr_list(hw, netdev);
			vmolr |= IXGBE_VMOLR_ROMPE;
		}
		ixgbe_vlan_filter_enable(adapter);
		hw->addr_ctrl.user_set_promisc = false;
		/*
		 * Write addresses to available RAR registers, if there is not
		 * sufficient space to store all the addresses then enable
		 * unicast promiscuous mode
		 */
		count = ixgbe_write_uc_addr_list(netdev);
		if (count < 0) {
			fctrl |= IXGBE_FCTRL_UPE;
			vmolr |= IXGBE_VMOLR_ROPE;
		}
	}

	if (adapter->num_vfs) {
		ixgbe_restore_vf_multicasts(adapter);
		vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(adapter->num_vfs)) &
			 ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
			   IXGBE_VMOLR_ROPE);
		IXGBE_WRITE_REG(hw, IXGBE_VMOLR(adapter->num_vfs), vmolr);
	}

	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	if (netdev->features & NETIF_F_HW_VLAN_RX)
		ixgbe_vlan_strip_enable(adapter);
	else
		ixgbe_vlan_strip_disable(adapter);
}
static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
{
	int q_idx;
	struct ixgbe_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/* legacy and MSI only use one vector */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		q_vectors = 1;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		napi_enable(&q_vector->napi);
	}
}

static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
{
	int q_idx;
	struct ixgbe_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/* legacy and MSI only use one vector */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		q_vectors = 1;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		napi_disable(&q_vector->napi);
	}
}
#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_configure_dcb - Configure DCB hardware
 * @adapter: ixgbe adapter struct
 *
 * This is called by the driver on open to configure the DCB hardware.
 * This is also called by the gennetlink interface when reconfiguring
 * the DCB state.
 */
static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;

	if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
		if (hw->mac.type == ixgbe_mac_82598EB)
			netif_set_gso_max_size(adapter->netdev, 65536);
		return;
	}

	if (hw->mac.type == ixgbe_mac_82598EB)
		netif_set_gso_max_size(adapter->netdev, 32768);

	/* Enable VLAN tag insert/strip */
	adapter->netdev->features |= NETIF_F_HW_VLAN_RX;

	hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);

	/* reconfigure the hardware */
	if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) {
#ifdef IXGBE_FCOE
		if (adapter->netdev->features & NETIF_F_FCOE_MTU)
			max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
#endif
		ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
						DCB_TX_CONFIG);
		ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
						DCB_RX_CONFIG);
		ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
	} else {
		struct net_device *dev = adapter->netdev;

		if (adapter->ixgbe_ieee_ets)
			dev->dcbnl_ops->ieee_setets(dev,
						    adapter->ixgbe_ieee_ets);
		if (adapter->ixgbe_ieee_pfc)
			dev->dcbnl_ops->ieee_setpfc(dev,
						    adapter->ixgbe_ieee_pfc);
	}

	/* Enable RSS Hash per TC */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		int i;
		u32 reg = 0;

		for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
			u8 msb = 0;
			u8 cnt = adapter->netdev->tc_to_txq[i].count;

			while (cnt >>= 1)
				msb++;

			reg |= msb << IXGBE_RQTC_SHIFT_TC(i);
		}
		IXGBE_WRITE_REG(hw, IXGBE_RQTC, reg);
	}
}
#endif
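/*
 * Illustration (not from the original source): the while loop above is
 * an integer log2 -- a traffic class that owns 8 Tx queues yields
 * msb == 3, and RQTC then tells the hardware to use 3 RSS indirection
 * bits for that class.
 */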
static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
{
	int hdrm = 0;
	int num_tc = netdev_get_num_tc(adapter->netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
	    adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
		hdrm = 64 << adapter->fdir_pballoc;

	hw->mac.ops.set_rxpba(&adapter->hw, num_tc, hdrm, PBA_STRATEGY_EQUAL);
}
static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct hlist_node *node, *node2;
	struct ixgbe_fdir_filter *filter;

	spin_lock(&adapter->fdir_perfect_lock);

	if (!hlist_empty(&adapter->fdir_filter_list))
		ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask);

	hlist_for_each_entry_safe(filter, node, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		ixgbe_fdir_write_perfect_filter_82599(hw,
				&filter->filter,
				filter->sw_idx,
				(filter->action == IXGBE_FDIR_DROP_QUEUE) ?
				IXGBE_FDIR_DROP_QUEUE :
				adapter->rx_ring[filter->action]->reg_idx);
	}

	spin_unlock(&adapter->fdir_perfect_lock);
}
static void ixgbe_configure(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	ixgbe_configure_pb(adapter);
#ifdef CONFIG_IXGBE_DCB
	ixgbe_configure_dcb(adapter);
#endif

	ixgbe_set_rx_mode(netdev);
	ixgbe_restore_vlan(adapter);

#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
		ixgbe_configure_fcoe(adapter);

#endif /* IXGBE_FCOE */
	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i]->atr_sample_rate =
						       adapter->atr_sample_rate;
		ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc);
	} else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
		ixgbe_init_fdir_perfect_82599(&adapter->hw,
					      adapter->fdir_pballoc);
		ixgbe_fdir_filter_restore(adapter);
	}
	ixgbe_configure_virtualization(adapter);

	ixgbe_configure_tx(adapter);
	ixgbe_configure_rx(adapter);
}
static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
{
	switch (hw->phy.type) {
	case ixgbe_phy_sfp_avago:
	case ixgbe_phy_sfp_ftl:
	case ixgbe_phy_sfp_intel:
	case ixgbe_phy_sfp_unknown:
	case ixgbe_phy_sfp_passive_tyco:
	case ixgbe_phy_sfp_passive_unknown:
	case ixgbe_phy_sfp_active_unknown:
	case ixgbe_phy_sfp_ftl_active:
		return true;
	default:
		return false;
	}
}
/**
 * ixgbe_sfp_link_config - set up SFP+ link
 * @adapter: pointer to private adapter struct
 **/
static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
{
	/*
	 * We are assuming the worst case scenario here, and that
	 * is that an SFP was inserted/removed after the reset
	 * but before SFP detection was enabled.  As such the best
	 * solution is to just start searching as soon as we start
	 */
	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
		adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;

	adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
}
/**
 * ixgbe_non_sfp_link_config - set up non-SFP+ link
 * @hw: pointer to private hardware struct
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
{
	u32 autoneg;
	bool negotiation, link_up = false;
	u32 ret = IXGBE_ERR_LINK_SETUP;

	if (hw->mac.ops.check_link)
		ret = hw->mac.ops.check_link(hw, &autoneg, &link_up, false);

	if (ret)
		goto link_cfg_out;

	autoneg = hw->phy.autoneg_advertised;
	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
		ret = hw->mac.ops.get_link_capabilities(hw, &autoneg,
							&negotiation);
	if (ret)
		goto link_cfg_out;

	if (hw->mac.ops.setup_link)
		ret = hw->mac.ops.setup_link(hw, autoneg, negotiation, link_up);
link_cfg_out:
	return ret;
}
static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 gpie = 0;

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
		       IXGBE_GPIE_OCD;
		gpie |= IXGBE_GPIE_EIAME;
		/*
		 * use EIAM to auto-mask when MSI-X interrupt is asserted
		 * this saves a register write for every interrupt
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
			break;
		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
		default:
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
			break;
		}
	} else {
		/* legacy interrupts, use EIAM to auto-mask when reading EICR,
		 * specifically only auto mask tx and rx interrupts */
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	}

	/* XXX: to interrupt immediately for EICS writes, enable this */
	/* gpie |= IXGBE_GPIE_EIMEN; */

	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
		gpie &= ~IXGBE_GPIE_VTMODE_MASK;
		gpie |= IXGBE_GPIE_VTMODE_64;
	}

	/* Enable fan failure interrupt */
	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
		gpie |= IXGBE_SDP1_GPIEN;

	if (hw->mac.type == ixgbe_mac_82599EB) {
		gpie |= IXGBE_SDP1_GPIEN;
		gpie |= IXGBE_SDP2_GPIEN;
	}

	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
}
static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int err;
	u32 ctrl_ext;

	ixgbe_get_hw_control(adapter);
	ixgbe_setup_gpie(adapter);

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
		ixgbe_configure_msix(adapter);
	else
		ixgbe_configure_msi_and_legacy(adapter);

	/* enable the optics for both multi-speed fiber and 82599 SFP+ fiber */
	if (hw->mac.ops.enable_tx_laser &&
	    ((hw->phy.multispeed_fiber) ||
	     ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
	      (hw->mac.type == ixgbe_mac_82599EB))))
		hw->mac.ops.enable_tx_laser(hw);

	clear_bit(__IXGBE_DOWN, &adapter->state);
	ixgbe_napi_enable_all(adapter);

	if (ixgbe_is_sfp(hw)) {
		ixgbe_sfp_link_config(adapter);
	} else {
		err = ixgbe_non_sfp_link_config(hw);
		if (err)
			e_err(probe, "link_config FAILED %d\n", err);
	}

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_EICR);
	ixgbe_irq_enable(adapter, true, true);

	/*
	 * If this adapter has a fan, check to see if we had a failure
	 * before we enabled the interrupt.
	 */
	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		if (esdp & IXGBE_ESDP_SDP1)
			e_crit(drv, "Fan has stopped, replace the adapter\n");
	}

	/* enable transmits */
	netif_tx_start_all_queues(adapter->netdev);

	/* bring the link up in the watchdog, this could race with our first
	 * link up interrupt but shouldn't be a problem */
	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
	adapter->link_check_timeout = jiffies;
	mod_timer(&adapter->service_timer, jiffies);

	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

	return 0;
}
void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
{
	WARN_ON(in_interrupt());
	/* put off any impending NetWatchDogTimeout */
	adapter->netdev->trans_start = jiffies;

	while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
		usleep_range(1000, 2000);
	ixgbe_down(adapter);
	/*
	 * If SR-IOV enabled then wait a bit before bringing the adapter
	 * back up to give the VFs time to respond to the reset.  The
	 * two second wait is based upon the watchdog timer cycle in
	 * the VF driver.
	 */
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		msleep(2000);
	ixgbe_up(adapter);
	clear_bit(__IXGBE_RESETTING, &adapter->state);
}

int ixgbe_up(struct ixgbe_adapter *adapter)
{
	/* hardware has been reset, we need to reload some things */
	ixgbe_configure(adapter);

	return ixgbe_up_complete(adapter);
}
void ixgbe_reset(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	/* lock SFP init bit to prevent race conditions with the watchdog */
	while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
		usleep_range(1000, 2000);

	/* clear all SFP and link config related flags while holding SFP_INIT */
	adapter->flags2 &= ~(IXGBE_FLAG2_SEARCH_FOR_SFP |
			     IXGBE_FLAG2_SFP_NEEDS_RESET);
	adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;

	err = hw->mac.ops.init_hw(hw);
	switch (err) {
	case 0:
	case IXGBE_ERR_SFP_NOT_PRESENT:
	case IXGBE_ERR_SFP_NOT_SUPPORTED:
		break;
	case IXGBE_ERR_MASTER_REQUESTS_PENDING:
		e_dev_err("master disable timed out\n");
		break;
	case IXGBE_ERR_EEPROM_VERSION:
		/* We are running on a pre-production device, log a warning */
		e_dev_warn("This device is a pre-production adapter/LOM. "
			   "Please be aware there may be issues associated "
			   "with your hardware.  If you are experiencing "
			   "problems please contact your Intel or hardware "
			   "representative who provided you with this "
			   "hardware.\n");
		break;
	default:
		e_dev_err("Hardware Error: %d\n", err);
	}

	clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
			    IXGBE_RAH_AV);
}
/**
 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	unsigned long size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ixgbe_rx_buffer *rx_buffer_info;

		rx_buffer_info = &rx_ring->rx_buffer_info[i];
		if (rx_buffer_info->dma) {
			dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_buffer_info->dma = 0;
		}
		if (rx_buffer_info->skb) {
			struct sk_buff *skb = rx_buffer_info->skb;
			rx_buffer_info->skb = NULL;
			/* an RSC chain links coalesced skbs via skb->prev */
			do {
				struct sk_buff *this = skb;
				if (IXGBE_RSC_CB(this)->delay_unmap) {
					dma_unmap_single(dev,
							 IXGBE_RSC_CB(this)->dma,
							 rx_ring->rx_buf_len,
							 DMA_FROM_DEVICE);
					IXGBE_RSC_CB(this)->dma = 0;
					IXGBE_RSC_CB(skb)->delay_unmap = false;
				}
				skb = skb->prev;
				dev_kfree_skb(this);
			} while (skb);
		}
		if (!rx_buffer_info->page)
			continue;
		if (rx_buffer_info->page_dma) {
			dma_unmap_page(dev, rx_buffer_info->page_dma,
				       PAGE_SIZE / 2, DMA_FROM_DEVICE);
			rx_buffer_info->page_dma = 0;
		}
		put_page(rx_buffer_info->page);
		rx_buffer_info->page = NULL;
		rx_buffer_info->page_offset = 0;
	}

	size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}
/**
 * ixgbe_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
{
	struct ixgbe_tx_buffer *tx_buffer_info;
	unsigned long size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_buffer_info)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
	}

	size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
}
/**
 * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbe_clean_rx_ring(adapter->rx_ring[i]);
}

/**
 * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbe_clean_tx_ring(adapter->tx_ring[i]);
}

static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
{
	struct hlist_node *node, *node2;
	struct ixgbe_fdir_filter *filter;

	spin_lock(&adapter->fdir_perfect_lock);

	hlist_for_each_entry_safe(filter, node, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		hlist_del(&filter->fdir_node);
		kfree(filter);
	}
	adapter->fdir_filter_count = 0;

	spin_unlock(&adapter->fdir_perfect_lock);
}
void ixgbe_down(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rxctrl;
	int i;
	int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/* signal that we are down to the interrupt handler */
	set_bit(__IXGBE_DOWN, &adapter->state);

	/* disable receives */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);

	/* disable all enabled rx queues */
	for (i = 0; i < adapter->num_rx_queues; i++)
		/* this call also flushes the previous write */
		ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]);

	usleep_range(10000, 20000);

	netif_tx_stop_all_queues(netdev);

	/* call carrier off first to avoid false dev_watchdog timeouts */
	netif_carrier_off(netdev);
	netif_tx_disable(netdev);

	ixgbe_irq_disable(adapter);

	ixgbe_napi_disable_all(adapter);

	adapter->flags2 &= ~(IXGBE_FLAG2_FDIR_REQUIRES_REINIT |
			     IXGBE_FLAG2_RESET_REQUESTED);
	adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;

	del_timer_sync(&adapter->service_timer);

	/* disable receive for all VFs and wait one second */
	if (adapter->num_vfs) {
		/* ping all the active vfs to let them know we are going down */
		ixgbe_ping_all_vfs(adapter);

		/* Disable all VFTE/VFRE TX/RX */
		ixgbe_disable_tx_rx(adapter);

		/* Mark all the VFs as inactive */
		for (i = 0 ; i < adapter->num_vfs; i++)
			adapter->vfinfo[i].clear_to_send = 0;
	}

	/* Cleanup the affinity_hint CPU mask memory and callback */
	for (i = 0; i < num_q_vectors; i++) {
		struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
		/* clear the affinity_mask in the IRQ descriptor */
		irq_set_affinity_hint(adapter->msix_entries[i].vector, NULL);
		/* release the CPU mask memory */
		free_cpumask_var(q_vector->affinity_mask);
	}

	/* disable transmits in the hardware now that interrupts are off */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		u8 reg_idx = adapter->tx_ring[i]->reg_idx;
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
	}

	/* Disable the Tx DMA engine on 82599 and X540 */
	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
				(IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
				 ~IXGBE_DMATXCTL_TE));
		break;
	default:
		break;
	}

	if (!pci_channel_offline(adapter->pdev))
		ixgbe_reset(adapter);

	/* power down the optics for multispeed fiber and 82599 SFP+ fiber */
	if (hw->mac.ops.disable_tx_laser &&
	    ((hw->phy.multispeed_fiber) ||
	     ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
	      (hw->mac.type == ixgbe_mac_82599EB))))
		hw->mac.ops.disable_tx_laser(hw);

	ixgbe_clean_all_tx_rings(adapter);
	ixgbe_clean_all_rx_rings(adapter);

#ifdef CONFIG_IXGBE_DCA
	/* since we reset the hardware DCA settings were cleared */
	ixgbe_setup_dca(adapter);
#endif
}
/**
 * ixgbe_poll - NAPI Rx polling callback
 * @napi: structure for representing this polling device
 * @budget: how many packets driver is allowed to clean
 *
 * This function is used for legacy and MSI, NAPI mode
 **/
static int ixgbe_poll(struct napi_struct *napi, int budget)
{
	struct ixgbe_q_vector *q_vector =
				container_of(napi, struct ixgbe_q_vector, napi);
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *ring;
	int per_ring_budget;
	bool clean_complete = true;

#ifdef CONFIG_IXGBE_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
		ixgbe_update_dca(q_vector);
#endif

	for (ring = q_vector->tx.ring; ring != NULL; ring = ring->next)
		clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring);

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling */
	if (q_vector->rx.count > 1)
		per_ring_budget = max(budget/q_vector->rx.count, 1);
	else
		per_ring_budget = budget;

	for (ring = q_vector->rx.ring; ring != NULL; ring = ring->next)
		clean_complete &= ixgbe_clean_rx_irq(q_vector, ring,
						     per_ring_budget);

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;

	/* all work done, exit the polling mode */
	napi_complete(napi);
	if (adapter->rx_itr_setting & 1)
		ixgbe_set_itr(q_vector);
	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));

	return 0;
}
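/*
 * Illustration (not from the original source): with the default NAPI
 * budget of 64 and three Rx rings on this vector, per_ring_budget is
 * max(64 / 3, 1) == 21, so each ring may clean up to 21 packets before
 * the vector either re-polls or re-enables its interrupt.
 */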
/**
 * ixgbe_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void ixgbe_tx_timeout(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	ixgbe_tx_timeout_reset(adapter);
}
/**
 * ixgbe_set_rss_queues: Allocate queues for RSS
 * @adapter: board private structure to initialize
 *
 * This is our "base" multiqueue mode.  RSS (Receive Side Scaling) will try
 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
 *
 **/
static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
{
	bool ret = false;
	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS];

	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
		f->mask = 0xF;
		adapter->num_rx_queues = f->indices;
		adapter->num_tx_queues = f->indices;
		ret = true;
	}

	return ret;
}
/**
 * ixgbe_set_fdir_queues: Allocate queues for Flow Director
 * @adapter: board private structure to initialize
 *
 * Flow Director is an advanced Rx filter, attempting to get Rx flows back
 * to the original CPU that initiated the Tx session.  This runs in addition
 * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the
 * Rx load across CPUs using RSS.
 *
 **/
static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
{
	bool ret = false;
	struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR];

	f_fdir->indices = min((int)num_online_cpus(), f_fdir->indices);
	f_fdir->mask = 0;

	/* Flow Director must have RSS enabled */
	if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
	    (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) {
		adapter->num_tx_queues = f_fdir->indices;
		adapter->num_rx_queues = f_fdir->indices;
		ret = true;
	} else {
		adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
	}
	return ret;
}
#ifdef IXGBE_FCOE
/**
 * ixgbe_set_fcoe_queues: Allocate queues for Fiber Channel over Ethernet (FCoE)
 * @adapter: board private structure to initialize
 *
 * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges.
 * The ring feature mask is not used as a mask for FCoE, as it can take any 8
 * rx queues out of the max number of rx queues, instead, it is used as the
 * index of the first rx queue used by FCoE.
 *
 **/
static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];

	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		return false;

	f->indices = min((int)num_online_cpus(), f->indices);

	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;

	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
		e_info(probe, "FCoE enabled with RSS\n");
		if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
			ixgbe_set_fdir_queues(adapter);
		else
			ixgbe_set_rss_queues(adapter);
	}

	/* adding FCoE rx rings to the end */
	f->mask = adapter->num_rx_queues;
	adapter->num_rx_queues += f->indices;
	adapter->num_tx_queues += f->indices;

	return true;
}
#endif /* IXGBE_FCOE */
/* Artificial max queue cap per traffic class in DCB mode */
#define DCB_QUEUE_CAP 8

#ifdef CONFIG_IXGBE_DCB
static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
{
	int per_tc_q, q, i, offset = 0;
	struct net_device *dev = adapter->netdev;
	int tcs = netdev_get_num_tc(dev);

	if (!tcs)
		return false;

	/* Map queue offset and counts onto allocated tx queues */
	per_tc_q = min(dev->num_tx_queues / tcs, (unsigned int)DCB_QUEUE_CAP);
	q = min((int)num_online_cpus(), per_tc_q);

	for (i = 0; i < tcs; i++) {
		netdev_set_prio_tc_map(dev, i, i);
		netdev_set_tc_queue(dev, i, q, offset);
		offset += q;
	}

	adapter->num_tx_queues = q * tcs;
	adapter->num_rx_queues = q * tcs;

#ifdef IXGBE_FCOE
	/* FCoE enabled queues require special configuration indexed
	 * by feature specific indices and mask. Here we map FCoE
	 * indices onto the DCB queue pairs allowing FCoE to own
	 * configuration later.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		int tc;
		struct ixgbe_ring_feature *f =
					&adapter->ring_feature[RING_F_FCOE];

		tc = netdev_get_prio_tc_map(dev, adapter->fcoe.up);
		f->indices = dev->tc_to_txq[tc].count;
		f->mask = dev->tc_to_txq[tc].offset;
	}
#endif

	return true;
}
#endif
/**
 * ixgbe_set_sriov_queues: Allocate queues for IOV use
 * @adapter: board private structure to initialize
 *
 * IOV doesn't actually use anything, so just NAK the
 * request for now and let the other queue routines
 * figure out what to do.
 */
static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
{
	return false;
}

/**
 * ixgbe_set_num_queues: Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
{
	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_rx_pools = adapter->num_rx_queues;
	adapter->num_rx_queues_per_pool = 1;

	if (ixgbe_set_sriov_queues(adapter))
		goto done;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_set_dcb_queues(adapter))
		goto done;

#endif
#ifdef IXGBE_FCOE
	if (ixgbe_set_fcoe_queues(adapter))
		goto done;

#endif /* IXGBE_FCOE */
	if (ixgbe_set_fdir_queues(adapter))
		goto done;

	if (ixgbe_set_rss_queues(adapter))
		goto done;

	/* fallback to base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;

done:
	/* Notify the stack of the (possibly) reduced queue counts. */
	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
	return netif_set_real_num_rx_queues(adapter->netdev,
					    adapter->num_rx_queues);
}

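/*
 * Illustrative walk of the ladder above: with SR-IOV, DCB and FCoE all
 * off on an 82599 that has Flow Director enabled and 8 online CPUs,
 * ixgbe_set_fdir_queues() wins and we run 8 Tx/8 Rx queues; disable ATR
 * as well and ixgbe_set_rss_queues() reaches the same count via RSS;
 * disable RSS too and we land on the single-queue base case.
 */
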
static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
				       int vectors)
{
	int err, vector_threshold;

	/* We'll want at least 3 (vector_threshold):
	 * 1) TxQ[0] Cleanup
	 * 2) RxQ[0] Cleanup
	 * 3) Other (Link Status Change, etc.)
	 * 4) TCP Timer (optional)
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	while (vectors >= vector_threshold) {
		err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
				      vectors);
		if (!err) /* Success in acquiring all requested vectors. */
			break;
		else if (err < 0)
			vectors = 0; /* Nasty failure, quit now */
		else /* err == number of vectors we should try again with */
			vectors = err;
	}

	if (vectors < vector_threshold) {
		/* Can't allocate enough MSI-X interrupts?  Oh well.
		 * This just means we'll go with either a single MSI
		 * vector or fall back to legacy interrupts.
		 */
		netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
			     "Unable to allocate MSI-X interrupts\n");
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else {
		adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
		/*
		 * Adjust for only the vectors we'll use, which is minimum
		 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
		 * vectors we were allocated.
		 */
		adapter->num_msix_vectors = min(vectors,
				   adapter->max_msix_q_vectors + NON_Q_VECTORS);
	}
}

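/*
 * Note on the retry loop above: with this pci_enable_msix() API, 0 means
 * every requested vector was allocated, a negative value is a hard
 * failure, and a positive value is the number of vectors the system can
 * actually provide.  A request for, say, 10 vectors that returns 6 simply
 * retries with 6, and we only give up once the achievable count sinks
 * below MIN_MSIX_COUNT.
 */
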
/**
 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for RSS to the assigned rings.
 *
 **/
static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
{
	int i;

	if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
		return false;

	for (i = 0; i < adapter->num_rx_queues; i++)
		adapter->rx_ring[i]->reg_idx = i;
	for (i = 0; i < adapter->num_tx_queues; i++)
		adapter->tx_ring[i]->reg_idx = i;

	return true;
}

#ifdef CONFIG_IXGBE_DCB

/* ixgbe_get_first_reg_idx - Return first register index associated with ring */
static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
				    unsigned int *tx, unsigned int *rx)
{
	struct net_device *dev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u8 num_tcs = netdev_get_num_tc(dev);

	*tx = 0;
	*rx = 0;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		*tx = tc << 2;
		*rx = tc << 3;
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		if (num_tcs == 8) {
			if (tc < 3) {
				*tx = tc << 5;
				*rx = tc << 4;
			} else if (tc < 5) {
				*tx = ((tc + 2) << 4);
				*rx = tc << 4;
			} else if (tc < num_tcs) {
				*tx = ((tc + 8) << 3);
				*rx = tc << 4;
			}
		} else if (num_tcs == 4) {
			*rx = tc << 5;
			switch (tc) {
			case 0:
			case 1:
				*tx = tc << 6;
				break;
			case 2:
			case 3:
				*tx = (tc + 4) << 4;
				break;
			default:
				break;
			}
		}
		break;
	default:
		break;
	}
}

/**
 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for DCB to the assigned rings.
 *
 **/
static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	int i, j, k;
	u8 num_tcs = netdev_get_num_tc(dev);

	if (!num_tcs)
		return false;

	for (i = 0, k = 0; i < num_tcs; i++) {
		unsigned int tx_s, rx_s;
		u16 count = dev->tc_to_txq[i].count;

		ixgbe_get_first_reg_idx(adapter, i, &tx_s, &rx_s);
		for (j = 0; j < count; j++, k++) {
			adapter->tx_ring[k]->reg_idx = tx_s + j;
			adapter->rx_ring[k]->reg_idx = rx_s + j;
			adapter->tx_ring[k]->dcb_tc = i;
			adapter->rx_ring[k]->dcb_tc = i;
		}
	}

	return true;
}
#endif

/**
 * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for Flow Director to the assigned rings.
 *
 **/
static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
{
	int i;
	bool ret = false;

	if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
	    (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) {
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->reg_idx = i;
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i]->reg_idx = i;
		ret = true;
	}

	return ret;
}

#ifdef IXGBE_FCOE
/**
 * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for FCoE mode to the assigned rings.
 *
 */
static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
	int i;
	u8 fcoe_rx_i = 0, fcoe_tx_i = 0;

	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		return false;

	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
		if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
			ixgbe_cache_ring_fdir(adapter);
		else
			ixgbe_cache_ring_rss(adapter);

		fcoe_rx_i = f->mask;
		fcoe_tx_i = f->mask;
	}
	for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
		adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
		adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
	}
	return true;
}

#endif /* IXGBE_FCOE */
/**
 * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
 * @adapter: board private structure to initialize
 *
 * SR-IOV doesn't use any descriptor rings but changes the default if
 * no other mapping is used.
 *
 */
static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
{
	adapter->rx_ring[0]->reg_idx = adapter->num_vfs * 2;
	adapter->tx_ring[0]->reg_idx = adapter->num_vfs * 2;
	if (adapter->num_vfs)
		return true;
	else
		return false;
}

/**
 * ixgbe_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 *
 * Note, the order the various feature calls is important.  It must start with
 * the "most" features enabled at the same time, then trickle down to the
 * least amount of features turned on at once.
 **/
static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
{
	/* start with default case */
	adapter->rx_ring[0]->reg_idx = 0;
	adapter->tx_ring[0]->reg_idx = 0;

	if (ixgbe_cache_ring_sriov(adapter))
		return;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_cache_ring_dcb(adapter))
		return;
#endif

#ifdef IXGBE_FCOE
	if (ixgbe_cache_ring_fcoe(adapter))
		return;
#endif /* IXGBE_FCOE */

	if (ixgbe_cache_ring_fdir(adapter))
		return;

	if (ixgbe_cache_ring_rss(adapter))
		return;
}

/**
 * ixgbe_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.  The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/
static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
{
	int rx = 0, tx = 0, nid = adapter->node;

	if (nid < 0 || !node_online(nid))
		nid = first_online_node;

	for (; tx < adapter->num_tx_queues; tx++) {
		struct ixgbe_ring *ring;

		ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid);
		if (!ring)
			ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring)
			goto err_allocation;
		ring->count = adapter->tx_ring_count;
		ring->queue_index = tx;
		ring->numa_node = nid;
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		adapter->tx_ring[tx] = ring;
	}

	for (; rx < adapter->num_rx_queues; rx++) {
		struct ixgbe_ring *ring;

		ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid);
		if (!ring)
			ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring)
			goto err_allocation;
		ring->count = adapter->rx_ring_count;
		ring->queue_index = rx;
		ring->numa_node = nid;
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		adapter->rx_ring[rx] = ring;
	}

	ixgbe_cache_ring_register(adapter);

	return 0;

err_allocation:
	while (tx)
		kfree(adapter->tx_ring[--tx]);

	while (rx)
		kfree(adapter->rx_ring[--rx]);
	return -ENOMEM;
}

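/*
 * The kzalloc_node()/kzalloc() pairing above is deliberate: we first try
 * to place the ring metadata on the adapter's home NUMA node, so the
 * descriptor housekeeping lives near the CPUs most likely to service the
 * queue, and only fall back to an unconstrained allocation when that node
 * cannot satisfy the request.
 */
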
/**
 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int err = 0;
	int vector, v_budget;

	/*
	 * It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPU's.  So let's be conservative and only ask for
	 * (roughly) the same number of vectors as there are CPU's.
	 */
	v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
		       (int)num_online_cpus()) + NON_Q_VECTORS;

	/*
	 * At the same time, hardware can only support a maximum of
	 * hw.mac->max_msix_vectors vectors.  With features
	 * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
	 * descriptor queues supported by our device.  Thus, we cap it off in
	 * those rare cases where the cpu count also exceeds our vector limit.
	 */
	v_budget = min(v_budget, (int)hw->mac.max_msix_vectors);

	/* A failure in MSI-X entry allocation isn't fatal, but it does
	 * mean we disable MSI-X capabilities of the adapter. */
	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (adapter->msix_entries) {
		for (vector = 0; vector < v_budget; vector++)
			adapter->msix_entries[vector].entry = vector;

		ixgbe_acquire_msix_vectors(adapter, v_budget);

		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
			goto out;
	}

	adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
	adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
		e_err(probe,
		      "ATR is not supported while multiple "
		      "queues are disabled.  Disabling Flow Director\n");
	}
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
	adapter->atr_sample_rate = 0;
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		ixgbe_disable_sriov(adapter);

	err = ixgbe_set_num_queues(adapter);
	if (err)
		return err;

	err = pci_enable_msi(adapter->pdev);
	if (!err) {
		adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
	} else {
		netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
			     "Unable to allocate MSI interrupt, "
			     "falling back to legacy.  Error: %d\n", err);
		/* reset err */
		err = 0;
	}

out:
	return err;
}

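/*
 * Interrupt capability is thus a three-rung ladder: MSI-X with one vector
 * per queue pair (budgeted as min(Rx + Tx queues, CPUs) plus
 * NON_Q_VECTORS, capped at hw.mac.max_msix_vectors), then a single MSI
 * vector, then legacy INTx.  Anything below MSI-X also forfeits the
 * multiqueue features (DCB, RSS, ATR, SR-IOV) that depend on per-queue
 * vectors, which is why the flags are cleared before falling back.
 */
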
/**
 * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
{
	int v_idx, num_q_vectors;
	struct ixgbe_q_vector *q_vector;

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
		num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	else
		num_q_vectors = 1;

	for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
		q_vector = kzalloc_node(sizeof(struct ixgbe_q_vector),
					GFP_KERNEL, adapter->node);
		if (!q_vector)
			q_vector = kzalloc(sizeof(struct ixgbe_q_vector),
					   GFP_KERNEL);
		if (!q_vector)
			goto err_out;

		q_vector->adapter = adapter;
		q_vector->v_idx = v_idx;

		if (q_vector->tx.count && !q_vector->rx.count)
			q_vector->eitr = adapter->tx_eitr_param;
		else
			q_vector->eitr = adapter->rx_eitr_param;

		netif_napi_add(adapter->netdev, &q_vector->napi,
			       ixgbe_poll, 64);
		adapter->q_vector[v_idx] = q_vector;
	}

	return 0;

err_out:
	while (v_idx) {
		v_idx--;
		q_vector = adapter->q_vector[v_idx];
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
		adapter->q_vector[v_idx] = NULL;
	}
	return -ENOMEM;
}

/**
 * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
{
	int q_idx, num_q_vectors;

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
		num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	else
		num_q_vectors = 1;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		struct ixgbe_q_vector *q_vector = adapter->q_vector[q_idx];
		adapter->q_vector[q_idx] = NULL;
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
}

static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
{
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
 * @adapter: board private structure to initialize
 *
 * We determine which interrupt scheme to use based on...
 * - Kernel support (MSI, MSI-X)
 *   - which can be user-defined (via MODULE_PARAM)
 * - Hardware queue count (num_*_queues)
 *   - defined by miscellaneous hardware support/features (RSS, etc.)
 **/
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	err = ixgbe_set_num_queues(adapter);
	if (err)
		return err;

	err = ixgbe_set_interrupt_capability(adapter);
	if (err) {
		e_dev_err("Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	err = ixgbe_alloc_q_vectors(adapter);
	if (err) {
		e_dev_err("Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	err = ixgbe_alloc_queues(adapter);
	if (err) {
		e_dev_err("Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
		   (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
		   adapter->num_rx_queues, adapter->num_tx_queues);

	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;

err_alloc_queues:
	ixgbe_free_q_vectors(adapter);
err_alloc_q_vectors:
	ixgbe_reset_interrupt_capability(adapter);
err_set_interrupt:
	return err;
}

/**
 * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		kfree(adapter->tx_ring[i]);
		adapter->tx_ring[i] = NULL;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbe_ring *ring = adapter->rx_ring[i];

		/* ixgbe_get_stats64() might access this ring, we must wait
		 * a grace period before freeing it.
		 */
		kfree_rcu(ring, rcu);
		adapter->rx_ring[i] = NULL;
	}

	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;

	ixgbe_free_q_vectors(adapter);
	ixgbe_reset_interrupt_capability(adapter);
}

/**
 * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
 * @adapter: board private structure to initialize
 *
 * ixgbe_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	struct net_device *dev = adapter->netdev;
	unsigned int rss;
#ifdef CONFIG_IXGBE_DCB
	int j;
	struct tc_configuration *tc;
#endif
	int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* Set capability flags */
	rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
	adapter->ring_feature[RING_F_RSS].indices = rss;
	adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		if (hw->device_id == IXGBE_DEV_ID_82598AT)
			adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
		adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
		adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
		adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
		if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
			adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
		/* Flow Director hash filters enabled */
		adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
		adapter->atr_sample_rate = 20;
		adapter->ring_feature[RING_F_FDIR].indices =
							 IXGBE_MAX_FDIR_INDICES;
		adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
#ifdef IXGBE_FCOE
		adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
		adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
		adapter->ring_feature[RING_F_FCOE].indices = 0;
#ifdef CONFIG_IXGBE_DCB
		/* Default traffic class to use for FCoE */
		adapter->fcoe.up = IXGBE_FCOE_DEFTC;
#endif
#endif /* IXGBE_FCOE */
		break;
	default:
		break;
	}

	/* n-tuple support exists, always init our spinlock */
	spin_lock_init(&adapter->fdir_perfect_lock);

#ifdef CONFIG_IXGBE_DCB
	/* Configure DCB traffic classes */
	for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
		tc = &adapter->dcb_cfg.tc_config[j];
		tc->path[DCB_TX_CONFIG].bwg_id = 0;
		tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
		tc->path[DCB_RX_CONFIG].bwg_id = 0;
		tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
		tc->dcb_pfc = pfc_disabled;
	}
	adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
	adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
	adapter->dcb_cfg.pfc_mode_enable = false;
	adapter->dcb_set_bitmap = 0x00;
	adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
	ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
			   MAX_TRAFFIC_CLASS);

#endif

	/* default flow control settings */
	hw->fc.requested_mode = ixgbe_fc_full;
	hw->fc.current_mode = ixgbe_fc_full;	/* init for ethtool output */
#ifdef CONFIG_DCB
	adapter->last_lfc_mode = hw->fc.current_mode;
#endif
	hw->fc.high_water = FC_HIGH_WATER(max_frame);
	hw->fc.low_water = FC_LOW_WATER(max_frame);
	hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
	hw->fc.send_xon = true;
	hw->fc.disable_fc_autoneg = false;

	/* enable itr by default in dynamic mode */
	adapter->rx_itr_setting = 1;
	adapter->rx_eitr_param = 20000;
	adapter->tx_itr_setting = 1;
	adapter->tx_eitr_param = 10000;

	/* set defaults for eitr in MegaBytes */
	adapter->eitr_low = 10;
	adapter->eitr_high = 20;

	/* set default ring sizes */
	adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
	adapter->rx_ring_count = IXGBE_DEFAULT_RXD;

	/* set default work limits */
	adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK;

	/* initialize eeprom parameters */
	if (ixgbe_init_eeprom_params_generic(hw)) {
		e_dev_err("EEPROM initialization failed\n");
		return -EIO;
	}

	/* enable rx csum by default */
	adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;

	/* get assigned NUMA node */
	adapter->node = dev_to_node(&pdev->dev);

	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;
}

/**
 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring:    tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int size;

	size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vzalloc_node(size, tx_ring->numa_node);
	if (!tx_ring->tx_buffer_info)
		tx_ring->tx_buffer_info = vzalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
	dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
	return -ENOMEM;
}

/**
 * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
		if (!err)
			continue;
		e_err(probe, "Allocation for Tx Queue %u failed\n", i);
		break;
	}

	return err;
}

/**
 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring:    rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int size;

	size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vzalloc_node(size, rx_ring->numa_node);
	if (!rx_ring->rx_buffer_info)
		rx_ring->rx_buffer_info = vzalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto err;

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);
	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	return 0;

err:
	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;
	dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
	return -ENOMEM;
}

/**
 * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = ixgbe_setup_rx_resources(adapter->rx_ring[i]);
		if (!err)
			continue;
		e_err(probe, "Allocation for Rx Queue %u failed\n", i);
		break;
	}

	return err;
}

/**
 * ixgbe_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
{
	ixgbe_clean_tx_ring(tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size,
			  tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i]->desc)
			ixgbe_free_tx_resources(adapter->tx_ring[i]);
}

/**
 * ixgbe_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
{
	ixgbe_clean_rx_ring(rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!rx_ring->desc)
		return;

	dma_free_coherent(rx_ring->dev, rx_ring->size,
			  rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i]->desc)
			ixgbe_free_rx_resources(adapter->rx_ring[i]);
}

/**
 * ixgbe_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	/* MTU < 68 is an error and causes problems on some kernels */
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED &&
	    hw->mac.type != ixgbe_mac_X540) {
		if ((new_mtu < 68) || (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE))
			return -EINVAL;
	} else {
		if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
			return -EINVAL;
	}

	e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
	/* must set new MTU before calling down or up */
	netdev->mtu = new_mtu;

	hw->fc.high_water = FC_HIGH_WATER(max_frame);
	hw->fc.low_water = FC_LOW_WATER(max_frame);

	if (netif_running(netdev))
		ixgbe_reinit_locked(adapter);

	return 0;
}

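/*
 * max_frame above is the on-wire frame size: MTU plus 14 bytes of
 * Ethernet header (ETH_HLEN) and 4 bytes of FCS (ETH_FCS_LEN), so the
 * default MTU of 1500 yields 1518.  The same value feeds the flow
 * control watermarks, which is why they are recomputed on every MTU
 * change before the interface is bounced.
 */
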
/**
 * ixgbe_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int ixgbe_open(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int err;

	/* disallow open during test */
	if (test_bit(__IXGBE_TESTING, &adapter->state))
		return -EBUSY;

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = ixgbe_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = ixgbe_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	ixgbe_configure(adapter);

	err = ixgbe_request_irq(adapter);
	if (err)
		goto err_req_irq;

	err = ixgbe_up_complete(adapter);
	if (err)
		goto err_up;

	netif_tx_start_all_queues(netdev);

	return 0;

err_up:
	ixgbe_release_hw_control(adapter);
	ixgbe_free_irq(adapter);
err_req_irq:
err_setup_rx:
	ixgbe_free_all_rx_resources(adapter);
err_setup_tx:
	ixgbe_free_all_tx_resources(adapter);
	ixgbe_reset(adapter);

	return err;
}

/**
 * ixgbe_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int ixgbe_close(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	ixgbe_down(adapter);
	ixgbe_free_irq(adapter);

	ixgbe_fdir_filter_exit(adapter);

	ixgbe_free_all_tx_resources(adapter);
	ixgbe_free_all_rx_resources(adapter);

	ixgbe_release_hw_control(adapter);

	return 0;
}

#ifdef CONFIG_PM
static int ixgbe_resume(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;
	u32 err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/*
	 * pci_restore_state clears dev->state_saved so call
	 * pci_save_state to restore it.
	 */
	pci_save_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		e_dev_err("Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_wake_from_d3(pdev, false);

	err = ixgbe_init_interrupt_scheme(adapter);
	if (err) {
		e_dev_err("Cannot initialize interrupts for device\n");
		return err;
	}

	ixgbe_reset(adapter);

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);

	if (netif_running(netdev)) {
		err = ixgbe_open(netdev);
		if (err)
			return err;
	}

	netif_device_attach(netdev);

	return 0;
}
#endif /* CONFIG_PM */

static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 ctrl, fctrl;
	u32 wufc = adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		ixgbe_down(adapter);
		ixgbe_free_irq(adapter);
		ixgbe_free_all_tx_resources(adapter);
		ixgbe_free_all_rx_resources(adapter);
	}

	ixgbe_clear_interrupt_scheme(adapter);
#ifdef CONFIG_DCB
	kfree(adapter->ixgbe_ieee_pfc);
	kfree(adapter->ixgbe_ieee_ets);
#endif

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;

#endif
	if (wufc) {
		ixgbe_set_rx_mode(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & IXGBE_WUFC_MC) {
			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
			fctrl |= IXGBE_FCTRL_MPE;
			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
		}

		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		ctrl |= IXGBE_CTRL_GIO_DIS;
		IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);

		IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc);
	} else {
		IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
	}

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		pci_wake_from_d3(pdev, false);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		pci_wake_from_d3(pdev, !!wufc);
		break;
	default:
		break;
	}

	*enable_wake = !!wufc;

	ixgbe_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}

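/*
 * Wake-up configuration sketch: the wufc bits programmed into IXGBE_WUFC
 * select which events (magic packet, multicast match, etc.) may wake the
 * part.  All-multi is forced on beforehand when waking on multicast so
 * those frames still reach the filter while the host sleeps, and both
 * WUC and WUFC are zeroed when wake-on-LAN is disabled entirely.
 */
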
#ifdef CONFIG_PM
static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;
	bool wake;

	retval = __ixgbe_shutdown(pdev, &wake);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}
#endif /* CONFIG_PM */

static void ixgbe_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__ixgbe_shutdown(pdev, &wake);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

/**
 * ixgbe_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
void ixgbe_update_stats(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *hwstats = &adapter->stats;
	u64 total_mpc = 0;
	u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
	u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
	u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
	u64 bytes = 0, packets = 0;

	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
	    test_bit(__IXGBE_RESETTING, &adapter->state))
		return;

	if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
		u64 rsc_count = 0;
		u64 rsc_flush = 0;
		for (i = 0; i < 16; i++)
			adapter->hw_rx_no_dma_resources +=
				IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		for (i = 0; i < adapter->num_rx_queues; i++) {
			rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
			rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
		}
		adapter->rsc_total_count = rsc_count;
		adapter->rsc_total_flush = rsc_flush;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
		non_eop_descs += rx_ring->rx_stats.non_eop_descs;
		alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
		alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
		bytes += rx_ring->stats.bytes;
		packets += rx_ring->stats.packets;
	}
	adapter->non_eop_descs = non_eop_descs;
	adapter->alloc_rx_page_failed = alloc_rx_page_failed;
	adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
	netdev->stats.rx_bytes = bytes;
	netdev->stats.rx_packets = packets;

	bytes = 0;
	packets = 0;
	/* gather some stats to the adapter struct that are per queue */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
		restart_queue += tx_ring->tx_stats.restart_queue;
		tx_busy += tx_ring->tx_stats.tx_busy;
		bytes += tx_ring->stats.bytes;
		packets += tx_ring->stats.packets;
	}
	adapter->restart_queue = restart_queue;
	adapter->tx_busy = tx_busy;
	netdev->stats.tx_bytes = bytes;
	netdev->stats.tx_packets = packets;

	hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	for (i = 0; i < 8; i++) {
		/* for packet buffers not used, the register should read 0 */
		mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
		missed_rx += mpc;
		hwstats->mpc[i] += mpc;
		total_mpc += hwstats->mpc[i];
		if (hw->mac.type == ixgbe_mac_82598EB)
			hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
		hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
		hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			hwstats->pxonrxc[i] +=
				IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			break;
		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
			hwstats->pxonrxc[i] +=
				IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			break;
		default:
			break;
		}
		hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
	}
	hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
	/* work around hardware counting issue */
	hwstats->gprc -= missed_rx;

	ixgbe_update_xoff_received(adapter);

	/* 82598 hardware only has a 32 bit counter in the high register */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
		hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
		hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
		break;
	case ixgbe_mac_X540:
		/* OS2BMC stats are X540 only*/
		hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC);
		hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
		hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC);
		hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC);
		/* fall through - X540 shares the 36-bit counters below */
	case ixgbe_mac_82599EB:
		hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
		IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
		hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
		IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */
		hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
		IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
		hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
		hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
#ifdef IXGBE_FCOE
		hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
		hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
		hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
		hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
		hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
		hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
#endif /* IXGBE_FCOE */
		break;
	default:
		break;
	}
	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
	hwstats->bprc += bprc;
	hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
	if (hw->mac.type == ixgbe_mac_82598EB)
		hwstats->mprc -= bprc;
	hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
	hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
	hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
	hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
	hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
	hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
	hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
	hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	hwstats->lxontxc += lxon;
	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	hwstats->lxofftxc += lxoff;
	hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
	hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
	hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
	/*
	 * 82598 errata - tx of flow control packets is included in tx counters
	 */
	xon_off_tot = lxon + lxoff;
	hwstats->gptc -= xon_off_tot;
	hwstats->mptc -= xon_off_tot;
	hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
	hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
	hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
	hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
	hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
	hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
	hwstats->ptc64 -= xon_off_tot;
	hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
	hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
	hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
	hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
	hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
	hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);

	/* Fill out the OS statistics structure */
	netdev->stats.multicast = hwstats->mprc;

	/* Rx Errors */
	netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec;
	netdev->stats.rx_dropped = 0;
	netdev->stats.rx_length_errors = hwstats->rlec;
	netdev->stats.rx_crc_errors = hwstats->crcerrs;
	netdev->stats.rx_missed_errors = total_mpc;
}

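/*
 * A note on the read-to-clear pattern above: the wide byte counters are
 * split across low/high register pairs that the hardware clears on read.
 * On 82599/X540 the low half is accumulated and the high half is read
 * purely to clear it, so each pass folds the delta since the previous
 * service-task tick into the 64-bit software totals.
 */
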
/**
 * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table
 * @adapter: pointer to the device adapter structure
 **/
static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	if (!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
		return;

	adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;

	/* if interface is down do nothing */
	if (test_bit(__IXGBE_DOWN, &adapter->state))
		return;

	/* do nothing if we are not using signature filters */
	if (!(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE))
		return;

	adapter->fdir_overflow++;

	if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			set_bit(__IXGBE_TX_FDIR_INIT_DONE,
				&(adapter->tx_ring[i]->state));
		/* re-enable flow director interrupts */
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
	} else {
		e_err(probe, "failed to finish FDIR re-initialization, "
		      "ignored adding FDIR ATR filters\n");
	}
}

/**
 * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts
 * @adapter: pointer to the device adapter structure
 *
 * This function serves two purposes.  First it strobes the interrupt lines
 * in order to make certain interrupts are occurring.  Secondly it sets the
 * bits needed to check for TX hangs.  As a result we should immediately
 * determine if a hang has occurred.
 **/
static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64 eics = 0;
	int i;

	/* If we're down or resetting, just bail */
	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
	    test_bit(__IXGBE_RESETTING, &adapter->state))
		return;

	/* Force detection of hung controller */
	if (netif_carrier_ok(adapter->netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			set_check_for_tx_hang(adapter->tx_ring[i]);
	}

	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
		/*
		 * for legacy and MSI interrupts don't set any bits
		 * that are enabled for EIAM, because this operation
		 * would set *both* EIMS and EICS for any bit in EIAM
		 */
		IXGBE_WRITE_REG(hw, IXGBE_EICS,
			(IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
	} else {
		/* get one bit for every active tx/rx interrupt vector */
		for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
			struct ixgbe_q_vector *qv = adapter->q_vector[i];
			if (qv->rx.ring || qv->tx.ring)
				eics |= ((u64)1 << i);
		}
	}

	/* Cause software interrupt to ensure rings are cleaned */
	ixgbe_irq_rearm_queues(adapter, eics);
}

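/*
 * The EICS strobe above is what keeps rings from stalling silently: a
 * software-triggered interrupt is raised on every vector that owns a
 * ring, e.g. four active queue vectors accumulate eics = 0xf.  Legacy
 * and MSI modes strobe only the non-queue causes because setting an
 * EIAM-enabled bit through EICS would latch EIMS as well.
 */
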
/**
 * ixgbe_watchdog_update_link - update the link status
 * @adapter: pointer to the device adapter structure
 **/
static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 link_speed = adapter->link_speed;
	bool link_up = adapter->link_up;
	int i;

	if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
		return;

	if (hw->mac.ops.check_link) {
		hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
	} else {
		/* always assume link is up, if no check link function */
		link_speed = IXGBE_LINK_SPEED_10GB_FULL;
		link_up = true;
	}
	if (link_up) {
		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
			for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
				hw->mac.ops.fc_enable(hw, i);
		} else {
			hw->mac.ops.fc_enable(hw, 0);
		}
	}

	if (link_up ||
	    time_after(jiffies, (adapter->link_check_timeout +
				 IXGBE_TRY_LINK_TIMEOUT))) {
		adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
		IXGBE_WRITE_FLUSH(hw);
	}

	adapter->link_up = link_up;
	adapter->link_speed = link_speed;
}

/**
 * ixgbe_watchdog_link_is_up - update netif_carrier status and
 *                             print link up message
 * @adapter: pointer to the device adapter structure
 **/
static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 link_speed = adapter->link_speed;
	bool flow_rx, flow_tx;

	/* only continue if link was previously down */
	if (netif_carrier_ok(netdev))
		return;

	adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB: {
		u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
		u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
		flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
		flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
	}
		break;
	case ixgbe_mac_X540:
	case ixgbe_mac_82599EB: {
		u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
		u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
		flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
		flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
	}
		break;
	default:
		flow_tx = false;
		flow_rx = false;
		break;
	}
	e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
	       (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
	       "10 Gbps" :
	       (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
	       "1 Gbps" :
	       (link_speed == IXGBE_LINK_SPEED_100_FULL ?
	       "100 Mbps" :
	       "unknown speed"))),
	       ((flow_rx && flow_tx) ? "RX/TX" :
	       (flow_rx ? "RX" :
	       (flow_tx ? "TX" : "None"))));

	netif_carrier_on(netdev);
	ixgbe_check_vf_rate_limit(adapter);
}

/**
 * ixgbe_watchdog_link_is_down - update netif_carrier status and
 *                               print link down message
 * @adapter: pointer to the adapter structure
 **/
static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->link_up = false;
	adapter->link_speed = 0;

	/* only continue if link was up previously */
	if (!netif_carrier_ok(netdev))
		return;

	/* poll for SFP+ cable when link is down */
	if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
		adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;

	e_info(drv, "NIC Link is Down\n");
	netif_carrier_off(netdev);
}

/**
 * ixgbe_watchdog_flush_tx - flush queues on link down
 * @adapter: pointer to the device adapter structure
 **/
static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
{
	int i;
	int some_tx_pending = 0;

	if (!netif_carrier_ok(adapter->netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
			if (tx_ring->next_to_use != tx_ring->next_to_clean) {
				some_tx_pending = 1;
				break;
			}
		}

		if (some_tx_pending) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context).
			 */
			adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
		}
	}
}

static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
{
	u32 ssvpc;

	/* Do not perform spoof check for 82598 */
	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
		return;

	ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC);

	/*
	 * ssvpc register is cleared on read, if zero then no
	 * spoofed packets in the last interval.
	 */
	if (!ssvpc)
		return;

	e_warn(drv, "%d Spoofed packets detected\n", ssvpc);
}

/**
 * ixgbe_watchdog_subtask - check and bring link up
 * @adapter: pointer to the device adapter structure
 **/
static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
{
	/* if interface is down do nothing */
	if (test_bit(__IXGBE_DOWN, &adapter->state))
		return;

	ixgbe_watchdog_update_link(adapter);

	if (adapter->link_up)
		ixgbe_watchdog_link_is_up(adapter);
	else
		ixgbe_watchdog_link_is_down(adapter);

	ixgbe_spoof_check(adapter);
	ixgbe_update_stats(adapter);

	ixgbe_watchdog_flush_tx(adapter);
}

/**
 * ixgbe_sfp_detection_subtask - poll for SFP+ cable
 * @adapter: the ixgbe adapter structure
 **/
static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	s32 err;

	/* not searching for SFP so there is nothing to do here */
	if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) &&
	    !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
		return;

	/* someone else is in init, wait until next service event */
	if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
		return;

	err = hw->phy.ops.identify_sfp(hw);
	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto sfp_out;

	if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
		/* If no cable is present, then we need to reset
		 * the next time we find a good cable. */
		adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
	}

	/* exit on error */
	if (err)
		goto sfp_out;

	/* exit if reset not needed */
	if (!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
		goto sfp_out;

	adapter->flags2 &= ~IXGBE_FLAG2_SFP_NEEDS_RESET;

	/*
	 * A module may be identified correctly, but the EEPROM may not have
	 * support for that module.  setup_sfp() will fail in that case, so
	 * we should not allow that module to load.
	 */
	if (hw->mac.type == ixgbe_mac_82598EB)
		err = hw->phy.ops.reset(hw);
	else
		err = hw->mac.ops.setup_sfp(hw);

	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto sfp_out;

	adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
	e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type);

sfp_out:
	clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);

	if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) &&
	    (adapter->netdev->reg_state == NETREG_REGISTERED)) {
		e_dev_err("failed to initialize because an unsupported "
			  "SFP+ module type was detected.\n");
		e_dev_err("Reload the driver after installing a "
			  "supported module.\n");
		unregister_netdev(adapter->netdev);
	}
}

/**
 * ixgbe_sfp_link_config_subtask - set up link SFP after module install
 * @adapter: the ixgbe adapter structure
 **/
static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 autoneg;
	bool negotiation;

	if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG))
		return;

	/* someone else is in init, wait until next service event */
	if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
		return;

	adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;

	autoneg = hw->phy.autoneg_advertised;
	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation);
	hw->mac.autotry_restart = false;
	if (hw->mac.ops.setup_link)
		hw->mac.ops.setup_link(hw, autoneg, negotiation, true);

	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
	adapter->link_check_timeout = jiffies;
	clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
}

/**
 * ixgbe_service_timer - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void ixgbe_service_timer(unsigned long data)
{
	struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
	unsigned long next_event_offset;

	/* poll faster when waiting for link */
	if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
		next_event_offset = HZ / 10;
	else
		next_event_offset = HZ * 2;

	/* Reset the timer */
	mod_timer(&adapter->service_timer, next_event_offset + jiffies);

	ixgbe_service_event_schedule(adapter);
}

static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
{
	if (!(adapter->flags2 & IXGBE_FLAG2_RESET_REQUESTED))
		return;

	adapter->flags2 &= ~IXGBE_FLAG2_RESET_REQUESTED;

	/* If we're already down or resetting, just bail */
	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
	    test_bit(__IXGBE_RESETTING, &adapter->state))
		return;

	ixgbe_dump(adapter);
	netdev_err(adapter->netdev, "Reset adapter\n");
	adapter->tx_timeout_count++;

	ixgbe_reinit_locked(adapter);
}

/**
 * ixgbe_service_task - manages and runs subtasks
 * @work: pointer to work_struct containing our data
 **/
static void ixgbe_service_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter = container_of(work,
						     struct ixgbe_adapter,
						     service_task);

	ixgbe_reset_subtask(adapter);
	ixgbe_sfp_detection_subtask(adapter);
	ixgbe_sfp_link_config_subtask(adapter);
	ixgbe_check_overtemp_subtask(adapter);
	ixgbe_watchdog_subtask(adapter);
	ixgbe_fdir_reinit_subtask(adapter);
	ixgbe_check_hang_subtask(adapter);

	ixgbe_service_event_complete(adapter);
}

void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
		       u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);
	context_desc->seqnum_seed	= cpu_to_le32(fcoe_sof_eof);
	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx);
}

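/*
 * Layout sketch: a context descriptor occupies one 16-byte slot in the
 * ordinary Tx ring, split into the four little-endian words written
 * above.  Data descriptors queued afterwards refer back to it (by
 * context index) for their checksum/TSO parameters, which is why no
 * buffer address is involved here.
 */
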
static int ixgbe_tso(struct ixgbe_ring *tx_ring, struct sk_buff *skb,
		     u32 tx_flags, __be16 protocol, u8 *hdr_len)
{
	int err;
	u32 vlan_macip_lens, type_tucmd;
	u32 mss_l4len_idx, l4len;

	if (!skb_is_gso(skb))
		return 0;

	if (skb_header_cloned(skb)) {
		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (err)
			return err;
	}

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;

	if (protocol == __constant_htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP,
							 0);
		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check =
		    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
				     &ipv6_hdr(skb)->daddr,
				     0, IPPROTO_TCP, 0);
	}

	l4len = tcp_hdrlen(skb);
	*hdr_len = skb_transport_offset(skb) + l4len;

	/* mss_l4len_id: use 1 as index for TSO */
	mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
	mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;

	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
	vlan_macip_lens = skb_network_header_len(skb);
	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd,
			  mss_l4len_idx);

	return 1;
}

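/*
 * Packing example for the words built above: a TCP segment with a
 * 20-byte L4 header and a 1460-byte MSS sets mss_l4len_idx to
 * (20 << IXGBE_ADVTXD_L4LEN_SHIFT) | (1460 << IXGBE_ADVTXD_MSS_SHIFT) |
 * (1 << IXGBE_ADVTXD_IDX_SHIFT), i.e. header length, segment size and
 * the TSO context slot 1 folded into a single u32.
 */
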
static bool ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
			  struct sk_buff *skb, u32 tx_flags,
			  __be16 protocol)
{
	u32 vlan_macip_lens = 0;
	u32 mss_l4len_idx = 0;
	u32 type_tucmd = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		if (!(tx_flags & IXGBE_TX_FLAGS_HW_VLAN) &&
		    !(tx_flags & IXGBE_TX_FLAGS_TXSW))
			return false;
	} else {
		u8 l4_hdr = 0;
		switch (protocol) {
		case __constant_htons(ETH_P_IP):
			vlan_macip_lens |= skb_network_header_len(skb);
			type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
			l4_hdr = ip_hdr(skb)->protocol;
			break;
		case __constant_htons(ETH_P_IPV6):
			vlan_macip_lens |= skb_network_header_len(skb);
			l4_hdr = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			if (unlikely(net_ratelimit())) {
				dev_warn(tx_ring->dev,
					 "partial checksum but proto=%x!\n",
					 skb->protocol);
			}
			break;
		}

		switch (l4_hdr) {
		case IPPROTO_TCP:
			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
			mss_l4len_idx = tcp_hdrlen(skb) <<
					IXGBE_ADVTXD_L4LEN_SHIFT;
			break;
		case IPPROTO_SCTP:
			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
			mss_l4len_idx = sizeof(struct sctphdr) <<
					IXGBE_ADVTXD_L4LEN_SHIFT;
			break;
		case IPPROTO_UDP:
			mss_l4len_idx = sizeof(struct udphdr) <<
					IXGBE_ADVTXD_L4LEN_SHIFT;
			break;
		default:
			if (unlikely(net_ratelimit())) {
				dev_warn(tx_ring->dev,
					 "partial checksum but l4 proto=%x!\n",
					 l4_hdr);
			}
			break;
		}
	}

	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0,
			  type_tucmd, mss_l4len_idx);

	return (skb->ip_summed == CHECKSUM_PARTIAL);
}

static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
{
	/* set type for advanced descriptor with frame checksum insertion */
	__le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
				      IXGBE_ADVTXD_DCMD_IFCS |
				      IXGBE_ADVTXD_DCMD_DEXT);

	/* set HW vlan bit if vlan is present */
	if (tx_flags & IXGBE_TX_FLAGS_HW_VLAN)
		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);

	/* set segmentation enable bits for TSO/FSO */
#ifdef IXGBE_FCOE
	if ((tx_flags & IXGBE_TX_FLAGS_TSO) || (tx_flags & IXGBE_TX_FLAGS_FSO))
#else
	if (tx_flags & IXGBE_TX_FLAGS_TSO)
#endif
		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);

	return cmd_type;
}

static __le32 ixgbe_tx_olinfo_status(u32 tx_flags, unsigned int paylen)
{
	__le32 olinfo_status =
		cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);

	if (tx_flags & IXGBE_TX_FLAGS_TSO) {
		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM |
					    (1 << IXGBE_ADVTXD_IDX_SHIFT));
		/* enable IPv4 checksum for TSO */
		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
			olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
	}

	/* enable L4 checksum for TSO and TX checksum offload */
	if (tx_flags & IXGBE_TX_FLAGS_CSUM)
		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);

#ifdef IXGBE_FCOE
	/* use index 1 context for FCOE/FSO */
	if (tx_flags & IXGBE_TX_FLAGS_FCOE)
		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC |
					    (1 << IXGBE_ADVTXD_IDX_SHIFT));

#endif
	/*
	 * Check Context must be set if Tx switch is enabled, which it
	 * always is for case where virtual functions are running
	 */
	if (tx_flags & IXGBE_TX_FLAGS_TXSW)
		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);

	return olinfo_status;
}

#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
		       IXGBE_TXD_CMD_RS)

static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
			 struct sk_buff *skb,
			 struct ixgbe_tx_buffer *first,
			 u32 tx_flags,
			 const u8 hdr_len)
{
	struct device *dev = tx_ring->dev;
	struct ixgbe_tx_buffer *tx_buffer_info;
	union ixgbe_adv_tx_desc *tx_desc;
	dma_addr_t dma;
	__le32 cmd_type, olinfo_status;
	struct skb_frag_struct *frag;
	unsigned int f = 0;
	unsigned int data_len = skb->data_len;
	unsigned int size = skb_headlen(skb);
	u32 offset = 0;
	u32 paylen = skb->len - hdr_len;
	u16 i = tx_ring->next_to_use;
	u16 gso_segs;

#ifdef IXGBE_FCOE
	if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
		if (data_len >= sizeof(struct fcoe_crc_eof)) {
			data_len -= sizeof(struct fcoe_crc_eof);
		} else {
			size -= sizeof(struct fcoe_crc_eof) - data_len;
			data_len = 0;
		}
	}

#endif
	dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		goto dma_error;

	cmd_type = ixgbe_tx_cmd_type(tx_flags);
	olinfo_status = ixgbe_tx_olinfo_status(tx_flags, paylen);

	tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);

	for (;;) {
		while (size > IXGBE_MAX_DATA_PER_TXD) {
			tx_desc->read.buffer_addr = cpu_to_le64(dma + offset);
			tx_desc->read.cmd_type_len =
				cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
			tx_desc->read.olinfo_status = olinfo_status;

			offset += IXGBE_MAX_DATA_PER_TXD;
			size -= IXGBE_MAX_DATA_PER_TXD;

			tx_desc++;
			i++;
			if (i == tx_ring->count) {
				tx_desc = IXGBE_TX_DESC_ADV(tx_ring, 0);
				i = 0;
			}
		}

		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		tx_buffer_info->length = offset + size;
		tx_buffer_info->tx_flags = tx_flags;
		tx_buffer_info->dma = dma;

		tx_desc->read.buffer_addr = cpu_to_le64(dma + offset);
		tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
		tx_desc->read.olinfo_status = olinfo_status;

		if (!data_len)
			break;

		frag = &skb_shinfo(skb)->frags[f];
#ifdef IXGBE_FCOE
		size = min_t(unsigned int, data_len, frag->size);
#else
		size = frag->size;
#endif
		data_len -= size;
		f++;

		offset = 0;
		tx_flags |= IXGBE_TX_FLAGS_MAPPED_AS_PAGE;

		dma = dma_map_page(dev, frag->page, frag->page_offset,
				   size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto dma_error;

		tx_desc++;
		i++;
		if (i == tx_ring->count) {
			tx_desc = IXGBE_TX_DESC_ADV(tx_ring, 0);
			i = 0;
		}
	}

	tx_desc->read.cmd_type_len |= cpu_to_le32(IXGBE_TXD_CMD);

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	if (tx_flags & IXGBE_TX_FLAGS_TSO)
		gso_segs = skb_shinfo(skb)->gso_segs;
#ifdef IXGBE_FCOE
	/* adjust for FCoE Sequence Offload */
	else if (tx_flags & IXGBE_TX_FLAGS_FSO)
		gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
					skb_shinfo(skb)->gso_size);
#endif /* IXGBE_FCOE */
	else
		gso_segs = 1;

	/* multiply data chunks by size of headers */
	tx_buffer_info->bytecount = paylen + (gso_segs * hdr_len);
	tx_buffer_info->gso_segs = gso_segs;
	tx_buffer_info->skb = skb;

	/* set the timestamp */
	first->time_stamp = jiffies;

	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	/* notify HW of packet */
	writel(i, tx_ring->tail);

	return;
dma_error:
	dev_err(dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_buffer_info map */
	for (;;) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbe_unmap_tx_resource(tx_ring, tx_buffer_info);
		if (tx_buffer_info == first)
			break;
		if (i == 0)
			i += tx_ring->count;
		i--;
	}

	dev_kfree_skb_any(skb);

	tx_ring->next_to_use = i;
}
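/*
 * Illustration only: assuming IXGBE_MAX_DATA_PER_TXD is its usual
 * value of 16384 (1 << 14), a 40000-byte linear buffer is split by the
 * inner while loop above into 16384 + 16384 + 7232 across three data
 * descriptors; only the final descriptor of the frame has IXGBE_TXD_CMD
 * (EOP/RS) OR'ed into cmd_type_len.
 */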
static void ixgbe_atr(struct ixgbe_ring *ring, struct sk_buff *skb,
		      u32 tx_flags, __be16 protocol)
{
	struct ixgbe_q_vector *q_vector = ring->q_vector;
	union ixgbe_atr_hash_dword input = { .dword = 0 };
	union ixgbe_atr_hash_dword common = { .dword = 0 };
	union {
		unsigned char *network;
		struct iphdr *ipv4;
		struct ipv6hdr *ipv6;
	} hdr;
	struct tcphdr *th;
	__be16 vlan_id;

	/* if ring doesn't have an interrupt vector, cannot perform ATR */
	if (!q_vector)
		return;

	/* do nothing if sampling is disabled */
	if (!ring->atr_sample_rate)
		return;

	ring->atr_count++;

	/* snag network header to get L4 type and address */
	hdr.network = skb_network_header(skb);

	/* Currently only IPv4/IPv6 with TCP is supported */
	if ((protocol != __constant_htons(ETH_P_IPV6) ||
	     hdr.ipv6->nexthdr != IPPROTO_TCP) &&
	    (protocol != __constant_htons(ETH_P_IP) ||
	     hdr.ipv4->protocol != IPPROTO_TCP))
		return;

	th = tcp_hdr(skb);

	/* skip this packet since it is invalid or the socket is closing */
	if (!th || th->fin)
		return;

	/* sample on all syn packets or once every atr sample count */
	if (!th->syn && (ring->atr_count < ring->atr_sample_rate))
		return;

	/* reset sample count */
	ring->atr_count = 0;

	vlan_id = htons(tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT);

	/*
	 * src and dst are inverted, think how the receiver sees them
	 *
	 * The input is broken into two sections, a non-compressed section
	 * containing vm_pool, vlan_id, and flow_type.  The rest of the data
	 * is XORed together and stored in the compressed dword.
	 */
	input.formatted.vlan_id = vlan_id;

	/*
	 * since src port and flex bytes occupy the same word XOR them together
	 * and write the value to source port portion of compressed dword
	 */
	if (tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN))
		common.port.src ^= th->dest ^ __constant_htons(ETH_P_8021Q);
	else
		common.port.src ^= th->dest ^ protocol;
	common.port.dst ^= th->source;

	if (protocol == __constant_htons(ETH_P_IP)) {
		input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
		common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
	} else {
		input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
		common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^
			     hdr.ipv6->saddr.s6_addr32[1] ^
			     hdr.ipv6->saddr.s6_addr32[2] ^
			     hdr.ipv6->saddr.s6_addr32[3] ^
			     hdr.ipv6->daddr.s6_addr32[0] ^
			     hdr.ipv6->daddr.s6_addr32[1] ^
			     hdr.ipv6->daddr.s6_addr32[2] ^
			     hdr.ipv6->daddr.s6_addr32[3];
	}

	/* This assumes the Rx queue and Tx queue are bound to the same CPU */
	ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
					      input, common, ring->queue_index);
}
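/*
 * Illustration only: for an untagged IPv4 TCP flow a.b.c.d:12345 ->
 * w.x.y.z:80, the compressed dword above is folded roughly as
 *
 *	common.port.src = htons(80) ^ htons(ETH_P_IP);
 *	common.port.dst = htons(12345);
 *	common.ip       = saddr ^ daddr;
 *
 * i.e. ports swapped relative to the outgoing header, so the sample
 * written on transmit matches the receive direction of the same flow.
 */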
static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available. */
	if (likely(ixgbe_desc_unused(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;
	return 0;
}

static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
{
	if (likely(ixgbe_desc_unused(tx_ring) >= size))
		return 0;
	return __ixgbe_maybe_stop_tx(tx_ring, size);
}
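/*
 * Note on the stop/recheck pattern above (descriptive, not from the
 * original comments): between the cheap ixgbe_desc_unused() test and
 * netif_stop_subqueue(), a completion on another CPU may free
 * descriptors while observing a queue that is not yet stopped, so it
 * would never issue a wake.  Stopping first and rechecking after
 * smp_mb() closes that window; the queue is restarted immediately if
 * room appeared in the meantime.
 */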
static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	int txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
					       smp_processor_id();
#ifdef IXGBE_FCOE
	__be16 protocol = vlan_get_protocol(skb);

	if (((protocol == htons(ETH_P_FCOE)) ||
	     (protocol == htons(ETH_P_FIP))) &&
	    (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
		txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
		txq += adapter->ring_feature[RING_F_FCOE].mask;
		return txq;
	}
#endif

	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
		while (unlikely(txq >= dev->real_num_tx_queues))
			txq -= dev->real_num_tx_queues;
		return txq;
	}

	return skb_tx_hash(dev, skb);
}
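/*
 * Illustration only: assuming eight FCoE queues (indices == 8, a power
 * of two) and that ring_feature[RING_F_FCOE].mask holds the base offset
 * of the FCoE ring region, a recorded Rx queue of 13 maps to
 *
 *	txq = (13 & 7) + fcoe_base;
 *
 * i.e. queue 5 within the FCoE pool (fcoe_base here stands for the
 * .mask value and is only a name for this sketch).
 */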
netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
				  struct ixgbe_adapter *adapter,
				  struct ixgbe_ring *tx_ring)
{
	struct ixgbe_tx_buffer *first;
	int tso;
	u32 tx_flags = 0;
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
	unsigned short f;
#endif
	u16 count = TXD_USE_COUNT(skb_headlen(skb));
	__be16 protocol = skb->protocol;
	u8 hdr_len = 0;

	/*
	 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_head_len/IXGBE_MAX_DATA_PER_TXD,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
#else
	count += skb_shinfo(skb)->nr_frags;
#endif
	if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

#ifdef CONFIG_PCI_IOV
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		tx_flags |= IXGBE_TX_FLAGS_TXSW;

#endif
	/* if we have a HW VLAN tag being added default to the HW one */
	if (vlan_tx_tag_present(skb)) {
		tx_flags |= vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
	/* else if it is a SW VLAN check the next protocol and store the tag */
	} else if (protocol == __constant_htons(ETH_P_8021Q)) {
		struct vlan_hdr *vhdr, _vhdr;
		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
		if (!vhdr)
			goto out_drop;

		protocol = vhdr->h_vlan_encapsulated_proto;
		tx_flags |= ntohs(vhdr->h_vlan_TCI) << IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_SW_VLAN;
	}

	if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
	    ((tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | IXGBE_TX_FLAGS_SW_VLAN)) ||
	     (skb->priority != TC_PRIO_CONTROL))) {
		tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
		tx_flags |= tx_ring->dcb_tc <<
			    IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT;
		if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) {
			struct vlan_ethhdr *vhdr;
			if (skb_header_cloned(skb) &&
			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
				goto out_drop;
			vhdr = (struct vlan_ethhdr *)skb->data;
			vhdr->h_vlan_TCI = htons(tx_flags >>
						 IXGBE_TX_FLAGS_VLAN_SHIFT);
		} else {
			tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
		}
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];

#ifdef IXGBE_FCOE
	/* setup tx offload for FCoE */
	if ((protocol == __constant_htons(ETH_P_FCOE)) &&
	    (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
		tso = ixgbe_fso(tx_ring, skb, tx_flags, &hdr_len);
		if (tso < 0)
			goto out_drop;
		else if (tso)
			tx_flags |= IXGBE_TX_FLAGS_FSO |
				    IXGBE_TX_FLAGS_FCOE;
		else
			tx_flags |= IXGBE_TX_FLAGS_FCOE;

		goto xmit_fcoe;
	}

#endif /* IXGBE_FCOE */
	/* setup IPv4/IPv6 offloads */
	if (protocol == __constant_htons(ETH_P_IP))
		tx_flags |= IXGBE_TX_FLAGS_IPV4;

	tso = ixgbe_tso(tx_ring, skb, tx_flags, protocol, &hdr_len);
	if (tso < 0)
		goto out_drop;
	else if (tso)
		tx_flags |= IXGBE_TX_FLAGS_TSO;
	else if (ixgbe_tx_csum(tx_ring, skb, tx_flags, protocol))
		tx_flags |= IXGBE_TX_FLAGS_CSUM;

	/* add the ATR filter if ATR is on */
	if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
		ixgbe_atr(tx_ring, skb, tx_flags, protocol);

#ifdef IXGBE_FCOE
xmit_fcoe:
#endif /* IXGBE_FCOE */
	ixgbe_tx_map(tx_ring, skb, first, tx_flags, hdr_len);

	ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
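/*
 * Illustration only of the descriptor budget above: with a 512-byte
 * linear area and three page fragments that each fit in a single
 * descriptor, count is 1 + 3 = 4, and the ring must have at least
 * count + 3 = 7 free descriptors: one extra for the context descriptor
 * plus the two-descriptor gap that keeps tail from touching head.
 */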
static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *tx_ring;

	tx_ring = adapter->tx_ring[skb->queue_mapping];
	return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
}
/**
 * ixgbe_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbe_set_mac(struct net_device *netdev, void *p)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
			    IXGBE_RAH_AV);

	return 0;
}
static int
ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u16 value;
	int rc;

	if (prtad != hw->phy.mdio.prtad)
		return -EINVAL;
	rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
	if (!rc)
		rc = value;
	return rc;
}

static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
			    u16 addr, u16 value)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (prtad != hw->phy.mdio.prtad)
		return -EINVAL;
	return hw->phy.ops.write_reg(hw, addr, devad, value);
}

static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
}
/**
 * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
 * netdev->dev_addrs
 * @netdev: network interface device structure
 *
 * Returns non-zero on failure
 **/
static int ixgbe_add_sanmac_netdev(struct net_device *dev)
{
	int err = 0;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_mac_info *mac = &adapter->hw.mac;

	if (is_valid_ether_addr(mac->san_addr)) {
		rtnl_lock();
		err = dev_addr_add(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
		rtnl_unlock();
	}
	return err;
}

/**
 * ixgbe_del_sanmac_netdev - Removes the SAN MAC address from the corresponding
 * netdev->dev_addrs
 * @netdev: network interface device structure
 *
 * Returns non-zero on failure
 **/
static int ixgbe_del_sanmac_netdev(struct net_device *dev)
{
	int err = 0;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_mac_info *mac = &adapter->hw.mac;

	if (is_valid_ether_addr(mac->san_addr)) {
		rtnl_lock();
		err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
		rtnl_unlock();
	}
	return err;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void ixgbe_netpoll(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int i;

	/* if interface is down do nothing */
	if (test_bit(__IXGBE_DOWN, &adapter->state))
		return;

	adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
		for (i = 0; i < num_q_vectors; i++) {
			struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
			ixgbe_msix_clean_rings(0, q_vector);
		}
	} else {
		ixgbe_intr(adapter->pdev->irq, netdev);
	}
	adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
}
#endif
static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
						   struct rtnl_link_stats64 *stats)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int i;

	rcu_read_lock();
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
		u64 bytes, packets;
		unsigned int start;

		if (ring) {
			do {
				start = u64_stats_fetch_begin_bh(&ring->syncp);
				packets = ring->stats.packets;
				bytes   = ring->stats.bytes;
			} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
			stats->rx_packets += packets;
			stats->rx_bytes   += bytes;
		}
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]);
		u64 bytes, packets;
		unsigned int start;

		if (ring) {
			do {
				start = u64_stats_fetch_begin_bh(&ring->syncp);
				packets = ring->stats.packets;
				bytes   = ring->stats.bytes;
			} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
			stats->tx_packets += packets;
			stats->tx_bytes   += bytes;
		}
	}
	rcu_read_unlock();
	/* following stats updated by ixgbe_watchdog_task() */
	stats->multicast	= netdev->stats.multicast;
	stats->rx_errors	= netdev->stats.rx_errors;
	stats->rx_length_errors	= netdev->stats.rx_length_errors;
	stats->rx_crc_errors	= netdev->stats.rx_crc_errors;
	stats->rx_missed_errors	= netdev->stats.rx_missed_errors;

	return stats;
}
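/*
 * Note (descriptive, not from the original comments): the
 * u64_stats_fetch_begin_bh/retry loops above exist because on 32-bit
 * SMP a 64-bit counter update is not atomic; if the writer bumped the
 * per-ring seqcount while packets/bytes were being copied, the reader
 * simply re-reads until it gets a consistent snapshot.  On 64-bit
 * builds the loop reduces to plain loads.
 */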
/* ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid.
 * @adapter: pointer to ixgbe_adapter
 * @tc: number of traffic classes currently enabled
 *
 * Configure a valid 802.1Qp to Rx packet buffer mapping ie confirm
 * 802.1Q priority maps to a packet buffer that exists.
 */
static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg, rsave;
	int i;

	/* 82598 has a static priority to TC mapping that can not
	 * be changed so no validation is needed.
	 */
	if (hw->mac.type == ixgbe_mac_82598EB)
		return;

	reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
	rsave = reg;

	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
		u8 up2tc = reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT);

		/* If up2tc is out of bounds default to zero */
		if (up2tc > tc)
			reg &= ~(0x7 << IXGBE_RTRUP2TC_UP_SHIFT);
	}

	if (reg != rsave)
		IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);

	return;
}
/* ixgbe_setup_tc - routine to configure net_device for multiple traffic
 * classes.
 *
 * @netdev: net device to configure
 * @tc: number of traffic classes to enable
 */
int ixgbe_setup_tc(struct net_device *dev, u8 tc)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_hw *hw = &adapter->hw;

	/* Multiple traffic classes requires multiple queues */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
		e_err(drv, "Enable failed, needs MSI-X\n");
		return -EINVAL;
	}

	/* Hardware supports up to 8 traffic classes */
	if (tc > MAX_TRAFFIC_CLASS ||
	    (hw->mac.type == ixgbe_mac_82598EB && tc < MAX_TRAFFIC_CLASS))
		return -EINVAL;

	/* Hardware has to reinitialize queues and interrupts to
	 * match packet buffer alignment. Unfortunately, the
	 * hardware is not flexible enough to do this dynamically.
	 */
	if (netif_running(dev))
		ixgbe_close(dev);
	ixgbe_clear_interrupt_scheme(adapter);

	if (tc) {
		netdev_set_num_tc(dev, tc);
		adapter->last_lfc_mode = adapter->hw.fc.current_mode;

		adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
		adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
			adapter->hw.fc.requested_mode = ixgbe_fc_none;
	} else {
		netdev_reset_tc(dev);

		adapter->hw.fc.requested_mode = adapter->last_lfc_mode;

		adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
		adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;

		adapter->temp_dcb_cfg.pfc_mode_enable = false;
		adapter->dcb_cfg.pfc_mode_enable = false;
	}

	ixgbe_init_interrupt_scheme(adapter);
	ixgbe_validate_rtr(adapter, tc);
	if (netif_running(dev))
		ixgbe_open(dev);

	return 0;
}
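/*
 * Usage sketch (illustrative): ixgbe_setup_tc(dev, 8) switches the
 * device into full 8-class DCB operation and ixgbe_setup_tc(dev, 0)
 * returns it to a single traffic class.  Per the check above, 82598
 * parts only accept all-or-nothing (0 or MAX_TRAFFIC_CLASS), while
 * anything above MAX_TRAFFIC_CLASS is rejected for every MAC type.
 */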
void ixgbe_do_reset(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		ixgbe_reinit_locked(adapter);
	else
		ixgbe_reset(adapter);
}
static u32 ixgbe_fix_features(struct net_device *netdev, u32 data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

#ifdef CONFIG_DCB
	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
		data &= ~NETIF_F_HW_VLAN_RX;
#endif

	/* return error if RXHASH is being enabled when RSS is not supported */
	if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
		data &= ~NETIF_F_RXHASH;

	/* If Rx checksum is disabled, then RSC/LRO should also be disabled */
	if (!(data & NETIF_F_RXCSUM))
		data &= ~NETIF_F_LRO;

	/* Turn off LRO if not RSC capable or invalid ITR settings */
	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) {
		data &= ~NETIF_F_LRO;
	} else if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
		   (adapter->rx_itr_setting != 1 &&
		    adapter->rx_itr_setting > IXGBE_MAX_RSC_INT_RATE)) {
		data &= ~NETIF_F_LRO;
		e_info(probe, "rx-usecs set too low, not enabling RSC\n");
	}

	return data;
}
static int ixgbe_set_features(struct net_device *netdev, u32 data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	bool need_reset = false;

	/* If Rx checksum is disabled, then RSC/LRO should also be disabled */
	if (!(data & NETIF_F_RXCSUM))
		adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
	else
		adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;

	/* Make sure RSC matches LRO, reset if change */
	if (!!(data & NETIF_F_LRO) !=
	     !!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
		adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
		switch (adapter->hw.mac.type) {
		case ixgbe_mac_X540:
		case ixgbe_mac_82599EB:
			need_reset = true;
			break;
		default:
			break;
		}
	}

	/*
	 * Check if Flow Director n-tuple support was enabled or disabled.  If
	 * the state changed, we need to reset.
	 */
	if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
		/* turn off ATR, enable perfect filters and reset */
		if (data & NETIF_F_NTUPLE) {
			adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
			adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
			need_reset = true;
		}
	} else if (!(data & NETIF_F_NTUPLE)) {
		/* turn off Flow Director, set ATR and reset */
		adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
		if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
		    !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
			adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
		need_reset = true;
	}

	if (need_reset)
		ixgbe_do_reset(netdev);

	return 0;
}
static const struct net_device_ops ixgbe_netdev_ops = {
	.ndo_open		= ixgbe_open,
	.ndo_stop		= ixgbe_close,
	.ndo_start_xmit		= ixgbe_xmit_frame,
	.ndo_select_queue	= ixgbe_select_queue,
	.ndo_set_rx_mode	= ixgbe_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ixgbe_set_mac,
	.ndo_change_mtu		= ixgbe_change_mtu,
	.ndo_tx_timeout		= ixgbe_tx_timeout,
	.ndo_vlan_rx_add_vid	= ixgbe_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ixgbe_vlan_rx_kill_vid,
	.ndo_do_ioctl		= ixgbe_ioctl,
	.ndo_set_vf_mac		= ixgbe_ndo_set_vf_mac,
	.ndo_set_vf_vlan	= ixgbe_ndo_set_vf_vlan,
	.ndo_set_vf_tx_rate	= ixgbe_ndo_set_vf_bw,
	.ndo_get_vf_config	= ixgbe_ndo_get_vf_config,
	.ndo_get_stats64	= ixgbe_get_stats64,
	.ndo_setup_tc		= ixgbe_setup_tc,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ixgbe_netpoll,
#endif
#ifdef IXGBE_FCOE
	.ndo_fcoe_ddp_setup	= ixgbe_fcoe_ddp_get,
	.ndo_fcoe_ddp_target	= ixgbe_fcoe_ddp_target,
	.ndo_fcoe_ddp_done	= ixgbe_fcoe_ddp_put,
	.ndo_fcoe_enable	= ixgbe_fcoe_enable,
	.ndo_fcoe_disable	= ixgbe_fcoe_disable,
	.ndo_fcoe_get_wwn	= ixgbe_fcoe_get_wwn,
#endif /* IXGBE_FCOE */
	.ndo_set_features	= ixgbe_set_features,
	.ndo_fix_features	= ixgbe_fix_features,
};
static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
				     const struct ixgbe_info *ii)
{
#ifdef CONFIG_PCI_IOV
	struct ixgbe_hw *hw = &adapter->hw;
	int err;
	int num_vf_macvlans, i;
	struct vf_macvlans *mv_list;

	if (hw->mac.type == ixgbe_mac_82598EB || !max_vfs)
		return;

	/* The 82599 supports up to 64 VFs per physical function
	 * but this implementation limits allocation to 63 so that
	 * basic networking resources are still available to the
	 * physical function
	 */
	adapter->num_vfs = (max_vfs > 63) ? 63 : max_vfs;
	adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
	err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
	if (err) {
		e_err(probe, "Failed to enable PCI sriov: %d\n", err);
		goto err_novfs;
	}

	num_vf_macvlans = hw->mac.num_rar_entries -
		(IXGBE_MAX_PF_MACVLANS + 1 + adapter->num_vfs);

	adapter->mv_list = mv_list = kcalloc(num_vf_macvlans,
					     sizeof(struct vf_macvlans),
					     GFP_KERNEL);
	if (mv_list) {
		/* Initialize list of VF macvlans */
		INIT_LIST_HEAD(&adapter->vf_mvs.l);
		for (i = 0; i < num_vf_macvlans; i++) {
			mv_list->vf = -1;
			mv_list->free = true;
			mv_list->rar_entry = hw->mac.num_rar_entries -
				(i + adapter->num_vfs + 1);
			list_add(&mv_list->l, &adapter->vf_mvs.l);
			mv_list++;
		}
	}

	/* If call to enable VFs succeeded then allocate memory
	 * for per VF control structures.
	 */
	adapter->vfinfo =
		kcalloc(adapter->num_vfs,
			sizeof(struct vf_data_storage), GFP_KERNEL);
	if (adapter->vfinfo) {
		/* Now that we're sure SR-IOV is enabled
		 * and memory allocated set up the mailbox parameters
		 */
		ixgbe_init_mbx_params_pf(hw);
		memcpy(&hw->mbx.ops, ii->mbx_ops,
		       sizeof(hw->mbx.ops));

		/* Disable RSC when in SR-IOV mode */
		adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
				     IXGBE_FLAG2_RSC_ENABLED);
		return;
	}

	e_err(probe, "Unable to allocate memory for VF Data Storage - "
	      "SRIOV disabled\n");
	pci_disable_sriov(adapter->pdev);

err_novfs:
	adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
	adapter->num_vfs = 0;
#endif /* CONFIG_PCI_IOV */
}
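/*
 * Illustration only: assuming 128 receive-address (RAR) registers and
 * IXGBE_MAX_PF_MACVLANS == 15, enabling 8 VFs leaves
 *
 *	num_vf_macvlans = 128 - (15 + 1 + 8) = 104
 *
 * RAR slots for VF macvlans, handed out from the top of the table
 * downward by the rar_entry computation above.
 */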
/**
 * ixgbe_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbe_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbe_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit ixgbe_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct ixgbe_adapter *adapter = NULL;
	struct ixgbe_hw *hw;
	const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
	static int cards_found;
	int i, err, pci_using_dac;
	u8 part_str[IXGBE_PBANUM_LENGTH];
	unsigned int indices = num_possible_cpus();
#ifdef IXGBE_FCOE
	u16 device_caps;
#endif
	u32 eec;

	/* Catch broken hardware that put the wrong VF device ID in
	 * the PCIe SR-IOV capability.
	 */
	if (pdev->is_virtfn) {
		WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
		     pci_name(pdev), pdev->vendor, pdev->device);
		return -EINVAL;
	}

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev,
					"No usable DMA configuration, aborting\n");
				goto err_dma;
			}
		}
		pci_using_dac = 0;
	}

	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
					   IORESOURCE_MEM), ixgbe_driver_name);
	if (err) {
		dev_err(&pdev->dev,
			"pci_request_selected_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);
	pci_save_state(pdev);

#ifdef CONFIG_IXGBE_DCB
	indices *= MAX_TRAFFIC_CLASS;
#endif

	if (ii->mac == ixgbe_mac_82598EB)
		indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES);
	else
		indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES);

#ifdef IXGBE_FCOE
	indices += min_t(unsigned int, num_possible_cpus(),
			 IXGBE_MAX_FCOE_INDICES);
#endif
	netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	adapter = netdev_priv(netdev);
	pci_set_drvdata(pdev, adapter);

	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	for (i = 1; i <= 5; i++) {
		if (pci_resource_len(pdev, i) == 0)
			continue;
	}

	netdev->netdev_ops = &ixgbe_netdev_ops;
	ixgbe_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	adapter->bd_number = cards_found;

	/* Setup hw api */
	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type  = ii->mac;

	/* EEPROM */
	memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
	/* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */
	if (!(eec & (1 << 8)))
		hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;

	/* PHY */
	memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
	hw->phy.sfp_type = ixgbe_sfp_type_unknown;
	/* ixgbe_identify_phy_generic will set prtad and mmds properly */
	hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
	hw->phy.mdio.mmds = 0;
	hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	hw->phy.mdio.dev = netdev;
	hw->phy.mdio.mdio_read = ixgbe_mdio_read;
	hw->phy.mdio.mdio_write = ixgbe_mdio_write;

	ii->get_invariants(hw);

	/* setup the private structure */
	err = ixgbe_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/* Make it possible the adapter to be woken up via WOL */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
		break;
	default:
		break;
	}

	/*
	 * If there is a fan on this device and it has failed log the
	 * failure.
	 */
	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		if (esdp & IXGBE_ESDP_SDP1)
			e_crit(probe, "Fan has stopped, replace the adapter\n");
	}

	/* reset_hw fills in the perm_addr as well */
	hw->phy.reset_if_overtemp = true;
	err = hw->mac.ops.reset_hw(hw);
	hw->phy.reset_if_overtemp = false;
	if (err == IXGBE_ERR_SFP_NOT_PRESENT &&
	    hw->mac.type == ixgbe_mac_82598EB) {
		err = 0;
	} else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		e_dev_err("failed to load because an unsupported SFP+ "
			  "module type was detected.\n");
		e_dev_err("Reload the driver after installing a supported "
			  "module.\n");
		goto err_sw_init;
	} else if (err) {
		e_dev_err("HW Init failed: %d\n", err);
		goto err_sw_init;
	}

	ixgbe_probe_vf(adapter, ii);

	netdev->features = NETIF_F_SG |
			   NETIF_F_IP_CSUM |
			   NETIF_F_IPV6_CSUM |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER |
			   NETIF_F_TSO |
			   NETIF_F_TSO6 |
			   NETIF_F_RXHASH |
			   NETIF_F_RXCSUM;

	netdev->hw_features = netdev->features;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		netdev->features |= NETIF_F_SCTP_CSUM;
		netdev->hw_features |= NETIF_F_SCTP_CSUM |
				       NETIF_F_NTUPLE;
		break;
	default:
		break;
	}

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED |
				    IXGBE_FLAG_DCB_ENABLED);

#ifdef CONFIG_IXGBE_DCB
	netdev->dcbnl_ops = &dcbnl_ops;
#endif

#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
		if (hw->mac.ops.get_device_caps) {
			hw->mac.ops.get_device_caps(hw, &device_caps);
			if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
				adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
		}
	}
	if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
		netdev->vlan_features |= NETIF_F_FCOE_CRC;
		netdev->vlan_features |= NETIF_F_FSO;
		netdev->vlan_features |= NETIF_F_FCOE_MTU;
	}
#endif /* IXGBE_FCOE */
	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
		netdev->hw_features |= NETIF_F_LRO;
	if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
		netdev->features |= NETIF_F_LRO;

	/* make sure the EEPROM is good */
	if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
		e_dev_err("The EEPROM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_sw_init;
	}

	memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);

	if (ixgbe_validate_mac_addr(netdev->perm_addr)) {
		e_dev_err("invalid MAC address\n");
		err = -EIO;
		goto err_sw_init;
	}

	/* power down the optics for multispeed fiber and 82599 SFP+ fiber */
	if (hw->mac.ops.disable_tx_laser &&
	    ((hw->phy.multispeed_fiber) ||
	     ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
	      (hw->mac.type == ixgbe_mac_82599EB))))
		hw->mac.ops.disable_tx_laser(hw);

	setup_timer(&adapter->service_timer, &ixgbe_service_timer,
		    (unsigned long) adapter);

	INIT_WORK(&adapter->service_task, ixgbe_service_task);
	clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);

	err = ixgbe_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;

	if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) {
		netdev->hw_features &= ~NETIF_F_RXHASH;
		netdev->features &= ~NETIF_F_RXHASH;
	}

	switch (pdev->device) {
	case IXGBE_DEV_ID_82599_SFP:
		/* Only this subdevice supports WOL */
		if (pdev->subsystem_device == IXGBE_SUBDEV_ID_82599_SFP)
			adapter->wol = IXGBE_WUFC_MAG;
		break;
	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
		/* All except this subdevice support WOL */
		if (pdev->subsystem_device != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
			adapter->wol = IXGBE_WUFC_MAG;
		break;
	case IXGBE_DEV_ID_82599_KX4:
		adapter->wol = IXGBE_WUFC_MAG;
		break;
	default:
		adapter->wol = 0;
		break;
	}
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* pick up the PCI bus settings for reporting later */
	hw->mac.ops.get_bus_info(hw);

	/* print bus type/speed/width info */
	e_dev_info("(PCI Express:%s:%s) %pM\n",
		   (hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0GT/s" :
		    hw->bus.speed == ixgbe_bus_speed_2500 ? "2.5GT/s" :
		    "Unknown"),
		   (hw->bus.width == ixgbe_bus_width_pcie_x8 ? "Width x8" :
		    hw->bus.width == ixgbe_bus_width_pcie_x4 ? "Width x4" :
		    hw->bus.width == ixgbe_bus_width_pcie_x1 ? "Width x1" :
		    "Unknown"),
		   netdev->dev_addr);

	err = ixgbe_read_pba_string_generic(hw, part_str, IXGBE_PBANUM_LENGTH);
	if (err)
		strncpy(part_str, "Unknown", IXGBE_PBANUM_LENGTH);
	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
		e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
			   hw->mac.type, hw->phy.type, hw->phy.sfp_type,
			   part_str);
	else
		e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
			   hw->mac.type, hw->phy.type, part_str);

	if (hw->bus.width <= ixgbe_bus_width_pcie_x4) {
		e_dev_warn("PCI-Express bandwidth available for this card is "
			   "not sufficient for optimal performance.\n");
		e_dev_warn("For optimal performance a x8 PCI-Express slot "
			   "is required.\n");
	}

	/* save off EEPROM version number */
	hw->eeprom.ops.read(hw, 0x29, &adapter->eeprom_version);

	/* reset the hardware with the new settings */
	err = hw->mac.ops.start_hw(hw);

	if (err == IXGBE_ERR_EEPROM_VERSION) {
		/* We are running on a pre-production device, log a warning */
		e_dev_warn("This device is a pre-production adapter/LOM. "
			   "Please be aware there may be issues associated "
			   "with your hardware.  If you are experiencing "
			   "problems please contact your Intel or hardware "
			   "representative who provided you with this "
			   "hardware.\n");
	}
	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

#ifdef CONFIG_IXGBE_DCA
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
		ixgbe_setup_dca(adapter);
	}
#endif
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
		e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs);
		for (i = 0; i < adapter->num_vfs; i++)
			ixgbe_vf_configuration(pdev, (i | 0x10000000));
	}

	/* Inform firmware of driver version */
	if (hw->mac.ops.set_fw_drv_ver)
		hw->mac.ops.set_fw_drv_ver(hw, MAJ, MIN, BUILD,
					   FW_CEM_UNUSED_VER);

	/* add san mac addr to netdev */
	ixgbe_add_sanmac_netdev(netdev);

	e_dev_info("Intel(R) 10 Gigabit Network Connection\n");
	cards_found++;
	return 0;

err_register:
	ixgbe_release_hw_control(adapter);
	ixgbe_clear_interrupt_scheme(adapter);
err_sw_init:
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		ixgbe_disable_sriov(adapter);
	adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
/**
 * ixgbe_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit ixgbe_remove(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	set_bit(__IXGBE_DOWN, &adapter->state);
	cancel_work_sync(&adapter->service_task);

#ifdef CONFIG_IXGBE_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
		dca_remove_requester(&pdev->dev);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
	}

#endif
#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
		ixgbe_cleanup_fcoe(adapter);

#endif /* IXGBE_FCOE */

	/* remove the added san mac */
	ixgbe_del_sanmac_netdev(netdev);

	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		ixgbe_disable_sriov(adapter);

	ixgbe_clear_interrupt_scheme(adapter);

	ixgbe_release_hw_control(adapter);

	iounmap(adapter->hw.hw_addr);
	pci_release_selected_regions(pdev, pci_select_bars(pdev,
				     IORESOURCE_MEM));

	e_dev_info("complete\n");

	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}
/**
 * ixgbe_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		ixgbe_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * ixgbe_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	pci_ers_result_t result;
	int err;

	if (pci_enable_device_mem(pdev)) {
		e_err(probe, "Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);

		pci_wake_from_d3(pdev, false);

		ixgbe_reset(adapter);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		e_dev_err("pci_cleanup_aer_uncorrect_error_status "
			  "failed 0x%0x\n", err);
		/* non-fatal, continue */
	}

	return result;
}
/**
 * ixgbe_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void ixgbe_io_resume(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (netif_running(netdev)) {
		if (ixgbe_up(adapter)) {
			e_info(probe, "ixgbe_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}
static struct pci_error_handlers ixgbe_err_handler = {
	.error_detected = ixgbe_io_error_detected,
	.slot_reset = ixgbe_io_slot_reset,
	.resume = ixgbe_io_resume,
};

static struct pci_driver ixgbe_driver = {
	.name     = ixgbe_driver_name,
	.id_table = ixgbe_pci_tbl,
	.probe    = ixgbe_probe,
	.remove   = __devexit_p(ixgbe_remove),
#ifdef CONFIG_PM
	.suspend  = ixgbe_suspend,
	.resume   = ixgbe_resume,
#endif
	.shutdown = ixgbe_shutdown,
	.err_handler = &ixgbe_err_handler
};
/**
 * ixgbe_init_module - Driver Registration Routine
 *
 * ixgbe_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init ixgbe_init_module(void)
{
	int ret;
	pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
	pr_info("%s\n", ixgbe_copyright);

#ifdef CONFIG_IXGBE_DCA
	dca_register_notify(&dca_notifier);
#endif

	ret = pci_register_driver(&ixgbe_driver);
	return ret;
}

module_init(ixgbe_init_module);
/**
 * ixgbe_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbe_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbe_exit_module(void)
{
#ifdef CONFIG_IXGBE_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&ixgbe_driver);
	rcu_barrier(); /* Wait for completion of call_rcu()'s */
}
#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
			    void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
					 __ixgbe_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}
#endif /* CONFIG_IXGBE_DCA */

module_exit(ixgbe_exit_module);