/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
#include "ixgbe.h"
#include "ixgbe_sriov.h"
/**
 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for RSS to the assigned rings.
 *
 **/
static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
{
	int i;

	if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
		return false;

	for (i = 0; i < adapter->num_rx_queues; i++)
		adapter->rx_ring[i]->reg_idx = i;
	for (i = 0; i < adapter->num_tx_queues; i++)
		adapter->tx_ring[i]->reg_idx = i;

	return true;
}
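/*
 * Illustrative note (not part of the original source): in plain RSS mode
 * the software ring index and the hardware queue register index are
 * identical, so with e.g. eight queues rx_ring[0..7] map straight to
 * hardware queues 0..7 with no translation.
 */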
#ifdef CONFIG_IXGBE_DCB

/* ixgbe_get_first_reg_idx - Return first register index associated with ring */
static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
				    unsigned int *tx, unsigned int *rx)
{
	struct net_device *dev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u8 num_tcs = netdev_get_num_tc(dev);

	*tx = 0;
	*rx = 0;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		*tx = tc << 2;
		*rx = tc << 3;
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		if (num_tcs > 4) {
			if (tc < 3) {
				*tx = tc << 5;
				*rx = tc << 4;
			} else if (tc < 5) {
				*tx = ((tc + 2) << 4);
				*rx = tc << 4;
			} else if (tc < num_tcs) {
				*tx = ((tc + 8) << 3);
				*rx = tc << 4;
			}
		} else {
			*rx = tc << 5;
			switch (tc) {
			case 0:
				*tx = 0;
				break;
			case 1:
				*tx = 64;
				break;
			case 2:
				*tx = 96;
				break;
			case 3:
				*tx = 112;
				break;
			default:
				break;
			}
		}
		break;
	default:
		break;
	}
}
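/*
 * Worked example (illustrative): on 82599 with eight traffic classes,
 * tc = 4 gives *tx = (4 + 2) << 4 = 96 and tc = 5 gives
 * *tx = (5 + 8) << 3 = 104, i.e. higher TCs own progressively smaller
 * blocks of Tx queues (32 per TC for TC0/1, 16 for TC2/3, 8 for TC4-7).
 */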
/**
 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for DCB to the assigned rings.
 *
 **/
static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	int i, j, k;
	u8 num_tcs = netdev_get_num_tc(dev);

	if (!num_tcs)
		return false;

	for (i = 0, k = 0; i < num_tcs; i++) {
		unsigned int tx_s, rx_s;
		u16 count = dev->tc_to_txq[i].count;

		ixgbe_get_first_reg_idx(adapter, i, &tx_s, &rx_s);
		for (j = 0; j < count; j++, k++) {
			adapter->tx_ring[k]->reg_idx = tx_s + j;
			adapter->rx_ring[k]->reg_idx = rx_s + j;
			adapter->tx_ring[k]->dcb_tc = i;
			adapter->rx_ring[k]->dcb_tc = i;
		}
	}

	return true;
}
#endif
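/*
 * Illustrative example: with four queues per TC, the rings at k = 4..7
 * belong to TC1 (tc_to_txq[1].count == 4), and they receive reg_idx
 * tx_s + 0..3 on Tx and rx_s + 0..3 on Rx, where tx_s/rx_s come from
 * ixgbe_get_first_reg_idx() above.
 */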
#ifdef IXGBE_FCOE
/**
 * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for FCoE mode to the assigned rings.
 *
 **/
static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
	int i;
	u8 fcoe_rx_i = 0, fcoe_tx_i = 0;

	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		return false;

	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
		ixgbe_cache_ring_rss(adapter);

		fcoe_rx_i = f->offset;
		fcoe_tx_i = f->offset;
	}
	for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
		adapter->rx_ring[f->offset + i]->reg_idx = fcoe_rx_i;
		adapter->tx_ring[f->offset + i]->reg_idx = fcoe_tx_i;
	}
	return true;
}

#endif /* IXGBE_FCOE */
/**
 * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
 * @adapter: board private structure to initialize
 *
 * SR-IOV doesn't use any descriptor rings but changes the default if
 * no other mapping is used.
 *
 **/
static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
{
	adapter->rx_ring[0]->reg_idx = adapter->num_vfs * 2;
	adapter->tx_ring[0]->reg_idx = adapter->num_vfs * 2;
	if (adapter->num_vfs)
		return true;
	else
		return false;
}
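/*
 * Illustrative note: in this SR-IOV mode each VF owns one queue pair,
 * so with e.g. num_vfs = 16 the PF's single ring pair is parked at
 * register index 16 * 2 = 32, just past the VF-owned range.
 */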
/**
 * ixgbe_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 *
 * Note, the order the various feature calls is important.  It must start with
 * the "most" features enabled at the same time, then trickle down to the
 * least amount of features turned on at once.
 **/
static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
{
	/* start with default case */
	adapter->rx_ring[0]->reg_idx = 0;
	adapter->tx_ring[0]->reg_idx = 0;

	if (ixgbe_cache_ring_sriov(adapter))
		return;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_cache_ring_dcb(adapter))
		return;

#endif
#ifdef IXGBE_FCOE
	if (ixgbe_cache_ring_fcoe(adapter))
		return;

#endif /* IXGBE_FCOE */
	if (ixgbe_cache_ring_rss(adapter))
		return;
}
/**
 * ixgbe_set_sriov_queues - Allocate queues for IOV use
 * @adapter: board private structure to initialize
 *
 * IOV doesn't actually use anything, so just NAK the
 * request for now and let the other queue routines
 * figure out what to do.
 **/
static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
{
	return false;
}
/**
 * ixgbe_set_rss_queues - Allocate queues for RSS
 * @adapter: board private structure to initialize
 *
 * This is our "base" multiqueue mode.  RSS (Receive Side Scaling) will try
 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
 *
 **/
static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring_feature *f;
	u16 rss_i;

	if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) {
		adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
		return false;
	}

	/* set mask for 16 queue limit of RSS */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = f->limit;

	f->indices = rss_i;
	f->mask = IXGBE_RSS_16Q_MASK;

	/*
	 * Use Flow Director in addition to RSS to ensure the best
	 * distribution of flows across cores, even when an FDIR flow
	 * isn't matched.
	 */
	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
		f = &adapter->ring_feature[RING_F_FDIR];

		f->indices = min_t(u16, num_online_cpus(), f->limit);
		rss_i = max_t(u16, rss_i, f->indices);
	}

	adapter->num_rx_queues = rss_i;
	adapter->num_tx_queues = rss_i;

	return true;
}
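/*
 * Worked example (illustrative): with an RSS limit of 16, Flow Director
 * hashing enabled with a limit of e.g. 64, and 32 online CPUs, the FDIR
 * feature gets min(32, 64) = 32 indices and rss_i = max(16, 32) = 32,
 * so 32 Rx/Tx queue pairs are requested.
 */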
#ifdef IXGBE_FCOE
/**
 * ixgbe_set_fcoe_queues - Allocate queues for Fiber Channel over Ethernet (FCoE)
 * @adapter: board private structure to initialize
 *
 * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges.
 * Offset is used as the index of the first rx queue used by FCoE.
 **/
static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];

	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		return false;

	f->indices = min_t(int, num_online_cpus(), f->limit);

	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;

	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
		e_info(probe, "FCoE enabled with RSS\n");
		ixgbe_set_rss_queues(adapter);
	}

	/* adding FCoE rx rings to the end */
	f->offset = adapter->num_rx_queues;
	adapter->num_rx_queues += f->indices;
	adapter->num_tx_queues += f->indices;

	return true;
}

#endif /* IXGBE_FCOE */
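/*
 * Illustrative layout: with 8 RSS queues and 4 FCoE indices the final
 * ring array is [RSS 0..7 | FCoE 8..11]; f->offset = 8 marks where the
 * FCoE block begins so ixgbe_cache_ring_fcoe() can map it later.
 */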
/* Artificial max queue cap per traffic class in DCB mode */
#define DCB_QUEUE_CAP 8
#ifdef CONFIG_IXGBE_DCB
static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
{
	int per_tc_q, q, i, offset = 0;
	struct net_device *dev = adapter->netdev;
	int tcs = netdev_get_num_tc(dev);

	if (!tcs)
		return false;

	/* Map queue offset and counts onto allocated tx queues */
	per_tc_q = min_t(unsigned int, dev->num_tx_queues / tcs, DCB_QUEUE_CAP);
	q = min_t(int, num_online_cpus(), per_tc_q);

	for (i = 0; i < tcs; i++) {
		netdev_set_tc_queue(dev, i, q, offset);
		offset += q;
	}

	adapter->num_tx_queues = q * tcs;
	adapter->num_rx_queues = q * tcs;

#ifdef IXGBE_FCOE
	/* FCoE enabled queues require special configuration indexed
	 * by feature specific indices and offset. Here we map FCoE
	 * indices onto the DCB queue pairs allowing FCoE to own
	 * configuration later.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		u8 prio_tc[MAX_USER_PRIORITY] = {0};
		int tc;
		struct ixgbe_ring_feature *f =
					&adapter->ring_feature[RING_F_FCOE];

		ixgbe_dcb_unpack_map(&adapter->dcb_cfg, DCB_TX_CONFIG, prio_tc);
		tc = prio_tc[adapter->fcoe.up];
		f->indices = dev->tc_to_txq[tc].count;
		f->offset = dev->tc_to_txq[tc].offset;
	}
#endif

	return true;
}
#endif
/**
 * ixgbe_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
{
	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_rx_pools = adapter->num_rx_queues;
	adapter->num_rx_queues_per_pool = 1;

	if (ixgbe_set_sriov_queues(adapter))
		goto done;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_set_dcb_queues(adapter))
		goto done;

#endif
#ifdef IXGBE_FCOE
	if (ixgbe_set_fcoe_queues(adapter))
		goto done;

#endif /* IXGBE_FCOE */
	if (ixgbe_set_rss_queues(adapter))
		goto done;

	/* fallback to base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;

done:
	if ((adapter->netdev->reg_state == NETREG_UNREGISTERED) ||
	    (adapter->netdev->reg_state == NETREG_UNREGISTERING))
		return 0;

	/* Notify the stack of the (possibly) reduced queue counts. */
	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
	return netif_set_real_num_rx_queues(adapter->netdev,
					    adapter->num_rx_queues);
}
static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
				       int vectors)
{
	int err, vector_threshold;

	/* We'll want at least 2 (vector_threshold):
	 * 1) TxQ[0] + RxQ[0] handler
	 * 2) Other (Link Status Change, etc.)
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/*
	 * The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	while (vectors >= vector_threshold) {
		err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
				      vectors);
		if (!err) /* Success in acquiring all requested vectors. */
			break;
		else if (err < 0)
			vectors = 0; /* Nasty failure, quit now */
		else /* err == number of vectors we should try again with */
			vectors = err;
	}

	if (vectors < vector_threshold) {
		/* Can't allocate enough MSI-X interrupts?  Oh well.
		 * This just means we'll go with either a single MSI
		 * vector or fall back to legacy interrupts.
		 */
		netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
			     "Unable to allocate MSI-X interrupts\n");
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else {
		adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
		/*
		 * Adjust for only the vectors we'll use, which is minimum
		 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
		 * vectors we were allocated.
		 */
		vectors -= NON_Q_VECTORS;
		adapter->num_q_vectors = min(vectors, adapter->max_q_vectors);
	}
}
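/*
 * Note on the retry loop above: the legacy pci_enable_msix() returns 0 on
 * success, a negative errno on hard failure, or a positive count of the
 * vectors actually available, so each pass shrinks the request until it
 * fits or drops below MIN_MSIX_COUNT.
 */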
static void ixgbe_add_ring(struct ixgbe_ring *ring,
			   struct ixgbe_ring_container *head)
{
	/* push the new ring onto the container's singly linked list */
	ring->next = head->ring;
	head->ring = ring;
	head->count++;
}
/**
 * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector
 * @adapter: board private structure to initialize
 * @v_count: q_vectors allocated on adapter, used for ring interleaving
 * @v_idx: index of vector in adapter struct
 * @txr_count: total number of Tx rings to allocate
 * @txr_idx: index of first Tx ring to allocate
 * @rxr_count: total number of Rx rings to allocate
 * @rxr_idx: index of first Rx ring to allocate
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/
static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
				int v_count, int v_idx,
				int txr_count, int txr_idx,
				int rxr_count, int rxr_idx)
{
	struct ixgbe_q_vector *q_vector;
	struct ixgbe_ring *ring;
	int node = -1;
	int cpu = -1;
	int ring_count, size;

	ring_count = txr_count + rxr_count;
	size = sizeof(struct ixgbe_q_vector) +
	       (sizeof(struct ixgbe_ring) * ring_count);

	/* customize cpu for Flow Director mapping */
	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
		if (cpu_online(v_idx)) {
			cpu = v_idx;
			node = cpu_to_node(cpu);
		}
	}

	/* allocate q_vector and rings */
	q_vector = kzalloc_node(size, GFP_KERNEL, node);
	if (!q_vector)
		q_vector = kzalloc(size, GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	/* setup affinity mask and node */
	if (cpu != -1)
		cpumask_set_cpu(cpu, &q_vector->affinity_mask);
	else
		cpumask_copy(&q_vector->affinity_mask, cpu_online_mask);
	q_vector->numa_node = node;

	/* initialize NAPI */
	netif_napi_add(adapter->netdev, &q_vector->napi,
		       ixgbe_poll, 64);

	/* tie q_vector and adapter together */
	adapter->q_vector[v_idx] = q_vector;
	q_vector->adapter = adapter;
	q_vector->v_idx = v_idx;

	/* initialize work limits */
	q_vector->tx.work_limit = adapter->tx_work_limit;

	/* initialize pointer to rings */
	ring = q_vector->ring;

	while (txr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		ixgbe_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		ring->queue_index = txr_idx;

		/* assign ring to adapter */
		adapter->tx_ring[txr_idx] = ring;

		/* update count and index */
		txr_count--;
		txr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	while (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Rx values */
		ixgbe_add_ring(ring, &q_vector->rx);

		/*
		 * 82599 errata, UDP frames with a 0 checksum
		 * can be marked as checksum errors.
		 */
		if (adapter->hw.mac.type == ixgbe_mac_82599EB)
			set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);

#ifdef IXGBE_FCOE
		if (adapter->netdev->features & NETIF_F_FCOE_MTU) {
			struct ixgbe_ring_feature *f;
			f = &adapter->ring_feature[RING_F_FCOE];
			if ((rxr_idx >= f->offset) &&
			    (rxr_idx < f->offset + f->indices))
				set_bit(__IXGBE_RX_FCOE, &ring->state);
		}

#endif /* IXGBE_FCOE */
		/* apply Rx specific ring traits */
		ring->count = adapter->rx_ring_count;
		ring->queue_index = rxr_idx;

		/* assign ring to adapter */
		adapter->rx_ring[rxr_idx] = ring;

		/* update count and index */
		rxr_count--;
		rxr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	return 0;
}
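/*
 * Interleaving example (illustrative): the rings live in the same
 * allocation as their q_vector, and ring indices step by v_count, so
 * with v_count = 2 vectors and 4 Tx queues, vector 0 owns tx_ring[0]
 * and tx_ring[2] while vector 1 owns tx_ring[1] and tx_ring[3].
 */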
/**
 * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
{
	struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
	struct ixgbe_ring *ring;

	ixgbe_for_each_ring(ring, q_vector->tx)
		adapter->tx_ring[ring->queue_index] = NULL;

	ixgbe_for_each_ring(ring, q_vector->rx)
		adapter->rx_ring[ring->queue_index] = NULL;

	adapter->q_vector[v_idx] = NULL;
	netif_napi_del(&q_vector->napi);

	/*
	 * ixgbe_get_stats64() might access the rings on this vector,
	 * we must wait a grace period before freeing it.
	 */
	kfree_rcu(q_vector, rcu);
}
/**
 * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
{
	int q_vectors = adapter->num_q_vectors;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int rxr_idx = 0, txr_idx = 0, v_idx = 0;
	int err;

	/* only one q_vector if MSI-X is disabled. */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		q_vectors = 1;

	if (q_vectors >= (rxr_remaining + txr_remaining)) {
		for (; rxr_remaining; v_idx++) {
			err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
						   0, 0, 1, rxr_idx);
			if (err)
				goto err_out;

			/* update counts and index */
			rxr_remaining--;
			rxr_idx++;
		}
	}

	for (; v_idx < q_vectors; v_idx++) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
		err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
					   tqpv, txr_idx,
					   rqpv, rxr_idx);
		if (err)
			goto err_out;

		/* update counts and index */
		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
		rxr_idx++;
		txr_idx++;
	}

	return 0;

err_out:
	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		ixgbe_free_q_vector(adapter, v_idx);

	return -ENOMEM;
}
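/*
 * Distribution example (illustrative): DIV_ROUND_UP spreads any surplus
 * evenly; e.g. 10 Rx queues over 4 vectors yields rqpv = 3, 3, 2, 2 as
 * rxr_remaining shrinks (ceil(10/4), ceil(7/3), ceil(4/2), ceil(2/1)).
 */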
/**
 * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
{
	int v_idx = adapter->num_q_vectors;

	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		ixgbe_free_q_vector(adapter, v_idx);
}
static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
{
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
		pci_disable_msi(adapter->pdev);
	}
}
/**
 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int err = 0;
	int vector, v_budget;

	/*
	 * It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPU's.  So let's be conservative and only ask for
	 * (roughly) the same number of vectors as there are CPU's.
	 * The default is to use pairs of vectors.
	 */
	v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
	v_budget = min_t(int, v_budget, num_online_cpus());
	v_budget += NON_Q_VECTORS;

	/*
	 * At the same time, hardware can only support a maximum of
	 * hw.mac->max_msix_vectors vectors.  With features
	 * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
	 * descriptor queues supported by our device.  Thus, we cap it off in
	 * those rare cases where the cpu count also exceeds our vector limit.
	 */
	v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors);

	/* A failure in MSI-X entry allocation isn't fatal, but it does
	 * mean we disable MSI-X capabilities of the adapter. */
	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (adapter->msix_entries) {
		for (vector = 0; vector < v_budget; vector++)
			adapter->msix_entries[vector].entry = vector;

		ixgbe_acquire_msix_vectors(adapter, v_budget);

		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
			goto out;
	}

	adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
	adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
		e_err(probe,
		      "ATR is not supported while multiple "
		      "queues are disabled.  Disabling Flow Director\n");
	}
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
	adapter->atr_sample_rate = 0;
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		ixgbe_disable_sriov(adapter);

	err = ixgbe_set_num_queues(adapter);
	if (err)
		return err;

	adapter->num_q_vectors = 1;

	err = pci_enable_msi(adapter->pdev);
	if (!err) {
		adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
	} else {
		netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
			     "Unable to allocate MSI interrupt, "
			     "falling back to legacy.  Error: %d\n", err);
		/* reset err */
		err = 0;
	}

out:
	return err;
}
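/*
 * Budget example (illustrative, assuming NON_Q_VECTORS == 1): with 16
 * Rx and 16 Tx queues on an 8-CPU host, v_budget = min(max(16, 16), 8)
 * + 1 = 9, further clamped to hw->mac.max_msix_vectors.
 */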
/**
 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
 * @adapter: board private structure to initialize
 *
 * We determine which interrupt scheme to use based on...
 * - Kernel support (MSI, MSI-X)
 *   - which can be user-defined (via MODULE_PARAM)
 * - Hardware queue count (num_*_queues)
 *   - defined by miscellaneous hardware support/features (RSS, etc.)
 **/
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	err = ixgbe_set_num_queues(adapter);
	if (err)
		return err;

	err = ixgbe_set_interrupt_capability(adapter);
	if (err) {
		e_dev_err("Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	err = ixgbe_alloc_q_vectors(adapter);
	if (err) {
		e_dev_err("Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	ixgbe_cache_ring_register(adapter);

	e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
		   (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
		   adapter->num_rx_queues, adapter->num_tx_queues);

	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;

err_alloc_q_vectors:
	ixgbe_reset_interrupt_capability(adapter);
err_set_interrupt:
	return err;
}
/**
 * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;

	ixgbe_free_q_vectors(adapter);
	ixgbe_reset_interrupt_capability(adapter);
}
void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
		       u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IXGBE_TX_CTXTDESC(tx_ring, i);

	/* advance next_to_use, wrapping back to 0 at the end of the ring */
	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);
	context_desc->seqnum_seed	= cpu_to_le32(fcoe_sof_eof);
	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx);
}
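/*
 * Usage note (illustrative): callers such as the TSO and Tx checksum
 * offload paths build vlan_macip_lens/type_tucmd/mss_l4len_idx and emit
 * one context descriptor ahead of the data descriptors it applies to;
 * fcoe_sof_eof fills the seqnum_seed field and is 0 for non-FCoE
 * offloads.
 */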