ixgbe: Define FCoE and Flow director limits much sooner to allow for changes
[linux-2.6/cjktty.git] drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
blob 9fafa38009f90a717653c0d3d7dce8c7795efbc3
/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2013 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include "ixgbe.h"
#include "ixgbe_sriov.h"

#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_cache_ring_dcb_sriov - Descriptor ring to register mapping for SR-IOV
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for SR-IOV to the assigned rings.  It
 * will also try to cache the proper offsets if RSS/FCoE are enabled along
 * with VMDq.
 *
 **/
static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	int i;
	u16 reg_idx;
	u8 tcs = netdev_get_num_tc(adapter->netdev);

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;
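
	/*
	 * Worked example (illustrative; assumes IXGBE_82599_VMDQ_4Q_MASK ==
	 * 0x7C as defined in ixgbe.h, i.e. four queues per pool, and
	 * __ALIGN_MASK(x, mask) == ((x + mask) & ~mask) from kernel.h):
	 *   __ALIGN_MASK(1, ~vmdq->mask)       == 4, the queues per pool
	 *   reg_idx & ~vmdq->mask              == queue offset within a pool
	 *   __ALIGN_MASK(reg_idx, ~vmdq->mask) == reg_idx rounded up to the
	 *                                         next pool boundary
	 * So with tcs == 2, queues 2 and 3 of every 4-queue pool are skipped
	 * by the round-up in the loops below.
	 */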

	/* start at VMDq register offset for SR-IOV enabled setups */
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->rx_ring[i]->reg_idx = reg_idx;
	}

	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* nothing to do if FCoE is disabled */
	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		return true;

	/* The work is already done if the FCoE ring is shared */
	if (fcoe->offset < tcs)
		return true;

	/* The FCoE rings exist separately, we need to move their reg_idx */
	if (fcoe->indices) {
		u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
		u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter);

		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_rx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->rx_ring[i]->reg_idx = reg_idx;
			reg_idx++;
		}

		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_tx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->tx_ring[i]->reg_idx = reg_idx;
			reg_idx++;
		}
	}

#endif /* IXGBE_FCOE */
	return true;
}

/**
 * ixgbe_get_first_reg_idx - Return first register index associated with ring
 **/
static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
				    unsigned int *tx, unsigned int *rx)
{
	struct net_device *dev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u8 num_tcs = netdev_get_num_tc(dev);

	*tx = 0;
	*rx = 0;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/* TxQs/TC: 4	RxQs/TC: 8 */
		*tx = tc << 2; /* 0, 4, 8, 12, 16, 20, 24, 28 */
		*rx = tc << 3; /* 0, 8, 16, 24, 32, 40, 48, 56 */
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		if (num_tcs > 4) {
			/*
			 * TCs    : TC0/1 TC2/3 TC4-7
			 * TxQs/TC:    32    16     8
			 * RxQs/TC:    16    16    16
			 */
			*rx = tc << 4;
			if (tc < 3)
				*tx = tc << 5;		/*   0,  32,  64 */
			else if (tc < 5)
				*tx = (tc + 2) << 4;	/*  80,  96 */
			else
				*tx = (tc + 8) << 3;	/* 104, 112, 120 */
		} else {
			/*
			 * TCs    : TC0 TC1 TC2/3
			 * TxQs/TC:  64  32  16
			 * RxQs/TC:  32  32  32
			 */
			*rx = tc << 5;
			if (tc < 2)
				*tx = tc << 6;		/*  0,  64 */
			else
				*tx = (tc + 4) << 4;	/* 96, 112 */
		}
	default:
		break;
	}
}

/**
 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for DCB to the assigned rings.
 *
 **/
static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	unsigned int tx_idx, rx_idx;
	int tc, offset, rss_i, i;
	u8 num_tcs = netdev_get_num_tc(dev);

	/* verify we have DCB queueing enabled before proceeding */
	if (num_tcs <= 1)
		return false;

	rss_i = adapter->ring_feature[RING_F_RSS].indices;
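
	/*
	 * Example (illustrative): on 82599 with num_tcs == 4 and rss_i == 16,
	 * ixgbe_get_first_reg_idx() above yields Rx bases 0/32/64/96 and Tx
	 * bases 0/64/96/112, so TC2's sixteen rings land on Rx registers
	 * 64-79 and Tx registers 96-111.
	 */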

	for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) {
		ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx);
		for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) {
			adapter->tx_ring[offset + i]->reg_idx = tx_idx;
			adapter->rx_ring[offset + i]->reg_idx = rx_idx;
			adapter->tx_ring[offset + i]->dcb_tc = tc;
			adapter->rx_ring[offset + i]->dcb_tc = tc;
		}
	}

	return true;
}

#endif
/**
 * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
 * @adapter: board private structure to initialize
 *
 * SR-IOV doesn't use any descriptor rings but changes the default if
 * no other mapping is used.
 *
 **/
static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS];
	int i;
	u16 reg_idx;

	/* only proceed if VMDq is enabled */
	if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED))
		return false;

	/* start at VMDq register offset for SR-IOV enabled setups */
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;
#endif
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= rss->indices)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->rx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues so just assigning 1:1 */
	for (; i < adapter->num_rx_queues; i++, reg_idx++)
		adapter->rx_ring[i]->reg_idx = reg_idx;

#endif
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;
#endif
		/* If we are greater than indices move to next pool */
		if ((reg_idx & rss->mask) >= rss->indices)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues so just assigning 1:1 */
	for (; i < adapter->num_tx_queues; i++, reg_idx++)
		adapter->tx_ring[i]->reg_idx = reg_idx;

#endif

	return true;
}

/**
 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for RSS to the assigned rings.
 *
 **/
static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		adapter->rx_ring[i]->reg_idx = i;
	for (i = 0; i < adapter->num_tx_queues; i++)
		adapter->tx_ring[i]->reg_idx = i;

	return true;
}

/**
 * ixgbe_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 *
 * Note, the order the various feature calls is important.  It must start with
 * the "most" features enabled at the same time, then trickle down to the
 * least amount of features turned on at once.
 **/
static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
{
	/* start with default case */
	adapter->rx_ring[0]->reg_idx = 0;
	adapter->tx_ring[0]->reg_idx = 0;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_cache_ring_dcb_sriov(adapter))
		return;

	if (ixgbe_cache_ring_dcb(adapter))
		return;

#endif
	if (ixgbe_cache_ring_sriov(adapter))
		return;

	ixgbe_cache_ring_rss(adapter);
}

#define IXGBE_RSS_16Q_MASK	0xF
#define IXGBE_RSS_8Q_MASK	0x7
#define IXGBE_RSS_4Q_MASK	0x3
#define IXGBE_RSS_2Q_MASK	0x1
#define IXGBE_RSS_DISABLED_MASK	0x0
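
/*
 * Note on the masks (illustrative; values assumed from ixgbe.h): the RSS
 * masks above cover the low bits of a register index, i.e. the queue within
 * the feature, while the VMDq pool masks used alongside them (e.g.
 * IXGBE_82599_VMDQ_4Q_MASK, 0x7C) keep the pool-number bits instead.  That
 * is why the ring-caching code above extracts a per-pool queue offset with
 * reg_idx & ~vmdq->mask but tests reg_idx & rss->mask directly.
 */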

#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_set_dcb_sriov_queues: Allocate queues for SR-IOV devices w/ DCB
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
 * and VM pools where appropriate.  Also assign queues based on DCB
 * priorities and map accordingly.
 *
 **/
static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
{
	int i;
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif
	u8 tcs = netdev_get_num_tc(adapter->netdev);

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

	/* 16 pools w/ 8 TC per pool */
	if (tcs > 4) {
		vmdq_i = min_t(u16, vmdq_i, 16);
		vmdq_m = IXGBE_82599_VMDQ_8Q_MASK;

	/* 32 pools w/ 4 TC per pool */
	} else {
		vmdq_i = min_t(u16, vmdq_i, 32);
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
	}

#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
	fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i;
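
	/*
	 * Example (illustrative): with more than 4 TCs, vmdq_m is the
	 * 8-queues-per-pool mask, __ALIGN_MASK(1, ~vmdq_m) == 8, and the
	 * 128 hardware queues form 16 pools.  If vmdq_i == 4 pools are in
	 * use, fcoe_i == 12: each leftover pool can host one FCoE ring at
	 * the FCoE TC offset (see ixgbe_cache_ring_dcb_sriov() above),
	 * subject to the fcoe->limit cap applied below.
	 */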

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/*
	 * We do not support DCB, VMDq, and RSS all simultaneously
	 * so we will disable RSS since it is the lowest priority
	 */
	adapter->ring_feature[RING_F_RSS].indices = 1;
	adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = tcs;

	adapter->num_tx_queues = vmdq_i * tcs;
	adapter->num_rx_queues = vmdq_i * tcs;

#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * tcs;

			/* add queues to adapter */
			adapter->num_tx_queues += fcoe_i;
			adapter->num_rx_queues += fcoe_i;
		} else if (tcs > 1) {
			/* use queue belonging to FCoE TC */
			fcoe->indices = 1;
			fcoe->offset = ixgbe_fcoe_get_tc(adapter);
		} else {
			adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;

			fcoe->indices = 0;
			fcoe->offset = 0;
		}
	}

#endif /* IXGBE_FCOE */
	/* configure TC to queue mapping */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(adapter->netdev, i, 1, i);

	return true;
}

static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	struct ixgbe_ring_feature *f;
	int rss_i, rss_m, i;
	int tcs;

	/* Map queue offset and counts onto allocated tx queues */
	tcs = netdev_get_num_tc(dev);

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* determine the upper limit for our current DCB mode */
	rss_i = dev->num_tx_queues / tcs;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* 8 TC w/ 4 queues per TC */
		rss_i = min_t(u16, rss_i, 4);
		rss_m = IXGBE_RSS_4Q_MASK;
	} else if (tcs > 4) {
		/* 8 TC w/ 8 queues per TC */
		rss_i = min_t(u16, rss_i, 8);
		rss_m = IXGBE_RSS_8Q_MASK;
	} else {
		/* 4 TC w/ 16 queues per TC */
		rss_i = min_t(u16, rss_i, 16);
		rss_m = IXGBE_RSS_16Q_MASK;
	}

	/* set RSS mask and indices */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = min_t(int, rss_i, f->limit);
	f->indices = rss_i;
	f->mask = rss_m;

	/* disable ATR as it is not supported when multiple TCs are enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
	/* FCoE enabled queues require special configuration indexed
	 * by feature specific indices and offset. Here we map FCoE
	 * indices onto the DCB queue pairs allowing FCoE to own
	 * configuration later.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		u8 tc = ixgbe_fcoe_get_tc(adapter);

		f = &adapter->ring_feature[RING_F_FCOE];
		f->indices = min_t(u16, rss_i, f->limit);
		f->offset = rss_i * tc;
	}

#endif /* IXGBE_FCOE */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(dev, i, rss_i, rss_i * i);

	adapter->num_tx_queues = rss_i * tcs;
	adapter->num_rx_queues = rss_i * tcs;

	return true;
}

#endif
/**
 * ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
 * and VM pools where appropriate.  If RSS is available, then also try and
 * enable RSS and map accordingly.
 *
 **/
static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
{
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
	u16 rss_i = adapter->ring_feature[RING_F_RSS].limit;
	u16 rss_m = IXGBE_RSS_DISABLED_MASK;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif

	/* only proceed if SR-IOV is enabled */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

	/* double check we are limited to maximum pools */
	vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);

	/* 64 pool mode with 2 queues per pool */
	if ((vmdq_i > 32) || (rss_i < 4)) {
		vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
		rss_m = IXGBE_RSS_2Q_MASK;
		rss_i = min_t(u16, rss_i, 2);

	/* 32 pool mode with 4 queues per pool */
	} else {
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
		rss_m = IXGBE_RSS_4Q_MASK;
		rss_i = 4;
	}

#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
	fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m));
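
	/*
	 * Example (illustrative): in 64-pool mode vmdq_m is the
	 * 2-queues-per-pool mask, so with vmdq_i == 40 pools reserved,
	 * fcoe_i == 128 - 40 * 2 == 48 queues remain for FCoE before the
	 * fcoe->limit cap applied below.
	 */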

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/* limit RSS based on user input and save for later use */
	adapter->ring_feature[RING_F_RSS].indices = rss_i;
	adapter->ring_feature[RING_F_RSS].mask = rss_m;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = rss_i;

	adapter->num_rx_queues = vmdq_i * rss_i;
	adapter->num_tx_queues = vmdq_i * rss_i;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
	/*
	 * FCoE can use rings from adjacent buffers to allow RSS
	 * like behavior. To account for this we need to add the
	 * FCoE indices to the total ring count.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (vmdq_i > 1 && fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * rss_i;
		} else {
			/* merge FCoE queues with RSS queues */
			fcoe_i = min_t(u16, fcoe_i + rss_i, num_online_cpus());

			/* limit indices to rss_i if MSI-X is disabled */
			if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
				fcoe_i = rss_i;

			/* attempt to reserve some queues for just FCoE */
			fcoe->indices = min_t(u16, fcoe_i, fcoe->limit);
			fcoe->offset = fcoe_i - fcoe->indices;

			fcoe_i -= rss_i;
		}

		/* add queues to adapter */
		adapter->num_tx_queues += fcoe_i;
		adapter->num_rx_queues += fcoe_i;
	}

#endif
	return true;
}

/**
 * ixgbe_set_rss_queues - Allocate queues for RSS
 * @adapter: board private structure to initialize
 *
 * This is our "base" multiqueue mode.  RSS (Receive Side Scaling) will try
 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
 *
 **/
static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring_feature *f;
	u16 rss_i;

	/* set mask for 16 queue limit of RSS */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = f->limit;

	f->indices = rss_i;
	f->mask = IXGBE_RSS_16Q_MASK;

	/* disable ATR by default, it will be configured below */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

	/*
	 * Use Flow Director in addition to RSS to ensure the best
	 * distribution of flows across cores, even when an FDIR flow
	 * isn't matched.
	 */
	if (rss_i > 1 && adapter->atr_sample_rate) {
		f = &adapter->ring_feature[RING_F_FDIR];

		rss_i = f->indices = f->limit;

		if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
			adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
	}

#ifdef IXGBE_FCOE
	/*
	 * FCoE can exist on the same rings as standard network traffic
	 * however it is preferred to avoid that if possible.  In order
	 * to get the best performance we allocate as many FCoE queues
	 * as we can and we place them at the end of the ring array to
	 * avoid sharing queues with standard RSS on systems with 24 or
	 * more CPUs.
	 */
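	/*
	 * Example (illustrative; assumes MSI-X is enabled and
	 * dev->num_tx_queues >= 24): with rss_i == 16, f->limit == 8 and 24
	 * online CPUs, fcoe_i == min(8 + 16, 24) == 24 below, giving
	 * f->indices == 8 and f->offset == 16: rings 0-15 carry RSS traffic
	 * while rings 16-23 are reserved for FCoE, matching the layout
	 * described above.
	 */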
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct net_device *dev = adapter->netdev;
		u16 fcoe_i;

		f = &adapter->ring_feature[RING_F_FCOE];

		/* merge FCoE queues with RSS queues */
		fcoe_i = min_t(u16, f->limit + rss_i, num_online_cpus());
		fcoe_i = min_t(u16, fcoe_i, dev->num_tx_queues);

		/* limit indices to rss_i if MSI-X is disabled */
		if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
			fcoe_i = rss_i;

		/* attempt to reserve some queues for just FCoE */
		f->indices = min_t(u16, fcoe_i, f->limit);
		f->offset = fcoe_i - f->indices;
		rss_i = max_t(u16, fcoe_i, rss_i);
	}

#endif /* IXGBE_FCOE */
	adapter->num_rx_queues = rss_i;
	adapter->num_tx_queues = rss_i;

	return true;
}

/**
 * ixgbe_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
{
	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_rx_pools = adapter->num_rx_queues;
	adapter->num_rx_queues_per_pool = 1;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_set_dcb_sriov_queues(adapter))
		return;

	if (ixgbe_set_dcb_queues(adapter))
		return;

#endif
	if (ixgbe_set_sriov_queues(adapter))
		return;

	ixgbe_set_rss_queues(adapter);
}

static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
				       int vectors)
{
	int err, vector_threshold;

	/* We'll want at least 2 (vector_threshold):
	 * 1) TxQ[0] + RxQ[0] handler
	 * 2) Other (Link Status Change, etc.)
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/*
	 * The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	while (vectors >= vector_threshold) {
		err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
				      vectors);
		if (!err) /* Success in acquiring all requested vectors. */
			break;
		else if (err < 0)
			vectors = 0; /* Nasty failure, quit now */
		else /* err == number of vectors we should try again with */
			vectors = err;
	}

	if (vectors < vector_threshold) {
		/* Can't allocate enough MSI-X interrupts?  Oh well.
		 * This just means we'll go with either a single MSI
		 * vector or fall back to legacy interrupts.
		 */
		netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
			     "Unable to allocate MSI-X interrupts\n");
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else {
		adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
		/*
		 * Adjust for only the vectors we'll use, which is minimum
		 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
		 * vectors we were allocated.
		 */
		vectors -= NON_Q_VECTORS;
		adapter->num_q_vectors = min(vectors, adapter->max_q_vectors);
	}
}

static void ixgbe_add_ring(struct ixgbe_ring *ring,
			   struct ixgbe_ring_container *head)
{
	ring->next = head->ring;
	head->ring = ring;
	head->count++;
}

/**
 * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector
 * @adapter: board private structure to initialize
 * @v_count: q_vectors allocated on adapter, used for ring interleaving
 * @v_idx: index of vector in adapter struct
 * @txr_count: total number of Tx rings to allocate
 * @txr_idx: index of first Tx ring to allocate
 * @rxr_count: total number of Rx rings to allocate
 * @rxr_idx: index of first Rx ring to allocate
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/
static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
				int v_count, int v_idx,
				int txr_count, int txr_idx,
				int rxr_count, int rxr_idx)
{
	struct ixgbe_q_vector *q_vector;
	struct ixgbe_ring *ring;
	int node = -1;
	int cpu = -1;
	int ring_count, size;

	ring_count = txr_count + rxr_count;
	size = sizeof(struct ixgbe_q_vector) +
	       (sizeof(struct ixgbe_ring) * ring_count);
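
	/*
	 * Note (illustrative; assumes struct ixgbe_q_vector ends in a
	 * zero-length ring[] array, as declared in ixgbe.h): the q_vector
	 * and all of its rings come from this single allocation, with
	 * q_vector->ring[0..ring_count-1] appended to the struct so that a
	 * vector and its rings share the same NUMA node.
	 */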

	/* customize cpu for Flow Director mapping */
	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
		if (cpu_online(v_idx)) {
			cpu = v_idx;
			node = cpu_to_node(cpu);
		}
	}

	/* allocate q_vector and rings */
	q_vector = kzalloc_node(size, GFP_KERNEL, node);
	if (!q_vector)
		q_vector = kzalloc(size, GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	/* setup affinity mask and node */
	if (cpu != -1)
		cpumask_set_cpu(cpu, &q_vector->affinity_mask);
	q_vector->numa_node = node;

#ifdef CONFIG_IXGBE_DCA
	/* initialize CPU for DCA */
	q_vector->cpu = -1;

#endif
	/* initialize NAPI */
	netif_napi_add(adapter->netdev, &q_vector->napi,
		       ixgbe_poll, 64);

	/* tie q_vector and adapter together */
	adapter->q_vector[v_idx] = q_vector;
	q_vector->adapter = adapter;
	q_vector->v_idx = v_idx;

	/* initialize work limits */
	q_vector->tx.work_limit = adapter->tx_work_limit;

	/* initialize pointer to rings */
	ring = q_vector->ring;

	/* initialize ITR */
	if (txr_count && !rxr_count) {
		/* tx only vector */
		if (adapter->tx_itr_setting == 1)
			q_vector->itr = IXGBE_10K_ITR;
		else
			q_vector->itr = adapter->tx_itr_setting;
	} else {
		/* rx or rx/tx vector */
		if (adapter->rx_itr_setting == 1)
			q_vector->itr = IXGBE_20K_ITR;
		else
			q_vector->itr = adapter->rx_itr_setting;
	}

	while (txr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		ixgbe_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		ring->queue_index = txr_idx;

		/* assign ring to adapter */
		adapter->tx_ring[txr_idx] = ring;

		/* update count and index */
		txr_count--;
		txr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	while (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Rx values */
		ixgbe_add_ring(ring, &q_vector->rx);

		/*
		 * 82599 errata, UDP frames with a 0 checksum
		 * can be marked as checksum errors.
		 */
		if (adapter->hw.mac.type == ixgbe_mac_82599EB)
			set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);

#ifdef IXGBE_FCOE
		if (adapter->netdev->features & NETIF_F_FCOE_MTU) {
			struct ixgbe_ring_feature *f;
			f = &adapter->ring_feature[RING_F_FCOE];
			if ((rxr_idx >= f->offset) &&
			    (rxr_idx < f->offset + f->indices))
				set_bit(__IXGBE_RX_FCOE, &ring->state);
		}

#endif /* IXGBE_FCOE */
		/* apply Rx specific ring traits */
		ring->count = adapter->rx_ring_count;
		ring->queue_index = rxr_idx;

		/* assign ring to adapter */
		adapter->rx_ring[rxr_idx] = ring;

		/* update count and index */
		rxr_count--;
		rxr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	return 0;
}

/**
 * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
{
	struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
	struct ixgbe_ring *ring;

	ixgbe_for_each_ring(ring, q_vector->tx)
		adapter->tx_ring[ring->queue_index] = NULL;

	ixgbe_for_each_ring(ring, q_vector->rx)
		adapter->rx_ring[ring->queue_index] = NULL;

	adapter->q_vector[v_idx] = NULL;
	netif_napi_del(&q_vector->napi);

	/*
	 * ixgbe_get_stats64() might access the rings on this vector,
	 * we must wait a grace period before freeing it.
	 */
	kfree_rcu(q_vector, rcu);
}

/**
 * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
{
	int q_vectors = adapter->num_q_vectors;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int rxr_idx = 0, txr_idx = 0, v_idx = 0;
	int err;

	/* only one q_vector if MSI-X is disabled. */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		q_vectors = 1;
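
	/*
	 * Example (illustrative): with q_vectors == 8 and 16 Tx + 16 Rx
	 * rings, the second loop below gives every vector tqpv == rqpv == 2;
	 * since ring indices advance by v_count inside
	 * ixgbe_alloc_q_vector(), vector 0 owns Tx/Rx rings 0 and 8,
	 * vector 1 owns rings 1 and 9, and so on.
	 */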

	if (q_vectors >= (rxr_remaining + txr_remaining)) {
		for (; rxr_remaining; v_idx++) {
			err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
						   0, 0, 1, rxr_idx);

			if (err)
				goto err_out;

			/* update counts and index */
			rxr_remaining--;
			rxr_idx++;
		}
	}

	for (; v_idx < q_vectors; v_idx++) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
		err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
					   tqpv, txr_idx,
					   rqpv, rxr_idx);

		if (err)
			goto err_out;

		/* update counts and index */
		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
		rxr_idx++;
		txr_idx++;
	}

	return 0;

err_out:
	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		ixgbe_free_q_vector(adapter, v_idx);

	return -ENOMEM;
}

/**
 * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
{
	int v_idx = adapter->num_q_vectors;

	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		ixgbe_free_q_vector(adapter, v_idx);
}

static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
{
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int vector, v_budget, err;

	/*
	 * It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPU's.  So let's be conservative and only ask for
	 * (roughly) the same number of vectors as there are CPU's.
	 * The default is to use pairs of vectors.
	 */
	v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
	v_budget = min_t(int, v_budget, num_online_cpus());
	v_budget += NON_Q_VECTORS;

	/*
	 * At the same time, hardware can only support a maximum of
	 * hw.mac->max_msix_vectors vectors.  With features
	 * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
	 * descriptor queues supported by our device.  Thus, we cap it off in
	 * those rare cases where the cpu count also exceeds our vector limit.
	 */
	v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors);
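
	/*
	 * Example (illustrative; assumes NON_Q_VECTORS == 1 as in ixgbe.h
	 * and an 82599 with max_msix_vectors == 64): 16 Rx and 16 Tx queues
	 * on an 8-CPU system give v_budget = min(max(16, 16), 8) + 1 == 9
	 * vectors requested, well under the hardware cap applied above.
	 */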

	/* A failure in MSI-X entry allocation isn't fatal, but it does
	 * mean we disable MSI-X capabilities of the adapter. */
	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (adapter->msix_entries) {
		for (vector = 0; vector < v_budget; vector++)
			adapter->msix_entries[vector].entry = vector;

		ixgbe_acquire_msix_vectors(adapter, v_budget);

		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
			return;
	}

	/* disable DCB if number of TCs exceeds 1 */
	if (netdev_get_num_tc(adapter->netdev) > 1) {
		e_err(probe, "num TCs exceeds number of queues - disabling DCB\n");
		netdev_reset_tc(adapter->netdev);

		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
			adapter->hw.fc.requested_mode = adapter->last_lfc_mode;

		adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
		adapter->temp_dcb_cfg.pfc_mode_enable = false;
		adapter->dcb_cfg.pfc_mode_enable = false;
	}
	adapter->dcb_cfg.num_tcs.pg_tcs = 1;
	adapter->dcb_cfg.num_tcs.pfc_tcs = 1;

	/* disable SR-IOV */
	ixgbe_disable_sriov(adapter);

	/* disable RSS */
	adapter->ring_feature[RING_F_RSS].limit = 1;

	ixgbe_set_num_queues(adapter);
	adapter->num_q_vectors = 1;

	err = pci_enable_msi(adapter->pdev);
	if (err) {
		netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
			     "Unable to allocate MSI interrupt, "
			     "falling back to legacy.  Error: %d\n", err);
		return;
	}
	adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
}

/**
 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
 * @adapter: board private structure to initialize
 *
 * We determine which interrupt scheme to use based on...
 * - Kernel support (MSI, MSI-X)
 *   - which can be user-defined (via MODULE_PARAM)
 * - Hardware queue count (num_*_queues)
 *   - defined by miscellaneous hardware support/features (RSS, etc.)
 **/
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	ixgbe_set_num_queues(adapter);

	/* Set interrupt mode */
	ixgbe_set_interrupt_capability(adapter);

	err = ixgbe_alloc_q_vectors(adapter);
	if (err) {
		e_dev_err("Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	ixgbe_cache_ring_register(adapter);

	e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
		   (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
		   adapter->num_rx_queues, adapter->num_tx_queues);

	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;

err_alloc_q_vectors:
	ixgbe_reset_interrupt_capability(adapter);
	return err;
}

/**
 * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;

	ixgbe_free_q_vectors(adapter);
	ixgbe_reset_interrupt_capability(adapter);
}

void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
		       u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IXGBE_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);
	context_desc->seqnum_seed	= cpu_to_le32(fcoe_sof_eof);
	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx);
}