cxgb3 - fix interaction with pktgen
[linux-2.6.git] / drivers/net/cxgb3/sge.c
blob 7b13d8a31e38c1bcc5af6fc0dadb9dd9e3632f76
1 /*
2 * Copyright (c) 2005-2007 Chelsio, Inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
32 #include <linux/skbuff.h>
33 #include <linux/netdevice.h>
34 #include <linux/etherdevice.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/dma-mapping.h>
39 #include "common.h"
40 #include "regs.h"
41 #include "sge_defs.h"
42 #include "t3_cpl.h"
43 #include "firmware_exports.h"
45 #define USE_GTS 0
47 #define SGE_RX_SM_BUF_SIZE 1536
49 #define SGE_RX_COPY_THRES 256
50 #define SGE_RX_PULL_LEN 128
53 * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks.
54 * It must be a divisor of PAGE_SIZE. If set to 0 FL0 will use sk_buffs
55 * directly.
57 #define FL0_PG_CHUNK_SIZE 2048
59 #define SGE_RX_DROP_THRES 16
62 * Period of the Tx buffer reclaim timer. This timer does not need to run
63 * frequently as Tx buffers are usually reclaimed by new Tx packets.
65 #define TX_RECLAIM_PERIOD (HZ / 4)
67 /* WR size in bytes */
68 #define WR_LEN (WR_FLITS * 8)
71 * Types of Tx queues in each queue set. Order here matters, do not change.
73 enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };
75 /* Values for sge_txq.flags */
76 enum {
77 TXQ_RUNNING = 1 << 0, /* fetch engine is running */
78 TXQ_LAST_PKT_DB = 1 << 1, /* last packet rang the doorbell */
81 struct tx_desc {
82 __be64 flit[TX_DESC_FLITS];
85 struct rx_desc {
86 __be32 addr_lo;
87 __be32 len_gen;
88 __be32 gen2;
89 __be32 addr_hi;
92 struct tx_sw_desc { /* SW state per Tx descriptor */
93 struct sk_buff *skb;
94 u8 eop; /* set if last descriptor for packet */
95 u8 addr_idx; /* buffer index of first SGL entry in descriptor */
96 u8 fragidx; /* first page fragment associated with descriptor */
97 s8 sflit; /* start flit of first SGL entry in descriptor */
100 struct rx_sw_desc { /* SW state per Rx descriptor */
101 union {
102 struct sk_buff *skb;
103 struct fl_pg_chunk pg_chunk;
105 DECLARE_PCI_UNMAP_ADDR(dma_addr);
108 struct rsp_desc { /* response queue descriptor */
109 struct rss_header rss_hdr;
110 __be32 flags;
111 __be32 len_cq;
112 u8 imm_data[47];
113 u8 intr_gen;
117 * Holds unmapping information for Tx packets that need deferred unmapping.
118 * This structure lives at skb->head and must be allocated by callers.
120 struct deferred_unmap_info {
121 struct pci_dev *pdev;
122 dma_addr_t addr[MAX_SKB_FRAGS + 1];
126 * Maps a number of flits to the number of Tx descriptors that can hold them.
127 * The formula is
129 * desc = 1 + (flits - 2) / (WR_FLITS - 1).
131 * HW allows up to 4 descriptors to be combined into a WR.
133 static u8 flit_desc_map[] = {
135 #if SGE_NUM_GENBITS == 1
136 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
137 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
138 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
139 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
140 #elif SGE_NUM_GENBITS == 2
141 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
142 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
143 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
144 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
145 #else
146 # error "SGE_NUM_GENBITS must be 1 or 2"
147 #endif
150 static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
152 return container_of(q, struct sge_qset, fl[qidx]);
155 static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
157 return container_of(q, struct sge_qset, rspq);
160 static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
162 return container_of(q, struct sge_qset, txq[qidx]);
166 * refill_rspq - replenish an SGE response queue
167 * @adapter: the adapter
168 * @q: the response queue to replenish
169 * @credits: how many new responses to make available
171 * Replenishes a response queue by making the supplied number of responses
172 * available to HW.
174 static inline void refill_rspq(struct adapter *adapter,
175 const struct sge_rspq *q, unsigned int credits)
177 t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
178 V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
182 * need_skb_unmap - does the platform need unmapping of sk_buffs?
184 * Returns true if the platform needs sk_buff unmapping. The compiler
185 * optimizes away the unmapping code when this returns false.
187 static inline int need_skb_unmap(void)
190 * This structure is used to tell if the platform needs buffer
191 * unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything.
193 struct dummy {
194 DECLARE_PCI_UNMAP_ADDR(addr);
197 return sizeof(struct dummy) != 0;
201 * unmap_skb - unmap a packet main body and its page fragments
202 * @skb: the packet
203 * @q: the Tx queue containing Tx descriptors for the packet
204 * @cidx: index of Tx descriptor
205 * @pdev: the PCI device
207 * Unmap the main body of an sk_buff and its page fragments, if any.
208 * Because of the fairly complicated structure of our SGLs and the desire
209 * to conserve space for metadata, the information necessary to unmap an
210 * sk_buff is spread across the sk_buff itself (buffer lengths), the HW Tx
211 * descriptors (the physical addresses of the various data buffers), and
212 * the SW descriptor state (assorted indices). The send functions
213 * initialize the indices for the first packet descriptor so we can unmap
214 * the buffers held in the first Tx descriptor here, and we have enough
215 * information at this point to set the state for the next Tx descriptor.
217 * Note that it is possible to clean up the first descriptor of a packet
218 * before the send routines have written the next descriptors, but this
219 * race does not cause any problem. We just end up writing the unmapping
220 * info for the descriptor first.
222 static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
223 unsigned int cidx, struct pci_dev *pdev)
225 const struct sg_ent *sgp;
226 struct tx_sw_desc *d = &q->sdesc[cidx];
227 int nfrags, frag_idx, curflit, j = d->addr_idx;
229 sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit];
230 frag_idx = d->fragidx;
232 if (frag_idx == 0 && skb_headlen(skb)) {
233 pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]),
234 skb_headlen(skb), PCI_DMA_TODEVICE);
235 j = 1;
238 curflit = d->sflit + 1 + j;
239 nfrags = skb_shinfo(skb)->nr_frags;
241 while (frag_idx < nfrags && curflit < WR_FLITS) {
242 pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
243 skb_shinfo(skb)->frags[frag_idx].size,
244 PCI_DMA_TODEVICE);
245 j ^= 1;
246 if (j == 0) {
247 sgp++;
248 curflit++;
250 curflit++;
251 frag_idx++;
254 if (frag_idx < nfrags) { /* SGL continues into next Tx descriptor */
255 d = cidx + 1 == q->size ? q->sdesc : d + 1;
256 d->fragidx = frag_idx;
257 d->addr_idx = j;
258 d->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */
263 * free_tx_desc - reclaims Tx descriptors and their buffers
264 * @adapter: the adapter
265 * @q: the Tx queue to reclaim descriptors from
266 * @n: the number of descriptors to reclaim
268 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
269 * Tx buffers. Called with the Tx queue lock held.
271 static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
272 unsigned int n)
274 struct tx_sw_desc *d;
275 struct pci_dev *pdev = adapter->pdev;
276 unsigned int cidx = q->cidx;
278 const int need_unmap = need_skb_unmap() &&
279 q->cntxt_id >= FW_TUNNEL_SGEEC_START;
281 d = &q->sdesc[cidx];
282 while (n--) {
283 if (d->skb) { /* an SGL is present */
284 if (need_unmap)
285 unmap_skb(d->skb, q, cidx, pdev);
286 if (d->eop)
287 kfree_skb(d->skb);
289 ++d;
290 if (++cidx == q->size) {
291 cidx = 0;
292 d = q->sdesc;
295 q->cidx = cidx;
299 * reclaim_completed_tx - reclaims completed Tx descriptors
300 * @adapter: the adapter
301 * @q: the Tx queue to reclaim completed descriptors from
303 * Reclaims Tx descriptors that the SGE has indicated it has processed,
304 * and frees the associated buffers if possible. Called with the Tx
305 * queue's lock held.
307 static inline void reclaim_completed_tx(struct adapter *adapter,
308 struct sge_txq *q)
310 unsigned int reclaim = q->processed - q->cleaned;
312 if (reclaim) {
313 free_tx_desc(adapter, q, reclaim);
314 q->cleaned += reclaim;
315 q->in_use -= reclaim;
320 * should_restart_tx - are there enough resources to restart a Tx queue?
321 * @q: the Tx queue
323 * Checks if there are enough descriptors to restart a suspended Tx queue.
325 static inline int should_restart_tx(const struct sge_txq *q)
327 unsigned int r = q->processed - q->cleaned;
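/* Restart only if, counting already-completed descriptors as free,
 * less than half of the queue would still be in use. */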
329 return q->in_use - r < (q->size >> 1);
333 * free_rx_bufs - free the Rx buffers on an SGE free list
334 * @pdev: the PCI device associated with the adapter
335 * @rxq: the SGE free list to clean up
337 * Release the buffers on an SGE free-buffer Rx queue. HW fetching from
338 * this queue should be stopped before calling this function.
340 static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
342 unsigned int cidx = q->cidx;
344 while (q->credits--) {
345 struct rx_sw_desc *d = &q->sdesc[cidx];
347 pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
348 q->buf_size, PCI_DMA_FROMDEVICE);
349 if (q->use_pages) {
350 put_page(d->pg_chunk.page);
351 d->pg_chunk.page = NULL;
352 } else {
353 kfree_skb(d->skb);
354 d->skb = NULL;
356 if (++cidx == q->size)
357 cidx = 0;
360 if (q->pg_chunk.page) {
361 __free_page(q->pg_chunk.page);
362 q->pg_chunk.page = NULL;
367 * add_one_rx_buf - add a packet buffer to a free-buffer list
368 * @va: buffer start VA
369 * @len: the buffer length
370 * @d: the HW Rx descriptor to write
371 * @sd: the SW Rx descriptor to write
372 * @gen: the generation bit value
373 * @pdev: the PCI device associated with the adapter
375 * Add a buffer of the given length to the supplied HW and SW Rx
376 * descriptors.
378 static inline void add_one_rx_buf(void *va, unsigned int len,
379 struct rx_desc *d, struct rx_sw_desc *sd,
380 unsigned int gen, struct pci_dev *pdev)
382 dma_addr_t mapping;
384 mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
385 pci_unmap_addr_set(sd, dma_addr, mapping);
387 d->addr_lo = cpu_to_be32(mapping);
388 d->addr_hi = cpu_to_be32((u64) mapping >> 32);
389 wmb();
390 d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
391 d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
394 static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp)
396 if (!q->pg_chunk.page) {
397 q->pg_chunk.page = alloc_page(gfp);
398 if (unlikely(!q->pg_chunk.page))
399 return -ENOMEM;
400 q->pg_chunk.va = page_address(q->pg_chunk.page);
401 q->pg_chunk.offset = 0;
403 sd->pg_chunk = q->pg_chunk;
405 q->pg_chunk.offset += q->buf_size;
406 if (q->pg_chunk.offset == PAGE_SIZE)
407 q->pg_chunk.page = NULL;
408 else {
409 q->pg_chunk.va += q->buf_size;
410 get_page(q->pg_chunk.page);
412 return 0;
416 * refill_fl - refill an SGE free-buffer list
417 * @adapter: the adapter
418 * @q: the free-list to refill
419 * @n: the number of new buffers to allocate
420 * @gfp: the gfp flags for allocating new buffers
422 * (Re)populate an SGE free-buffer list with up to @n new packet buffers,
423 * allocated with the supplied gfp flags. The caller must ensure that
424 * @n does not exceed the queue's capacity.
426 static void refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
428 void *buf_start;
429 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
430 struct rx_desc *d = &q->desc[q->pidx];
432 while (n--) {
433 if (q->use_pages) {
434 if (unlikely(alloc_pg_chunk(q, sd, gfp))) {
435 nomem: q->alloc_failed++;
436 break;
438 buf_start = sd->pg_chunk.va;
439 } else {
440 struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
442 if (!skb)
443 goto nomem;
445 sd->skb = skb;
446 buf_start = skb->data;
449 add_one_rx_buf(buf_start, q->buf_size, d, sd, q->gen,
450 adap->pdev);
451 d++;
452 sd++;
453 if (++q->pidx == q->size) {
454 q->pidx = 0;
455 q->gen ^= 1;
456 sd = q->sdesc;
457 d = q->desc;
459 q->credits++;
462 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
465 static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
467 refill_fl(adap, fl, min(16U, fl->size - fl->credits), GFP_ATOMIC);
471 * recycle_rx_buf - recycle a receive buffer
472 * @adapter: the adapter
473 * @q: the SGE free list
474 * @idx: index of buffer to recycle
476 * Recycles the specified buffer on the given free list by adding it at
477 * the next available slot on the list.
479 static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
480 unsigned int idx)
482 struct rx_desc *from = &q->desc[idx];
483 struct rx_desc *to = &q->desc[q->pidx];
485 q->sdesc[q->pidx] = q->sdesc[idx];
486 to->addr_lo = from->addr_lo; /* already big endian */
487 to->addr_hi = from->addr_hi; /* likewise */
488 wmb();
489 to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
490 to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));
491 q->credits++;
493 if (++q->pidx == q->size) {
494 q->pidx = 0;
495 q->gen ^= 1;
497 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
501 * alloc_ring - allocate resources for an SGE descriptor ring
502 * @pdev: the PCI device
503 * @nelem: the number of descriptors
504 * @elem_size: the size of each descriptor
505 * @sw_size: the size of the SW state associated with each ring element
506 * @phys: the physical address of the allocated ring
507 * @metadata: address of the array holding the SW state for the ring
509 * Allocates resources for an SGE descriptor ring, such as Tx queues,
510 * free buffer lists, or response queues. Each SGE ring requires
511 * space for its HW descriptors plus, optionally, space for the SW state
512 * associated with each HW entry (the metadata). The function returns
513 * three values: the virtual address for the HW ring (the return value
514 * of the function), the physical address of the HW ring, and the address
515 * of the SW ring.
517 static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
518 size_t sw_size, dma_addr_t * phys, void *metadata)
520 size_t len = nelem * elem_size;
521 void *s = NULL;
522 void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);
524 if (!p)
525 return NULL;
526 if (sw_size) {
527 s = kcalloc(nelem, sw_size, GFP_KERNEL);
529 if (!s) {
530 dma_free_coherent(&pdev->dev, len, p, *phys);
531 return NULL;
534 if (metadata)
535 *(void **)metadata = s;
536 memset(p, 0, len);
537 return p;
541 * free_qset - free the resources of an SGE queue set
542 * @adapter: the adapter owning the queue set
543 * @q: the queue set
545 * Release the HW and SW resources associated with an SGE queue set, such
546 * as HW contexts, packet buffers, and descriptor rings. Traffic to the
547 * queue set must be quiesced prior to calling this.
549 static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
551 int i;
552 struct pci_dev *pdev = adapter->pdev;
554 if (q->tx_reclaim_timer.function)
555 del_timer_sync(&q->tx_reclaim_timer);
557 for (i = 0; i < SGE_RXQ_PER_SET; ++i)
558 if (q->fl[i].desc) {
559 spin_lock(&adapter->sge.reg_lock);
560 t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
561 spin_unlock(&adapter->sge.reg_lock);
562 free_rx_bufs(pdev, &q->fl[i]);
563 kfree(q->fl[i].sdesc);
564 dma_free_coherent(&pdev->dev,
565 q->fl[i].size *
566 sizeof(struct rx_desc), q->fl[i].desc,
567 q->fl[i].phys_addr);
570 for (i = 0; i < SGE_TXQ_PER_SET; ++i)
571 if (q->txq[i].desc) {
572 spin_lock(&adapter->sge.reg_lock);
573 t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
574 spin_unlock(&adapter->sge.reg_lock);
575 if (q->txq[i].sdesc) {
576 free_tx_desc(adapter, &q->txq[i],
577 q->txq[i].in_use);
578 kfree(q->txq[i].sdesc);
580 dma_free_coherent(&pdev->dev,
581 q->txq[i].size *
582 sizeof(struct tx_desc),
583 q->txq[i].desc, q->txq[i].phys_addr);
584 __skb_queue_purge(&q->txq[i].sendq);
587 if (q->rspq.desc) {
588 spin_lock(&adapter->sge.reg_lock);
589 t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
590 spin_unlock(&adapter->sge.reg_lock);
591 dma_free_coherent(&pdev->dev,
592 q->rspq.size * sizeof(struct rsp_desc),
593 q->rspq.desc, q->rspq.phys_addr);
596 memset(q, 0, sizeof(*q));
600 * init_qset_cntxt - initialize an SGE queue set context info
601 * @qs: the queue set
602 * @id: the queue set id
604 * Initializes the TIDs and context ids for the queues of a queue set.
606 static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
608 qs->rspq.cntxt_id = id;
609 qs->fl[0].cntxt_id = 2 * id;
610 qs->fl[1].cntxt_id = 2 * id + 1;
611 qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
612 qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
613 qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
614 qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
615 qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
619 * sgl_len - calculates the size of an SGL of the given capacity
620 * @n: the number of SGL entries
622 * Calculates the number of flits needed for a scatter/gather list that
623 * can hold the given number of entries.
625 static inline unsigned int sgl_len(unsigned int n)
627 /* alternatively: 3 * (n / 2) + 2 * (n & 1) */
628 return (3 * n) / 2 + (n & 1);
632 * flits_to_desc - returns the num of Tx descriptors for the given flits
633 * @n: the number of flits
635 * Calculates the number of Tx descriptors needed for the supplied number
636 * of flits.
638 static inline unsigned int flits_to_desc(unsigned int n)
640 BUG_ON(n >= ARRAY_SIZE(flit_desc_map));
641 return flit_desc_map[n];
645 * get_packet - return the next ingress packet buffer from a free list
646 * @adap: the adapter that received the packet
647 * @fl: the SGE free list holding the packet
648 * @len: the packet length including any SGE padding
649 * @drop_thres: # of remaining buffers before we start dropping packets
651 * Get the next packet from a free list and complete setup of the
652 * sk_buff. If the packet is small we make a copy and recycle the
653 * original buffer, otherwise we use the original buffer itself. If a
654 * positive drop threshold is supplied packets are dropped and their
655 * buffers recycled if (a) the number of remaining buffers is under the
656 * threshold and the packet is too big to copy, or (b) the packet should
657 * be copied but there is no memory for the copy.
659 static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
660 unsigned int len, unsigned int drop_thres)
662 struct sk_buff *skb = NULL;
663 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
665 prefetch(sd->skb->data);
666 fl->credits--;
668 if (len <= SGE_RX_COPY_THRES) {
669 skb = alloc_skb(len, GFP_ATOMIC);
670 if (likely(skb != NULL)) {
671 __skb_put(skb, len);
672 pci_dma_sync_single_for_cpu(adap->pdev,
673 pci_unmap_addr(sd, dma_addr), len,
674 PCI_DMA_FROMDEVICE);
675 memcpy(skb->data, sd->skb->data, len);
676 pci_dma_sync_single_for_device(adap->pdev,
677 pci_unmap_addr(sd, dma_addr), len,
678 PCI_DMA_FROMDEVICE);
679 } else if (!drop_thres)
680 goto use_orig_buf;
681 recycle:
682 recycle_rx_buf(adap, fl, fl->cidx);
683 return skb;
686 if (unlikely(fl->credits < drop_thres))
687 goto recycle;
689 use_orig_buf:
690 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
691 fl->buf_size, PCI_DMA_FROMDEVICE);
692 skb = sd->skb;
693 skb_put(skb, len);
694 __refill_fl(adap, fl);
695 return skb;
699 * get_packet_pg - return the next ingress packet buffer from a free list
700 * @adap: the adapter that received the packet
701 * @fl: the SGE free list holding the packet
702 * @len: the packet length including any SGE padding
703 * @drop_thres: # of remaining buffers before we start dropping packets
705 * Get the next packet from a free list populated with page chunks.
706 * If the packet is small we make a copy and recycle the original buffer,
707 * otherwise we attach the original buffer as a page fragment to a fresh
708 * sk_buff. If a positive drop threshold is supplied packets are dropped
709 * and their buffers recycled if (a) the number of remaining buffers is
710 * under the threshold and the packet is too big to copy, or (b) there's
711 * no system memory.
713 * Note: this function is similar to @get_packet but deals with Rx buffers
714 * that are page chunks rather than sk_buffs.
716 static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
717 unsigned int len, unsigned int drop_thres)
719 struct sk_buff *skb = NULL;
720 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
722 if (len <= SGE_RX_COPY_THRES) {
723 skb = alloc_skb(len, GFP_ATOMIC);
724 if (likely(skb != NULL)) {
725 __skb_put(skb, len);
726 pci_dma_sync_single_for_cpu(adap->pdev,
727 pci_unmap_addr(sd, dma_addr), len,
728 PCI_DMA_FROMDEVICE);
729 memcpy(skb->data, sd->pg_chunk.va, len);
730 pci_dma_sync_single_for_device(adap->pdev,
731 pci_unmap_addr(sd, dma_addr), len,
732 PCI_DMA_FROMDEVICE);
733 } else if (!drop_thres)
734 return NULL;
735 recycle:
736 fl->credits--;
737 recycle_rx_buf(adap, fl, fl->cidx);
738 return skb;
741 if (unlikely(fl->credits <= drop_thres))
742 goto recycle;
744 skb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);
745 if (unlikely(!skb)) {
746 if (!drop_thres)
747 return NULL;
748 goto recycle;
751 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
752 fl->buf_size, PCI_DMA_FROMDEVICE);
753 __skb_put(skb, SGE_RX_PULL_LEN);
754 memcpy(skb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
755 skb_fill_page_desc(skb, 0, sd->pg_chunk.page,
756 sd->pg_chunk.offset + SGE_RX_PULL_LEN,
757 len - SGE_RX_PULL_LEN);
758 skb->len = len;
759 skb->data_len = len - SGE_RX_PULL_LEN;
760 skb->truesize += skb->data_len;
762 fl->credits--;
764 * We do not refill FLs here, we let the caller do it to overlap a
765 * prefetch.
767 return skb;
771 * get_imm_packet - return the next ingress packet buffer from a response
772 * @resp: the response descriptor containing the packet data
774 * Return a packet containing the immediate data of the given response.
776 static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
778 struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);
780 if (skb) {
781 __skb_put(skb, IMMED_PKT_SIZE);
782 skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE);
784 return skb;
788 * calc_tx_descs - calculate the number of Tx descriptors for a packet
789 * @skb: the packet
791 * Returns the number of Tx descriptors needed for the given Ethernet
792 * packet. Ethernet packets require addition of WR and CPL headers.
794 static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
796 unsigned int flits;
798 if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
799 return 1;
801 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
802 if (skb_shinfo(skb)->gso_size)
803 flits++;
804 return flits_to_desc(flits);
808 * make_sgl - populate a scatter/gather list for a packet
809 * @skb: the packet
810 * @sgp: the SGL to populate
811 * @start: start address of skb main body data to include in the SGL
812 * @len: length of skb main body data to include in the SGL
813 * @pdev: the PCI device
815 * Generates a scatter/gather list for the buffers that make up a packet
816 * and returns the SGL size in 8-byte words. The caller must size the SGL
817 * appropriately.
819 static inline unsigned int make_sgl(const struct sk_buff *skb,
820 struct sg_ent *sgp, unsigned char *start,
821 unsigned int len, struct pci_dev *pdev)
823 dma_addr_t mapping;
824 unsigned int i, j = 0, nfrags;
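/* j toggles between the two address/length slots of each sg_ent */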
826 if (len) {
827 mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
828 sgp->len[0] = cpu_to_be32(len);
829 sgp->addr[0] = cpu_to_be64(mapping);
830 j = 1;
833 nfrags = skb_shinfo(skb)->nr_frags;
834 for (i = 0; i < nfrags; i++) {
835 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
837 mapping = pci_map_page(pdev, frag->page, frag->page_offset,
838 frag->size, PCI_DMA_TODEVICE);
839 sgp->len[j] = cpu_to_be32(frag->size);
840 sgp->addr[j] = cpu_to_be64(mapping);
841 j ^= 1;
842 if (j == 0)
843 ++sgp;
845 if (j)
846 sgp->len[j] = 0;
847 return ((nfrags + (len != 0)) * 3) / 2 + j;
851 * check_ring_tx_db - check and potentially ring a Tx queue's doorbell
852 * @adap: the adapter
853 * @q: the Tx queue
855 * Ring the doorbell if a Tx queue is asleep. There is a natural race
856 * where the HW may go to sleep just after we check; in that case the
857 * interrupt handler will detect the outstanding TX packet
858 * and ring the doorbell for us.
860 * When GTS is disabled we unconditionally ring the doorbell.
862 static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
864 #if USE_GTS
865 clear_bit(TXQ_LAST_PKT_DB, &q->flags);
866 if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
867 set_bit(TXQ_LAST_PKT_DB, &q->flags);
868 t3_write_reg(adap, A_SG_KDOORBELL,
869 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
871 #else
872 wmb(); /* write descriptors before telling HW */
873 t3_write_reg(adap, A_SG_KDOORBELL,
874 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
875 #endif
878 static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
880 #if SGE_NUM_GENBITS == 2
881 d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
882 #endif
886 * write_wr_hdr_sgl - write a WR header and, optionally, SGL
887 * @ndesc: number of Tx descriptors spanned by the SGL
888 * @skb: the packet corresponding to the WR
889 * @d: first Tx descriptor to be written
890 * @pidx: index of above descriptors
891 * @q: the SGE Tx queue
892 * @sgl: the SGL
893 * @flits: number of flits to the start of the SGL in the first descriptor
894 * @sgl_flits: the SGL size in flits
895 * @gen: the Tx descriptor generation
896 * @wr_hi: top 32 bits of WR header based on WR type (big endian)
897 * @wr_lo: low 32 bits of WR header based on WR type (big endian)
899 * Write a work request header and an associated SGL. If the SGL is
900 * small enough to fit into one Tx descriptor it has already been written
901 * and we just need to write the WR header. Otherwise we distribute the
902 * SGL across the number of descriptors it spans.
904 static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
905 struct tx_desc *d, unsigned int pidx,
906 const struct sge_txq *q,
907 const struct sg_ent *sgl,
908 unsigned int flits, unsigned int sgl_flits,
909 unsigned int gen, __be32 wr_hi,
910 __be32 wr_lo)
912 struct work_request_hdr *wrp = (struct work_request_hdr *)d;
913 struct tx_sw_desc *sd = &q->sdesc[pidx];
915 sd->skb = skb;
916 if (need_skb_unmap()) {
917 sd->fragidx = 0;
918 sd->addr_idx = 0;
919 sd->sflit = flits;
922 if (likely(ndesc == 1)) {
923 sd->eop = 1;
924 wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
925 V_WR_SGLSFLT(flits)) | wr_hi;
926 wmb();
927 wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
928 V_WR_GEN(gen)) | wr_lo;
929 wr_gen2(d, gen);
930 } else {
931 unsigned int ogen = gen;
932 const u64 *fp = (const u64 *)sgl;
933 struct work_request_hdr *wp = wrp;
935 wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
936 V_WR_SGLSFLT(flits)) | wr_hi;
938 while (sgl_flits) {
939 unsigned int avail = WR_FLITS - flits;
941 if (avail > sgl_flits)
942 avail = sgl_flits;
943 memcpy(&d->flit[flits], fp, avail * sizeof(*fp));
944 sgl_flits -= avail;
945 ndesc--;
946 if (!sgl_flits)
947 break;
949 fp += avail;
950 d++;
951 sd->eop = 0;
952 sd++;
953 if (++pidx == q->size) {
954 pidx = 0;
955 gen ^= 1;
956 d = q->desc;
957 sd = q->sdesc;
960 sd->skb = skb;
961 wrp = (struct work_request_hdr *)d;
962 wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
963 V_WR_SGLSFLT(1)) | wr_hi;
964 wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
965 sgl_flits + 1)) |
966 V_WR_GEN(gen)) | wr_lo;
967 wr_gen2(d, gen);
968 flits = 1;
970 sd->eop = 1;
971 wrp->wr_hi |= htonl(F_WR_EOP);
972 wmb();
973 wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
974 wr_gen2((struct tx_desc *)wp, ogen);
975 WARN_ON(ndesc != 0);
980 * write_tx_pkt_wr - write a TX_PKT work request
981 * @adap: the adapter
982 * @skb: the packet to send
983 * @pi: the egress interface
984 * @pidx: index of the first Tx descriptor to write
985 * @gen: the generation value to use
986 * @q: the Tx queue
987 * @ndesc: number of descriptors the packet will occupy
988 * @compl: the value of the COMPL bit to use
990 * Generate a TX_PKT work request to send the supplied packet.
992 static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
993 const struct port_info *pi,
994 unsigned int pidx, unsigned int gen,
995 struct sge_txq *q, unsigned int ndesc,
996 unsigned int compl)
998 unsigned int flits, sgl_flits, cntrl, tso_info;
999 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1000 struct tx_desc *d = &q->desc[pidx];
1001 struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;
1003 cpl->len = htonl(skb->len | 0x80000000);
1004 cntrl = V_TXPKT_INTF(pi->port_id);
1006 if (vlan_tx_tag_present(skb) && pi->vlan_grp)
1007 cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb));
1009 tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
1010 if (tso_info) {
1011 int eth_type;
1012 struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;
1014 d->flit[2] = 0;
1015 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
1016 hdr->cntrl = htonl(cntrl);
1017 eth_type = skb_network_offset(skb) == ETH_HLEN ?
1018 CPL_ETH_II : CPL_ETH_II_VLAN;
1019 tso_info |= V_LSO_ETH_TYPE(eth_type) |
1020 V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) |
1021 V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff);
1022 hdr->lso_info = htonl(tso_info);
1023 flits = 3;
1024 } else {
1025 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
1026 cntrl |= F_TXPKT_IPCSUM_DIS; /* SW calculates IP csum */
1027 cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
1028 cpl->cntrl = htonl(cntrl);
1030 if (skb->len <= WR_LEN - sizeof(*cpl)) {
1031 q->sdesc[pidx].skb = NULL;
1032 if (!skb->data_len)
1033 skb_copy_from_linear_data(skb, &d->flit[2],
1034 skb->len);
1035 else
1036 skb_copy_bits(skb, 0, &d->flit[2], skb->len);
1038 flits = (skb->len + 7) / 8 + 2;
1039 cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
1040 V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
1041 | F_WR_SOP | F_WR_EOP | compl);
1042 wmb();
1043 cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
1044 V_WR_TID(q->token));
1045 wr_gen2(d, gen);
1046 kfree_skb(skb);
1047 return;
1050 flits = 2;
1053 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1054 sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);
1056 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
1057 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
1058 htonl(V_WR_TID(q->token)));
1062 * eth_xmit - add a packet to the Ethernet Tx queue
1063 * @skb: the packet
1064 * @dev: the egress net device
1066 * Add a packet to an SGE Tx queue. Runs with softirqs disabled.
1068 int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1070 unsigned int ndesc, pidx, credits, gen, compl;
1071 const struct port_info *pi = netdev_priv(dev);
1072 struct adapter *adap = pi->adapter;
1073 struct sge_qset *qs = pi->qs;
1074 struct sge_txq *q = &qs->txq[TXQ_ETH];
1077 * The chip's minimum packet length is 9 octets, but we play it safe and
1078 * reject anything shorter than an Ethernet header.
1080 if (unlikely(skb->len < ETH_HLEN)) {
1081 dev_kfree_skb(skb);
1082 return NETDEV_TX_OK;
1085 spin_lock(&q->lock);
1086 reclaim_completed_tx(adap, q);
1088 credits = q->size - q->in_use;
1089 ndesc = calc_tx_descs(skb);
1091 if (unlikely(credits < ndesc)) {
1092 if (!netif_queue_stopped(dev)) {
1093 netif_stop_queue(dev);
1094 set_bit(TXQ_ETH, &qs->txq_stopped);
1095 q->stops++;
1096 dev_err(&adap->pdev->dev,
1097 "%s: Tx ring %u full while queue awake!\n",
1098 dev->name, q->cntxt_id & 7);
1100 spin_unlock(&q->lock);
1101 return NETDEV_TX_BUSY;
1104 q->in_use += ndesc;
1105 if (unlikely(credits - ndesc < q->stop_thres)) {
1106 q->stops++;
1107 netif_stop_queue(dev);
1108 set_bit(TXQ_ETH, &qs->txq_stopped);
1109 #if !USE_GTS
1110 if (should_restart_tx(q) &&
1111 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1112 q->restarts++;
1113 netif_wake_queue(dev);
1115 #endif
1118 gen = q->gen;
1119 q->unacked += ndesc;
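/* Ask for a WR completion roughly once every 8 descriptors; the credits
 * HW returns in response are picked up in handle_rsp_cntrl_info(). */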
1120 compl = (q->unacked & 8) << (S_WR_COMPL - 3);
1121 q->unacked &= 7;
1122 pidx = q->pidx;
1123 q->pidx += ndesc;
1124 if (q->pidx >= q->size) {
1125 q->pidx -= q->size;
1126 q->gen ^= 1;
1129 /* update port statistics */
1130 if (skb->ip_summed == CHECKSUM_COMPLETE)
1131 qs->port_stats[SGE_PSTAT_TX_CSUM]++;
1132 if (skb_shinfo(skb)->gso_size)
1133 qs->port_stats[SGE_PSTAT_TSO]++;
1134 if (vlan_tx_tag_present(skb) && pi->vlan_grp)
1135 qs->port_stats[SGE_PSTAT_VLANINS]++;
1137 dev->trans_start = jiffies;
1138 spin_unlock(&q->lock);
1141 * We do not use Tx completion interrupts to free DMAd Tx packets.
1142 * This is good for performance but means that we rely on new Tx
1143 * packets arriving to run the destructors of completed packets,
1144 * which open up space in their sockets' send queues. Sometimes
1145 * we do not get such new packets causing Tx to stall. A single
1146 * UDP transmitter is a good example of this situation. We have
1147 * a clean up timer that periodically reclaims completed packets
1148 * but it doesn't run often enough (nor do we want it to) to prevent
1149 * lengthy stalls. A solution to this problem is to run the
1150 * destructor early, after the packet is queued but before it's DMAd.
1151 * A downside is that we lie to socket memory accounting, but the amount
1152 * of extra memory is reasonable (limited by the number of Tx
1153 * descriptors), the packets do actually get freed quickly by new
1154 * packets almost always, and for protocols like TCP that wait for
1155 * acks to really free up the data the extra memory is even less.
1156 * On the positive side we run the destructors on the sending CPU
1157 * rather than on a potentially different completing CPU, usually a
1158 * good thing. We also run them without holding our Tx queue lock,
1159 * unlike what reclaim_completed_tx() would otherwise do.
1161 * Run the destructor before telling the DMA engine about the packet
1162 * to make sure it doesn't complete and get freed prematurely.
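 * Only a private (unshared) skb is orphaned here; a shared skb, e.g. one
 * being resent by pktgen, is left untouched.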
1164 if (likely(!skb_shared(skb)))
1165 skb_orphan(skb);
1167 write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
1168 check_ring_tx_db(adap, q);
1169 return NETDEV_TX_OK;
1173 * write_imm - write a packet into a Tx descriptor as immediate data
1174 * @d: the Tx descriptor to write
1175 * @skb: the packet
1176 * @len: the length of packet data to write as immediate data
1177 * @gen: the generation bit value to write
1179 * Writes a packet as immediate data into a Tx descriptor. The packet
1180 * contains a work request at its beginning. We must write the packet
1181 * carefully so the SGE doesn't read it accidentally before it's written
1182 * in its entirety.
1184 static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
1185 unsigned int len, unsigned int gen)
1187 struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
1188 struct work_request_hdr *to = (struct work_request_hdr *)d;
1190 if (likely(!skb->data_len))
1191 memcpy(&to[1], &from[1], len - sizeof(*from));
1192 else
1193 skb_copy_bits(skb, sizeof(*from), &to[1], len - sizeof(*from));
1195 to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
1196 V_WR_BCNTLFLT(len & 7));
1197 wmb();
1198 to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
1199 V_WR_LEN((len + 7) / 8));
1200 wr_gen2(d, gen);
1201 kfree_skb(skb);
1205 * check_desc_avail - check descriptor availability on a send queue
1206 * @adap: the adapter
1207 * @q: the send queue
1208 * @skb: the packet needing the descriptors
1209 * @ndesc: the number of Tx descriptors needed
1210 * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
1212 * Checks if the requested number of Tx descriptors is available on an
1213 * SGE send queue. If the queue is already suspended or not enough
1214 * descriptors are available the packet is queued for later transmission.
1215 * Must be called with the Tx queue locked.
1217 * Returns 0 if enough descriptors are available, 1 if there aren't
1218 * enough descriptors and the packet has been queued, and 2 if the caller
1219 * needs to retry because there weren't enough descriptors at the
1220 * beginning of the call but some freed up in the mean time.
1222 static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
1223 struct sk_buff *skb, unsigned int ndesc,
1224 unsigned int qid)
1226 if (unlikely(!skb_queue_empty(&q->sendq))) {
1227 addq_exit:__skb_queue_tail(&q->sendq, skb);
1228 return 1;
1230 if (unlikely(q->size - q->in_use < ndesc)) {
1231 struct sge_qset *qs = txq_to_qset(q, qid);
1233 set_bit(qid, &qs->txq_stopped);
1234 smp_mb__after_clear_bit();
1236 if (should_restart_tx(q) &&
1237 test_and_clear_bit(qid, &qs->txq_stopped))
1238 return 2;
1240 q->stops++;
1241 goto addq_exit;
1243 return 0;
1247 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1248 * @q: the SGE control Tx queue
1250 * This is a variant of reclaim_completed_tx() that is used for Tx queues
1251 * that send only immediate data (presently just the control queues) and
1252 * thus do not have any sk_buffs to release.
1254 static inline void reclaim_completed_tx_imm(struct sge_txq *q)
1256 unsigned int reclaim = q->processed - q->cleaned;
1258 q->in_use -= reclaim;
1259 q->cleaned += reclaim;
1262 static inline int immediate(const struct sk_buff *skb)
1264 return skb->len <= WR_LEN;
1268 * ctrl_xmit - send a packet through an SGE control Tx queue
1269 * @adap: the adapter
1270 * @q: the control queue
1271 * @skb: the packet
1273 * Send a packet through an SGE control Tx queue. Packets sent through
1274 * a control queue must fit entirely as immediate data in a single Tx
1275 * descriptor and have no page fragments.
1277 static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
1278 struct sk_buff *skb)
1280 int ret;
1281 struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;
1283 if (unlikely(!immediate(skb))) {
1284 WARN_ON(1);
1285 dev_kfree_skb(skb);
1286 return NET_XMIT_SUCCESS;
1289 wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
1290 wrp->wr_lo = htonl(V_WR_TID(q->token));
1292 spin_lock(&q->lock);
1293 again:reclaim_completed_tx_imm(q);
1295 ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
1296 if (unlikely(ret)) {
1297 if (ret == 1) {
1298 spin_unlock(&q->lock);
1299 return NET_XMIT_CN;
1301 goto again;
1304 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1306 q->in_use++;
1307 if (++q->pidx >= q->size) {
1308 q->pidx = 0;
1309 q->gen ^= 1;
1311 spin_unlock(&q->lock);
1312 wmb();
1313 t3_write_reg(adap, A_SG_KDOORBELL,
1314 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1315 return NET_XMIT_SUCCESS;
1319 * restart_ctrlq - restart a suspended control queue
1320 * @qs: the queue set containing the control queue
1322 * Resumes transmission on a suspended Tx control queue.
1324 static void restart_ctrlq(unsigned long data)
1326 struct sk_buff *skb;
1327 struct sge_qset *qs = (struct sge_qset *)data;
1328 struct sge_txq *q = &qs->txq[TXQ_CTRL];
1330 spin_lock(&q->lock);
1331 again:reclaim_completed_tx_imm(q);
1333 while (q->in_use < q->size &&
1334 (skb = __skb_dequeue(&q->sendq)) != NULL) {
1336 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1338 if (++q->pidx >= q->size) {
1339 q->pidx = 0;
1340 q->gen ^= 1;
1342 q->in_use++;
1345 if (!skb_queue_empty(&q->sendq)) {
1346 set_bit(TXQ_CTRL, &qs->txq_stopped);
1347 smp_mb__after_clear_bit();
1349 if (should_restart_tx(q) &&
1350 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
1351 goto again;
1352 q->stops++;
1355 spin_unlock(&q->lock);
1356 t3_write_reg(qs->adap, A_SG_KDOORBELL,
1357 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1361 * Send a management message through control queue 0
1363 int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
1365 return ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
1369 * deferred_unmap_destructor - unmap a packet when it is freed
1370 * @skb: the packet
1372 * This is the packet destructor used for Tx packets that need to remain
1373 * mapped until they are freed rather than until their Tx descriptors are
1374 * freed.
1376 static void deferred_unmap_destructor(struct sk_buff *skb)
1378 int i;
1379 const dma_addr_t *p;
1380 const struct skb_shared_info *si;
1381 const struct deferred_unmap_info *dui;
1383 dui = (struct deferred_unmap_info *)skb->head;
1384 p = dui->addr;
1386 if (skb->tail - skb->transport_header)
1387 pci_unmap_single(dui->pdev, *p++,
1388 skb->tail - skb->transport_header,
1389 PCI_DMA_TODEVICE);
1391 si = skb_shinfo(skb);
1392 for (i = 0; i < si->nr_frags; i++)
1393 pci_unmap_page(dui->pdev, *p++, si->frags[i].size,
1394 PCI_DMA_TODEVICE);
1397 static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
1398 const struct sg_ent *sgl, int sgl_flits)
1400 dma_addr_t *p;
1401 struct deferred_unmap_info *dui;
1403 dui = (struct deferred_unmap_info *)skb->head;
1404 dui->pdev = pdev;
1405 for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
1406 *p++ = be64_to_cpu(sgl->addr[0]);
1407 *p++ = be64_to_cpu(sgl->addr[1]);
1409 if (sgl_flits)
1410 *p = be64_to_cpu(sgl->addr[0]);
1414 * write_ofld_wr - write an offload work request
1415 * @adap: the adapter
1416 * @skb: the packet to send
1417 * @q: the Tx queue
1418 * @pidx: index of the first Tx descriptor to write
1419 * @gen: the generation value to use
1420 * @ndesc: number of descriptors the packet will occupy
1422 * Write an offload work request to send the supplied packet. The packet
1423 * data already carry the work request with most fields populated.
1425 static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
1426 struct sge_txq *q, unsigned int pidx,
1427 unsigned int gen, unsigned int ndesc)
1429 unsigned int sgl_flits, flits;
1430 struct work_request_hdr *from;
1431 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1432 struct tx_desc *d = &q->desc[pidx];
1434 if (immediate(skb)) {
1435 q->sdesc[pidx].skb = NULL;
1436 write_imm(d, skb, skb->len, gen);
1437 return;
1440 /* Only TX_DATA builds SGLs */
1442 from = (struct work_request_hdr *)skb->data;
1443 memcpy(&d->flit[1], &from[1],
1444 skb_transport_offset(skb) - sizeof(*from));
1446 flits = skb_transport_offset(skb) / 8;
1447 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1448 sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
1449 skb->tail - skb->transport_header,
1450 adap->pdev);
1451 if (need_skb_unmap()) {
1452 setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
1453 skb->destructor = deferred_unmap_destructor;
1456 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
1457 gen, from->wr_hi, from->wr_lo);
1461 * calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
1462 * @skb: the packet
1464 * Returns the number of Tx descriptors needed for the given offload
1465 * packet. These packets are already fully constructed.
1467 static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
1469 unsigned int flits, cnt;
1471 if (skb->len <= WR_LEN)
1472 return 1; /* packet fits as immediate data */
1474 flits = skb_transport_offset(skb) / 8; /* headers */
1475 cnt = skb_shinfo(skb)->nr_frags;
1476 if (skb->tail != skb->transport_header)
1477 cnt++;
1478 return flits_to_desc(flits + sgl_len(cnt));
1482 * ofld_xmit - send a packet through an offload queue
1483 * @adap: the adapter
1484 * @q: the Tx offload queue
1485 * @skb: the packet
1487 * Send an offload packet through an SGE offload queue.
1489 static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
1490 struct sk_buff *skb)
1492 int ret;
1493 unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;
1495 spin_lock(&q->lock);
1496 again:reclaim_completed_tx(adap, q);
1498 ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
1499 if (unlikely(ret)) {
1500 if (ret == 1) {
1501 skb->priority = ndesc; /* save for restart */
1502 spin_unlock(&q->lock);
1503 return NET_XMIT_CN;
1505 goto again;
1508 gen = q->gen;
1509 q->in_use += ndesc;
1510 pidx = q->pidx;
1511 q->pidx += ndesc;
1512 if (q->pidx >= q->size) {
1513 q->pidx -= q->size;
1514 q->gen ^= 1;
1516 spin_unlock(&q->lock);
1518 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1519 check_ring_tx_db(adap, q);
1520 return NET_XMIT_SUCCESS;
1524 * restart_offloadq - restart a suspended offload queue
1525 * @qs: the queue set containing the offload queue
1527 * Resumes transmission on a suspended Tx offload queue.
1529 static void restart_offloadq(unsigned long data)
1531 struct sk_buff *skb;
1532 struct sge_qset *qs = (struct sge_qset *)data;
1533 struct sge_txq *q = &qs->txq[TXQ_OFLD];
1534 const struct port_info *pi = netdev_priv(qs->netdev);
1535 struct adapter *adap = pi->adapter;
1537 spin_lock(&q->lock);
1538 again:reclaim_completed_tx(adap, q);
1540 while ((skb = skb_peek(&q->sendq)) != NULL) {
1541 unsigned int gen, pidx;
1542 unsigned int ndesc = skb->priority;
1544 if (unlikely(q->size - q->in_use < ndesc)) {
1545 set_bit(TXQ_OFLD, &qs->txq_stopped);
1546 smp_mb__after_clear_bit();
1548 if (should_restart_tx(q) &&
1549 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
1550 goto again;
1551 q->stops++;
1552 break;
1555 gen = q->gen;
1556 q->in_use += ndesc;
1557 pidx = q->pidx;
1558 q->pidx += ndesc;
1559 if (q->pidx >= q->size) {
1560 q->pidx -= q->size;
1561 q->gen ^= 1;
1563 __skb_unlink(skb, &q->sendq);
1564 spin_unlock(&q->lock);
1566 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1567 spin_lock(&q->lock);
1569 spin_unlock(&q->lock);
1571 #if USE_GTS
1572 set_bit(TXQ_RUNNING, &q->flags);
1573 set_bit(TXQ_LAST_PKT_DB, &q->flags);
1574 #endif
1575 t3_write_reg(adap, A_SG_KDOORBELL,
1576 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1580 * queue_set - return the queue set a packet should use
1581 * @skb: the packet
1583 * Maps a packet to the SGE queue set it should use. The desired queue
1584 * set is carried in bits 1-3 in the packet's priority.
1586 static inline int queue_set(const struct sk_buff *skb)
1588 return skb->priority >> 1;
1592 * is_ctrl_pkt - return whether an offload packet is a control packet
1593 * @skb: the packet
1595 * Determines whether an offload packet should use an OFLD or a CTRL
1596 * Tx queue. This is indicated by bit 0 in the packet's priority.
1598 static inline int is_ctrl_pkt(const struct sk_buff *skb)
1600 return skb->priority & 1;
1604 * t3_offload_tx - send an offload packet
1605 * @tdev: the offload device to send to
1606 * @skb: the packet
1608 * Sends an offload packet. We use the packet priority to select the
1609 * appropriate Tx queue as follows: bit 0 indicates whether the packet
1610 * should be sent as regular or control, bits 1-3 select the queue set.
1612 int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
1614 struct adapter *adap = tdev2adap(tdev);
1615 struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];
1617 if (unlikely(is_ctrl_pkt(skb)))
1618 return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);
1620 return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
1624 * offload_enqueue - add an offload packet to an SGE offload receive queue
1625 * @q: the SGE response queue
1626 * @skb: the packet
1628 * Add a new offload packet to an SGE response queue's offload packet
1629 * queue. If the packet is the first on the queue it schedules the RX
1630 * softirq to process the queue.
1632 static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
1634 skb->next = skb->prev = NULL;
1635 if (q->rx_tail)
1636 q->rx_tail->next = skb;
1637 else {
1638 struct sge_qset *qs = rspq_to_qset(q);
1640 napi_schedule(&qs->napi);
1641 q->rx_head = skb;
1643 q->rx_tail = skb;
1647 * deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
1648 * @tdev: the offload device that will be receiving the packets
1649 * @q: the SGE response queue that assembled the bundle
1650 * @skbs: the partial bundle
1651 * @n: the number of packets in the bundle
1653 * Delivers a (partial) bundle of Rx offload packets to an offload device.
1655 static inline void deliver_partial_bundle(struct t3cdev *tdev,
1656 struct sge_rspq *q,
1657 struct sk_buff *skbs[], int n)
1659 if (n) {
1660 q->offload_bundles++;
1661 tdev->recv(tdev, skbs, n);
1666 * ofld_poll - NAPI handler for offload packets in interrupt mode
1667 * @dev: the network device doing the polling
1668 * @budget: polling budget
1670 * The NAPI handler for offload packets when a response queue is serviced
1671 * by the hard interrupt handler, i.e., when it's operating in non-polling
1672 * mode. Creates small packet batches and sends them through the offload
1673 * receive handler. Batches need to be of modest size as we do prefetches
1674 * on the packets in each.
1676 static int ofld_poll(struct napi_struct *napi, int budget)
1678 struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
1679 struct sge_rspq *q = &qs->rspq;
1680 struct adapter *adapter = qs->adap;
1681 int work_done = 0;
1683 while (work_done < budget) {
1684 struct sk_buff *head, *tail, *skbs[RX_BUNDLE_SIZE];
1685 int ngathered;
1687 spin_lock_irq(&q->lock);
1688 head = q->rx_head;
1689 if (!head) {
1690 napi_complete(napi);
1691 spin_unlock_irq(&q->lock);
1692 return work_done;
1695 tail = q->rx_tail;
1696 q->rx_head = q->rx_tail = NULL;
1697 spin_unlock_irq(&q->lock);
1699 for (ngathered = 0; work_done < budget && head; work_done++) {
1700 prefetch(head->data);
1701 skbs[ngathered] = head;
1702 head = head->next;
1703 skbs[ngathered]->next = NULL;
1704 if (++ngathered == RX_BUNDLE_SIZE) {
1705 q->offload_bundles++;
1706 adapter->tdev.recv(&adapter->tdev, skbs,
1707 ngathered);
1708 ngathered = 0;
1711 if (head) { /* splice remaining packets back onto Rx queue */
1712 spin_lock_irq(&q->lock);
1713 tail->next = q->rx_head;
1714 if (!q->rx_head)
1715 q->rx_tail = tail;
1716 q->rx_head = head;
1717 spin_unlock_irq(&q->lock);
1719 deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
1722 return work_done;
1726 * rx_offload - process a received offload packet
1727 * @tdev: the offload device receiving the packet
1728 * @rq: the response queue that received the packet
1729 * @skb: the packet
1730 * @rx_gather: a gather list of packets if we are building a bundle
1731 * @gather_idx: index of the next available slot in the bundle
1733 * Process an ingress offload packet and add it to the offload ingress
1734 * queue. Returns the index of the next available slot in the bundle.
1736 static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
1737 struct sk_buff *skb, struct sk_buff *rx_gather[],
1738 unsigned int gather_idx)
1740 rq->offload_pkts++;
1741 skb_reset_mac_header(skb);
1742 skb_reset_network_header(skb);
1743 skb_reset_transport_header(skb);
1745 if (rq->polling) {
1746 rx_gather[gather_idx++] = skb;
1747 if (gather_idx == RX_BUNDLE_SIZE) {
1748 tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
1749 gather_idx = 0;
1750 rq->offload_bundles++;
1752 } else
1753 offload_enqueue(rq, skb);
1755 return gather_idx;
1759 * restart_tx - check whether to restart suspended Tx queues
1760 * @qs: the queue set to resume
1762 * Restarts suspended Tx queues of an SGE queue set if they have enough
1763 * free resources to resume operation.
1765 static void restart_tx(struct sge_qset *qs)
1767 if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
1768 should_restart_tx(&qs->txq[TXQ_ETH]) &&
1769 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1770 qs->txq[TXQ_ETH].restarts++;
1771 if (netif_running(qs->netdev))
1772 netif_wake_queue(qs->netdev);
1775 if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
1776 should_restart_tx(&qs->txq[TXQ_OFLD]) &&
1777 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
1778 qs->txq[TXQ_OFLD].restarts++;
1779 tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
1781 if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
1782 should_restart_tx(&qs->txq[TXQ_CTRL]) &&
1783 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
1784 qs->txq[TXQ_CTRL].restarts++;
1785 tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
1790 * rx_eth - process an ingress ethernet packet
1791 * @adap: the adapter
1792 * @rq: the response queue that received the packet
1793 * @skb: the packet
1794 * @pad: amount of padding at the start of the buffer
1796 * Process an ingress ethernet packet and deliver it to the stack.
1797 * The padding is 2 if the packet was delivered in an Rx buffer and 0
1798 * if it was immediate data in a response.
1800 static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
1801 struct sk_buff *skb, int pad)
1803 struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
1804 struct port_info *pi;
1806 skb_pull(skb, sizeof(*p) + pad);
1807 skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
1808 skb->dev->last_rx = jiffies;
1809 pi = netdev_priv(skb->dev);
1810 if (pi->rx_csum_offload && p->csum_valid && p->csum == 0xffff &&
1811 !p->fragment) {
1812 rspq_to_qset(rq)->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
1813 skb->ip_summed = CHECKSUM_UNNECESSARY;
1814 } else
1815 skb->ip_summed = CHECKSUM_NONE;
1817 if (unlikely(p->vlan_valid)) {
1818 struct vlan_group *grp = pi->vlan_grp;
1820 rspq_to_qset(rq)->port_stats[SGE_PSTAT_VLANEX]++;
1821 if (likely(grp))
1822 __vlan_hwaccel_rx(skb, grp, ntohs(p->vlan),
1823 rq->polling);
1824 else
1825 dev_kfree_skb_any(skb);
1826 } else if (rq->polling)
1827 netif_receive_skb(skb);
1828 else
1829 netif_rx(skb);
1833 * handle_rsp_cntrl_info - handles control information in a response
1834 * @qs: the queue set corresponding to the response
1835 * @flags: the response control flags
1837 * Handles the control information of an SGE response, such as GTS
1838 * indications and completion credits for the queue set's Tx queues.
1839 * HW coalesces credits; we don't do any extra SW coalescing.
1841 static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
1843 unsigned int credits;
1845 #if USE_GTS
1846 if (flags & F_RSPD_TXQ0_GTS)
1847 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
1848 #endif
1850 credits = G_RSPD_TXQ0_CR(flags);
1851 if (credits)
1852 qs->txq[TXQ_ETH].processed += credits;
1854 credits = G_RSPD_TXQ2_CR(flags);
1855 if (credits)
1856 qs->txq[TXQ_CTRL].processed += credits;
1858 # if USE_GTS
1859 if (flags & F_RSPD_TXQ1_GTS)
1860 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
1861 # endif
1862 credits = G_RSPD_TXQ1_CR(flags);
1863 if (credits)
1864 qs->txq[TXQ_OFLD].processed += credits;
1868 * check_ring_db - check if we need to ring any doorbells
1869 * @adapter: the adapter
1870 * @qs: the queue set whose Tx queues are to be examined
1871 * @sleeping: indicates which Tx queue sent GTS
1873 * Checks if some of a queue set's Tx queues need to ring their doorbells
1874 * to resume transmission after idling while they still have unprocessed
1875 * descriptors.
1877 static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
1878 unsigned int sleeping)
1880 if (sleeping & F_RSPD_TXQ0_GTS) {
1881 struct sge_txq *txq = &qs->txq[TXQ_ETH];
1883 if (txq->cleaned + txq->in_use != txq->processed &&
1884 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
1885 set_bit(TXQ_RUNNING, &txq->flags);
1886 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
1887 V_EGRCNTX(txq->cntxt_id));
1891 if (sleeping & F_RSPD_TXQ1_GTS) {
1892 struct sge_txq *txq = &qs->txq[TXQ_OFLD];
1894 if (txq->cleaned + txq->in_use != txq->processed &&
1895 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
1896 set_bit(TXQ_RUNNING, &txq->flags);
1897 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
1898 V_EGRCNTX(txq->cntxt_id));
1904 * is_new_response - check if a response is newly written
1905 * @r: the response descriptor
1906 * @q: the response queue
1908 * Returns true if a response descriptor contains a yet unprocessed
1909 * response.
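 * The generation bit written by HW flips each time the response queue
 * wraps, so a descriptor holds a new response when its generation bit
 * matches the queue's current generation.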
1911 static inline int is_new_response(const struct rsp_desc *r,
1912 const struct sge_rspq *q)
1914 return (r->intr_gen & F_RSPD_GEN2) == q->gen;
1917 #define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
1918 #define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
1919 V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
1920 V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
1921 V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
1923 /* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
1924 #define NOMEM_INTR_DELAY 2500
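/* i.e. 2500 * 0.1us = 250us of holdoff while short of memory. */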
1927 * process_responses - process responses from an SGE response queue
1928 * @adap: the adapter
1929 * @qs: the queue set to which the response queue belongs
1930 * @budget: how many responses can be processed in this round
1932 * Process responses from an SGE response queue up to the supplied budget.
1933 * Responses include received packets as well as credits and other events
1934 * for the queues that belong to the response queue's queue set.
1935 * A negative budget is effectively unlimited.
1937  * Additionally, choose the interrupt holdoff time for the next interrupt
1938  * on this queue.  If the system is under memory shortage, use a fairly
1939 * long delay to help recovery.
1941 static int process_responses(struct adapter *adap, struct sge_qset *qs,
1942 int budget)
1944 struct sge_rspq *q = &qs->rspq;
1945 struct rsp_desc *r = &q->desc[q->cidx];
1946 int budget_left = budget;
1947 unsigned int sleeping = 0;
1948 struct sk_buff *offload_skbs[RX_BUNDLE_SIZE];
1949 int ngathered = 0;
1951 q->next_holdoff = q->holdoff_tmr;
1953 while (likely(budget_left && is_new_response(r, q))) {
1954 int eth, ethpad = 2;
1955 struct sk_buff *skb = NULL;
1956 u32 len, flags = ntohl(r->flags);
1957 u32 rss_hi = *(const u32 *)r, rss_lo = r->rss_hdr.rss_hash_val;
1959 eth = r->rss_hdr.opcode == CPL_RX_PKT;
1961 if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
1962 skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
1963 if (!skb)
1964 goto no_mem;
1966 memcpy(__skb_put(skb, AN_PKT_SIZE), r, AN_PKT_SIZE);
1967 skb->data[0] = CPL_ASYNC_NOTIF;
1968 rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
1969 q->async_notif++;
1970 } else if (flags & F_RSPD_IMM_DATA_VALID) {
1971 skb = get_imm_packet(r);
1972 if (unlikely(!skb)) {
1973 no_mem:
1974 q->next_holdoff = NOMEM_INTR_DELAY;
1975 q->nomem++;
1976 /* consume one credit since we tried */
1977 budget_left--;
1978 break;
1980 q->imm_data++;
1981 ethpad = 0;
1982 } else if ((len = ntohl(r->len_cq)) != 0) {
1983 struct sge_fl *fl;
1985 fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
1986 if (fl->use_pages) {
1987 void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
1989 prefetch(addr);
1990 #if L1_CACHE_BYTES < 128
1991 prefetch(addr + L1_CACHE_BYTES);
1992 #endif
1993 __refill_fl(adap, fl);
1995 skb = get_packet_pg(adap, fl, G_RSPD_LEN(len),
1996 eth ? SGE_RX_DROP_THRES : 0);
1997 } else
1998 skb = get_packet(adap, fl, G_RSPD_LEN(len),
1999 eth ? SGE_RX_DROP_THRES : 0);
2000 if (unlikely(!skb)) {
2001 if (!eth)
2002 goto no_mem;
2003 q->rx_drops++;
2004 } else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT))
2005 __skb_pull(skb, 2);
2007 if (++fl->cidx == fl->size)
2008 fl->cidx = 0;
2009 } else
2010 q->pure_rsps++;
2012 if (flags & RSPD_CTRL_MASK) {
2013 sleeping |= flags & RSPD_GTS_MASK;
2014 handle_rsp_cntrl_info(qs, flags);
2017 r++;
2018 if (unlikely(++q->cidx == q->size)) {
2019 q->cidx = 0;
2020 q->gen ^= 1;
2021 r = q->desc;
2023 prefetch(r);
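		/* Return accumulated credits to the response queue in batches
		 * of a quarter ring. */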
2025 if (++q->credits >= (q->size / 4)) {
2026 refill_rspq(adap, q, q->credits);
2027 q->credits = 0;
2030 if (likely(skb != NULL)) {
2031 if (eth)
2032 rx_eth(adap, q, skb, ethpad);
2033 else {
2034 /* Preserve the RSS info in csum & priority */
2035 skb->csum = rss_hi;
2036 skb->priority = rss_lo;
2037 ngathered = rx_offload(&adap->tdev, q, skb,
2038 offload_skbs,
2039 ngathered);
2042 --budget_left;
2045 deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
2046 if (sleeping)
2047 check_ring_db(adap, qs, sleeping);
2049 smp_mb(); /* commit Tx queue .processed updates */
2050 if (unlikely(qs->txq_stopped != 0))
2051 restart_tx(qs);
2053 budget -= budget_left;
2054 return budget;
2057 static inline int is_pure_response(const struct rsp_desc *r)
2059 u32 n = ntohl(r->flags) & (F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
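	/* Pure: no async notification, no immediate data, and no FL buffer
	 * (len_cq == 0). */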
2061 return (n | r->len_cq) == 0;
2065 * napi_rx_handler - the NAPI handler for Rx processing
2066 * @napi: the napi instance
2067 * @budget: how many packets we can process in this round
2069 * Handler for new data events when using NAPI.
2071 static int napi_rx_handler(struct napi_struct *napi, int budget)
2073 struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
2074 struct adapter *adap = qs->adap;
2075 int work_done = process_responses(adap, qs, budget);
2077 if (likely(work_done < budget)) {
2078 napi_complete(napi);
2081 * Because we don't atomically flush the following
2082 * write it is possible that in very rare cases it can
2083 * reach the device in a way that races with a new
2084 * response being written plus an error interrupt
2085 * causing the NAPI interrupt handler below to return
2086 * unhandled status to the OS. To protect against
2087 * this would require flushing the write and doing
2088 * both the write and the flush with interrupts off.
2089 * Way too expensive and unjustifiable given the
2090 * rarity of the race.
2092 * The race cannot happen at all with MSI-X.
2094 t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
2095 V_NEWTIMER(qs->rspq.next_holdoff) |
2096 V_NEWINDEX(qs->rspq.cidx));
2098 return work_done;
2102 * Returns true if the device is already scheduled for polling.
2104 static inline int napi_is_scheduled(struct napi_struct *napi)
2106 return test_bit(NAPI_STATE_SCHED, &napi->state);
2110 * process_pure_responses - process pure responses from a response queue
2111 * @adap: the adapter
2112 * @qs: the queue set owning the response queue
2113 * @r: the first pure response to process
2115 * A simpler version of process_responses() that handles only pure (i.e.,
2116  * non-data-carrying) responses.  Such responses are too lightweight to
2117 * justify calling a softirq under NAPI, so we handle them specially in
2118 * the interrupt handler. The function is called with a pointer to a
2119 * response, which the caller must ensure is a valid pure response.
2121 * Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
2123 static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
2124 struct rsp_desc *r)
2126 struct sge_rspq *q = &qs->rspq;
2127 unsigned int sleeping = 0;
2129 do {
2130 u32 flags = ntohl(r->flags);
2132 r++;
2133 if (unlikely(++q->cidx == q->size)) {
2134 q->cidx = 0;
2135 q->gen ^= 1;
2136 r = q->desc;
2138 prefetch(r);
2140 if (flags & RSPD_CTRL_MASK) {
2141 sleeping |= flags & RSPD_GTS_MASK;
2142 handle_rsp_cntrl_info(qs, flags);
2145 q->pure_rsps++;
2146 if (++q->credits >= (q->size / 4)) {
2147 refill_rspq(adap, q, q->credits);
2148 q->credits = 0;
2150 } while (is_new_response(r, q) && is_pure_response(r));
2152 if (sleeping)
2153 check_ring_db(adap, qs, sleeping);
2155 smp_mb(); /* commit Tx queue .processed updates */
2156 if (unlikely(qs->txq_stopped != 0))
2157 restart_tx(qs);
2159 return is_new_response(r, q);
2163 * handle_responses - decide what to do with new responses in NAPI mode
2164 * @adap: the adapter
2165 * @q: the response queue
2167 * This is used by the NAPI interrupt handlers to decide what to do with
2168 * new SGE responses. If there are no new responses it returns -1. If
2169 * there are new responses and they are pure (i.e., non-data carrying)
2170 * it handles them straight in hard interrupt context as they are very
2171 * cheap and don't deliver any packets. Finally, if there are any data
2172 * signaling responses it schedules the NAPI handler. Returns 1 if it
2173 * schedules NAPI, 0 if all new responses were pure.
2175 * The caller must ascertain NAPI is not already running.
2177 static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
2179 struct sge_qset *qs = rspq_to_qset(q);
2180 struct rsp_desc *r = &q->desc[q->cidx];
2182 if (!is_new_response(r, q))
2183 return -1;
2184 if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
2185 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2186 V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
2187 return 0;
2189 napi_schedule(&qs->napi);
2190 return 1;
2194 * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
2195 * (i.e., response queue serviced in hard interrupt).
2197 irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
2199 struct sge_qset *qs = cookie;
2200 struct adapter *adap = qs->adap;
2201 struct sge_rspq *q = &qs->rspq;
2203 spin_lock(&q->lock);
2204 if (process_responses(adap, qs, -1) == 0)
2205 q->unhandled_irqs++;
2206 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2207 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2208 spin_unlock(&q->lock);
2209 return IRQ_HANDLED;
2213 * The MSI-X interrupt handler for an SGE response queue for the NAPI case
2214 * (i.e., response queue serviced by NAPI polling).
2216 static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
2218 struct sge_qset *qs = cookie;
2219 struct sge_rspq *q = &qs->rspq;
2221 spin_lock(&q->lock);
2223 if (handle_responses(qs->adap, q) < 0)
2224 q->unhandled_irqs++;
2225 spin_unlock(&q->lock);
2226 return IRQ_HANDLED;
2230 * The non-NAPI MSI interrupt handler. This needs to handle data events from
2231 * SGE response queues as well as error and other async events as they all use
2232 * the same MSI vector. We use one SGE response queue per port in this mode
2233 * and protect all response queues with queue 0's lock.
2235 static irqreturn_t t3_intr_msi(int irq, void *cookie)
2237 int new_packets = 0;
2238 struct adapter *adap = cookie;
2239 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2241 spin_lock(&q->lock);
2243 if (process_responses(adap, &adap->sge.qs[0], -1)) {
2244 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2245 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2246 new_packets = 1;
2249 if (adap->params.nports == 2 &&
2250 process_responses(adap, &adap->sge.qs[1], -1)) {
2251 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2253 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
2254 V_NEWTIMER(q1->next_holdoff) |
2255 V_NEWINDEX(q1->cidx));
2256 new_packets = 1;
2259 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2260 q->unhandled_irqs++;
2262 spin_unlock(&q->lock);
2263 return IRQ_HANDLED;
2266 static int rspq_check_napi(struct sge_qset *qs)
2268 struct sge_rspq *q = &qs->rspq;
2270 if (!napi_is_scheduled(&qs->napi) &&
2271 is_new_response(&q->desc[q->cidx], q)) {
2272 napi_schedule(&qs->napi);
2273 return 1;
2275 return 0;
2279 * The MSI interrupt handler for the NAPI case (i.e., response queues serviced
2280 * by NAPI polling). Handles data events from SGE response queues as well as
2281 * error and other async events as they all use the same MSI vector. We use
2282 * one SGE response queue per port in this mode and protect all response
2283 * queues with queue 0's lock.
2285 static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
2287 int new_packets;
2288 struct adapter *adap = cookie;
2289 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2291 spin_lock(&q->lock);
2293 new_packets = rspq_check_napi(&adap->sge.qs[0]);
2294 if (adap->params.nports == 2)
2295 new_packets += rspq_check_napi(&adap->sge.qs[1]);
2296 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2297 q->unhandled_irqs++;
2299 spin_unlock(&q->lock);
2300 return IRQ_HANDLED;
2304 * A helper function that processes responses and issues GTS.
2306 static inline int process_responses_gts(struct adapter *adap,
2307 struct sge_rspq *rq)
2309 int work;
2311 work = process_responses(adap, rspq_to_qset(rq), -1);
2312 t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
2313 V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
2314 return work;
2318 * The legacy INTx interrupt handler. This needs to handle data events from
2319 * SGE response queues as well as error and other async events as they all use
2320 * the same interrupt pin. We use one SGE response queue per port in this mode
2321 * and protect all response queues with queue 0's lock.
2323 static irqreturn_t t3_intr(int irq, void *cookie)
2325 int work_done, w0, w1;
2326 struct adapter *adap = cookie;
2327 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2328 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2330 spin_lock(&q0->lock);
2332 w0 = is_new_response(&q0->desc[q0->cidx], q0);
2333 w1 = adap->params.nports == 2 &&
2334 is_new_response(&q1->desc[q1->cidx], q1);
2336 if (likely(w0 | w1)) {
2337 t3_write_reg(adap, A_PL_CLI, 0);
2338 t3_read_reg(adap, A_PL_CLI); /* flush */
2340 if (likely(w0))
2341 process_responses_gts(adap, q0);
2343 if (w1)
2344 process_responses_gts(adap, q1);
2346 work_done = w0 | w1;
2347 } else
2348 work_done = t3_slow_intr_handler(adap);
2350 spin_unlock(&q0->lock);
2351 return IRQ_RETVAL(work_done != 0);
2355 * Interrupt handler for legacy INTx interrupts for T3B-based cards.
2356 * Handles data events from SGE response queues as well as error and other
2357 * async events as they all use the same interrupt pin. We use one SGE
2358 * response queue per port in this mode and protect all response queues with
2359 * queue 0's lock.
2361 static irqreturn_t t3b_intr(int irq, void *cookie)
2363 u32 map;
2364 struct adapter *adap = cookie;
2365 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2367 t3_write_reg(adap, A_PL_CLI, 0);
2368 map = t3_read_reg(adap, A_SG_DATA_INTR);
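	/* Bits 0/1 of A_SG_DATA_INTR flag new responses on qset 0/1;
	 * F_ERRINTR flags slow-path events. */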
2370 if (unlikely(!map)) /* shared interrupt, most likely */
2371 return IRQ_NONE;
2373 spin_lock(&q0->lock);
2375 if (unlikely(map & F_ERRINTR))
2376 t3_slow_intr_handler(adap);
2378 if (likely(map & 1))
2379 process_responses_gts(adap, q0);
2381 if (map & 2)
2382 process_responses_gts(adap, &adap->sge.qs[1].rspq);
2384 spin_unlock(&q0->lock);
2385 return IRQ_HANDLED;
2389 * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards.
2390 * Handles data events from SGE response queues as well as error and other
2391 * async events as they all use the same interrupt pin. We use one SGE
2392 * response queue per port in this mode and protect all response queues with
2393 * queue 0's lock.
2395 static irqreturn_t t3b_intr_napi(int irq, void *cookie)
2397 u32 map;
2398 struct adapter *adap = cookie;
2399 struct sge_qset *qs0 = &adap->sge.qs[0];
2400 struct sge_rspq *q0 = &qs0->rspq;
2402 t3_write_reg(adap, A_PL_CLI, 0);
2403 map = t3_read_reg(adap, A_SG_DATA_INTR);
2405 if (unlikely(!map)) /* shared interrupt, most likely */
2406 return IRQ_NONE;
2408 spin_lock(&q0->lock);
2410 if (unlikely(map & F_ERRINTR))
2411 t3_slow_intr_handler(adap);
2413 if (likely(map & 1))
2414 napi_schedule(&qs0->napi);
2416 if (map & 2)
2417 napi_schedule(&adap->sge.qs[1].napi);
2419 spin_unlock(&q0->lock);
2420 return IRQ_HANDLED;
2424 * t3_intr_handler - select the top-level interrupt handler
2425 * @adap: the adapter
2426 * @polling: whether using NAPI to service response queues
2428 * Selects the top-level interrupt handler based on the type of interrupts
2429 * (MSI-X, MSI, or legacy) and whether NAPI will be used to service the
2430 * response queues.
2432 irq_handler_t t3_intr_handler(struct adapter *adap, int polling)
2434 if (adap->flags & USING_MSIX)
2435 return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix;
2436 if (adap->flags & USING_MSI)
2437 return polling ? t3_intr_msi_napi : t3_intr_msi;
2438 if (adap->params.rev > 0)
2439 return polling ? t3b_intr_napi : t3b_intr;
2440 return t3_intr;
2444 * t3_sge_err_intr_handler - SGE async event interrupt handler
2445 * @adapter: the adapter
2447 * Interrupt handler for SGE asynchronous (non-data) events.
2449 void t3_sge_err_intr_handler(struct adapter *adapter)
2451 unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2453 if (status & F_RSPQCREDITOVERFOW)
2454 CH_ALERT(adapter, "SGE response queue credit overflow\n");
2456 if (status & F_RSPQDISABLED) {
2457 v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
2459 CH_ALERT(adapter,
2460 "packet delivered to disabled response queue "
2461 "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
2464 if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR))
2465 CH_ALERT(adapter, "SGE dropped %s priority doorbell\n",
2466 			 status & F_HIPIODRBDROPERR ? "high" : "low");
2468 t3_write_reg(adapter, A_SG_INT_CAUSE, status);
2469 if (status & (F_RSPQCREDITOVERFOW | F_RSPQDISABLED))
2470 t3_fatal_err(adapter);
2474 * sge_timer_cb - perform periodic maintenance of an SGE qset
2475 * @data: the SGE queue set to maintain
2477 * Runs periodically from a timer to perform maintenance of an SGE queue
2478 * set. It performs two tasks:
2480 * a) Cleans up any completed Tx descriptors that may still be pending.
2481 * Normal descriptor cleanup happens when new packets are added to a Tx
2482 * queue so this timer is relatively infrequent and does any cleanup only
2483 * if the Tx queue has not seen any new packets in a while. We make a
2484 * best effort attempt to reclaim descriptors, in that we don't wait
2485 * around if we cannot get a queue's lock (which most likely is because
2486 * someone else is queueing new packets and so will also handle the clean
2487 * up). Since control queues use immediate data exclusively we don't
2488 * bother cleaning them up here.
2490 * b) Replenishes Rx queues that have run out due to memory shortage.
2491 * Normally new Rx buffers are added when existing ones are consumed but
2492 * when out of memory a queue can become empty. We try to add only a few
2493 * buffers here, the queue will be replenished fully as these new buffers
2494 * are used up if memory shortage has subsided.
2496 static void sge_timer_cb(unsigned long data)
2498 spinlock_t *lock;
2499 struct sge_qset *qs = (struct sge_qset *)data;
2500 struct adapter *adap = qs->adap;
2502 if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
2503 reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
2504 spin_unlock(&qs->txq[TXQ_ETH].lock);
2506 if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
2507 reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD]);
2508 spin_unlock(&qs->txq[TXQ_OFLD].lock);
2510 lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
2511 &adap->sge.qs[0].rspq.lock;
2512 if (spin_trylock_irq(lock)) {
2513 if (!napi_is_scheduled(&qs->napi)) {
2514 u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
2516 if (qs->fl[0].credits < qs->fl[0].size)
2517 __refill_fl(adap, &qs->fl[0]);
2518 if (qs->fl[1].credits < qs->fl[1].size)
2519 __refill_fl(adap, &qs->fl[1]);
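			/* A set bit in A_SG_RSPQ_FL_STATUS marks this response
			 * queue as starved; hand back one credit and clear the
			 * bit to get it going again. */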
2521 if (status & (1 << qs->rspq.cntxt_id)) {
2522 qs->rspq.starved++;
2523 if (qs->rspq.credits) {
2524 refill_rspq(adap, &qs->rspq, 1);
2525 qs->rspq.credits--;
2526 qs->rspq.restarted++;
2527 t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
2528 1 << qs->rspq.cntxt_id);
2532 spin_unlock_irq(lock);
2534 mod_timer(&qs->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
2538 * t3_update_qset_coalesce - update coalescing settings for a queue set
2539 * @qs: the SGE queue set
2540 * @p: new queue set parameters
2542 * Update the coalescing settings for an SGE queue set. Nothing is done
2543 * if the queue set is not initialized yet.
2545 void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
2547 qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);/* can't be 0 */
2548 qs->rspq.polling = p->polling;
2549 qs->napi.poll = p->polling ? napi_rx_handler : ofld_poll;
2553 * t3_sge_alloc_qset - initialize an SGE queue set
2554 * @adapter: the adapter
2555 * @id: the queue set id
2556 * @nports: how many Ethernet ports will be using this queue set
2557 * @irq_vec_idx: the IRQ vector index for response queue interrupts
2558 * @p: configuration parameters for this queue set
2559 * @ntxq: number of Tx queues for the queue set
2560  *	@dev: net device associated with this queue set
2562 * Allocate resources and initialize an SGE queue set. A queue set
2563 * comprises a response queue, two Rx free-buffer queues, and up to 3
2564 * Tx queues. The Tx queues are assigned roles in the order Ethernet
2565 * queue, offload queue, and control queue.
2567 int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
2568 int irq_vec_idx, const struct qset_params *p,
2569 int ntxq, struct net_device *dev)
2571 int i, ret = -ENOMEM;
2572 struct sge_qset *q = &adapter->sge.qs[id];
2574 init_qset_cntxt(q, id);
2575 init_timer(&q->tx_reclaim_timer);
2576 q->tx_reclaim_timer.data = (unsigned long)q;
2577 q->tx_reclaim_timer.function = sge_timer_cb;
2579 q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
2580 sizeof(struct rx_desc),
2581 sizeof(struct rx_sw_desc),
2582 &q->fl[0].phys_addr, &q->fl[0].sdesc);
2583 if (!q->fl[0].desc)
2584 goto err;
2586 q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
2587 sizeof(struct rx_desc),
2588 sizeof(struct rx_sw_desc),
2589 &q->fl[1].phys_addr, &q->fl[1].sdesc);
2590 if (!q->fl[1].desc)
2591 goto err;
2593 q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
2594 sizeof(struct rsp_desc), 0,
2595 &q->rspq.phys_addr, NULL);
2596 if (!q->rspq.desc)
2597 goto err;
2599 for (i = 0; i < ntxq; ++i) {
2601 * The control queue always uses immediate data so does not
2602 * need to keep track of any sk_buffs.
2604 size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
2606 q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
2607 sizeof(struct tx_desc), sz,
2608 &q->txq[i].phys_addr,
2609 &q->txq[i].sdesc);
2610 if (!q->txq[i].desc)
2611 goto err;
2613 q->txq[i].gen = 1;
2614 q->txq[i].size = p->txq_size[i];
2615 spin_lock_init(&q->txq[i].lock);
2616 skb_queue_head_init(&q->txq[i].sendq);
2619 tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq,
2620 (unsigned long)q);
2621 tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq,
2622 (unsigned long)q);
2624 q->fl[0].gen = q->fl[1].gen = 1;
2625 q->fl[0].size = p->fl_size;
2626 q->fl[1].size = p->jumbo_size;
2628 q->rspq.gen = 1;
2629 q->rspq.size = p->rspq_size;
2630 spin_lock_init(&q->rspq.lock);
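	/*
	 * Stop the Ethernet Tx queue once fewer descriptors remain than one
	 * maximally fragmented packet per port could consume.
	 */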
2632 q->txq[TXQ_ETH].stop_thres = nports *
2633 flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
2635 #if FL0_PG_CHUNK_SIZE > 0
2636 q->fl[0].buf_size = FL0_PG_CHUNK_SIZE;
2637 #else
2638 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data);
2639 #endif
2640 q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0;
2641 q->fl[1].buf_size = is_offload(adapter) ?
2642 (16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
2643 MAX_FRAME_SIZE + 2 + sizeof(struct cpl_rx_pkt);
2645 spin_lock(&adapter->sge.reg_lock);
2647 /* FL threshold comparison uses < */
2648 ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
2649 q->rspq.phys_addr, q->rspq.size,
2650 q->fl[0].buf_size, 1, 0);
2651 if (ret)
2652 goto err_unlock;
2654 for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
2655 ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
2656 q->fl[i].phys_addr, q->fl[i].size,
2657 					  q->fl[i].buf_size, p->cong_thres, 1,
2658 					  0);
2659 if (ret)
2660 goto err_unlock;
2663 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
2664 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
2665 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
2666 1, 0);
2667 if (ret)
2668 goto err_unlock;
2670 if (ntxq > 1) {
2671 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
2672 USE_GTS, SGE_CNTXT_OFLD, id,
2673 q->txq[TXQ_OFLD].phys_addr,
2674 q->txq[TXQ_OFLD].size, 0, 1, 0);
2675 if (ret)
2676 goto err_unlock;
2679 if (ntxq > 2) {
2680 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
2681 SGE_CNTXT_CTRL, id,
2682 q->txq[TXQ_CTRL].phys_addr,
2683 q->txq[TXQ_CTRL].size,
2684 q->txq[TXQ_CTRL].token, 1, 0);
2685 if (ret)
2686 goto err_unlock;
2689 spin_unlock(&adapter->sge.reg_lock);
2691 q->adap = adapter;
2692 q->netdev = dev;
2693 t3_update_qset_coalesce(q, p);
2695 refill_fl(adapter, &q->fl[0], q->fl[0].size, GFP_KERNEL);
2696 refill_fl(adapter, &q->fl[1], q->fl[1].size, GFP_KERNEL);
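	/* Prime the response queue with all but one credit so it can never be
	 * completely filled. */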
2697 refill_rspq(adapter, &q->rspq, q->rspq.size - 1);
2699 t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
2700 V_NEWTIMER(q->rspq.holdoff_tmr));
2702 mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
2703 return 0;
2705 err_unlock:
2706 spin_unlock(&adapter->sge.reg_lock);
2707 err:
2708 t3_free_qset(adapter, q);
2709 return ret;
2713 * t3_free_sge_resources - free SGE resources
2714 * @adap: the adapter
2716 * Frees resources used by the SGE queue sets.
2718 void t3_free_sge_resources(struct adapter *adap)
2720 int i;
2722 for (i = 0; i < SGE_QSETS; ++i)
2723 t3_free_qset(adap, &adap->sge.qs[i]);
2727 * t3_sge_start - enable SGE
2728 * @adap: the adapter
2730 * Enables the SGE for DMAs. This is the last step in starting packet
2731 * transfers.
2733 void t3_sge_start(struct adapter *adap)
2735 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
2739 * t3_sge_stop - disable SGE operation
2740 * @adap: the adapter
2742  *	Disables the DMA engine.  This can be called in emergencies (e.g.,
2743 * from error interrupts) or from normal process context. In the latter
2744 * case it also disables any pending queue restart tasklets. Note that
2745 * if it is called in interrupt context it cannot disable the restart
2746 * tasklets as it cannot wait, however the tasklets will have no effect
2747 * since the doorbells are disabled and the driver will call this again
2748 * later from process context, at which time the tasklets will be stopped
2749 * if they are still running.
2751 void t3_sge_stop(struct adapter *adap)
2753 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
2754 if (!in_interrupt()) {
2755 int i;
2757 for (i = 0; i < SGE_QSETS; ++i) {
2758 struct sge_qset *qs = &adap->sge.qs[i];
2760 tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
2761 tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
2767 * t3_sge_init - initialize SGE
2768 * @adap: the adapter
2769 * @p: the SGE parameters
2771 * Performs SGE initialization needed every time after a chip reset.
2772 * We do not initialize any of the queue sets here, instead the driver
2773 * top-level must request those individually. We also do not enable DMA
2774 * here, that should be done after the queues have been set up.
2776 void t3_sge_init(struct adapter *adap, struct sge_params *p)
2778 unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);
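	/* ups encodes the size of PCI BAR 2 in 4KB pages; it feeds
	 * V_USERSPACESIZE below. */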
2780 ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
2781 F_CQCRDTCTRL |
2782 V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
2783 V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
2784 #if SGE_NUM_GENBITS == 1
2785 ctrl |= F_EGRGENCTRL;
2786 #endif
2787 if (adap->params.rev > 0) {
2788 if (!(adap->flags & (USING_MSIX | USING_MSI)))
2789 ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
2790 ctrl |= F_CQCRDTCTRL | F_AVOIDCQOVFL;
2792 t3_write_reg(adap, A_SG_CONTROL, ctrl);
2793 t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
2794 V_LORCQDRBTHRSH(512));
2795 t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
2796 t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
2797 V_TIMEOUT(200 * core_ticks_per_usec(adap)));
2798 t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH, 1000);
2799 t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
2800 t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
2801 t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
2802 t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
2803 t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
2807 * t3_sge_prep - one-time SGE initialization
2808 * @adap: the associated adapter
2809 * @p: SGE parameters
2811 * Performs one-time initialization of SGE SW state. Includes determining
2812 * defaults for the assorted SGE parameters, which admins can change until
2813 * they are used to initialize the SGE.
2815 void __devinit t3_sge_prep(struct adapter *adap, struct sge_params *p)
2817 int i;
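	/* Largest Rx payload: a 16KB buffer less the CPL_RX_DATA header and
	 * skb_shared_info overhead. */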
2819 p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) -
2820 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2822 for (i = 0; i < SGE_QSETS; ++i) {
2823 struct qset_params *q = p->qset + i;
2825 q->polling = adap->params.rev > 0;
2826 q->coalesce_usecs = 5;
2827 q->rspq_size = 1024;
2828 q->fl_size = 1024;
2829 q->jumbo_size = 512;
2830 q->txq_size[TXQ_ETH] = 1024;
2831 q->txq_size[TXQ_OFLD] = 1024;
2832 q->txq_size[TXQ_CTRL] = 256;
2833 q->cong_thres = 0;
2836 spin_lock_init(&adap->sge.reg_lock);
2840 * t3_get_desc - dump an SGE descriptor for debugging purposes
2841 * @qs: the queue set
2842 * @qnum: identifies the specific queue (0..2: Tx, 3:response, 4..5: Rx)
2843 * @idx: the descriptor index in the queue
2844 * @data: where to dump the descriptor contents
2846 * Dumps the contents of a HW descriptor of an SGE queue. Returns the
2847 * size of the descriptor.
2849 int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
2850 unsigned char *data)
2852 if (qnum >= 6)
2853 return -EINVAL;
2855 if (qnum < 3) {
2856 if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
2857 return -EINVAL;
2858 memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));
2859 return sizeof(struct tx_desc);
2862 if (qnum == 3) {
2863 if (!qs->rspq.desc || idx >= qs->rspq.size)
2864 return -EINVAL;
2865 memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc));
2866 return sizeof(struct rsp_desc);
2869 qnum -= 4;
2870 if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size)
2871 return -EINVAL;
2872 memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc));
2873 return sizeof(struct rx_desc);