cxgb3 - bind qsets on multiport adapter
drivers/net/cxgb3/sge.c
1 /*
2 * This file is part of the Chelsio T3 Ethernet driver.
4 * Copyright (C) 2005-2006 Chelsio Communications. All rights reserved.
6 * This program is distributed in the hope that it will be useful, but WITHOUT
7 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
9 * release for licensing terms and conditions.
12 #include <linux/skbuff.h>
13 #include <linux/netdevice.h>
14 #include <linux/etherdevice.h>
15 #include <linux/if_vlan.h>
16 #include <linux/ip.h>
17 #include <linux/tcp.h>
18 #include <linux/dma-mapping.h>
19 #include "common.h"
20 #include "regs.h"
21 #include "sge_defs.h"
22 #include "t3_cpl.h"
23 #include "firmware_exports.h"
25 #define USE_GTS 0
27 #define SGE_RX_SM_BUF_SIZE 1536
28 #define SGE_RX_COPY_THRES 256
30 # define SGE_RX_DROP_THRES 16
33 * Period of the Tx buffer reclaim timer. This timer does not need to run
34 * frequently as Tx buffers are usually reclaimed by new Tx packets.
36 #define TX_RECLAIM_PERIOD (HZ / 4)
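/* HZ/4 jiffies, i.e. the reclaim timer fires about every 250 ms. */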
38 /* WR size in bytes */
39 #define WR_LEN (WR_FLITS * 8)
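/* A flit is a 64-bit (8-byte) word, hence the factor of 8 above. */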
42 * Types of Tx queues in each queue set. Order here matters, do not change.
44 enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };
46 /* Values for sge_txq.flags */
47 enum {
48 TXQ_RUNNING = 1 << 0, /* fetch engine is running */
49 TXQ_LAST_PKT_DB = 1 << 1, /* last packet rang the doorbell */
52 struct tx_desc {
53 u64 flit[TX_DESC_FLITS];
56 struct rx_desc {
57 __be32 addr_lo;
58 __be32 len_gen;
59 __be32 gen2;
60 __be32 addr_hi;
63 struct tx_sw_desc { /* SW state per Tx descriptor */
64 struct sk_buff *skb;
67 struct rx_sw_desc { /* SW state per Rx descriptor */
68 struct sk_buff *skb;
69 DECLARE_PCI_UNMAP_ADDR(dma_addr);
72 struct rsp_desc { /* response queue descriptor */
73 struct rss_header rss_hdr;
74 __be32 flags;
75 __be32 len_cq;
76 u8 imm_data[47];
77 u8 intr_gen;
80 struct unmap_info { /* packet unmapping info, overlays skb->cb */
81 int sflit; /* start flit of first SGL entry in Tx descriptor */
82 u16 fragidx; /* first page fragment in current Tx descriptor */
83 u16 addr_idx; /* buffer index of first SGL entry in descriptor */
84 u32 len; /* mapped length of skb main body */
88 * Maps a number of flits to the number of Tx descriptors that can hold them.
89 * The formula is
91 * desc = 1 + (flits - 2) / (WR_FLITS - 1).
93 * HW allows up to 4 descriptors to be combined into a WR.
95 static u8 flit_desc_map[] = {
97 #if SGE_NUM_GENBITS == 1
98 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
99 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
100 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
101 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
102 #elif SGE_NUM_GENBITS == 2
103 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
104 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
105 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
106 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
107 #else
108 # error "SGE_NUM_GENBITS must be 1 or 2"
109 #endif
112 static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
114 return container_of(q, struct sge_qset, fl[qidx]);
117 static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
119 return container_of(q, struct sge_qset, rspq);
122 static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
124 return container_of(q, struct sge_qset, txq[qidx]);
128 * refill_rspq - replenish an SGE response queue
129 * @adapter: the adapter
130 * @q: the response queue to replenish
131 * @credits: how many new responses to make available
133 * Replenishes a response queue by making the supplied number of responses
134 * available to HW.
136 static inline void refill_rspq(struct adapter *adapter,
137 const struct sge_rspq *q, unsigned int credits)
139 t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
140 V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
144 * need_skb_unmap - does the platform need unmapping of sk_buffs?
146 * Returns true if the platform needs sk_buff unmapping. The compiler
147 * optimizes away unnecessary code if this returns true.
149 static inline int need_skb_unmap(void)
152 * This structure is used to tell if the platform needs buffer
153 * unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything.
155 struct dummy {
156 DECLARE_PCI_UNMAP_ADDR(addr);
159 return sizeof(struct dummy) != 0;
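/*
 * Note: on platforms where DECLARE_PCI_UNMAP_ADDR() expands to nothing,
 * struct dummy above is empty, its sizeof is 0 (a GNU C extension), and
 * the unmap paths guarded by need_skb_unmap() are compiled out entirely.
 */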
163 * unmap_skb - unmap a packet main body and its page fragments
164 * @skb: the packet
165 * @q: the Tx queue containing Tx descriptors for the packet
166 * @cidx: index of Tx descriptor
167 * @pdev: the PCI device
169 * Unmap the main body of an sk_buff and its page fragments, if any.
170 * Because of the fairly complicated structure of our SGLs and the desire
171 * to conserve space for metadata, we keep the information necessary to
172 * unmap an sk_buff partly in the sk_buff itself (in its cb), and partly
173 * in the Tx descriptors (the physical addresses of the various data
174 * buffers). The send functions initialize the state in skb->cb so we
175 * can unmap the buffers held in the first Tx descriptor here, and we
176 * have enough information at this point to update the state for the next
177 * Tx descriptor.
179 static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
180 unsigned int cidx, struct pci_dev *pdev)
182 const struct sg_ent *sgp;
183 struct unmap_info *ui = (struct unmap_info *)skb->cb;
184 int nfrags, frag_idx, curflit, j = ui->addr_idx;
186 sgp = (struct sg_ent *)&q->desc[cidx].flit[ui->sflit];
188 if (ui->len) {
189 pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]), ui->len,
190 PCI_DMA_TODEVICE);
191 ui->len = 0; /* so we know for next descriptor for this skb */
192 j = 1;
195 frag_idx = ui->fragidx;
196 curflit = ui->sflit + 1 + j;
197 nfrags = skb_shinfo(skb)->nr_frags;
199 while (frag_idx < nfrags && curflit < WR_FLITS) {
200 pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
201 skb_shinfo(skb)->frags[frag_idx].size,
202 PCI_DMA_TODEVICE);
203 j ^= 1;
204 if (j == 0) {
205 sgp++;
206 curflit++;
208 curflit++;
209 frag_idx++;
212 if (frag_idx < nfrags) { /* SGL continues into next Tx descriptor */
213 ui->fragidx = frag_idx;
214 ui->addr_idx = j;
215 ui->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */
220 * free_tx_desc - reclaims Tx descriptors and their buffers
221 * @adapter: the adapter
222 * @q: the Tx queue to reclaim descriptors from
223 * @n: the number of descriptors to reclaim
225 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
226 * Tx buffers. Called with the Tx queue lock held.
228 static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
229 unsigned int n)
231 struct tx_sw_desc *d;
232 struct pci_dev *pdev = adapter->pdev;
233 unsigned int cidx = q->cidx;
235 d = &q->sdesc[cidx];
236 while (n--) {
237 if (d->skb) { /* an SGL is present */
238 if (need_skb_unmap())
239 unmap_skb(d->skb, q, cidx, pdev);
240 if (d->skb->priority == cidx)
241 kfree_skb(d->skb);
243 ++d;
244 if (++cidx == q->size) {
245 cidx = 0;
246 d = q->sdesc;
249 q->cidx = cidx;
253 * reclaim_completed_tx - reclaims completed Tx descriptors
254 * @adapter: the adapter
255 * @q: the Tx queue to reclaim completed descriptors from
257 * Reclaims Tx descriptors that the SGE has indicated it has processed,
258 * and frees the associated buffers if possible. Called with the Tx
259 * queue's lock held.
261 static inline void reclaim_completed_tx(struct adapter *adapter,
262 struct sge_txq *q)
264 unsigned int reclaim = q->processed - q->cleaned;
266 if (reclaim) {
267 free_tx_desc(adapter, q, reclaim);
268 q->cleaned += reclaim;
269 q->in_use -= reclaim;
274 * should_restart_tx - are there enough resources to restart a Tx queue?
275 * @q: the Tx queue
277 * Checks if there are enough descriptors to restart a suspended Tx queue.
279 static inline int should_restart_tx(const struct sge_txq *q)
281 unsigned int r = q->processed - q->cleaned;
283 return q->in_use - r < (q->size >> 1);
287 * free_rx_bufs - free the Rx buffers on an SGE free list
288 * @pdev: the PCI device associated with the adapter
289 * @rxq: the SGE free list to clean up
291 * Release the buffers on an SGE free-buffer Rx queue. HW fetching from
292 * this queue should be stopped before calling this function.
294 static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
296 unsigned int cidx = q->cidx;
298 while (q->credits--) {
299 struct rx_sw_desc *d = &q->sdesc[cidx];
301 pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
302 q->buf_size, PCI_DMA_FROMDEVICE);
303 kfree_skb(d->skb);
304 d->skb = NULL;
305 if (++cidx == q->size)
306 cidx = 0;
311 * add_one_rx_buf - add a packet buffer to a free-buffer list
312 * @skb: the buffer to add
313 * @len: the buffer length
314 * @d: the HW Rx descriptor to write
315 * @sd: the SW Rx descriptor to write
316 * @gen: the generation bit value
317 * @pdev: the PCI device associated with the adapter
319 * Add a buffer of the given length to the supplied HW and SW Rx
320 * descriptors.
322 static inline void add_one_rx_buf(struct sk_buff *skb, unsigned int len,
323 struct rx_desc *d, struct rx_sw_desc *sd,
324 unsigned int gen, struct pci_dev *pdev)
326 dma_addr_t mapping;
328 sd->skb = skb;
329 mapping = pci_map_single(pdev, skb->data, len, PCI_DMA_FROMDEVICE);
330 pci_unmap_addr_set(sd, dma_addr, mapping);
332 d->addr_lo = cpu_to_be32(mapping);
333 d->addr_hi = cpu_to_be32((u64) mapping >> 32);
334 wmb();
335 d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
336 d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
340 * refill_fl - refill an SGE free-buffer list
341 * @adapter: the adapter
342 * @q: the free-list to refill
343 * @n: the number of new buffers to allocate
344 * @gfp: the gfp flags for allocating new buffers
346 * (Re)populate an SGE free-buffer list with up to @n new packet buffers,
347 * allocated with the supplied gfp flags. The caller must assure that
348 * @n does not exceed the queue's capacity.
350 static void refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
352 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
353 struct rx_desc *d = &q->desc[q->pidx];
355 while (n--) {
356 struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
358 if (!skb)
359 break;
361 add_one_rx_buf(skb, q->buf_size, d, sd, q->gen, adap->pdev);
362 d++;
363 sd++;
364 if (++q->pidx == q->size) {
365 q->pidx = 0;
366 q->gen ^= 1;
367 sd = q->sdesc;
368 d = q->desc;
370 q->credits++;
373 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
376 static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
378 refill_fl(adap, fl, min(16U, fl->size - fl->credits), GFP_ATOMIC);
382 * recycle_rx_buf - recycle a receive buffer
383 * @adapter: the adapter
384 * @q: the SGE free list
385 * @idx: index of buffer to recycle
387 * Recycles the specified buffer on the given free list by adding it at
388 * the next available slot on the list.
390 static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
391 unsigned int idx)
393 struct rx_desc *from = &q->desc[idx];
394 struct rx_desc *to = &q->desc[q->pidx];
396 q->sdesc[q->pidx] = q->sdesc[idx];
397 to->addr_lo = from->addr_lo; /* already big endian */
398 to->addr_hi = from->addr_hi; /* likewise */
399 wmb();
400 to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
401 to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));
402 q->credits++;
404 if (++q->pidx == q->size) {
405 q->pidx = 0;
406 q->gen ^= 1;
408 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
412 * alloc_ring - allocate resources for an SGE descriptor ring
413 * @pdev: the PCI device
414 * @nelem: the number of descriptors
415 * @elem_size: the size of each descriptor
416 * @sw_size: the size of the SW state associated with each ring element
417 * @phys: the physical address of the allocated ring
418 * @metadata: address of the array holding the SW state for the ring
420 * Allocates resources for an SGE descriptor ring, such as Tx queues,
421 * free buffer lists, or response queues. Each SGE ring requires
422 * space for its HW descriptors plus, optionally, space for the SW state
423 * associated with each HW entry (the metadata). The function returns
424 * three values: the virtual address for the HW ring (the return value
425 * of the function), the physical address of the HW ring, and the address
426 * of the SW ring.
428 static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
429 size_t sw_size, dma_addr_t *phys, void *metadata)
431 size_t len = nelem * elem_size;
432 void *s = NULL;
433 void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);
435 if (!p)
436 return NULL;
437 if (sw_size) {
438 s = kcalloc(nelem, sw_size, GFP_KERNEL);
440 if (!s) {
441 dma_free_coherent(&pdev->dev, len, p, *phys);
442 return NULL;
445 if (metadata)
446 *(void **)metadata = s;
447 memset(p, 0, len);
448 return p;
452 * free_qset - free the resources of an SGE queue set
453 * @adapter: the adapter owning the queue set
454 * @q: the queue set
456 * Release the HW and SW resources associated with an SGE queue set, such
457 * as HW contexts, packet buffers, and descriptor rings. Traffic to the
458 * queue set must be quiesced prior to calling this.
460 void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
462 int i;
463 struct pci_dev *pdev = adapter->pdev;
465 if (q->tx_reclaim_timer.function)
466 del_timer_sync(&q->tx_reclaim_timer);
468 for (i = 0; i < SGE_RXQ_PER_SET; ++i)
469 if (q->fl[i].desc) {
470 spin_lock(&adapter->sge.reg_lock);
471 t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
472 spin_unlock(&adapter->sge.reg_lock);
473 free_rx_bufs(pdev, &q->fl[i]);
474 kfree(q->fl[i].sdesc);
475 dma_free_coherent(&pdev->dev,
476 q->fl[i].size *
477 sizeof(struct rx_desc), q->fl[i].desc,
478 q->fl[i].phys_addr);
481 for (i = 0; i < SGE_TXQ_PER_SET; ++i)
482 if (q->txq[i].desc) {
483 spin_lock(&adapter->sge.reg_lock);
484 t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
485 spin_unlock(&adapter->sge.reg_lock);
486 if (q->txq[i].sdesc) {
487 free_tx_desc(adapter, &q->txq[i],
488 q->txq[i].in_use);
489 kfree(q->txq[i].sdesc);
491 dma_free_coherent(&pdev->dev,
492 q->txq[i].size *
493 sizeof(struct tx_desc),
494 q->txq[i].desc, q->txq[i].phys_addr);
495 __skb_queue_purge(&q->txq[i].sendq);
498 if (q->rspq.desc) {
499 spin_lock(&adapter->sge.reg_lock);
500 t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
501 spin_unlock(&adapter->sge.reg_lock);
502 dma_free_coherent(&pdev->dev,
503 q->rspq.size * sizeof(struct rsp_desc),
504 q->rspq.desc, q->rspq.phys_addr);
507 if (q->netdev)
508 q->netdev->atalk_ptr = NULL;
510 memset(q, 0, sizeof(*q));
514 * init_qset_cntxt - initialize an SGE queue set context info
515 * @qs: the queue set
516 * @id: the queue set id
518 * Initializes the TIDs and context ids for the queues of a queue set.
520 static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
522 qs->rspq.cntxt_id = id;
523 qs->fl[0].cntxt_id = 2 * id;
524 qs->fl[1].cntxt_id = 2 * id + 1;
525 qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
526 qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
527 qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
528 qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
529 qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
533 * sgl_len - calculates the size of an SGL of the given capacity
534 * @n: the number of SGL entries
536 * Calculates the number of flits needed for a scatter/gather list that
537 * can hold the given number of entries.
539 static inline unsigned int sgl_len(unsigned int n)
541 /* alternatively: 3 * (n / 2) + 2 * (n & 1) */
542 return (3 * n) / 2 + (n & 1);
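/*
 * Worked example: each struct sg_ent packs two 8-byte addresses and two
 * 4-byte lengths, so a full pair of SGL entries occupies 3 flits while a
 * lone trailing entry needs only 2, giving
 *
 *	sgl_len(1) == 2, sgl_len(2) == 3, sgl_len(3) == 5, sgl_len(4) == 6
 */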
546 * flits_to_desc - returns the num of Tx descriptors for the given flits
547 * @n: the number of flits
549 * Calculates the number of Tx descriptors needed for the supplied number
550 * of flits.
552 static inline unsigned int flits_to_desc(unsigned int n)
554 BUG_ON(n >= ARRAY_SIZE(flit_desc_map));
555 return flit_desc_map[n];
559 * get_packet - return the next ingress packet buffer from a free list
560 * @adap: the adapter that received the packet
561 * @fl: the SGE free list holding the packet
562 * @len: the packet length including any SGE padding
563 * @drop_thres: # of remaining buffers before we start dropping packets
565 * Get the next packet from a free list and complete setup of the
566 * sk_buff. If the packet is small we make a copy and recycle the
567 * original buffer, otherwise we use the original buffer itself. If a
568 * positive drop threshold is supplied packets are dropped and their
569 * buffers recycled if (a) the number of remaining buffers is under the
570 * threshold and the packet is too big to copy, or (b) the packet should
571 * be copied but there is no memory for the copy.
573 static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
574 unsigned int len, unsigned int drop_thres)
576 struct sk_buff *skb = NULL;
577 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
579 prefetch(sd->skb->data);
581 if (len <= SGE_RX_COPY_THRES) {
582 skb = alloc_skb(len, GFP_ATOMIC);
583 if (likely(skb != NULL)) {
584 __skb_put(skb, len);
585 pci_dma_sync_single_for_cpu(adap->pdev,
586 pci_unmap_addr(sd,
587 dma_addr),
588 len, PCI_DMA_FROMDEVICE);
589 memcpy(skb->data, sd->skb->data, len);
590 pci_dma_sync_single_for_device(adap->pdev,
591 pci_unmap_addr(sd,
592 dma_addr),
593 len, PCI_DMA_FROMDEVICE);
594 } else if (!drop_thres)
595 goto use_orig_buf;
596 recycle:
597 recycle_rx_buf(adap, fl, fl->cidx);
598 return skb;
601 if (unlikely(fl->credits < drop_thres))
602 goto recycle;
604 use_orig_buf:
605 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
606 fl->buf_size, PCI_DMA_FROMDEVICE);
607 skb = sd->skb;
608 skb_put(skb, len);
609 __refill_fl(adap, fl);
610 return skb;
614 * get_imm_packet - return the next ingress packet buffer from a response
615 * @resp: the response descriptor containing the packet data
617 * Return a packet containing the immediate data of the given response.
619 static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
621 struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);
623 if (skb) {
624 __skb_put(skb, IMMED_PKT_SIZE);
625 memcpy(skb->data, resp->imm_data, IMMED_PKT_SIZE);
627 return skb;
631 * calc_tx_descs - calculate the number of Tx descriptors for a packet
632 * @skb: the packet
634 * Returns the number of Tx descriptors needed for the given Ethernet
635 * packet. Ethernet packets require addition of WR and CPL headers.
637 static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
639 unsigned int flits;
641 if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
642 return 1;
644 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
645 if (skb_shinfo(skb)->gso_size)
646 flits++;
647 return flits_to_desc(flits);
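/*
 * The arithmetic above: sgl_len(nr_frags + 1) covers the page fragments
 * plus one SGL entry for the linear skb data, the +2 covers the 2-flit
 * CPL_TX_PKT header (which embeds the WR header), and the extra flit for
 * GSO matches the larger CPL_TX_PKT_LSO header written by
 * write_tx_pkt_wr() (flits = 3 instead of 2).
 */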
651 * make_sgl - populate a scatter/gather list for a packet
652 * @skb: the packet
653 * @sgp: the SGL to populate
654 * @start: start address of skb main body data to include in the SGL
655 * @len: length of skb main body data to include in the SGL
656 * @pdev: the PCI device
658 * Generates a scatter/gather list for the buffers that make up a packet
659 * and returns the SGL size in 8-byte words. The caller must size the SGL
660 * appropriately.
662 static inline unsigned int make_sgl(const struct sk_buff *skb,
663 struct sg_ent *sgp, unsigned char *start,
664 unsigned int len, struct pci_dev *pdev)
666 dma_addr_t mapping;
667 unsigned int i, j = 0, nfrags;
669 if (len) {
670 mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
671 sgp->len[0] = cpu_to_be32(len);
672 sgp->addr[0] = cpu_to_be64(mapping);
673 j = 1;
676 nfrags = skb_shinfo(skb)->nr_frags;
677 for (i = 0; i < nfrags; i++) {
678 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
680 mapping = pci_map_page(pdev, frag->page, frag->page_offset,
681 frag->size, PCI_DMA_TODEVICE);
682 sgp->len[j] = cpu_to_be32(frag->size);
683 sgp->addr[j] = cpu_to_be64(mapping);
684 j ^= 1;
685 if (j == 0)
686 ++sgp;
688 if (j)
689 sgp->len[j] = 0;
690 return ((nfrags + (len != 0)) * 3) / 2 + j;
694 * check_ring_tx_db - check and potentially ring a Tx queue's doorbell
695 * @adap: the adapter
696 * @q: the Tx queue
698 * Ring the doorbell if a Tx queue is asleep. There is a natural race
699 * where the HW may go to sleep just after we checked; in that case
700 * the interrupt handler will detect the outstanding TX packet
701 * and ring the doorbell for us.
703 * When GTS is disabled we unconditionally ring the doorbell.
705 static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
707 #if USE_GTS
708 clear_bit(TXQ_LAST_PKT_DB, &q->flags);
709 if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
710 set_bit(TXQ_LAST_PKT_DB, &q->flags);
711 t3_write_reg(adap, A_SG_KDOORBELL,
712 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
714 #else
715 wmb(); /* write descriptors before telling HW */
716 t3_write_reg(adap, A_SG_KDOORBELL,
717 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
718 #endif
721 static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
723 #if SGE_NUM_GENBITS == 2
724 d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
725 #endif
729 * write_wr_hdr_sgl - write a WR header and, optionally, SGL
730 * @ndesc: number of Tx descriptors spanned by the SGL
731 * @skb: the packet corresponding to the WR
732 * @d: first Tx descriptor to be written
733 * @pidx: index of above descriptors
734 * @q: the SGE Tx queue
735 * @sgl: the SGL
736 * @flits: number of flits to the start of the SGL in the first descriptor
737 * @sgl_flits: the SGL size in flits
738 * @gen: the Tx descriptor generation
739 * @wr_hi: top 32 bits of WR header based on WR type (big endian)
740 * @wr_lo: low 32 bits of WR header based on WR type (big endian)
742 * Write a work request header and an associated SGL. If the SGL is
743 * small enough to fit into one Tx descriptor it has already been written
744 * and we just need to write the WR header. Otherwise we distribute the
745 * SGL across the number of descriptors it spans.
747 static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
748 struct tx_desc *d, unsigned int pidx,
749 const struct sge_txq *q,
750 const struct sg_ent *sgl,
751 unsigned int flits, unsigned int sgl_flits,
752 unsigned int gen, unsigned int wr_hi,
753 unsigned int wr_lo)
755 struct work_request_hdr *wrp = (struct work_request_hdr *)d;
756 struct tx_sw_desc *sd = &q->sdesc[pidx];
758 sd->skb = skb;
759 if (need_skb_unmap()) {
760 struct unmap_info *ui = (struct unmap_info *)skb->cb;
762 ui->fragidx = 0;
763 ui->addr_idx = 0;
764 ui->sflit = flits;
767 if (likely(ndesc == 1)) {
768 skb->priority = pidx;
769 wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
770 V_WR_SGLSFLT(flits)) | wr_hi;
771 wmb();
772 wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
773 V_WR_GEN(gen)) | wr_lo;
774 wr_gen2(d, gen);
775 } else {
776 unsigned int ogen = gen;
777 const u64 *fp = (const u64 *)sgl;
778 struct work_request_hdr *wp = wrp;
780 wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
781 V_WR_SGLSFLT(flits)) | wr_hi;
783 while (sgl_flits) {
784 unsigned int avail = WR_FLITS - flits;
786 if (avail > sgl_flits)
787 avail = sgl_flits;
788 memcpy(&d->flit[flits], fp, avail * sizeof(*fp));
789 sgl_flits -= avail;
790 ndesc--;
791 if (!sgl_flits)
792 break;
794 fp += avail;
795 d++;
796 sd++;
797 if (++pidx == q->size) {
798 pidx = 0;
799 gen ^= 1;
800 d = q->desc;
801 sd = q->sdesc;
804 sd->skb = skb;
805 wrp = (struct work_request_hdr *)d;
806 wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
807 V_WR_SGLSFLT(1)) | wr_hi;
808 wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
809 sgl_flits + 1)) |
810 V_WR_GEN(gen)) | wr_lo;
811 wr_gen2(d, gen);
812 flits = 1;
814 skb->priority = pidx;
815 wrp->wr_hi |= htonl(F_WR_EOP);
816 wmb();
817 wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
818 wr_gen2((struct tx_desc *)wp, ogen);
819 WARN_ON(ndesc != 0);
824 * write_tx_pkt_wr - write a TX_PKT work request
825 * @adap: the adapter
826 * @skb: the packet to send
827 * @pi: the egress interface
828 * @pidx: index of the first Tx descriptor to write
829 * @gen: the generation value to use
830 * @q: the Tx queue
831 * @ndesc: number of descriptors the packet will occupy
832 * @compl: the value of the COMPL bit to use
834 * Generate a TX_PKT work request to send the supplied packet.
836 static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
837 const struct port_info *pi,
838 unsigned int pidx, unsigned int gen,
839 struct sge_txq *q, unsigned int ndesc,
840 unsigned int compl)
842 unsigned int flits, sgl_flits, cntrl, tso_info;
843 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
844 struct tx_desc *d = &q->desc[pidx];
845 struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;
847 cpl->len = htonl(skb->len | 0x80000000);
848 cntrl = V_TXPKT_INTF(pi->port_id);
850 if (vlan_tx_tag_present(skb) && pi->vlan_grp)
851 cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb));
853 tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
854 if (tso_info) {
855 int eth_type;
856 struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;
858 d->flit[2] = 0;
859 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
860 hdr->cntrl = htonl(cntrl);
861 eth_type = skb->nh.raw - skb->data == ETH_HLEN ?
862 CPL_ETH_II : CPL_ETH_II_VLAN;
863 tso_info |= V_LSO_ETH_TYPE(eth_type) |
864 V_LSO_IPHDR_WORDS(skb->nh.iph->ihl) |
865 V_LSO_TCPHDR_WORDS(skb->h.th->doff);
866 hdr->lso_info = htonl(tso_info);
867 flits = 3;
868 } else {
869 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
870 cntrl |= F_TXPKT_IPCSUM_DIS; /* SW calculates IP csum */
871 cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
872 cpl->cntrl = htonl(cntrl);
874 if (skb->len <= WR_LEN - sizeof(*cpl)) {
875 q->sdesc[pidx].skb = NULL;
876 if (!skb->data_len)
877 memcpy(&d->flit[2], skb->data, skb->len);
878 else
879 skb_copy_bits(skb, 0, &d->flit[2], skb->len);
881 flits = (skb->len + 7) / 8 + 2;
882 cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
883 V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
884 | F_WR_SOP | F_WR_EOP | compl);
885 wmb();
886 cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
887 V_WR_TID(q->token));
888 wr_gen2(d, gen);
889 kfree_skb(skb);
890 return;
893 flits = 2;
896 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
897 sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);
898 if (need_skb_unmap())
899 ((struct unmap_info *)skb->cb)->len = skb_headlen(skb);
901 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
902 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
903 htonl(V_WR_TID(q->token)));
907 * eth_xmit - add a packet to the Ethernet Tx queue
908 * @skb: the packet
909 * @dev: the egress net device
911 * Add a packet to an SGE Tx queue. Runs with softirqs disabled.
913 int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
915 unsigned int ndesc, pidx, credits, gen, compl;
916 const struct port_info *pi = netdev_priv(dev);
917 struct adapter *adap = dev->priv;
918 struct sge_qset *qs = dev2qset(dev);
919 struct sge_txq *q = &qs->txq[TXQ_ETH];
922 * The chip min packet length is 9 octets but play safe and reject
923 * anything shorter than an Ethernet header.
925 if (unlikely(skb->len < ETH_HLEN)) {
926 dev_kfree_skb(skb);
927 return NETDEV_TX_OK;
930 spin_lock(&q->lock);
931 reclaim_completed_tx(adap, q);
933 credits = q->size - q->in_use;
934 ndesc = calc_tx_descs(skb);
936 if (unlikely(credits < ndesc)) {
937 if (!netif_queue_stopped(dev)) {
938 netif_stop_queue(dev);
939 set_bit(TXQ_ETH, &qs->txq_stopped);
940 q->stops++;
941 dev_err(&adap->pdev->dev,
942 "%s: Tx ring %u full while queue awake!\n",
943 dev->name, q->cntxt_id & 7);
945 spin_unlock(&q->lock);
946 return NETDEV_TX_BUSY;
949 q->in_use += ndesc;
950 if (unlikely(credits - ndesc < q->stop_thres)) {
951 q->stops++;
952 netif_stop_queue(dev);
953 set_bit(TXQ_ETH, &qs->txq_stopped);
954 #if !USE_GTS
955 if (should_restart_tx(q) &&
956 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
957 q->restarts++;
958 netif_wake_queue(dev);
960 #endif
963 gen = q->gen;
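/*
 * Ask the SGE for a WR completion roughly once every 8 descriptors:
 * bit 3 of the running unacked count selects the COMPL bit, and the
 * count then keeps only its low 3 bits.
 */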
964 q->unacked += ndesc;
965 compl = (q->unacked & 8) << (S_WR_COMPL - 3);
966 q->unacked &= 7;
967 pidx = q->pidx;
968 q->pidx += ndesc;
969 if (q->pidx >= q->size) {
970 q->pidx -= q->size;
971 q->gen ^= 1;
974 /* update port statistics */
975 if (skb->ip_summed == CHECKSUM_COMPLETE)
976 qs->port_stats[SGE_PSTAT_TX_CSUM]++;
977 if (skb_shinfo(skb)->gso_size)
978 qs->port_stats[SGE_PSTAT_TSO]++;
979 if (vlan_tx_tag_present(skb) && pi->vlan_grp)
980 qs->port_stats[SGE_PSTAT_VLANINS]++;
982 dev->trans_start = jiffies;
983 spin_unlock(&q->lock);
986 * We do not use Tx completion interrupts to free DMAd Tx packets.
987 * This is good for performance but means that we rely on new Tx
988 * packets arriving to run the destructors of completed packets,
989 * which open up space in their sockets' send queues. Sometimes
990 * we do not get such new packets causing Tx to stall. A single
991 * UDP transmitter is a good example of this situation. We have
992 * a clean up timer that periodically reclaims completed packets
993 * but it doesn't run often enough (nor do we want it to) to prevent
994 * lengthy stalls. A solution to this problem is to run the
995 * destructor early, after the packet is queued but before it's DMAd.
996 * A downside is that we lie to socket memory accounting, but the amount
997 * of extra memory is reasonable (limited by the number of Tx
998 * descriptors), the packets do actually get freed quickly by new
999 * packets almost always, and for protocols like TCP that wait for
1000 * acks to really free up the data the extra memory is even less.
1001 * On the positive side we run the destructors on the sending CPU
1002 * rather than on a potentially different completing CPU, usually a
1003 * good thing. We also run them without holding our Tx queue lock,
1004 * unlike what reclaim_completed_tx() would otherwise do.
1006 * Run the destructor before telling the DMA engine about the packet
1007 * to make sure it doesn't complete and get freed prematurely.
1009 if (likely(!skb_shared(skb)))
1010 skb_orphan(skb);
1012 write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
1013 check_ring_tx_db(adap, q);
1014 return NETDEV_TX_OK;
1018 * write_imm - write a packet into a Tx descriptor as immediate data
1019 * @d: the Tx descriptor to write
1020 * @skb: the packet
1021 * @len: the length of packet data to write as immediate data
1022 * @gen: the generation bit value to write
1024 * Writes a packet as immediate data into a Tx descriptor. The packet
1025 * contains a work request at its beginning. We must write the packet
1026 * carefully so the SGE doesn't read accidentally before it's written in
1027 * its entirety.
1029 static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
1030 unsigned int len, unsigned int gen)
1032 struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
1033 struct work_request_hdr *to = (struct work_request_hdr *)d;
1035 memcpy(&to[1], &from[1], len - sizeof(*from));
1036 to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
1037 V_WR_BCNTLFLT(len & 7));
1038 wmb();
1039 to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
1040 V_WR_LEN((len + 7) / 8));
1041 wr_gen2(d, gen);
1042 kfree_skb(skb);
1046 * check_desc_avail - check descriptor availability on a send queue
1047 * @adap: the adapter
1048 * @q: the send queue
1049 * @skb: the packet needing the descriptors
1050 * @ndesc: the number of Tx descriptors needed
1051 * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
1053 * Checks if the requested number of Tx descriptors is available on an
1054 * SGE send queue. If the queue is already suspended or not enough
1055 * descriptors are available the packet is queued for later transmission.
1056 * Must be called with the Tx queue locked.
1058 * Returns 0 if enough descriptors are available, 1 if there aren't
1059 * enough descriptors and the packet has been queued, and 2 if the caller
1060 * needs to retry because there weren't enough descriptors at the
1061 * beginning of the call but some freed up in the meantime.
1063 static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
1064 struct sk_buff *skb, unsigned int ndesc,
1065 unsigned int qid)
1067 if (unlikely(!skb_queue_empty(&q->sendq))) {
1068 addq_exit:__skb_queue_tail(&q->sendq, skb);
1069 return 1;
1071 if (unlikely(q->size - q->in_use < ndesc)) {
1072 struct sge_qset *qs = txq_to_qset(q, qid);
1074 set_bit(qid, &qs->txq_stopped);
1075 smp_mb__after_clear_bit();
1077 if (should_restart_tx(q) &&
1078 test_and_clear_bit(qid, &qs->txq_stopped))
1079 return 2;
1081 q->stops++;
1082 goto addq_exit;
1084 return 0;
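/*
 * Callers treat a return of 2 as "descriptors freed up meanwhile": they
 * reclaim completed descriptors and retry, as ctrl_xmit() and ofld_xmit()
 * below do via their again: labels.
 */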
1088 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1089 * @q: the SGE control Tx queue
1091 * This is a variant of reclaim_completed_tx() that is used for Tx queues
1092 * that send only immediate data (presently just the control queues) and
1093 * thus do not have any sk_buffs to release.
1095 static inline void reclaim_completed_tx_imm(struct sge_txq *q)
1097 unsigned int reclaim = q->processed - q->cleaned;
1099 q->in_use -= reclaim;
1100 q->cleaned += reclaim;
1103 static inline int immediate(const struct sk_buff *skb)
1105 return skb->len <= WR_LEN && !skb->data_len;
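/*
 * A packet qualifies as immediate data only if it fits within a single
 * WR (WR_LEN bytes) and is entirely linear, i.e. has no page fragments
 * (data_len == 0).
 */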
1109 * ctrl_xmit - send a packet through an SGE control Tx queue
1110 * @adap: the adapter
1111 * @q: the control queue
1112 * @skb: the packet
1114 * Send a packet through an SGE control Tx queue. Packets sent through
1115 * a control queue must fit entirely as immediate data in a single Tx
1116 * descriptor and have no page fragments.
1118 static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
1119 struct sk_buff *skb)
1121 int ret;
1122 struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;
1124 if (unlikely(!immediate(skb))) {
1125 WARN_ON(1);
1126 dev_kfree_skb(skb);
1127 return NET_XMIT_SUCCESS;
1130 wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
1131 wrp->wr_lo = htonl(V_WR_TID(q->token));
1133 spin_lock(&q->lock);
1134 again:reclaim_completed_tx_imm(q);
1136 ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
1137 if (unlikely(ret)) {
1138 if (ret == 1) {
1139 spin_unlock(&q->lock);
1140 return NET_XMIT_CN;
1142 goto again;
1145 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1147 q->in_use++;
1148 if (++q->pidx >= q->size) {
1149 q->pidx = 0;
1150 q->gen ^= 1;
1152 spin_unlock(&q->lock);
1153 wmb();
1154 t3_write_reg(adap, A_SG_KDOORBELL,
1155 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1156 return NET_XMIT_SUCCESS;
1160 * restart_ctrlq - restart a suspended control queue
1161 * @qs: the queue set containing the control queue
1163 * Resumes transmission on a suspended Tx control queue.
1165 static void restart_ctrlq(unsigned long data)
1167 struct sk_buff *skb;
1168 struct sge_qset *qs = (struct sge_qset *)data;
1169 struct sge_txq *q = &qs->txq[TXQ_CTRL];
1170 struct adapter *adap = qs->netdev->priv;
1172 spin_lock(&q->lock);
1173 again:reclaim_completed_tx_imm(q);
1175 while (q->in_use < q->size && (skb = __skb_dequeue(&q->sendq)) != NULL) {
1177 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1179 if (++q->pidx >= q->size) {
1180 q->pidx = 0;
1181 q->gen ^= 1;
1183 q->in_use++;
1186 if (!skb_queue_empty(&q->sendq)) {
1187 set_bit(TXQ_CTRL, &qs->txq_stopped);
1188 smp_mb__after_clear_bit();
1190 if (should_restart_tx(q) &&
1191 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
1192 goto again;
1193 q->stops++;
1196 spin_unlock(&q->lock);
1197 t3_write_reg(adap, A_SG_KDOORBELL,
1198 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1202 * Send a management message through control queue 0
1204 int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
1206 return ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
1210 * write_ofld_wr - write an offload work request
1211 * @adap: the adapter
1212 * @skb: the packet to send
1213 * @q: the Tx queue
1214 * @pidx: index of the first Tx descriptor to write
1215 * @gen: the generation value to use
1216 * @ndesc: number of descriptors the packet will occupy
1218 * Write an offload work request to send the supplied packet. The packet
1219 * data already carry the work request with most fields populated.
1221 static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
1222 struct sge_txq *q, unsigned int pidx,
1223 unsigned int gen, unsigned int ndesc)
1225 unsigned int sgl_flits, flits;
1226 struct work_request_hdr *from;
1227 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1228 struct tx_desc *d = &q->desc[pidx];
1230 if (immediate(skb)) {
1231 q->sdesc[pidx].skb = NULL;
1232 write_imm(d, skb, skb->len, gen);
1233 return;
1236 /* Only TX_DATA builds SGLs */
1238 from = (struct work_request_hdr *)skb->data;
1239 memcpy(&d->flit[1], &from[1], skb->h.raw - skb->data - sizeof(*from));
1241 flits = (skb->h.raw - skb->data) / 8;
1242 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1243 sgl_flits = make_sgl(skb, sgp, skb->h.raw, skb->tail - skb->h.raw,
1244 adap->pdev);
1245 if (need_skb_unmap())
1246 ((struct unmap_info *)skb->cb)->len = skb->tail - skb->h.raw;
1248 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
1249 gen, from->wr_hi, from->wr_lo);
1253 * calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
1254 * @skb: the packet
1256 * Returns the number of Tx descriptors needed for the given offload
1257 * packet. These packets are already fully constructed.
1259 static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
1261 unsigned int flits, cnt = skb_shinfo(skb)->nr_frags;
1263 if (skb->len <= WR_LEN && cnt == 0)
1264 return 1; /* packet fits as immediate data */
1266 flits = (skb->h.raw - skb->data) / 8; /* headers */
1267 if (skb->tail != skb->h.raw)
1268 cnt++;
1269 return flits_to_desc(flits + sgl_len(cnt));
1273 * ofld_xmit - send a packet through an offload queue
1274 * @adap: the adapter
1275 * @q: the Tx offload queue
1276 * @skb: the packet
1278 * Send an offload packet through an SGE offload queue.
1280 static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
1281 struct sk_buff *skb)
1283 int ret;
1284 unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;
1286 spin_lock(&q->lock);
1287 again:reclaim_completed_tx(adap, q);
1289 ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
1290 if (unlikely(ret)) {
1291 if (ret == 1) {
1292 skb->priority = ndesc; /* save for restart */
1293 spin_unlock(&q->lock);
1294 return NET_XMIT_CN;
1296 goto again;
1299 gen = q->gen;
1300 q->in_use += ndesc;
1301 pidx = q->pidx;
1302 q->pidx += ndesc;
1303 if (q->pidx >= q->size) {
1304 q->pidx -= q->size;
1305 q->gen ^= 1;
1307 spin_unlock(&q->lock);
1309 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1310 check_ring_tx_db(adap, q);
1311 return NET_XMIT_SUCCESS;
1315 * restart_offloadq - restart a suspended offload queue
1316 * @qs: the queue set containing the offload queue
1318 * Resumes transmission on a suspended Tx offload queue.
1320 static void restart_offloadq(unsigned long data)
1322 struct sk_buff *skb;
1323 struct sge_qset *qs = (struct sge_qset *)data;
1324 struct sge_txq *q = &qs->txq[TXQ_OFLD];
1325 struct adapter *adap = qs->netdev->priv;
1327 spin_lock(&q->lock);
1328 again:reclaim_completed_tx(adap, q);
1330 while ((skb = skb_peek(&q->sendq)) != NULL) {
1331 unsigned int gen, pidx;
1332 unsigned int ndesc = skb->priority;
1334 if (unlikely(q->size - q->in_use < ndesc)) {
1335 set_bit(TXQ_OFLD, &qs->txq_stopped);
1336 smp_mb__after_clear_bit();
1338 if (should_restart_tx(q) &&
1339 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
1340 goto again;
1341 q->stops++;
1342 break;
1345 gen = q->gen;
1346 q->in_use += ndesc;
1347 pidx = q->pidx;
1348 q->pidx += ndesc;
1349 if (q->pidx >= q->size) {
1350 q->pidx -= q->size;
1351 q->gen ^= 1;
1353 __skb_unlink(skb, &q->sendq);
1354 spin_unlock(&q->lock);
1356 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1357 spin_lock(&q->lock);
1359 spin_unlock(&q->lock);
1361 #if USE_GTS
1362 set_bit(TXQ_RUNNING, &q->flags);
1363 set_bit(TXQ_LAST_PKT_DB, &q->flags);
1364 #endif
1365 t3_write_reg(adap, A_SG_KDOORBELL,
1366 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1370 * queue_set - return the queue set a packet should use
1371 * @skb: the packet
1373 * Maps a packet to the SGE queue set it should use. The desired queue
1374 * set is carried in bits 1-3 in the packet's priority.
1376 static inline int queue_set(const struct sk_buff *skb)
1378 return skb->priority >> 1;
1382 * is_ctrl_pkt - return whether an offload packet is a control packet
1383 * @skb: the packet
1385 * Determines whether an offload packet should use an OFLD or a CTRL
1386 * Tx queue. This is indicated by bit 0 in the packet's priority.
1388 static inline int is_ctrl_pkt(const struct sk_buff *skb)
1390 return skb->priority & 1;
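/*
 * Illustration (not part of the driver): an offload sender targeting the
 * regular Tx queue of queue set 2 would set
 *
 *	skb->priority = (2 << 1) | 0;
 *
 * giving queue_set() == 2 and is_ctrl_pkt() == 0, while priority 1
 * selects the control queue of queue set 0.
 */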
1394 * t3_offload_tx - send an offload packet
1395 * @tdev: the offload device to send to
1396 * @skb: the packet
1398 * Sends an offload packet. We use the packet priority to select the
1399 * appropriate Tx queue as follows: bit 0 indicates whether the packet
1400 * should be sent as regular or control, bits 1-3 select the queue set.
1402 int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
1404 struct adapter *adap = tdev2adap(tdev);
1405 struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];
1407 if (unlikely(is_ctrl_pkt(skb)))
1408 return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);
1410 return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
1414 * offload_enqueue - add an offload packet to an SGE offload receive queue
1415 * @q: the SGE response queue
1416 * @skb: the packet
1418 * Add a new offload packet to an SGE response queue's offload packet
1419 * queue. If the packet is the first on the queue it schedules the RX
1420 * softirq to process the queue.
1422 static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
1424 skb->next = skb->prev = NULL;
1425 if (q->rx_tail)
1426 q->rx_tail->next = skb;
1427 else {
1428 struct sge_qset *qs = rspq_to_qset(q);
1430 if (__netif_rx_schedule_prep(qs->netdev))
1431 __netif_rx_schedule(qs->netdev);
1432 q->rx_head = skb;
1434 q->rx_tail = skb;
1438 * deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
1439 * @tdev: the offload device that will be receiving the packets
1440 * @q: the SGE response queue that assembled the bundle
1441 * @skbs: the partial bundle
1442 * @n: the number of packets in the bundle
1444 * Delivers a (partial) bundle of Rx offload packets to an offload device.
1446 static inline void deliver_partial_bundle(struct t3cdev *tdev,
1447 struct sge_rspq *q,
1448 struct sk_buff *skbs[], int n)
1450 if (n) {
1451 q->offload_bundles++;
1452 tdev->recv(tdev, skbs, n);
1457 * ofld_poll - NAPI handler for offload packets in interrupt mode
1458 * @dev: the network device doing the polling
1459 * @budget: polling budget
1461 * The NAPI handler for offload packets when a response queue is serviced
1462 * by the hard interrupt handler, i.e., when it's operating in non-polling
1463 * mode. Creates small packet batches and sends them through the offload
1464 * receive handler. Batches need to be of modest size as we do prefetches
1465 * on the packets in each.
1467 static int ofld_poll(struct net_device *dev, int *budget)
1469 struct adapter *adapter = dev->priv;
1470 struct sge_qset *qs = dev2qset(dev);
1471 struct sge_rspq *q = &qs->rspq;
1472 int work_done, limit = min(*budget, dev->quota), avail = limit;
1474 while (avail) {
1475 struct sk_buff *head, *tail, *skbs[RX_BUNDLE_SIZE];
1476 int ngathered;
1478 spin_lock_irq(&q->lock);
1479 head = q->rx_head;
1480 if (!head) {
1481 work_done = limit - avail;
1482 *budget -= work_done;
1483 dev->quota -= work_done;
1484 __netif_rx_complete(dev);
1485 spin_unlock_irq(&q->lock);
1486 return 0;
1489 tail = q->rx_tail;
1490 q->rx_head = q->rx_tail = NULL;
1491 spin_unlock_irq(&q->lock);
1493 for (ngathered = 0; avail && head; avail--) {
1494 prefetch(head->data);
1495 skbs[ngathered] = head;
1496 head = head->next;
1497 skbs[ngathered]->next = NULL;
1498 if (++ngathered == RX_BUNDLE_SIZE) {
1499 q->offload_bundles++;
1500 adapter->tdev.recv(&adapter->tdev, skbs,
1501 ngathered);
1502 ngathered = 0;
1505 if (head) { /* splice remaining packets back onto Rx queue */
1506 spin_lock_irq(&q->lock);
1507 tail->next = q->rx_head;
1508 if (!q->rx_head)
1509 q->rx_tail = tail;
1510 q->rx_head = head;
1511 spin_unlock_irq(&q->lock);
1513 deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
1515 work_done = limit - avail;
1516 *budget -= work_done;
1517 dev->quota -= work_done;
1518 return 1;
1522 * rx_offload - process a received offload packet
1523 * @tdev: the offload device receiving the packet
1524 * @rq: the response queue that received the packet
1525 * @skb: the packet
1526 * @rx_gather: a gather list of packets if we are building a bundle
1527 * @gather_idx: index of the next available slot in the bundle
1529 * Process an ingress offload packet and add it to the offload ingress
1530 * queue. Returns the index of the next available slot in the bundle.
1532 static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
1533 struct sk_buff *skb, struct sk_buff *rx_gather[],
1534 unsigned int gather_idx)
1536 rq->offload_pkts++;
1537 skb->mac.raw = skb->nh.raw = skb->h.raw = skb->data;
1539 if (rq->polling) {
1540 rx_gather[gather_idx++] = skb;
1541 if (gather_idx == RX_BUNDLE_SIZE) {
1542 tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
1543 gather_idx = 0;
1544 rq->offload_bundles++;
1546 } else
1547 offload_enqueue(rq, skb);
1549 return gather_idx;
1553 * update_tx_completed - update the number of processed Tx descriptors
1554 * @qs: the queue set to update
1555 * @idx: which Tx queue within the set to update
1556 * @credits: number of new processed descriptors
1557 * @tx_completed: accumulates credits for the queues
1559 * Updates the number of completed Tx descriptors for a queue set's Tx
1560 * queue. On UP systems we update the information immediately but on
1561 * MP we accumulate the credits locally and update the Tx queue when we
1562 * reach a threshold to avoid cache-line bouncing.
1564 static inline void update_tx_completed(struct sge_qset *qs, int idx,
1565 unsigned int credits,
1566 unsigned int tx_completed[])
1568 #ifdef CONFIG_SMP
1569 tx_completed[idx] += credits;
1570 if (tx_completed[idx] > 32) {
1571 qs->txq[idx].processed += tx_completed[idx];
1572 tx_completed[idx] = 0;
1574 #else
1575 qs->txq[idx].processed += credits;
1576 #endif
1580 * restart_tx - check whether to restart suspended Tx queues
1581 * @qs: the queue set to resume
1583 * Restarts suspended Tx queues of an SGE queue set if they have enough
1584 * free resources to resume operation.
1586 static void restart_tx(struct sge_qset *qs)
1588 if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
1589 should_restart_tx(&qs->txq[TXQ_ETH]) &&
1590 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1591 qs->txq[TXQ_ETH].restarts++;
1592 if (netif_running(qs->netdev))
1593 netif_wake_queue(qs->netdev);
1596 if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
1597 should_restart_tx(&qs->txq[TXQ_OFLD]) &&
1598 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
1599 qs->txq[TXQ_OFLD].restarts++;
1600 tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
1602 if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
1603 should_restart_tx(&qs->txq[TXQ_CTRL]) &&
1604 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
1605 qs->txq[TXQ_CTRL].restarts++;
1606 tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
1611 * rx_eth - process an ingress ethernet packet
1612 * @adap: the adapter
1613 * @rq: the response queue that received the packet
1614 * @skb: the packet
1615 * @pad: amount of padding at the start of the buffer
1617 * Process an ingress Ethernet packet and deliver it to the stack.
1618 * The padding is 2 if the packet was delivered in an Rx buffer and 0
1619 * if it was immediate data in a response.
1621 static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
1622 struct sk_buff *skb, int pad)
1624 struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
1625 struct port_info *pi;
1627 rq->eth_pkts++;
1628 skb_pull(skb, sizeof(*p) + pad);
1629 skb->dev = adap->port[p->iff];
1630 skb->dev->last_rx = jiffies;
1631 skb->protocol = eth_type_trans(skb, skb->dev);
1632 pi = netdev_priv(skb->dev);
1633 if (pi->rx_csum_offload && p->csum_valid && p->csum == 0xffff &&
1634 !p->fragment) {
1635 rspq_to_qset(rq)->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
1636 skb->ip_summed = CHECKSUM_UNNECESSARY;
1637 } else
1638 skb->ip_summed = CHECKSUM_NONE;
1640 if (unlikely(p->vlan_valid)) {
1641 struct vlan_group *grp = pi->vlan_grp;
1643 rspq_to_qset(rq)->port_stats[SGE_PSTAT_VLANEX]++;
1644 if (likely(grp))
1645 __vlan_hwaccel_rx(skb, grp, ntohs(p->vlan),
1646 rq->polling);
1647 else
1648 dev_kfree_skb_any(skb);
1649 } else if (rq->polling)
1650 netif_receive_skb(skb);
1651 else
1652 netif_rx(skb);
1656 * handle_rsp_cntrl_info - handles control information in a response
1657 * @qs: the queue set corresponding to the response
1658 * @flags: the response control flags
1659 * @tx_completed: accumulates completion credits for the Tx queues
1661 * Handles the control information of an SGE response, such as GTS
1662 * indications and completion credits for the queue set's Tx queues.
1664 static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags,
1665 unsigned int tx_completed[])
1667 unsigned int credits;
1669 #if USE_GTS
1670 if (flags & F_RSPD_TXQ0_GTS)
1671 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
1672 #endif
1674 /* ETH credits are already coalesced, return them immediately. */
1675 credits = G_RSPD_TXQ0_CR(flags);
1676 if (credits)
1677 qs->txq[TXQ_ETH].processed += credits;
1679 # if USE_GTS
1680 if (flags & F_RSPD_TXQ1_GTS)
1681 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
1682 # endif
1683 update_tx_completed(qs, TXQ_OFLD, G_RSPD_TXQ1_CR(flags), tx_completed);
1684 update_tx_completed(qs, TXQ_CTRL, G_RSPD_TXQ2_CR(flags), tx_completed);
1688 * flush_tx_completed - returns accumulated Tx completions to Tx queues
1689 * @qs: the queue set to update
1690 * @tx_completed: pending completion credits to return to Tx queues
1692 * Updates the number of completed Tx descriptors for a queue set's Tx
1693 * queues with the credits pending in @tx_completed. This does something
1694 * only on MP systems as on UP systems we return the credits immediately.
1696 static inline void flush_tx_completed(struct sge_qset *qs,
1697 unsigned int tx_completed[])
1699 #if defined(CONFIG_SMP)
1700 if (tx_completed[TXQ_OFLD])
1701 qs->txq[TXQ_OFLD].processed += tx_completed[TXQ_OFLD];
1702 if (tx_completed[TXQ_CTRL])
1703 qs->txq[TXQ_CTRL].processed += tx_completed[TXQ_CTRL];
1704 #endif
1708 * check_ring_db - check if we need to ring any doorbells
1709 * @adapter: the adapter
1710 * @qs: the queue set whose Tx queues are to be examined
1711 * @sleeping: indicates which Tx queue sent GTS
1713 * Checks if some of a queue set's Tx queues need to ring their doorbells
1714 * to resume transmission after idling while they still have unprocessed
1715 * descriptors.
1717 static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
1718 unsigned int sleeping)
1720 if (sleeping & F_RSPD_TXQ0_GTS) {
1721 struct sge_txq *txq = &qs->txq[TXQ_ETH];
1723 if (txq->cleaned + txq->in_use != txq->processed &&
1724 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
1725 set_bit(TXQ_RUNNING, &txq->flags);
1726 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
1727 V_EGRCNTX(txq->cntxt_id));
1731 if (sleeping & F_RSPD_TXQ1_GTS) {
1732 struct sge_txq *txq = &qs->txq[TXQ_OFLD];
1734 if (txq->cleaned + txq->in_use != txq->processed &&
1735 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
1736 set_bit(TXQ_RUNNING, &txq->flags);
1737 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
1738 V_EGRCNTX(txq->cntxt_id));
1744 * is_new_response - check if a response is newly written
1745 * @r: the response descriptor
1746 * @q: the response queue
1748 * Returns true if a response descriptor contains a yet unprocessed
1749 * response.
1751 static inline int is_new_response(const struct rsp_desc *r,
1752 const struct sge_rspq *q)
1754 return (r->intr_gen & F_RSPD_GEN2) == q->gen;
1757 #define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
1758 #define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
1759 V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
1760 V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
1761 V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
1763 /* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
1764 #define NOMEM_INTR_DELAY 2500
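/* 2500 * 0.1us = 250us of interrupt holdoff while short of memory. */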
1767 * process_responses - process responses from an SGE response queue
1768 * @adap: the adapter
1769 * @qs: the queue set to which the response queue belongs
1770 * @budget: how many responses can be processed in this round
1772 * Process responses from an SGE response queue up to the supplied budget.
1773 * Responses include received packets as well as credits and other events
1774 * for the queues that belong to the response queue's queue set.
1775 * A negative budget is effectively unlimited.
1777 * Additionally choose the interrupt holdoff time for the next interrupt
1778 * on this queue. If the system is under memory shortage use a fairly
1779 * long delay to help recovery.
1781 static int process_responses(struct adapter *adap, struct sge_qset *qs,
1782 int budget)
1784 struct sge_rspq *q = &qs->rspq;
1785 struct rsp_desc *r = &q->desc[q->cidx];
1786 int budget_left = budget;
1787 unsigned int sleeping = 0, tx_completed[3] = { 0, 0, 0 };
1788 struct sk_buff *offload_skbs[RX_BUNDLE_SIZE];
1789 int ngathered = 0;
1791 q->next_holdoff = q->holdoff_tmr;
1793 while (likely(budget_left && is_new_response(r, q))) {
1794 int eth, ethpad = 0;
1795 struct sk_buff *skb = NULL;
1796 u32 len, flags = ntohl(r->flags);
1797 u32 rss_hi = *(const u32 *)r, rss_lo = r->rss_hdr.rss_hash_val;
1799 eth = r->rss_hdr.opcode == CPL_RX_PKT;
1801 if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
1802 skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
1803 if (!skb)
1804 goto no_mem;
1806 memcpy(__skb_put(skb, AN_PKT_SIZE), r, AN_PKT_SIZE);
1807 skb->data[0] = CPL_ASYNC_NOTIF;
1808 rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
1809 q->async_notif++;
1810 } else if (flags & F_RSPD_IMM_DATA_VALID) {
1811 skb = get_imm_packet(r);
1812 if (unlikely(!skb)) {
1813 no_mem:
1814 q->next_holdoff = NOMEM_INTR_DELAY;
1815 q->nomem++;
1816 /* consume one credit since we tried */
1817 budget_left--;
1818 break;
1820 q->imm_data++;
1821 } else if ((len = ntohl(r->len_cq)) != 0) {
1822 struct sge_fl *fl;
1824 fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
1825 fl->credits--;
1826 skb = get_packet(adap, fl, G_RSPD_LEN(len),
1827 eth ? SGE_RX_DROP_THRES : 0);
1828 if (!skb)
1829 q->rx_drops++;
1830 else if (r->rss_hdr.opcode == CPL_TRACE_PKT)
1831 __skb_pull(skb, 2);
1832 ethpad = 2;
1833 if (++fl->cidx == fl->size)
1834 fl->cidx = 0;
1835 } else
1836 q->pure_rsps++;
1838 if (flags & RSPD_CTRL_MASK) {
1839 sleeping |= flags & RSPD_GTS_MASK;
1840 handle_rsp_cntrl_info(qs, flags, tx_completed);
1843 r++;
1844 if (unlikely(++q->cidx == q->size)) {
1845 q->cidx = 0;
1846 q->gen ^= 1;
1847 r = q->desc;
1849 prefetch(r);
1851 if (++q->credits >= (q->size / 4)) {
1852 refill_rspq(adap, q, q->credits);
1853 q->credits = 0;
1856 if (likely(skb != NULL)) {
1857 if (eth)
1858 rx_eth(adap, q, skb, ethpad);
1859 else {
1860 /* Preserve the RSS info in csum & priority */
1861 skb->csum = rss_hi;
1862 skb->priority = rss_lo;
1863 ngathered = rx_offload(&adap->tdev, q, skb,
1864 offload_skbs, ngathered);
1868 --budget_left;
1871 flush_tx_completed(qs, tx_completed);
1872 deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
1873 if (sleeping)
1874 check_ring_db(adap, qs, sleeping);
1876 smp_mb(); /* commit Tx queue .processed updates */
1877 if (unlikely(qs->txq_stopped != 0))
1878 restart_tx(qs);
1880 budget -= budget_left;
1881 return budget;
1884 static inline int is_pure_response(const struct rsp_desc *r)
1886 u32 n = ntohl(r->flags) & (F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
1888 return (n | r->len_cq) == 0;
1892 * napi_rx_handler - the NAPI handler for Rx processing
1893 * @dev: the net device
1894 * @budget: how many packets we can process in this round
1896 * Handler for new data events when using NAPI.
1898 static int napi_rx_handler(struct net_device *dev, int *budget)
1900 struct adapter *adap = dev->priv;
1901 struct sge_qset *qs = dev2qset(dev);
1902 int effective_budget = min(*budget, dev->quota);
1904 int work_done = process_responses(adap, qs, effective_budget);
1905 *budget -= work_done;
1906 dev->quota -= work_done;
1908 if (work_done >= effective_budget)
1909 return 1;
1911 netif_rx_complete(dev);
1914 * Because we don't atomically flush the following write, it is
1915 * possible in very rare cases for it to reach the device in a way
1916 * that races with a new response being written plus an error interrupt,
1917 * causing the NAPI interrupt handler below to return unhandled status
1918 * to the OS. Protecting against this would require flushing the write
1919 * and doing both the write and the flush with interrupts off, which is
1920 * far too expensive to justify given the rarity of the race.
1922 * The race cannot happen at all with MSI-X.
1924 t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
1925 V_NEWTIMER(qs->rspq.next_holdoff) |
1926 V_NEWINDEX(qs->rspq.cidx));
1927 return 0;
1928 }
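/*
 * Illustrative sketch, not part of the driver: with the old net_device-based
 * NAPI interface used here, a poll method such as napi_rx_handler() is
 * attached directly to the net device.  The real hookup happens in
 * t3_update_qset_coalesce() below; the weight value is an assumption made
 * only for this example.
 */
static inline void example_attach_napi_poll(struct net_device *dev)
{
	dev->poll = napi_rx_handler;	/* run from the net_rx_action() softirq */
	dev->weight = 64;		/* max packets per poll invocation (assumed) */
}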
1931 * Returns true if the device is already scheduled for polling.
1933 static inline int napi_is_scheduled(struct net_device *dev)
1935 return test_bit(__LINK_STATE_RX_SCHED, &dev->state);
1939 * process_pure_responses - process pure responses from a response queue
1940 * @adap: the adapter
1941 * @qs: the queue set owning the response queue
1942 * @r: the first pure response to process
1944 * A simpler version of process_responses() that handles only pure (i.e.,
1945 * non-data-carrying) responses. Such responses are too lightweight to
1946 * justify calling a softirq under NAPI, so we handle them specially in
1947 * the interrupt handler. The function is called with a pointer to a
1948 * response, which the caller must ensure is a valid pure response.
1950 * Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
1952 static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
1953 struct rsp_desc *r)
1955 struct sge_rspq *q = &qs->rspq;
1956 unsigned int sleeping = 0, tx_completed[3] = { 0, 0, 0 };
1958 do {
1959 u32 flags = ntohl(r->flags);
1961 r++;
1962 if (unlikely(++q->cidx == q->size)) {
1963 q->cidx = 0;
1964 q->gen ^= 1;
1965 r = q->desc;
1967 prefetch(r);
1969 if (flags & RSPD_CTRL_MASK) {
1970 sleeping |= flags & RSPD_GTS_MASK;
1971 handle_rsp_cntrl_info(qs, flags, tx_completed);
1974 q->pure_rsps++;
1975 if (++q->credits >= (q->size / 4)) {
1976 refill_rspq(adap, q, q->credits);
1977 q->credits = 0;
1979 } while (is_new_response(r, q) && is_pure_response(r));
1981 flush_tx_completed(qs, tx_completed);
1983 if (sleeping)
1984 check_ring_db(adap, qs, sleeping);
1986 smp_mb(); /* commit Tx queue .processed updates */
1987 if (unlikely(qs->txq_stopped != 0))
1988 restart_tx(qs);
1990 return is_new_response(r, q);
1994 * handle_responses - decide what to do with new responses in NAPI mode
1995 * @adap: the adapter
1996 * @q: the response queue
1998 * This is used by the NAPI interrupt handlers to decide what to do with
1999 * new SGE responses. If there are no new responses it returns -1. If
2000 * there are new responses and they are pure (i.e., non-data-carrying)
2001 * it handles them straight in hard interrupt context as they are very
2002 * cheap and don't deliver any packets. Finally, if there are any data
2003 * signaling responses it schedules the NAPI handler. Returns 1 if it
2004 * schedules NAPI, 0 if all new responses were pure.
2006 * The caller must ensure that NAPI is not already running.
2008 static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
2010 struct sge_qset *qs = rspq_to_qset(q);
2011 struct rsp_desc *r = &q->desc[q->cidx];
2013 if (!is_new_response(r, q))
2014 return -1;
2015 if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
2016 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2017 V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
2018 return 0;
2020 if (likely(__netif_rx_schedule_prep(qs->netdev)))
2021 __netif_rx_schedule(qs->netdev);
2022 return 1;
2026 * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
2027 * (i.e., response queue serviced in hard interrupt).
2029 irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
2031 struct sge_qset *qs = cookie;
2032 struct adapter *adap = qs->netdev->priv;
2033 struct sge_rspq *q = &qs->rspq;
2035 spin_lock(&q->lock);
2036 if (process_responses(adap, qs, -1) == 0)
2037 q->unhandled_irqs++;
2038 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2039 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2040 spin_unlock(&q->lock);
2041 return IRQ_HANDLED;
2045 * The MSI-X interrupt handler for an SGE response queue for the NAPI case
2046 * (i.e., response queue serviced by NAPI polling).
2048 irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
2050 struct sge_qset *qs = cookie;
2051 struct adapter *adap = qs->netdev->priv;
2052 struct sge_rspq *q = &qs->rspq;
2054 spin_lock(&q->lock);
2055 BUG_ON(napi_is_scheduled(qs->netdev));
2057 if (handle_responses(adap, q) < 0)
2058 q->unhandled_irqs++;
2059 spin_unlock(&q->lock);
2060 return IRQ_HANDLED;
2064 * The non-NAPI MSI interrupt handler. This needs to handle data events from
2065 * SGE response queues as well as error and other async events as they all use
2066 * the same MSI vector. We use one SGE response queue per port in this mode
2067 * and protect all response queues with queue 0's lock.
2069 static irqreturn_t t3_intr_msi(int irq, void *cookie)
2071 int new_packets = 0;
2072 struct adapter *adap = cookie;
2073 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2075 spin_lock(&q->lock);
2077 if (process_responses(adap, &adap->sge.qs[0], -1)) {
2078 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2079 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2080 new_packets = 1;
2083 if (adap->params.nports == 2 &&
2084 process_responses(adap, &adap->sge.qs[1], -1)) {
2085 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2087 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
2088 V_NEWTIMER(q1->next_holdoff) |
2089 V_NEWINDEX(q1->cidx));
2090 new_packets = 1;
2093 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2094 q->unhandled_irqs++;
2096 spin_unlock(&q->lock);
2097 return IRQ_HANDLED;
2100 static int rspq_check_napi(struct net_device *dev, struct sge_rspq *q)
2102 if (!napi_is_scheduled(dev) && is_new_response(&q->desc[q->cidx], q)) {
2103 if (likely(__netif_rx_schedule_prep(dev)))
2104 __netif_rx_schedule(dev);
2105 return 1;
2107 return 0;
2111 * The MSI interrupt handler for the NAPI case (i.e., response queues serviced
2112 * by NAPI polling). Handles data events from SGE response queues as well as
2113 * error and other async events as they all use the same MSI vector. We use
2114 * one SGE response queue per port in this mode and protect all response
2115 * queues with queue 0's lock.
2117 irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
2119 int new_packets;
2120 struct adapter *adap = cookie;
2121 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2123 spin_lock(&q->lock);
2125 new_packets = rspq_check_napi(adap->sge.qs[0].netdev, q);
2126 if (adap->params.nports == 2)
2127 new_packets += rspq_check_napi(adap->sge.qs[1].netdev,
2128 &adap->sge.qs[1].rspq);
2129 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2130 q->unhandled_irqs++;
2132 spin_unlock(&q->lock);
2133 return IRQ_HANDLED;
2137 * A helper function that processes responses and issues GTS.
2139 static inline int process_responses_gts(struct adapter *adap,
2140 struct sge_rspq *rq)
2142 int work;
2144 work = process_responses(adap, rspq_to_qset(rq), -1);
2145 t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
2146 V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
2147 return work;
2151 * The legacy INTx interrupt handler. This needs to handle data events from
2152 * SGE response queues as well as error and other async events as they all use
2153 * the same interrupt pin. We use one SGE response queue per port in this mode
2154 * and protect all response queues with queue 0's lock.
2156 static irqreturn_t t3_intr(int irq, void *cookie)
2158 int work_done, w0, w1;
2159 struct adapter *adap = cookie;
2160 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2161 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2163 spin_lock(&q0->lock);
2165 w0 = is_new_response(&q0->desc[q0->cidx], q0);
2166 w1 = adap->params.nports == 2 &&
2167 is_new_response(&q1->desc[q1->cidx], q1);
2169 if (likely(w0 | w1)) {
2170 t3_write_reg(adap, A_PL_CLI, 0);
2171 t3_read_reg(adap, A_PL_CLI); /* flush */
2173 if (likely(w0))
2174 process_responses_gts(adap, q0);
2176 if (w1)
2177 process_responses_gts(adap, q1);
2179 work_done = w0 | w1;
2180 } else
2181 work_done = t3_slow_intr_handler(adap);
2183 spin_unlock(&q0->lock);
2184 return IRQ_RETVAL(work_done != 0);
2188 * Interrupt handler for legacy INTx interrupts for T3B-based cards.
2189 * Handles data events from SGE response queues as well as error and other
2190 * async events as they all use the same interrupt pin. We use one SGE
2191 * response queue per port in this mode and protect all response queues with
2192 * queue 0's lock.
2194 static irqreturn_t t3b_intr(int irq, void *cookie)
2196 u32 map;
2197 struct adapter *adap = cookie;
2198 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2200 t3_write_reg(adap, A_PL_CLI, 0);
2201 map = t3_read_reg(adap, A_SG_DATA_INTR);
2203 if (unlikely(!map)) /* shared interrupt, most likely */
2204 return IRQ_NONE;
2206 spin_lock(&q0->lock);
2208 if (unlikely(map & F_ERRINTR))
2209 t3_slow_intr_handler(adap);
2211 if (likely(map & 1))
2212 process_responses_gts(adap, q0);
2214 if (map & 2)
2215 process_responses_gts(adap, &adap->sge.qs[1].rspq);
2217 spin_unlock(&q0->lock);
2218 return IRQ_HANDLED;
2222 * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards.
2223 * Handles data events from SGE response queues as well as error and other
2224 * async events as they all use the same interrupt pin. We use one SGE
2225 * response queue per port in this mode and protect all response queues with
2226 * queue 0's lock.
2228 static irqreturn_t t3b_intr_napi(int irq, void *cookie)
2230 u32 map;
2231 struct net_device *dev;
2232 struct adapter *adap = cookie;
2233 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2235 t3_write_reg(adap, A_PL_CLI, 0);
2236 map = t3_read_reg(adap, A_SG_DATA_INTR);
2238 if (unlikely(!map)) /* shared interrupt, most likely */
2239 return IRQ_NONE;
2241 spin_lock(&q0->lock);
2243 if (unlikely(map & F_ERRINTR))
2244 t3_slow_intr_handler(adap);
2246 if (likely(map & 1)) {
2247 dev = adap->sge.qs[0].netdev;
2249 BUG_ON(napi_is_scheduled(dev));
2250 if (likely(__netif_rx_schedule_prep(dev)))
2251 __netif_rx_schedule(dev);
2253 if (map & 2) {
2254 dev = adap->sge.qs[1].netdev;
2256 BUG_ON(napi_is_scheduled(dev));
2257 if (likely(__netif_rx_schedule_prep(dev)))
2258 __netif_rx_schedule(dev);
2261 spin_unlock(&q0->lock);
2262 return IRQ_HANDLED;
2266 * t3_intr_handler - select the top-level interrupt handler
2267 * @adap: the adapter
2268 * @polling: whether using NAPI to service response queues
2270 * Selects the top-level interrupt handler based on the type of interrupts
2271 * (MSI-X, MSI, or legacy) and whether NAPI will be used to service the
2272 * response queues.
2274 intr_handler_t t3_intr_handler(struct adapter *adap, int polling)
2276 if (adap->flags & USING_MSIX)
2277 return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix;
2278 if (adap->flags & USING_MSI)
2279 return polling ? t3_intr_msi_napi : t3_intr_msi;
2280 if (adap->params.rev > 0)
2281 return polling ? t3b_intr_napi : t3b_intr;
2282 return t3_intr;
2283 }
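/*
 * Illustrative sketch, not part of the driver: the handler selected above is
 * what the top-level driver hands to request_irq().  For the MSI/INTx cases
 * the cookie is the adapter itself (the MSI-X handlers take a queue set
 * instead).  The IRQ flags and name below are assumptions made only for this
 * example.
 */
static inline int example_request_line_irq(struct adapter *adap, int polling)
{
	return request_irq(adap->pdev->irq, t3_intr_handler(adap, polling),
			   IRQF_SHARED, "cxgb3", adap);
}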
2286 * t3_sge_err_intr_handler - SGE async event interrupt handler
2287 * @adapter: the adapter
2289 * Interrupt handler for SGE asynchronous (non-data) events.
2291 void t3_sge_err_intr_handler(struct adapter *adapter)
2293 unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2295 if (status & F_RSPQCREDITOVERFOW)
2296 CH_ALERT(adapter, "SGE response queue credit overflow\n");
2298 if (status & F_RSPQDISABLED) {
2299 v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
2301 CH_ALERT(adapter,
2302 "packet delivered to disabled response queue "
2303 "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
2306 t3_write_reg(adapter, A_SG_INT_CAUSE, status);
2307 if (status & (F_RSPQCREDITOVERFOW | F_RSPQDISABLED))
2308 t3_fatal_err(adapter);
2312 * sge_timer_cb - perform periodic maintenance of an SGE qset
2313 * @data: the SGE queue set to maintain
2315 * Runs periodically from a timer to perform maintenance of an SGE queue
2316 * set. It performs two tasks:
2318 * a) Cleans up any completed Tx descriptors that may still be pending.
2319 * Normal descriptor cleanup happens when new packets are added to a Tx
2320 * queue so this timer is relatively infrequent and does any cleanup only
2321 * if the Tx queue has not seen any new packets in a while. We make a
2322 * best effort attempt to reclaim descriptors, in that we don't wait
2323 * around if we cannot get a queue's lock (which most likely is because
2324 * someone else is queueing new packets and so will also handle the clean
2325 * up). Since control queues use immediate data exclusively we don't
2326 * bother cleaning them up here.
2328 * b) Replenishes Rx queues that have run out due to memory shortage.
2329 * Normally new Rx buffers are added when existing ones are consumed but
2330 * when out of memory a queue can become empty. We try to add only a few
2331 * buffers here; the queue will be replenished fully as these new buffers
2332 * are used up if memory shortage has subsided.
2334 static void sge_timer_cb(unsigned long data)
2336 spinlock_t *lock;
2337 struct sge_qset *qs = (struct sge_qset *)data;
2338 struct adapter *adap = qs->netdev->priv;
2340 if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
2341 reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
2342 spin_unlock(&qs->txq[TXQ_ETH].lock);
2344 if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
2345 reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD]);
2346 spin_unlock(&qs->txq[TXQ_OFLD].lock);
2348 lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
2349 &adap->sge.qs[0].rspq.lock;
2350 if (spin_trylock_irq(lock)) {
2351 if (!napi_is_scheduled(qs->netdev)) {
2352 if (qs->fl[0].credits < qs->fl[0].size)
2353 __refill_fl(adap, &qs->fl[0]);
2354 if (qs->fl[1].credits < qs->fl[1].size)
2355 __refill_fl(adap, &qs->fl[1]);
2357 spin_unlock_irq(lock);
2359 mod_timer(&qs->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
2363 * t3_update_qset_coalesce - update coalescing settings for a queue set
2364 * @qs: the SGE queue set
2365 * @p: new queue set parameters
2367 * Update the coalescing settings for an SGE queue set. Nothing is done
2368 * if the queue set is not initialized yet.
2370 void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
2372 if (!qs->netdev)
2373 return;
2375 qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U); /* can't be 0 */
2376 qs->rspq.polling = p->polling;
2377 qs->netdev->poll = p->polling ? napi_rx_handler : ofld_poll;
2378 }
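/*
 * Worked example of the conversion above: t3_sge_init() programs
 * A_SG_TIMER_TICK to core_ticks_per_usec(adap) / 10, so holdoff_tmr counts in
 * tenths of a microsecond.  With the default coalesce_usecs of 5 set by
 * t3_sge_prep():
 *
 *	holdoff_tmr = max(5 * 10, 1U) = 50 ticks, i.e. a 5us interrupt holdoff
 */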
2381 * t3_sge_alloc_qset - initialize an SGE queue set
2382 * @adapter: the adapter
2383 * @id: the queue set id
2384 * @nports: how many Ethernet ports will be using this queue set
2385 * @irq_vec_idx: the IRQ vector index for response queue interrupts
2386 * @p: configuration parameters for this queue set
2387 * @ntxq: number of Tx queues for the queue set
2388 * @netdev: net device associated with this queue set
2390 * Allocate resources and initialize an SGE queue set. A queue set
2391 * comprises a response queue, two Rx free-buffer queues, and up to 3
2392 * Tx queues. The Tx queues are assigned roles in the order Ethernet
2393 * queue, offload queue, and control queue.
2395 int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
2396 int irq_vec_idx, const struct qset_params *p,
2397 int ntxq, struct net_device *netdev)
2399 int i, ret = -ENOMEM;
2400 struct sge_qset *q = &adapter->sge.qs[id];
2402 init_qset_cntxt(q, id);
2403 init_timer(&q->tx_reclaim_timer);
2404 q->tx_reclaim_timer.data = (unsigned long)q;
2405 q->tx_reclaim_timer.function = sge_timer_cb;
2407 q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
2408 sizeof(struct rx_desc),
2409 sizeof(struct rx_sw_desc),
2410 &q->fl[0].phys_addr, &q->fl[0].sdesc);
2411 if (!q->fl[0].desc)
2412 goto err;
2414 q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
2415 sizeof(struct rx_desc),
2416 sizeof(struct rx_sw_desc),
2417 &q->fl[1].phys_addr, &q->fl[1].sdesc);
2418 if (!q->fl[1].desc)
2419 goto err;
2421 q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
2422 sizeof(struct rsp_desc), 0,
2423 &q->rspq.phys_addr, NULL);
2424 if (!q->rspq.desc)
2425 goto err;
2427 for (i = 0; i < ntxq; ++i) {
2429 * The control queue always uses immediate data so does not
2430 * need to keep track of any sk_buffs.
2432 size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
2434 q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
2435 sizeof(struct tx_desc), sz,
2436 &q->txq[i].phys_addr,
2437 &q->txq[i].sdesc);
2438 if (!q->txq[i].desc)
2439 goto err;
2441 q->txq[i].gen = 1;
2442 q->txq[i].size = p->txq_size[i];
2443 spin_lock_init(&q->txq[i].lock);
2444 skb_queue_head_init(&q->txq[i].sendq);
2447 tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq,
2448 (unsigned long)q);
2449 tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq,
2450 (unsigned long)q);
2452 q->fl[0].gen = q->fl[1].gen = 1;
2453 q->fl[0].size = p->fl_size;
2454 q->fl[1].size = p->jumbo_size;
2456 q->rspq.gen = 1;
2457 q->rspq.size = p->rspq_size;
2458 spin_lock_init(&q->rspq.lock);
2460 q->txq[TXQ_ETH].stop_thres = nports *
2461 flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
2463 if (ntxq == 1) {
2464 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + 2 +
2465 sizeof(struct cpl_rx_pkt);
2466 q->fl[1].buf_size = MAX_FRAME_SIZE + 2 +
2467 sizeof(struct cpl_rx_pkt);
2468 } else {
2469 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE +
2470 sizeof(struct cpl_rx_data);
2471 q->fl[1].buf_size = (16 * 1024) -
2472 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2475 spin_lock(&adapter->sge.reg_lock);
2477 /* FL threshold comparison uses < */
2478 ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
2479 q->rspq.phys_addr, q->rspq.size,
2480 q->fl[0].buf_size, 1, 0);
2481 if (ret)
2482 goto err_unlock;
2484 for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
2485 ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
2486 q->fl[i].phys_addr, q->fl[i].size,
2487 q->fl[i].buf_size, p->cong_thres, 1,
2488 0);
2489 if (ret)
2490 goto err_unlock;
2493 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
2494 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
2495 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
2496 1, 0);
2497 if (ret)
2498 goto err_unlock;
2500 if (ntxq > 1) {
2501 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
2502 USE_GTS, SGE_CNTXT_OFLD, id,
2503 q->txq[TXQ_OFLD].phys_addr,
2504 q->txq[TXQ_OFLD].size, 0, 1, 0);
2505 if (ret)
2506 goto err_unlock;
2509 if (ntxq > 2) {
2510 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
2511 SGE_CNTXT_CTRL, id,
2512 q->txq[TXQ_CTRL].phys_addr,
2513 q->txq[TXQ_CTRL].size,
2514 q->txq[TXQ_CTRL].token, 1, 0);
2515 if (ret)
2516 goto err_unlock;
2519 spin_unlock(&adapter->sge.reg_lock);
2520 q->netdev = netdev;
2521 t3_update_qset_coalesce(q, p);
2524 * We use atalk_ptr as a backpointer to a qset. In case a device is
2525 * associated with multiple queue sets, only the first one sets
2526 * atalk_ptr.
2528 if (netdev->atalk_ptr == NULL)
2529 netdev->atalk_ptr = q;
2531 refill_fl(adapter, &q->fl[0], q->fl[0].size, GFP_KERNEL);
2532 refill_fl(adapter, &q->fl[1], q->fl[1].size, GFP_KERNEL);
2533 refill_rspq(adapter, &q->rspq, q->rspq.size - 1);
2535 t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
2536 V_NEWTIMER(q->rspq.holdoff_tmr));
2538 mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
2539 return 0;
2541 err_unlock:
2542 spin_unlock(&adapter->sge.reg_lock);
2543 err:
2544 t3_free_qset(adapter, q);
2545 return ret;
2546 }
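/*
 * Illustrative sketch, not part of the driver: a typical call from the
 * top-level driver, allocating queue set 'id' for a single port and binding
 * it to that port's net device.  The parameter source, the IRQ vector index,
 * and the use of SGE_TXQ_PER_SET are assumptions made only for this example.
 */
static inline int example_alloc_one_qset(struct adapter *adap,
					 struct net_device *dev, int id)
{
	return t3_sge_alloc_qset(adap, id, 1, id,
				 &adap->params.sge.qset[id],
				 SGE_TXQ_PER_SET, dev);
}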
2549 * t3_free_sge_resources - free SGE resources
2550 * @adap: the adapter
2552 * Frees resources used by the SGE queue sets.
2554 void t3_free_sge_resources(struct adapter *adap)
2556 int i;
2558 for (i = 0; i < SGE_QSETS; ++i)
2559 t3_free_qset(adap, &adap->sge.qs[i]);
2563 * t3_sge_start - enable SGE
2564 * @adap: the adapter
2566 * Enables the SGE for DMAs. This is the last step in starting packet
2567 * transfers.
2569 void t3_sge_start(struct adapter *adap)
2571 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
2575 * t3_sge_stop - disable SGE operation
2576 * @adap: the adapter
2578 * Disables the DMA engine. This can be called in emergencies (e.g.,
2579 * from error interrupts) or from normal process context. In the latter
2580 * case it also disables any pending queue restart tasklets. Note that
2581 * if it is called in interrupt context it cannot disable the restart
2582 * tasklets as it cannot wait; however, the tasklets will have no effect
2583 * since the doorbells are disabled and the driver will call this again
2584 * later from process context, at which time the tasklets will be stopped
2585 * if they are still running.
2587 void t3_sge_stop(struct adapter *adap)
2589 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
2590 if (!in_interrupt()) {
2591 int i;
2593 for (i = 0; i < SGE_QSETS; ++i) {
2594 struct sge_qset *qs = &adap->sge.qs[i];
2596 tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
2597 tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
2603 * t3_sge_init - initialize SGE
2604 * @adap: the adapter
2605 * @p: the SGE parameters
2607 * Performs SGE initialization needed every time after a chip reset.
2608 * We do not initialize any of the queue sets here; instead, the driver's
2609 * top level must request those individually. We also do not enable DMA
2610 * here; that should be done after the queues have been set up.
2612 void t3_sge_init(struct adapter *adap, struct sge_params *p)
2614 unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);
2616 ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
2617 F_CQCRDTCTRL |
2618 V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
2619 V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
2620 #if SGE_NUM_GENBITS == 1
2621 ctrl |= F_EGRGENCTRL;
2622 #endif
2623 if (adap->params.rev > 0) {
2624 if (!(adap->flags & (USING_MSIX | USING_MSI)))
2625 ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
2626 ctrl |= F_CQCRDTCTRL | F_AVOIDCQOVFL;
2628 t3_write_reg(adap, A_SG_CONTROL, ctrl);
2629 t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
2630 V_LORCQDRBTHRSH(512));
2631 t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
2632 t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
2633 V_TIMEOUT(100 * core_ticks_per_usec(adap)));
2634 t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH, 1000);
2635 t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
2636 t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
2637 t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
2638 t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
2639 t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
2643 * t3_sge_prep - one-time SGE initialization
2644 * @adap: the associated adapter
2645 * @p: SGE parameters
2647 * Performs one-time initialization of SGE SW state. Includes determining
2648 * defaults for the assorted SGE parameters, which admins can change until
2649 * they are used to initialize the SGE.
2651 void __devinit t3_sge_prep(struct adapter *adap, struct sge_params *p)
2653 int i;
2655 p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) -
2656 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2658 for (i = 0; i < SGE_QSETS; ++i) {
2659 struct qset_params *q = p->qset + i;
2661 q->polling = adap->params.rev > 0;
2662 q->coalesce_usecs = 5;
2663 q->rspq_size = 1024;
2664 q->fl_size = 4096;
2665 q->jumbo_size = 512;
2666 q->txq_size[TXQ_ETH] = 1024;
2667 q->txq_size[TXQ_OFLD] = 1024;
2668 q->txq_size[TXQ_CTRL] = 256;
2669 q->cong_thres = 0;
2672 spin_lock_init(&adap->sge.reg_lock);
2673 }
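/*
 * Illustrative sketch, not part of the driver: per the comment above, the
 * defaults picked here may be adjusted at any point before the queue sets
 * are allocated.  The particular values are arbitrary examples.
 */
static inline void example_tune_qset0(struct sge_params *p)
{
	p->qset[0].coalesce_usecs = 10;	/* 10us interrupt holdoff */
	p->qset[0].rspq_size = 2048;	/* deeper response queue */
	p->qset[0].polling = 0;		/* force non-NAPI servicing */
}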
2676 * t3_get_desc - dump an SGE descriptor for debugging purposes
2677 * @qs: the queue set
2678 * @qnum: identifies the specific queue (0..2: Tx, 3: response, 4..5: Rx)
2679 * @idx: the descriptor index in the queue
2680 * @data: where to dump the descriptor contents
2682 * Dumps the contents of a HW descriptor of an SGE queue. Returns the
2683 * size of the descriptor.
2685 int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
2686 unsigned char *data)
2688 if (qnum >= 6)
2689 return -EINVAL;
2691 if (qnum < 3) {
2692 if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
2693 return -EINVAL;
2694 memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));
2695 return sizeof(struct tx_desc);
2698 if (qnum == 3) {
2699 if (!qs->rspq.desc || idx >= qs->rspq.size)
2700 return -EINVAL;
2701 memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc));
2702 return sizeof(struct rsp_desc);
2705 qnum -= 4;
2706 if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size)
2707 return -EINVAL;
2708 memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc));
2709 return sizeof(struct rx_desc);
2710 }
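/*
 * Illustrative sketch, not part of the driver: using t3_get_desc() from a
 * debug path to dump a response-queue descriptor.  Queue number 3 selects
 * the response queue per the mapping documented above; the caller's buffer
 * must be large enough for the descriptor type being read.
 */
static inline int example_dump_rsp_desc(const struct sge_qset *qs,
					unsigned int idx, unsigned char *buf)
{
	/* buf must provide at least sizeof(struct rsp_desc) bytes */
	return t3_get_desc(qs, 3, idx, buf);
}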