/*****************************************************************************
 * $Date: 2005/06/21 18:29:48 $
 *
 * part of the Chelsio 10Gb Ethernet Driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * http://www.chelsio.com
 *
 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.
 * All rights reserved.
 *
 * Maintainers: maintainers@chelsio.com
 *
 * Authors: Dimitrios Michailidis   <dm@chelsio.com>
 *          Tina Yang               <tainay@chelsio.com>
 *          Felix Marti             <felix@chelsio.com>
 *          Scott Bardone           <sbardone@chelsio.com>
 *          Kurt Ottaway            <kottaway@chelsio.com>
 *          Frank DiMambro          <frank@chelsio.com>
 *
 ****************************************************************************/
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/ktime.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/tcp.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
/* This belongs in if_ether.h */
#define ETH_P_CPL5 0xf

#define SGE_CMDQ_N		2	/* implied by cmdQ[0]/cmdQ[1] usage below */
#define SGE_FREELQ_N		2
#define SGE_CMDQ0_E_N		1024
#define SGE_CMDQ1_E_N		128
#define SGE_FREEL_SIZE		4096
#define SGE_JUMBO_FREEL_SIZE	512
#define SGE_FREEL_REFILL_THRESH	16
#define SGE_RESPQ_E_N		1024
#define SGE_INTRTIMER_NRES	1000
#define SGE_RX_SM_BUF_SIZE	1536
#define SGE_TX_DESC_MAX_PLEN	16384

#define SGE_RESPQ_REPLENISH_THRES (SGE_RESPQ_E_N / 4)

/*
 * Period of the TX buffer reclaim timer.  This timer does not need to run
 * frequently as TX buffers are usually reclaimed by new TX packets.
 */
#define TX_RECLAIM_PERIOD (HZ / 4)

#define M_CMD_LEN       0x7fffffff
#define V_CMD_LEN(v)    (v)
#define G_CMD_LEN(v)    ((v) & M_CMD_LEN)
#define V_CMD_GEN1(v)   ((v) << 31)
#define V_CMD_GEN2(v)   (v)
#define F_CMD_DATAVALID (1 << 1)
#define F_CMD_SOP       (1 << 2)
#define V_CMD_EOP(v)    ((v) << 3)
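/*
 * Example of the encoding above (illustrative note): a 1536-byte buffer
 * written with generation bit 1 gives len_gen = V_CMD_LEN(1536) |
 * V_CMD_GEN1(1) = 0x80000600, i.e. the length in bits 30:0 and the
 * generation in bit 31, while V_CMD_GEN2() places a second copy of the
 * generation bit in the gen2/flags word.  The second copy is written last
 * when a descriptor is filled in (see refill_free_list() and
 * recycle_fl_buf() below), so a descriptor whose two generation bits agree
 * with the ring's current genbit can be treated as completely written.
 */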
/*
 * Command queue, receive buffer list, and response queue descriptors.
 * (Only the response queue descriptor survives here; the remaining fields
 * are filled in from their uses later in this file.)
 */
#if defined(__BIG_ENDIAN_BITFIELD)
struct respQ_e {
	u32 Qsleeping		: 4;
	u32 Cmdq1CreditReturn	: 5;
	u32 Cmdq1DmaComplete	: 5;
	u32 Cmdq0CreditReturn	: 5;
	u32 Cmdq0DmaComplete	: 5;
	u32 FreelistQid		: 2;
	u32 CreditValid		: 1;
	u32 DataValid		: 1;
	u32 Offload		: 1;
	u32 Eop			: 1;
	u32 Sop			: 1;
	u32 GenerationBit	: 1;
	u32 BufferLength;
};
#elif defined(__LITTLE_ENDIAN_BITFIELD)
struct respQ_e {
	u32 GenerationBit	: 1;
	u32 Sop			: 1;
	u32 Eop			: 1;
	u32 Offload		: 1;
	u32 DataValid		: 1;
	u32 CreditValid		: 1;
	u32 FreelistQid		: 2;
	u32 Cmdq0DmaComplete	: 5;
	u32 Cmdq0CreditReturn	: 5;
	u32 Cmdq1DmaComplete	: 5;
	u32 Cmdq1CreditReturn	: 5;
	u32 Qsleeping		: 4;
	u32 BufferLength;
};
#endif
/*
 * SW Context Command and Freelist Queue Descriptors
 */
struct cmdQ_ce {
	struct sk_buff *skb;
	DEFINE_DMA_UNMAP_ADDR(dma_addr);
	DEFINE_DMA_UNMAP_LEN(dma_len);
};

struct freelQ_ce {
	struct sk_buff *skb;
	DEFINE_DMA_UNMAP_ADDR(dma_addr);
	DEFINE_DMA_UNMAP_LEN(dma_len);
};
/*
 * SW command, freelist and response rings
 */
struct cmdQ {
	unsigned long	status;		/* HW DMA fetch status */
	unsigned int	in_use;		/* # of in-use command descriptors */
	unsigned int	size;		/* # of descriptors */
	unsigned int	processed;	/* total # of descs HW has processed */
	unsigned int	cleaned;	/* total # of descs SW has reclaimed */
	unsigned int	stop_thres;	/* SW TX queue suspend threshold */
	u16		pidx;		/* producer index (SW) */
	u16		cidx;		/* consumer index (HW) */
	u8		genbit;		/* current generation (=valid) bit */
	u8		sop;		/* is next entry start of packet? */
	struct cmdQ_e	*entries;	/* HW command descriptor Q */
	struct cmdQ_ce	*centries;	/* SW command context descriptor Q */
	dma_addr_t	dma_addr;	/* DMA addr HW command descriptor Q */
	spinlock_t	lock;		/* Lock to protect cmdQ enqueuing */
};
struct freelQ {
	unsigned int	credits;	/* # of available RX buffers */
	unsigned int	size;		/* free list capacity */
	u16		pidx;		/* producer index (SW) */
	u16		cidx;		/* consumer index (HW) */
	u16		rx_buffer_size;	/* Buffer size on this free list */
	u16		dma_offset;	/* DMA offset to align IP headers */
	u16		recycleq_idx;	/* skb recycle q to use */
	u8		genbit;		/* current generation (=valid) bit */
	struct freelQ_e	*entries;	/* HW freelist descriptor Q */
	struct freelQ_ce *centries;	/* SW freelist context descriptor Q */
	dma_addr_t	dma_addr;	/* DMA addr HW freelist descriptor Q */
};
struct respQ {
	unsigned int	credits;	/* credits to be returned to SGE */
	unsigned int	size;		/* # of response Q descriptors */
	u16		cidx;		/* consumer index (SW) */
	u8		genbit;		/* current generation(=valid) bit */
	struct respQ_e	*entries;	/* HW response descriptor Q */
	dma_addr_t	dma_addr;	/* DMA addr HW response descriptor Q */
};
/* Bit flags for cmdQ.status */
enum {
	CMDQ_STAT_RUNNING = 1,		/* fetch engine is running */
	CMDQ_STAT_LAST_PKT_DB = 2	/* last packet rung the doorbell */
};
/* T204 TX SW scheduler */

/* Per T204 TX port */
struct sched_port {
	unsigned int	avail;		/* available bits - quota */
	unsigned int	drain_bits_per_1024ns; /* drain rate */
	unsigned int	speed;		/* drain rate, mbps */
	unsigned int	mtu;		/* mtu size */
	struct sk_buff_head skbq;	/* pending skbs */
};

/* Per T204 device */
struct sched {
	ktime_t		last_updated;	/* last time quotas were computed */
	unsigned int	max_avail;	/* max bits to be sent to any port */
	unsigned int	port;		/* port index (round robin ports) */
	unsigned int	num;		/* num skbs in per port queues */
	struct sched_port p[MAX_NPORTS];
	struct tasklet_struct sched_tsk;/* tasklet used to run scheduler */
};
static void restart_sched(unsigned long);
/*
 * Main SGE data structure
 *
 * Interrupts are handled by a single CPU and it is likely that on a MP system
 * the application is migrated to another CPU. In that scenario, we try to
 * separate the RX (in irq context) and TX state in order to decrease memory
 * contention.
 */
struct sge {
	struct adapter	*adapter;	/* adapter backpointer */
	struct net_device *netdev;	/* netdevice backpointer */
	struct freelQ	freelQ[SGE_FREELQ_N]; /* buffer free lists */
	struct respQ	respQ;		/* response Q */
	unsigned long	stopped_tx_queues; /* bitmap of suspended Tx queues */
	unsigned int	rx_pkt_pad;	/* RX padding for L2 packets */
	unsigned int	jumbo_fl;	/* jumbo freelist Q index */
	unsigned int	intrtimer_nres;	/* no-resource interrupt timer */
	unsigned int	fixed_intrtimer;/* non-adaptive interrupt timer */
	struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
	struct timer_list espibug_timer;
	unsigned long	espibug_timeout;
	struct sk_buff	*espibug_skb[MAX_NPORTS];
	u32		sge_control;	/* shadow value of sge control reg */
	struct sge_intr_counts stats;
	struct sge_port_stats __percpu *port_stats[MAX_NPORTS];
	struct sched	*tx_sched;
	struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp;
};
/*
 * stop tasklet and free all pending skb's
 */
static void tx_sched_stop(struct sge *sge)
{
	struct sched *s = sge->tx_sched;
	int i;

	tasklet_kill(&s->sched_tsk);

	for (i = 0; i < MAX_NPORTS; i++)
		__skb_queue_purge(&s->p[s->port].skbq);
}
/*
 * t1_sched_update_parms() is called when the MTU or link speed changes. It
 * re-computes scheduler parameters to cope with the change.
 */
unsigned int t1_sched_update_parms(struct sge *sge, unsigned int port,
				   unsigned int mtu, unsigned int speed)
{
	struct sched *s = sge->tx_sched;
	struct sched_port *p = &s->p[port];
	unsigned int max_avail_segs;

	pr_debug("t1_sched_update_parms mtu=%d speed=%d\n", mtu, speed);
	if (speed)
		p->speed = speed;
	if (mtu)
		p->mtu = mtu;

	if (speed || mtu) {
		unsigned long long drain = 1024ULL * p->speed * (p->mtu - 40);
		do_div(drain, (p->mtu + 50) * 1000);
		p->drain_bits_per_1024ns = (unsigned int) drain;

		if (p->speed < 1000)
			p->drain_bits_per_1024ns =
				90 * p->drain_bits_per_1024ns / 100;
	}

	if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) {
		p->drain_bits_per_1024ns -= 16;
		s->max_avail = max(4096U, p->mtu + 16 + 14 + 4);
		max_avail_segs = max(1U, 4096 / (p->mtu - 40));
	} else {
		s->max_avail = 16384;
		max_avail_segs = max(1U, 9000 / (p->mtu - 40));
	}

	pr_debug("t1_sched_update_parms: mtu %u speed %u max_avail %u "
		 "max_avail_segs %u drain_bits_per_1024ns %u\n", p->mtu,
		 p->speed, s->max_avail, max_avail_segs,
		 p->drain_bits_per_1024ns);

	return max_avail_segs * (p->mtu - 40);
}
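/*
 * Worked example for the drain-rate formula above (illustrative note): at
 * speed = 1000 (Mbps) and mtu = 1500, the usable payload fraction is
 * (1500 - 40) / (1500 + 50), so drain = 1024 * 1000 * 1460 / (1550 * 1000)
 * ~= 964, i.e. roughly 964 payload bits drain per 1024 ns (~941 Mbps of
 * payload).  The subtracted 40 bytes appear to account for TCP/IP header
 * overhead and the added 50 bytes for per-packet wire overhead.
 */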
/*
 * get_clock() implements a ns clock (see ktime_get)
 */
static inline ktime_t get_clock(void)
{
	struct timespec ts;

	ktime_get_ts(&ts);
	return timespec_to_ktime(ts);
}
/*
 * tx_sched_init() allocates resources and does basic initialization.
 */
static int tx_sched_init(struct sge *sge)
{
	struct sched *s;
	int i;

	s = kzalloc(sizeof (struct sched), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	pr_debug("tx_sched_init\n");
	tasklet_init(&s->sched_tsk, restart_sched, (unsigned long) sge);
	sge->tx_sched = s;

	for (i = 0; i < MAX_NPORTS; i++) {
		skb_queue_head_init(&s->p[i].skbq);
		t1_sched_update_parms(sge, i, 1500, 1000);
	}

	return 0;
}
/*
 * sched_update_avail() computes the delta since the last time it was called
 * and updates the per port quota (number of bits that can be sent to any
 * port).
 */
static inline int sched_update_avail(struct sge *sge)
{
	struct sched *s = sge->tx_sched;
	ktime_t now = get_clock();
	unsigned int i;
	long long delta_time_ns;

	delta_time_ns = ktime_to_ns(ktime_sub(now, s->last_updated));

	pr_debug("sched_update_avail delta=%lld\n", delta_time_ns);
	if (delta_time_ns < 15000)
		return 0;

	for (i = 0; i < MAX_NPORTS; i++) {
		struct sched_port *p = &s->p[i];
		unsigned int delta_avail;

		delta_avail = (p->drain_bits_per_1024ns * delta_time_ns) >> 13;
		p->avail = min(p->avail + delta_avail, s->max_avail);
	}

	s->last_updated = now;

	return 1;
}
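/*
 * Note on units (illustrative): the right shift by 13 combines the /1024
 * that normalizes delta_time_ns to the 1024 ns granularity of
 * drain_bits_per_1024ns with a further /8, so delta_avail effectively comes
 * out in bytes.  That keeps p->avail directly comparable with skb->len in
 * sched_skb() below.
 */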
/*
 * sched_skb() is called from two different places. In the tx path, any
 * packet generating load on an output port will call sched_skb()
 * (skb != NULL). In addition, sched_skb() is called from the irq/soft irq
 * context (skb == NULL).
 * The scheduler only returns a skb (which will then be sent) if the
 * length of the skb is <= the current quota of the output port.
 */
static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb,
				 unsigned int credits)
{
	struct sched *s = sge->tx_sched;
	struct sk_buff_head *skbq;
	unsigned int i, len, update = 1;

	pr_debug("sched_skb %p\n", skb);
	if (!skb) {
		if (!s->num)
			return NULL;
	} else {
		skbq = &s->p[skb->dev->if_port].skbq;
		__skb_queue_tail(skbq, skb);
		s->num++;
		skb = NULL;
	}

	if (credits < MAX_SKB_FRAGS + 1)
		goto out;

again:
	for (i = 0; i < MAX_NPORTS; i++) {
		s->port = (s->port + 1) & (MAX_NPORTS - 1);
		skbq = &s->p[s->port].skbq;

		skb = skb_peek(skbq);

		if (!skb)
			continue;

		len = skb->len;
		if (len <= s->p[s->port].avail) {
			s->p[s->port].avail -= len;
			s->num--;
			__skb_unlink(skb, skbq);
			goto out;
		}
		skb = NULL;
	}

	if (update-- && sched_update_avail(sge))
		goto again;

out:
	/* If there are more pending skbs, we use the hardware to schedule us
	 * again.
	 */
	if (s->num && !skb) {
		struct cmdQ *q = &sge->cmdQ[0];
		clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
		if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
			set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
			writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
		}
	}
	pr_debug("sched_skb ret %p\n", skb);

	return skb;
}
/*
 * PIO to indicate that memory mapped Q contains valid descriptor(s).
 */
static inline void doorbell_pio(struct adapter *adapter, u32 val)
{
	wmb();
	writel(val, adapter->regs + A_SG_DOORBELL);
}
/*
 * Frees all RX buffers on the freelist Q. The caller must make sure that
 * the SGE is turned off before calling this function.
 */
static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q)
{
	unsigned int cidx = q->cidx;

	while (q->credits--) {
		struct freelQ_ce *ce = &q->centries[cidx];

		pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
				 dma_unmap_len(ce, dma_len),
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb(ce->skb);
		ce->skb = NULL;
		if (++cidx == q->size)
			cidx = 0;
	}
}
/*
 * Free RX free list and response queue resources.
 */
static void free_rx_resources(struct sge *sge)
{
	struct pci_dev *pdev = sge->adapter->pdev;
	unsigned int size, i;

	if (sge->respQ.entries) {
		size = sizeof(struct respQ_e) * sge->respQ.size;
		pci_free_consistent(pdev, size, sge->respQ.entries,
				    sge->respQ.dma_addr);
	}

	for (i = 0; i < SGE_FREELQ_N; i++) {
		struct freelQ *q = &sge->freelQ[i];

		if (q->centries) {
			free_freelQ_buffers(pdev, q);
			kfree(q->centries);
		}
		if (q->entries) {
			size = sizeof(struct freelQ_e) * q->size;
			pci_free_consistent(pdev, size, q->entries,
					    q->dma_addr);
		}
	}
}
/*
 * Allocates basic RX resources, consisting of memory mapped freelist Qs and a
 * response queue.
 */
static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
{
	struct pci_dev *pdev = sge->adapter->pdev;
	unsigned int size, i;

	for (i = 0; i < SGE_FREELQ_N; i++) {
		struct freelQ *q = &sge->freelQ[i];

		q->genbit = 1;
		q->size = p->freelQ_size[i];
		q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN;
		size = sizeof(struct freelQ_e) * q->size;
		q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
		if (!q->entries)
			goto err_no_mem;

		size = sizeof(struct freelQ_ce) * q->size;
		q->centries = kzalloc(size, GFP_KERNEL);
		if (!q->centries)
			goto err_no_mem;
	}

	/*
	 * Calculate the buffer sizes for the two free lists.  FL0 accommodates
	 * regular sized Ethernet frames, FL1 is sized not to exceed 16K,
	 * including all the sk_buff overhead.
	 *
	 * Note: For T2 FL0 and FL1 are reversed.
	 */
	sge->freelQ[!sge->jumbo_fl].rx_buffer_size = SGE_RX_SM_BUF_SIZE +
		sizeof(struct cpl_rx_data) +
		sge->freelQ[!sge->jumbo_fl].dma_offset;

	size = (16 * 1024) -
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	sge->freelQ[sge->jumbo_fl].rx_buffer_size = size;

	/*
	 * Setup which skb recycle Q should be used when recycling buffers from
	 * each free list.
	 */
	sge->freelQ[!sge->jumbo_fl].recycleq_idx = 0;
	sge->freelQ[sge->jumbo_fl].recycleq_idx = 1;

	sge->respQ.genbit = 1;
	sge->respQ.size = SGE_RESPQ_E_N;
	sge->respQ.credits = 0;
	size = sizeof(struct respQ_e) * sge->respQ.size;
	sge->respQ.entries =
		pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr);
	if (!sge->respQ.entries)
		goto err_no_mem;
	return 0;

err_no_mem:
	free_rx_resources(sge);
	return -ENOMEM;
}
/*
 * Reclaims n TX descriptors and frees the buffers associated with them.
 */
static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
{
	struct cmdQ_ce *ce;
	struct pci_dev *pdev = sge->adapter->pdev;
	unsigned int cidx = q->cidx;

	q->in_use -= n;
	ce = &q->centries[cidx];
	while (n--) {
		if (likely(dma_unmap_len(ce, dma_len))) {
			pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
					 dma_unmap_len(ce, dma_len),
					 PCI_DMA_TODEVICE);
			if (q->sop)
				q->sop = 0;
		}
		if (ce->skb) {
			dev_kfree_skb_any(ce->skb);
			q->sop = 1;
		}
		ce++;
		if (++cidx == q->size) {
			cidx = 0;
			ce = q->centries;
		}
	}
	q->cidx = cidx;
}
/*
 * Free TX resources.
 *
 * Assumes that SGE is stopped and all interrupts are disabled.
 */
static void free_tx_resources(struct sge *sge)
{
	struct pci_dev *pdev = sge->adapter->pdev;
	unsigned int size, i;

	for (i = 0; i < SGE_CMDQ_N; i++) {
		struct cmdQ *q = &sge->cmdQ[i];

		if (q->centries) {
			if (q->in_use)
				free_cmdQ_buffers(sge, q, q->in_use);
			kfree(q->centries);
		}
		if (q->entries) {
			size = sizeof(struct cmdQ_e) * q->size;
			pci_free_consistent(pdev, size, q->entries,
					    q->dma_addr);
		}
	}
}
/*
 * Allocates basic TX resources, consisting of memory mapped command Qs.
 */
static int alloc_tx_resources(struct sge *sge, struct sge_params *p)
{
	struct pci_dev *pdev = sge->adapter->pdev;
	unsigned int size, i;

	for (i = 0; i < SGE_CMDQ_N; i++) {
		struct cmdQ *q = &sge->cmdQ[i];

		q->genbit = 1;
		q->sop = 1;
		q->size = p->cmdQ_size[i];
		q->in_use = 0;
		q->status = 0;
		q->processed = q->cleaned = 0;
		q->stop_thres = 0;
		spin_lock_init(&q->lock);
		size = sizeof(struct cmdQ_e) * q->size;
		q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
		if (!q->entries)
			goto err_no_mem;

		size = sizeof(struct cmdQ_ce) * q->size;
		q->centries = kzalloc(size, GFP_KERNEL);
		if (!q->centries)
			goto err_no_mem;
	}

	/*
	 * CommandQ 0 handles Ethernet and TOE packets, while queue 1 is TOE
	 * only.  For queue 0 set the stop threshold so we can handle one more
	 * packet from each port, plus reserve an additional 24 entries for
	 * Ethernet packets only.  Queue 1 never suspends nor do we reserve
	 * space for Ethernet packets.
	 */
	sge->cmdQ[0].stop_thres = sge->adapter->params.nports *
		(MAX_SKB_FRAGS + 1);
	return 0;

err_no_mem:
	free_tx_resources(sge);
	return -ENOMEM;
}
static inline void setup_ring_params(struct adapter *adapter, u64 addr,
				     u32 size, int base_reg_lo,
				     int base_reg_hi, int size_reg)
{
	writel((u32)addr, adapter->regs + base_reg_lo);
	writel(addr >> 32, adapter->regs + base_reg_hi);
	writel(size, adapter->regs + size_reg);
}
/*
 * Enable/disable VLAN acceleration.
 */
void t1_set_vlan_accel(struct adapter *adapter, int on_off)
{
	struct sge *sge = adapter->sge;

	sge->sge_control &= ~F_VLAN_XTRACT;
	if (on_off)
		sge->sge_control |= F_VLAN_XTRACT;
	if (adapter->open_device_map) {
		writel(sge->sge_control, adapter->regs + A_SG_CONTROL);
		readl(adapter->regs + A_SG_CONTROL); /* flush */
	}
}
/*
 * Programs the various SGE registers. However, the engine is not yet enabled,
 * but sge->sge_control is setup and ready to go.
 */
static void configure_sge(struct sge *sge, struct sge_params *p)
{
	struct adapter *ap = sge->adapter;

	writel(0, ap->regs + A_SG_CONTROL);
	setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size,
			  A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE);
	setup_ring_params(ap, sge->cmdQ[1].dma_addr, sge->cmdQ[1].size,
			  A_SG_CMD1BASELWR, A_SG_CMD1BASEUPR, A_SG_CMD1SIZE);
	setup_ring_params(ap, sge->freelQ[0].dma_addr,
			  sge->freelQ[0].size, A_SG_FL0BASELWR,
			  A_SG_FL0BASEUPR, A_SG_FL0SIZE);
	setup_ring_params(ap, sge->freelQ[1].dma_addr,
			  sge->freelQ[1].size, A_SG_FL1BASELWR,
			  A_SG_FL1BASEUPR, A_SG_FL1SIZE);

	/* The threshold comparison uses <. */
	writel(SGE_RX_SM_BUF_SIZE + 1, ap->regs + A_SG_FLTHRESHOLD);

	setup_ring_params(ap, sge->respQ.dma_addr, sge->respQ.size,
			  A_SG_RSPBASELWR, A_SG_RSPBASEUPR, A_SG_RSPSIZE);
	writel((u32)sge->respQ.size - 1, ap->regs + A_SG_RSPQUEUECREDIT);

	sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE |
		F_FL1_ENABLE | F_CPL_ENABLE | F_RESPONSE_QUEUE_ENABLE |
		V_CMDQ_PRIORITY(2) | F_DISABLE_CMDQ1_GTS | F_ISCSI_COALESCE |
		V_RX_PKT_OFFSET(sge->rx_pkt_pad);

#if defined(__BIG_ENDIAN_BITFIELD)
	sge->sge_control |= F_ENABLE_BIG_ENDIAN;
#endif

	/* Initialize no-resource timer */
	sge->intrtimer_nres = SGE_INTRTIMER_NRES * core_ticks_per_usec(ap);

	t1_sge_set_coalesce_params(sge, p);
}
/*
 * Return the payload capacity of the jumbo free-list buffers.
 */
static inline unsigned int jumbo_payload_capacity(const struct sge *sge)
{
	return sge->freelQ[sge->jumbo_fl].rx_buffer_size -
		sge->freelQ[sge->jumbo_fl].dma_offset -
		sizeof(struct cpl_rx_data);
}
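/*
 * Illustrative note: with the jumbo buffers sized in alloc_rx_resources()
 * above, this evaluates to 16KB minus the SKB_DATA_ALIGN'd skb_shared_info
 * overhead, minus the CPL_RX_DATA header and any IP-alignment offset; the
 * exact value therefore varies with the architecture's alignment rules.
 */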
/*
 * Frees all SGE related resources and the sge structure itself
 */
void t1_sge_destroy(struct sge *sge)
{
	int i;

	for_each_port(sge->adapter, i)
		free_percpu(sge->port_stats[i]);

	kfree(sge->tx_sched);
	free_tx_resources(sge);
	free_rx_resources(sge);
	kfree(sge);
}
/*
 * Allocates new RX buffers on the freelist Q (and tracks them on the freelist
 * context Q) until the Q is full or alloc_skb fails.
 *
 * It is possible that the generation bits already match, indicating that the
 * buffer is already valid and nothing needs to be done. This happens when we
 * copied a received buffer into a new sk_buff during the interrupt processing.
 *
 * If the SGE doesn't automatically align packets properly (!sge->rx_pkt_pad),
 * we specify a RX_OFFSET in order to make sure that the IP header is 4B
 * aligned.
 */
static void refill_free_list(struct sge *sge, struct freelQ *q)
{
	struct pci_dev *pdev = sge->adapter->pdev;
	struct freelQ_ce *ce = &q->centries[q->pidx];
	struct freelQ_e *e = &q->entries[q->pidx];
	unsigned int dma_len = q->rx_buffer_size - q->dma_offset;

	while (q->credits < q->size) {
		struct sk_buff *skb;
		dma_addr_t mapping;

		skb = alloc_skb(q->rx_buffer_size, GFP_ATOMIC);
		if (!skb)
			break;

		skb_reserve(skb, q->dma_offset);
		mapping = pci_map_single(pdev, skb->data, dma_len,
					 PCI_DMA_FROMDEVICE);
		skb_reserve(skb, sge->rx_pkt_pad);

		ce->skb = skb;
		dma_unmap_addr_set(ce, dma_addr, mapping);
		dma_unmap_len_set(ce, dma_len, dma_len);
		e->addr_lo = (u32)mapping;
		e->addr_hi = (u64)mapping >> 32;
		e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit);
		wmb();
		e->gen2 = V_CMD_GEN2(q->genbit);

		e++;
		ce++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			q->genbit ^= 1;
			ce = q->centries;
			e = q->entries;
		}
		q->credits++;
	}
}
/*
 * Calls refill_free_list for both free lists. If we cannot fill at least 1/4
 * of both rings, we go into 'few interrupt mode' in order to give the system
 * time to free up resources.
 */
static void freelQs_empty(struct sge *sge)
{
	struct adapter *adapter = sge->adapter;
	u32 irq_reg = readl(adapter->regs + A_SG_INT_ENABLE);
	u32 irqholdoff_reg;

	refill_free_list(sge, &sge->freelQ[0]);
	refill_free_list(sge, &sge->freelQ[1]);

	if (sge->freelQ[0].credits > (sge->freelQ[0].size >> 2) &&
	    sge->freelQ[1].credits > (sge->freelQ[1].size >> 2)) {
		irq_reg |= F_FL_EXHAUSTED;
		irqholdoff_reg = sge->fixed_intrtimer;
	} else {
		/* Clear the F_FL_EXHAUSTED interrupts for now */
		irq_reg &= ~F_FL_EXHAUSTED;
		irqholdoff_reg = sge->intrtimer_nres;
	}
	writel(irqholdoff_reg, adapter->regs + A_SG_INTRTIMER);
	writel(irq_reg, adapter->regs + A_SG_INT_ENABLE);

	/* We reenable the Qs to force a freelist GTS interrupt later */
	doorbell_pio(adapter, F_FL0_ENABLE | F_FL1_ENABLE);
}
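/*
 * Illustrative note: the two holdoff values written above implement a simple
 * form of adaptive interrupt coalescing.  While buffers are plentiful, the
 * short fixed_intrtimer keeps latency low; once the free lists run dry, the
 * much longer intrtimer_nres spaces interrupts out so the stack gets a
 * chance to return buffers instead of being interrupted again immediately.
 */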
#define SGE_PL_INTR_MASK (F_PL_INTR_SGE_ERR | F_PL_INTR_SGE_DATA)
#define SGE_INT_FATAL (F_RESPQ_OVERFLOW | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
#define SGE_INT_ENABLE (F_RESPQ_EXHAUSTED | F_RESPQ_OVERFLOW | \
			F_FL_EXHAUSTED | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
/*
 * Disable SGE Interrupts
 */
void t1_sge_intr_disable(struct sge *sge)
{
	u32 val = readl(sge->adapter->regs + A_PL_ENABLE);

	writel(val & ~SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
	writel(0, sge->adapter->regs + A_SG_INT_ENABLE);
}
/*
 * Enable SGE interrupts.
 */
void t1_sge_intr_enable(struct sge *sge)
{
	u32 en = SGE_INT_ENABLE;
	u32 val = readl(sge->adapter->regs + A_PL_ENABLE);

	if (sge->adapter->flags & TSO_CAPABLE)
		en &= ~F_PACKET_TOO_BIG;
	writel(en, sge->adapter->regs + A_SG_INT_ENABLE);
	writel(val | SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
}
/*
 * Clear SGE interrupts.
 */
void t1_sge_intr_clear(struct sge *sge)
{
	writel(SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_CAUSE);
	writel(0xffffffff, sge->adapter->regs + A_SG_INT_CAUSE);
}
/*
 * SGE 'Error' interrupt handler
 */
int t1_sge_intr_error_handler(struct sge *sge)
{
	struct adapter *adapter = sge->adapter;
	u32 cause = readl(adapter->regs + A_SG_INT_CAUSE);

	if (adapter->flags & TSO_CAPABLE)
		cause &= ~F_PACKET_TOO_BIG;
	if (cause & F_RESPQ_EXHAUSTED)
		sge->stats.respQ_empty++;
	if (cause & F_RESPQ_OVERFLOW) {
		sge->stats.respQ_overflow++;
		pr_alert("%s: SGE response queue overflow\n",
			 adapter->name);
	}
	if (cause & F_FL_EXHAUSTED) {
		sge->stats.freelistQ_empty++;
		freelQs_empty(sge);
	}
	if (cause & F_PACKET_TOO_BIG) {
		sge->stats.pkt_too_big++;
		pr_alert("%s: SGE max packet size exceeded\n",
			 adapter->name);
	}
	if (cause & F_PACKET_MISMATCH) {
		sge->stats.pkt_mismatch++;
		pr_alert("%s: SGE packet mismatch\n", adapter->name);
	}
	if (cause & SGE_INT_FATAL)
		t1_fatal_err(adapter);

	writel(cause, adapter->regs + A_SG_INT_CAUSE);
	return 0;
}
const struct sge_intr_counts *t1_sge_get_intr_counts(const struct sge *sge)
{
	return &sge->stats;
}
void t1_sge_get_port_stats(const struct sge *sge, int port,
			   struct sge_port_stats *ss)
{
	int cpu;

	memset(ss, 0, sizeof(*ss));
	for_each_possible_cpu(cpu) {
		struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[port], cpu);

		ss->rx_cso_good += st->rx_cso_good;
		ss->tx_cso += st->tx_cso;
		ss->tx_tso += st->tx_tso;
		ss->tx_need_hdrroom += st->tx_need_hdrroom;
		ss->vlan_xtract += st->vlan_xtract;
		ss->vlan_insert += st->vlan_insert;
	}
}
/**
 *	recycle_fl_buf - recycle a free list buffer
 *	@fl: the free list
 *	@idx: index of buffer to recycle
 *
 *	Recycles the specified buffer on the given free list by adding it at
 *	the next available slot on the list.
 */
static void recycle_fl_buf(struct freelQ *fl, int idx)
{
	struct freelQ_e *from = &fl->entries[idx];
	struct freelQ_e *to = &fl->entries[fl->pidx];

	fl->centries[fl->pidx] = fl->centries[idx];
	to->addr_lo = from->addr_lo;
	to->addr_hi = from->addr_hi;
	to->len_gen = G_CMD_LEN(from->len_gen) | V_CMD_GEN1(fl->genbit);
	wmb();
	to->gen2 = V_CMD_GEN2(fl->genbit);
	fl->credits++;

	if (++fl->pidx == fl->size) {
		fl->pidx = 0;
		fl->genbit ^= 1;
	}
}
static int copybreak __read_mostly = 256;
module_param(copybreak, int, 0);
MODULE_PARM_DESC(copybreak, "Receive copy threshold");
/**
 *	get_packet - return the next ingress packet buffer
 *	@pdev: the PCI device that received the packet
 *	@fl: the SGE free list holding the packet
 *	@len: the actual packet length, excluding any SGE padding
 *
 *	Get the next packet from a free list and complete setup of the
 *	sk_buff. If the packet is small we make a copy and recycle the
 *	original buffer, otherwise we use the original buffer itself. If a
 *	positive drop threshold is supplied packets are dropped and their
 *	buffers recycled if (a) the number of remaining buffers is under the
 *	threshold and the packet is too big to copy, or (b) the packet should
 *	be copied but there is no memory for the copy.
 */
static inline struct sk_buff *get_packet(struct pci_dev *pdev,
					 struct freelQ *fl, unsigned int len)
{
	struct sk_buff *skb;
	const struct freelQ_ce *ce = &fl->centries[fl->cidx];

	if (len < copybreak) {
		skb = alloc_skb(len + 2, GFP_ATOMIC);
		if (!skb)
			goto use_orig_buf;

		skb_reserve(skb, 2);	/* align IP header */
		skb_put(skb, len);
		pci_dma_sync_single_for_cpu(pdev,
					    dma_unmap_addr(ce, dma_addr),
					    dma_unmap_len(ce, dma_len),
					    PCI_DMA_FROMDEVICE);
		skb_copy_from_linear_data(ce->skb, skb->data, len);
		pci_dma_sync_single_for_device(pdev,
					       dma_unmap_addr(ce, dma_addr),
					       dma_unmap_len(ce, dma_len),
					       PCI_DMA_FROMDEVICE);
		recycle_fl_buf(fl, fl->cidx);
		return skb;
	}

use_orig_buf:
	if (fl->credits < 2) {
		recycle_fl_buf(fl, fl->cidx);
		return NULL;
	}

	pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
			 dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
	skb = ce->skb;
	prefetch(skb->data);
	skb_put(skb, len);
	return skb;
}
/**
 *	unexpected_offload - handle an unexpected offload packet
 *	@adapter: the adapter
 *	@fl: the free list that received the packet
 *
 *	Called when we receive an unexpected offload packet (e.g., the TOE
 *	function is disabled or the card is a NIC). Prints a message and
 *	recycles the buffer.
 */
static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
{
	struct freelQ_ce *ce = &fl->centries[fl->cidx];
	struct sk_buff *skb = ce->skb;

	pci_dma_sync_single_for_cpu(adapter->pdev, dma_unmap_addr(ce, dma_addr),
				    dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
	pr_err("%s: unexpected offload packet, cmd %u\n",
	       adapter->name, *skb->data);
	recycle_fl_buf(fl, fl->cidx);
}
/*
 * T1/T2 SGE limits the maximum DMA size per TX descriptor to
 * SGE_TX_DESC_MAX_PLEN (16KB). If the PAGE_SIZE is larger than 16KB, the
 * stack might send more than SGE_TX_DESC_MAX_PLEN in a contiguous manner.
 * Note that the *_large_page_tx_descs stuff will be optimized out when
 * PAGE_SIZE <= SGE_TX_DESC_MAX_PLEN.
 *
 * compute_large_page_tx_descs() computes how many additional descriptors
 * are required to break down the stack's request.
 */
static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb)
{
	unsigned int count = 0;

	if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
		unsigned int nfrags = skb_shinfo(skb)->nr_frags;
		unsigned int i, len = skb_headlen(skb);
		while (len > SGE_TX_DESC_MAX_PLEN) {
			count++;
			len -= SGE_TX_DESC_MAX_PLEN;
		}
		for (i = 0; nfrags--; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			len = frag->size;
			while (len > SGE_TX_DESC_MAX_PLEN) {
				count++;
				len -= SGE_TX_DESC_MAX_PLEN;
			}
		}
	}
	return count;
}
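/*
 * Worked example (illustrative note): on a 64KB-page architecture, a 20KB
 * linear skb area needs one extra descriptor beyond the one it would
 * normally use (20KB = 16KB + 4KB), and a full 64KB fragment needs three
 * extra (64KB = 3 * 16KB + 16KB).  On 4KB-page systems the PAGE_SIZE check
 * lets the compiler drop this function, and the matching
 * write_large_page_tx_descs() below, entirely.
 */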
/*
 * Write a cmdQ entry.
 *
 * Since this function writes the 'flags' field, it must not be used to
 * write the first cmdQ entry.
 */
static inline void write_tx_desc(struct cmdQ_e *e, dma_addr_t mapping,
				 unsigned int len, unsigned int gen,
				 unsigned int eop)
{
	BUG_ON(len > SGE_TX_DESC_MAX_PLEN);

	e->addr_lo = (u32)mapping;
	e->addr_hi = (u64)mapping >> 32;
	e->len_gen = V_CMD_LEN(len) | V_CMD_GEN1(gen);
	e->flags = F_CMD_DATAVALID | V_CMD_EOP(eop) | V_CMD_GEN2(gen);
}
/*
 * See comment for previous function.
 *
 * write_large_page_tx_descs() writes additional SGE tx descriptors if
 * *desc_len exceeds HW's capability.
 */
static inline unsigned int write_large_page_tx_descs(unsigned int pidx,
						     struct cmdQ_e **e,
						     struct cmdQ_ce **ce,
						     unsigned int *gen,
						     dma_addr_t *desc_mapping,
						     unsigned int *desc_len,
						     unsigned int nfrags,
						     struct cmdQ *q)
{
	if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
		struct cmdQ_e *e1 = *e;
		struct cmdQ_ce *ce1 = *ce;

		while (*desc_len > SGE_TX_DESC_MAX_PLEN) {
			*desc_len -= SGE_TX_DESC_MAX_PLEN;
			write_tx_desc(e1, *desc_mapping, SGE_TX_DESC_MAX_PLEN,
				      *gen, nfrags == 0 && *desc_len == 0);
			ce1->skb = NULL;
			dma_unmap_len_set(ce1, dma_len, 0);
			*desc_mapping += SGE_TX_DESC_MAX_PLEN;
			if (*desc_len) {
				ce1++;
				e1++;
				if (++pidx == q->size) {
					pidx = 0;
					*gen ^= 1;
					ce1 = q->centries;
					e1 = q->entries;
				}
			}
		}
		*e = e1;
		*ce = ce1;
	}
	return pidx;
}
/*
 * Write the command descriptors to transmit the given skb starting at
 * descriptor pidx with the given generation.
 */
static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
				  unsigned int pidx, unsigned int gen,
				  struct cmdQ *q)
{
	dma_addr_t mapping, desc_mapping;
	struct cmdQ_e *e, *e1;
	struct cmdQ_ce *ce;
	unsigned int i, flags, first_desc_len, desc_len,
		     nfrags = skb_shinfo(skb)->nr_frags;

	e = e1 = &q->entries[pidx];
	ce = &q->centries[pidx];

	mapping = pci_map_single(adapter->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);

	desc_mapping = mapping;
	desc_len = skb_headlen(skb);

	flags = F_CMD_DATAVALID | F_CMD_SOP |
		V_CMD_EOP(nfrags == 0 && desc_len <= SGE_TX_DESC_MAX_PLEN) |
		V_CMD_GEN2(gen);
	first_desc_len = (desc_len <= SGE_TX_DESC_MAX_PLEN) ?
		desc_len : SGE_TX_DESC_MAX_PLEN;
	e->addr_lo = (u32)desc_mapping;
	e->addr_hi = (u64)desc_mapping >> 32;
	e->len_gen = V_CMD_LEN(first_desc_len) | V_CMD_GEN1(gen);
	ce->skb = NULL;
	dma_unmap_len_set(ce, dma_len, 0);

	if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN &&
	    desc_len > SGE_TX_DESC_MAX_PLEN) {
		desc_mapping += first_desc_len;
		desc_len -= first_desc_len;
		e1++;
		ce++;
		if (++pidx == q->size) {
			pidx = 0;
			gen ^= 1;
			e1 = q->entries;
			ce = q->centries;
		}
		pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
						 &desc_mapping, &desc_len,
						 nfrags, q);
	}
	if (likely(desc_len))
		write_tx_desc(e1, desc_mapping, desc_len, gen,
			      nfrags == 0);

	ce->skb = NULL;
	dma_unmap_addr_set(ce, dma_addr, mapping);
	dma_unmap_len_set(ce, dma_len, skb_headlen(skb));

	for (i = 0; nfrags--; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		e1++;
		ce++;
		if (++pidx == q->size) {
			pidx = 0;
			gen ^= 1;
			e1 = q->entries;
			ce = q->centries;
		}

		mapping = pci_map_page(adapter->pdev, frag->page,
				       frag->page_offset, frag->size,
				       PCI_DMA_TODEVICE);
		desc_mapping = mapping;
		desc_len = frag->size;

		pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
						 &desc_mapping, &desc_len,
						 nfrags, q);
		if (likely(desc_len))
			write_tx_desc(e1, desc_mapping, desc_len, gen,
				      nfrags == 0);
		ce->skb = NULL;
		dma_unmap_addr_set(ce, dma_addr, mapping);
		dma_unmap_len_set(ce, dma_len, frag->size);
	}
	ce->skb = skb;
	wmb();
	e->flags = flags;
}
/*
 * Clean up completed Tx buffers.
 */
static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q)
{
	unsigned int reclaim = q->processed - q->cleaned;

	if (reclaim) {
		pr_debug("reclaim_completed_tx processed:%d cleaned:%d\n",
			 q->processed, q->cleaned);
		free_cmdQ_buffers(sge, q, reclaim);
		q->cleaned += reclaim;
	}
}
/*
 * Called from tasklet. Checks the scheduler for any
 * pending skbs that can be sent.
 */
static void restart_sched(unsigned long arg)
{
	struct sge *sge = (struct sge *) arg;
	struct adapter *adapter = sge->adapter;
	struct cmdQ *q = &sge->cmdQ[0];
	struct sk_buff *skb;
	unsigned int credits, queued_skb = 0;

	spin_lock(&q->lock);
	reclaim_completed_tx(sge, q);

	credits = q->size - q->in_use;
	pr_debug("restart_sched credits=%d\n", credits);
	while ((skb = sched_skb(sge, NULL, credits)) != NULL) {
		unsigned int genbit, pidx, count;
		count = 1 + skb_shinfo(skb)->nr_frags;
		count += compute_large_page_tx_descs(skb);
		q->in_use += count;
		genbit = q->genbit;
		pidx = q->pidx;
		q->pidx += count;
		if (q->pidx >= q->size) {
			q->pidx -= q->size;
			q->genbit ^= 1;
		}
		write_tx_descs(adapter, skb, pidx, genbit, q);
		credits = q->size - q->in_use;
		queued_skb = 1;
	}

	if (queued_skb) {
		clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
		if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
			set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
		}
	}
	spin_unlock(&q->lock);
}
/**
 *	sge_rx - process an ingress ethernet packet
 *	@sge: the sge structure
 *	@fl: the free list that contains the packet buffer
 *	@len: the packet length
 *
 *	Process an ingress ethernet packet and deliver it to the stack.
 */
static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
{
	struct sk_buff *skb;
	const struct cpl_rx_pkt *p;
	struct adapter *adapter = sge->adapter;
	struct sge_port_stats *st;

	skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad);
	if (unlikely(!skb)) {
		sge->stats.rx_drops++;
		return;
	}

	p = (const struct cpl_rx_pkt *) skb->data;
	if (p->iff >= adapter->params.nports) {
		kfree_skb(skb);
		return;
	}
	__skb_pull(skb, sizeof(*p));

	st = this_cpu_ptr(sge->port_stats[p->iff]);

	skb->protocol = eth_type_trans(skb, adapter->port[p->iff].dev);
	if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff &&
	    skb->protocol == htons(ETH_P_IP) &&
	    (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) {
		++st->rx_cso_good;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else
		skb->ip_summed = CHECKSUM_NONE;

	if (unlikely(adapter->vlan_grp && p->vlan_valid)) {
		st->vlan_xtract++;
		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
					 ntohs(p->vlan));
	} else
		netif_receive_skb(skb);
}
/*
 * Returns true if a command queue has enough available descriptors that
 * we can resume Tx operation after temporarily disabling its packet queue.
 */
static inline int enough_free_Tx_descs(const struct cmdQ *q)
{
	unsigned int r = q->processed - q->cleaned;

	return q->in_use - r < (q->size >> 1);
}
/*
 * Called when sufficient space has become available in the SGE command queues
 * after the Tx packet schedulers have been suspended to restart the Tx path.
 */
static void restart_tx_queues(struct sge *sge)
{
	struct adapter *adap = sge->adapter;
	int i;

	if (!enough_free_Tx_descs(&sge->cmdQ[0]))
		return;

	for_each_port(adap, i) {
		struct net_device *nd = adap->port[i].dev;

		if (test_and_clear_bit(nd->if_port, &sge->stopped_tx_queues) &&
		    netif_running(nd)) {
			sge->stats.cmdQ_restarted[2]++;
			netif_wake_queue(nd);
		}
	}
}
/*
 * update_tx_info is called from the interrupt handler/NAPI to return cmdQ0
 * credits.
 */
static unsigned int update_tx_info(struct adapter *adapter,
				   unsigned int flags,
				   unsigned int pr0)
{
	struct sge *sge = adapter->sge;
	struct cmdQ *cmdq = &sge->cmdQ[0];

	cmdq->processed += pr0;
	if (flags & (F_FL0_ENABLE | F_FL1_ENABLE)) {
		freelQs_empty(sge);
		flags &= ~(F_FL0_ENABLE | F_FL1_ENABLE);
	}
	if (flags & F_CMDQ0_ENABLE) {
		clear_bit(CMDQ_STAT_RUNNING, &cmdq->status);

		if (cmdq->cleaned + cmdq->in_use != cmdq->processed &&
		    !test_and_set_bit(CMDQ_STAT_LAST_PKT_DB, &cmdq->status)) {
			set_bit(CMDQ_STAT_RUNNING, &cmdq->status);
			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
		}
		if (sge->tx_sched)
			tasklet_hi_schedule(&sge->tx_sched->sched_tsk);

		flags &= ~F_CMDQ0_ENABLE;
	}

	if (unlikely(sge->stopped_tx_queues != 0))
		restart_tx_queues(sge);

	return flags;
}
/*
 * Process SGE responses, up to the supplied budget.  Returns the number of
 * responses processed.  A negative budget is effectively unlimited.
 */
static int process_responses(struct adapter *adapter, int budget)
{
	struct sge *sge = adapter->sge;
	struct respQ *q = &sge->respQ;
	struct respQ_e *e = &q->entries[q->cidx];
	int done = 0;
	unsigned int flags = 0;
	unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};

	while (done < budget && e->GenerationBit == q->genbit) {
		flags |= e->Qsleeping;

		cmdq_processed[0] += e->Cmdq0CreditReturn;
		cmdq_processed[1] += e->Cmdq1CreditReturn;

		/* We batch updates to the TX side to avoid cacheline
		 * ping-pong of TX state information on MP where the sender
		 * might run on a different CPU than this function...
		 */
		if (unlikely((flags & F_CMDQ0_ENABLE) || cmdq_processed[0] > 64)) {
			flags = update_tx_info(adapter, flags, cmdq_processed[0]);
			cmdq_processed[0] = 0;
		}

		if (unlikely(cmdq_processed[1] > 16)) {
			sge->cmdQ[1].processed += cmdq_processed[1];
			cmdq_processed[1] = 0;
		}

		if (likely(e->DataValid)) {
			struct freelQ *fl = &sge->freelQ[e->FreelistQid];

			BUG_ON(!e->Sop || !e->Eop);
			if (unlikely(e->Offload))
				unexpected_offload(adapter, fl);
			else
				sge_rx(sge, fl, e->BufferLength);

			++done;

			/*
			 * Note: this depends on each packet consuming a
			 * single free-list buffer; cf. the BUG above.
			 */
			if (++fl->cidx == fl->size)
				fl->cidx = 0;
			prefetch(fl->centries[fl->cidx].skb);

			if (unlikely(--fl->credits <
				     fl->size - SGE_FREEL_REFILL_THRESH))
				refill_free_list(sge, fl);
		} else
			sge->stats.pure_rsps++;

		e++;
		if (unlikely(++q->cidx == q->size)) {
			q->cidx = 0;
			q->genbit ^= 1;
			e = q->entries;
		}
		prefetch(e);

		if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
			writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
			q->credits = 0;
		}
	}

	flags = update_tx_info(adapter, flags, cmdq_processed[0]);
	sge->cmdQ[1].processed += cmdq_processed[1];

	return done;
}
static inline int responses_pending(const struct adapter *adapter)
{
	const struct respQ *Q = &adapter->sge->respQ;
	const struct respQ_e *e = &Q->entries[Q->cidx];

	return (e->GenerationBit == Q->genbit);
}
/*
 * A simpler version of process_responses() that handles only pure (i.e.,
 * non data-carrying) responses.  Such responses are too light-weight to
 * justify calling a softirq when using NAPI, so we handle them specially
 * in hard interrupt context.  The function is called with a pointer to a
 * response, which the caller must ensure is a valid pure response.
 * Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
 */
static int process_pure_responses(struct adapter *adapter)
{
	struct sge *sge = adapter->sge;
	struct respQ *q = &sge->respQ;
	struct respQ_e *e = &q->entries[q->cidx];
	const struct freelQ *fl = &sge->freelQ[e->FreelistQid];
	unsigned int flags = 0;
	unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};

	prefetch(fl->centries[fl->cidx].skb);
	if (e->DataValid)
		return 1;

	do {
		flags |= e->Qsleeping;

		cmdq_processed[0] += e->Cmdq0CreditReturn;
		cmdq_processed[1] += e->Cmdq1CreditReturn;

		e++;
		if (unlikely(++q->cidx == q->size)) {
			q->cidx = 0;
			q->genbit ^= 1;
			e = q->entries;
		}
		prefetch(e);

		if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
			writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
			q->credits = 0;
		}
		sge->stats.pure_rsps++;
	} while (e->GenerationBit == q->genbit && !e->DataValid);

	flags = update_tx_info(adapter, flags, cmdq_processed[0]);
	sge->cmdQ[1].processed += cmdq_processed[1];

	return e->GenerationBit == q->genbit;
}
/*
 * Handler for new data events when using NAPI.  This does not need any locking
 * or protection from interrupts as data interrupts are off at this point and
 * other adapter interrupts do not interfere.
 */
int t1_poll(struct napi_struct *napi, int budget)
{
	struct adapter *adapter = container_of(napi, struct adapter, napi);
	int work_done = process_responses(adapter, budget);

	if (likely(work_done < budget)) {
		napi_complete(napi);
		writel(adapter->sge->respQ.cidx,
		       adapter->regs + A_SG_SLEEPING);
	}
	return work_done;
}
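/*
 * Illustrative note: writing the current response-queue cidx to
 * A_SG_SLEEPING above is how polling is handed back to the hardware; it
 * appears to re-arm the SGE's doorbell/GTS mechanism so that the next
 * response entry raises a data interrupt again, mirroring the
 * pure-response path in t1_interrupt() below.
 */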
irqreturn_t t1_interrupt(int irq, void *data)
{
	struct adapter *adapter = data;
	struct sge *sge = adapter->sge;
	int handled;

	if (likely(responses_pending(adapter))) {
		writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);

		if (napi_schedule_prep(&adapter->napi)) {
			if (process_pure_responses(adapter))
				__napi_schedule(&adapter->napi);
			else {
				/* no data, no NAPI needed */
				writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
				/* undo schedule_prep */
				napi_enable(&adapter->napi);
			}
		}
		return IRQ_HANDLED;
	}

	spin_lock(&adapter->async_lock);
	handled = t1_slow_intr_handler(adapter);
	spin_unlock(&adapter->async_lock);

	if (!handled)
		sge->stats.unhandled_irqs++;

	return IRQ_RETVAL(handled != 0);
}
/*
 * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it.
 *
 * The code figures out how many entries the sk_buff will require in the
 * cmdQ and updates the cmdQ data structure with the state once the enqueue
 * has completed. Then, it doesn't access the global structure anymore, but
 * uses the corresponding fields on the stack. In conjunction with a spinlock
 * around that code, we can make the function reentrant without holding the
 * lock when we actually enqueue (which might be expensive, especially on
 * architectures with IO MMUs).
 *
 * This runs with softirqs disabled.
 */
static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
		     unsigned int qid, struct net_device *dev)
{
	struct sge *sge = adapter->sge;
	struct cmdQ *q = &sge->cmdQ[qid];
	unsigned int credits, pidx, genbit, count, use_sched_skb = 0;

	if (!spin_trylock(&q->lock))
		return NETDEV_TX_LOCKED;

	reclaim_completed_tx(sge, q);

	pidx = q->pidx;
	credits = q->size - q->in_use;
	count = 1 + skb_shinfo(skb)->nr_frags;
	count += compute_large_page_tx_descs(skb);

	/* Ethernet packet */
	if (unlikely(credits < count)) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);
			set_bit(dev->if_port, &sge->stopped_tx_queues);
			sge->stats.cmdQ_full[2]++;
			pr_err("%s: Tx ring full while queue awake!\n",
			       adapter->name);
		}
		spin_unlock(&q->lock);
		return NETDEV_TX_BUSY;
	}

	if (unlikely(credits - count < q->stop_thres)) {
		netif_stop_queue(dev);
		set_bit(dev->if_port, &sge->stopped_tx_queues);
		sge->stats.cmdQ_full[2]++;
	}

	/* T204 cmdQ0 skbs that are destined for a certain port have to go
	 * through the scheduler.
	 */
	if (sge->tx_sched && !qid && skb->dev) {
use_sched:
		use_sched_skb = 1;
		/* Note that the scheduler might return a different skb than
		 * the one passed in.
		 */
		skb = sched_skb(sge, skb, credits);
		if (!skb) {
			spin_unlock(&q->lock);
			return NETDEV_TX_OK;
		}
		pidx = q->pidx;
		count = 1 + skb_shinfo(skb)->nr_frags;
		count += compute_large_page_tx_descs(skb);
	}

	q->in_use += count;
	genbit = q->genbit;
	pidx = q->pidx;
	q->pidx += count;
	if (q->pidx >= q->size) {
		q->pidx -= q->size;
		q->genbit ^= 1;
	}
	spin_unlock(&q->lock);

	write_tx_descs(adapter, skb, pidx, genbit, q);

	/*
	 * We always ring the doorbell for cmdQ1.  For cmdQ0, we only ring
	 * the doorbell if the Q is asleep. There is a natural race, where
	 * the hardware is going to sleep just after we checked, however,
	 * then the interrupt handler will detect the outstanding TX packet
	 * and ring the doorbell for us.
	 */
	if (qid)
		doorbell_pio(adapter, F_CMDQ1_ENABLE);
	else {
		clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
		if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
			set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
		}
	}

	if (use_sched_skb) {
		if (spin_trylock(&q->lock)) {
			credits = q->size - q->in_use;
			skb = NULL;
			goto use_sched;
		}
	}
	return NETDEV_TX_OK;
}
#define MK_ETH_TYPE_MSS(type, mss) (((mss) & 0x3FFF) | ((type) << 14))
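/*
 * MK_ETH_TYPE_MSS packs the LSO CPL header's combined field: bits 13:0
 * carry the MSS and the Ethernet framing type is shifted into the bits
 * above them.  For example, MK_ETH_TYPE_MSS(CPL_ETH_II, 1460) evaluates to
 * (CPL_ETH_II << 14) | 1460.
 */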
/**
 *	eth_hdr_len - return the length of an Ethernet header
 *	@data: pointer to the start of the Ethernet header
 *
 *	Returns the length of an Ethernet header, including optional VLAN tag.
 */
static inline int eth_hdr_len(const void *data)
{
	const struct ethhdr *e = data;

	return e->h_proto == htons(ETH_P_8021Q) ? VLAN_ETH_HLEN : ETH_HLEN;
}
/*
 * Adds the CPL header to the sk_buff and passes it to t1_sge_tx.
 */
netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;
	struct sge *sge = adapter->sge;
	struct sge_port_stats *st = this_cpu_ptr(sge->port_stats[dev->if_port]);
	struct cpl_tx_pkt *cpl;
	struct sk_buff *orig_skb = skb;
	int ret;

	if (skb->protocol == htons(ETH_P_CPL5))
		goto send;

	/*
	 * We are using a non-standard hard_header_len.
	 * Allocate more header room in the rare cases it is not big enough.
	 */
	if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) {
		skb = skb_realloc_headroom(skb, sizeof(struct cpl_tx_pkt_lso));
		++st->tx_need_hdrroom;
		dev_kfree_skb_any(orig_skb);
		if (!skb)
			return NETDEV_TX_OK;
	}

	if (skb_shinfo(skb)->gso_size) {
		int eth_type;
		struct cpl_tx_pkt_lso *hdr;

		++st->tx_tso;

		eth_type = skb_network_offset(skb) == ETH_HLEN ?
			CPL_ETH_II : CPL_ETH_II_VLAN;

		hdr = (struct cpl_tx_pkt_lso *)skb_push(skb, sizeof(*hdr));
		hdr->opcode = CPL_TX_PKT_LSO;
		hdr->ip_csum_dis = hdr->l4_csum_dis = 0;
		hdr->ip_hdr_words = ip_hdr(skb)->ihl;
		hdr->tcp_hdr_words = tcp_hdr(skb)->doff;
		hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
							  skb_shinfo(skb)->gso_size));
		hdr->len = htonl(skb->len - sizeof(*hdr));
		cpl = (struct cpl_tx_pkt *)hdr;
	} else {
		/*
		 * Packets shorter than ETH_HLEN can break the MAC, drop them
		 * early.  Also, we may get oversized packets because some
		 * parts of the kernel don't handle our unusual hard_header_len
		 * right, drop those too.
		 */
		if (unlikely(skb->len < ETH_HLEN ||
			     skb->len > dev->mtu + eth_hdr_len(skb->data))) {
			pr_debug("%s: packet size %d hdr %d mtu %d\n", dev->name,
				 skb->len, eth_hdr_len(skb->data), dev->mtu);
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}

		if (!(adapter->flags & UDP_CSUM_CAPABLE) &&
		    skb->ip_summed == CHECKSUM_PARTIAL &&
		    ip_hdr(skb)->protocol == IPPROTO_UDP) {
			if (unlikely(skb_checksum_help(skb))) {
				pr_debug("%s: unable to do udp checksum\n", dev->name);
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
		}

		/* Hmm, trying to catch the gratuitous arp... and we'll use
		 * it to flush out stuck espi packets...
		 */
		if (unlikely(!adapter->sge->espibug_skb[dev->if_port])) {
			if (skb->protocol == htons(ETH_P_ARP) &&
			    arp_hdr(skb)->ar_op == htons(ARPOP_REQUEST)) {
				adapter->sge->espibug_skb[dev->if_port] = skb;
				/* We want to re-use this skb later. We
				 * simply bump the reference count and it
				 * will not be freed...
				 */
				skb = skb_get(skb);
			}
		}

		cpl = (struct cpl_tx_pkt *)__skb_push(skb, sizeof(*cpl));
		cpl->opcode = CPL_TX_PKT;
		cpl->ip_csum_dis = 1;	/* SW calculates IP csum */
		cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_PARTIAL ? 0 : 1;
		/* the length field isn't used so don't bother setting it */

		st->tx_cso += (skb->ip_summed == CHECKSUM_PARTIAL);
	}
	cpl->iff = dev->if_port;

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
	if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
		cpl->vlan_valid = 1;
		cpl->vlan = htons(vlan_tx_tag_get(skb));
		st->vlan_insert++;
	} else
#endif
		cpl->vlan_valid = 0;

send:
	ret = t1_sge_tx(skb, adapter, 0, dev);

	/* If transmit busy, and we reallocated skb's due to headroom limit,
	 * then silently discard to avoid leak.
	 */
	if (unlikely(ret != NETDEV_TX_OK && skb != orig_skb)) {
		dev_kfree_skb_any(skb);
		ret = NETDEV_TX_OK;
	}
	return ret;
}
/*
 * Callback for the Tx buffer reclaim timer.  Runs with softirqs disabled.
 */
static void sge_tx_reclaim_cb(unsigned long data)
{
	int i;
	struct sge *sge = (struct sge *)data;

	for (i = 0; i < SGE_CMDQ_N; ++i) {
		struct cmdQ *q = &sge->cmdQ[i];

		if (!spin_trylock(&q->lock))
			continue;

		reclaim_completed_tx(sge, q);
		if (i == 0 && q->in_use) {	/* flush pending credits */
			writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
		}
		spin_unlock(&q->lock);
	}
	mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
}
/*
 * Propagate changes of the SGE coalescing parameters to the HW.
 */
int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p)
{
	sge->fixed_intrtimer = p->rx_coalesce_usecs *
		core_ticks_per_usec(sge->adapter);
	writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER);
	return 0;
}
/*
 * Allocates both RX and TX resources and configures the SGE. However,
 * the hardware is not enabled yet.
 */
int t1_sge_configure(struct sge *sge, struct sge_params *p)
{
	if (alloc_rx_resources(sge, p))
		return -ENOMEM;
	if (alloc_tx_resources(sge, p)) {
		free_rx_resources(sge);
		return -ENOMEM;
	}
	configure_sge(sge, p);

	/*
	 * Now that we have sized the free lists calculate the payload
	 * capacity of the large buffers.  Other parts of the driver use
	 * this to set the max offload coalescing size so that RX packets
	 * do not overflow our large buffers.
	 */
	p->large_buf_capacity = jumbo_payload_capacity(sge);
	return 0;
}
/*
 * Disables the DMA engine.
 */
void t1_sge_stop(struct sge *sge)
{
	int i;

	writel(0, sge->adapter->regs + A_SG_CONTROL);
	readl(sge->adapter->regs + A_SG_CONTROL); /* flush */

	if (is_T2(sge->adapter))
		del_timer_sync(&sge->espibug_timer);

	del_timer_sync(&sge->tx_reclaim_timer);
	if (sge->tx_sched)
		tx_sched_stop(sge);

	for (i = 0; i < MAX_NPORTS; i++)
		kfree_skb(sge->espibug_skb[i]);
}
/*
 * Enables the DMA engine.
 */
void t1_sge_start(struct sge *sge)
{
	refill_free_list(sge, &sge->freelQ[0]);
	refill_free_list(sge, &sge->freelQ[1]);

	writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL);
	doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE);
	readl(sge->adapter->regs + A_SG_CONTROL); /* flush */

	mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);

	if (is_T2(sge->adapter))
		mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
}
/*
 * Callback for the T2 ESPI 'stuck packet feature' workaround
 */
static void espibug_workaround_t204(unsigned long data)
{
	struct adapter *adapter = (struct adapter *)data;
	struct sge *sge = adapter->sge;
	unsigned int nports = adapter->params.nports;
	u32 seop[MAX_NPORTS];

	if (adapter->open_device_map & PORT_MASK) {
		int i;

		if (t1_espi_get_mon_t204(adapter, &(seop[0]), 0) < 0)
			return;

		for (i = 0; i < nports; i++) {
			struct sk_buff *skb = sge->espibug_skb[i];

			if (!netif_running(adapter->port[i].dev) ||
			    netif_queue_stopped(adapter->port[i].dev) ||
			    !seop[i] || ((seop[i] & 0xfff) != 0) || !skb)
				continue;

			if (!skb->cb[0]) {
				u8 ch_mac_addr[ETH_ALEN] = {
					0x0, 0x7, 0x43, 0x0, 0x0, 0x0
				};

				skb_copy_to_linear_data_offset(skb,
						sizeof(struct cpl_tx_pkt),
						ch_mac_addr, ETH_ALEN);
				skb_copy_to_linear_data_offset(skb,
						skb->len - 10,
						ch_mac_addr, ETH_ALEN);
				skb->cb[0] = 0xff;
			}

			/* bump the reference count to avoid freeing of
			 * the skb once the DMA has completed.
			 */
			skb = skb_get(skb);
			t1_sge_tx(skb, adapter, 0, adapter->port[i].dev);
		}
	}
	mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
}
static void espibug_workaround(unsigned long data)
{
	struct adapter *adapter = (struct adapter *)data;
	struct sge *sge = adapter->sge;

	if (netif_running(adapter->port[0].dev)) {
		struct sk_buff *skb = sge->espibug_skb[0];
		u32 seop = t1_espi_get_mon(adapter, 0x930, 0);

		if ((seop & 0xfff0fff) == 0xfff && skb) {
			if (!skb->cb[0]) {
				u8 ch_mac_addr[ETH_ALEN] =
					{0x0, 0x7, 0x43, 0x0, 0x0, 0x0};
				skb_copy_to_linear_data_offset(skb,
						sizeof(struct cpl_tx_pkt),
						ch_mac_addr, ETH_ALEN);
				skb_copy_to_linear_data_offset(skb,
						skb->len - 10,
						ch_mac_addr, ETH_ALEN);
				skb->cb[0] = 0xff;
			}

			/* bump the reference count to avoid freeing of the
			 * skb once the DMA has completed.
			 */
			skb = skb_get(skb);
			t1_sge_tx(skb, adapter, 0, adapter->port[0].dev);
		}
	}
	mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
}
/*
 * Creates a t1_sge structure and returns suggested resource parameters.
 */
struct sge * __devinit t1_sge_create(struct adapter *adapter,
				     struct sge_params *p)
{
	struct sge *sge = kzalloc(sizeof(*sge), GFP_KERNEL);
	int i;

	if (!sge)
		return NULL;

	sge->adapter = adapter;
	sge->netdev = adapter->port[0].dev;
	sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2;
	sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;

	for_each_port(adapter, i) {
		sge->port_stats[i] = alloc_percpu(struct sge_port_stats);
		if (!sge->port_stats[i])
			goto nomem_port;
	}

	init_timer(&sge->tx_reclaim_timer);
	sge->tx_reclaim_timer.data = (unsigned long)sge;
	sge->tx_reclaim_timer.function = sge_tx_reclaim_cb;

	if (is_T2(sge->adapter)) {
		init_timer(&sge->espibug_timer);

		if (adapter->params.nports > 1) {
			tx_sched_init(sge);
			sge->espibug_timer.function = espibug_workaround_t204;
		} else
			sge->espibug_timer.function = espibug_workaround;
		sge->espibug_timer.data = (unsigned long)sge->adapter;

		sge->espibug_timeout = 1;
		/* for T204, every 10ms */
		if (adapter->params.nports > 1)
			sge->espibug_timeout = HZ / 100;
	}

	p->cmdQ_size[0] = SGE_CMDQ0_E_N;
	p->cmdQ_size[1] = SGE_CMDQ1_E_N;
	p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE;
	p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE;
	if (sge->tx_sched) {
		if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204)
			p->rx_coalesce_usecs = 15;
		else
			p->rx_coalesce_usecs = 50;
	} else
		p->rx_coalesce_usecs = 50;

	p->coalesce_enable = 0;
	p->sample_interval_usecs = 0;

	return sge;
nomem_port:
	while (i >= 0) {
		free_percpu(sge->port_stats[i]);
		--i;
	}
	kfree(sge);
	return NULL;
}