/*****************************************************************************
 * $Date: 2005/06/21 18:29:48 $                                             *
 *                                                                           *
 * part of the Chelsio 10Gb Ethernet Driver.                                 *
 *                                                                           *
 * This program is free software; you can redistribute it and/or modify      *
 * it under the terms of the GNU General Public License, version 2, as       *
 * published by the Free Software Foundation.                                *
 *                                                                           *
 * You should have received a copy of the GNU General Public License along   *
 * with this program; if not, write to the Free Software Foundation, Inc.,   *
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.                  *
 *                                                                           *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
 *                                                                           *
 * http://www.chelsio.com                                                    *
 *                                                                           *
 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
 * All rights reserved.                                                      *
 *                                                                           *
 * Maintainers: maintainers@chelsio.com                                      *
 *                                                                           *
 * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
 *          Tina Yang               <tainay@chelsio.com>                     *
 *          Felix Marti             <felix@chelsio.com>                      *
 *          Scott Bardone           <sbardone@chelsio.com>                   *
 *          Kurt Ottaway            <kottaway@chelsio.com>                   *
 *          Frank DiMambro          <frank@chelsio.com>                      *
 *                                                                           *
 ****************************************************************************/
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/ktime.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/tcp.h>
#include <linux/if_arp.h>
/* This belongs in if_ether.h */
#define ETH_P_CPL5 0xf

#define SGE_FREELQ_N		2
#define SGE_CMDQ0_E_N		1024
#define SGE_CMDQ1_E_N		128
#define SGE_FREEL_SIZE		4096
#define SGE_JUMBO_FREEL_SIZE	512
#define SGE_FREEL_REFILL_THRESH	16
#define SGE_RESPQ_E_N		1024
#define SGE_INTRTIMER_NRES	1000
#define SGE_RX_COPY_THRES	256
#define SGE_RX_SM_BUF_SIZE	1536
#define SGE_TX_DESC_MAX_PLEN	16384

#define SGE_RX_DROP_THRES	2

#define SGE_RESPQ_REPLENISH_THRES (SGE_RESPQ_E_N / 4)
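
/*
 * Note (editorial, derived from the constants above): with SGE_RESPQ_E_N at
 * 1024, SGE_RESPQ_REPLENISH_THRES evaluates to 256, so response-queue
 * credits are returned to the hardware in batches of roughly a quarter of
 * the ring rather than one entry at a time.
 */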
/*
 * Period of the TX buffer reclaim timer.  This timer does not need to run
 * frequently as TX buffers are usually reclaimed by new TX packets.
 */
#define TX_RECLAIM_PERIOD (HZ / 4)
#define M_CMD_LEN	0x7fffffff
#define V_CMD_LEN(v)	(v)
#define G_CMD_LEN(v)	((v) & M_CMD_LEN)
#define V_CMD_GEN1(v)	((v) << 31)
#define V_CMD_GEN2(v)	(v)
#define F_CMD_DATAVALID	(1 << 1)
#define F_CMD_SOP	(1 << 2)
#define V_CMD_EOP(v)	((v) << 3)
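
/*
 * Worked example (editorial): encoding a 1500-byte buffer with the
 * generation bit set yields V_CMD_LEN(1500) | V_CMD_GEN1(1), i.e.
 * 1500 | (1u << 31): the low 31 bits carry the length and bit 31 the
 * generation (validity) bit, exactly as consumed by refill_free_list()
 * and write_tx_desc() below.
 */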
/*
 * Command queue, receive buffer list, and response queue descriptors.
 */
#if defined(__BIG_ENDIAN_BITFIELD)
	u32 Cmdq1CreditReturn	: 5;
	u32 Cmdq1DmaComplete	: 5;
	u32 Cmdq0CreditReturn	: 5;
	u32 Cmdq0DmaComplete	: 5;
	u32 GenerationBit	: 1;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u32 GenerationBit	: 1;
	u32 Cmdq0DmaComplete	: 5;
	u32 Cmdq0CreditReturn	: 5;
	u32 Cmdq1DmaComplete	: 5;
	u32 Cmdq1CreditReturn	: 5;
/*
 * SW Context Command and Freelist Queue Descriptors
 */
	DECLARE_PCI_UNMAP_ADDR(dma_addr);
	DECLARE_PCI_UNMAP_LEN(dma_len);

	DECLARE_PCI_UNMAP_ADDR(dma_addr);
	DECLARE_PCI_UNMAP_LEN(dma_len);
/*
 * SW command, freelist and response rings
 */
	unsigned long	status;		/* HW DMA fetch status */
	unsigned int	in_use;		/* # of in-use command descriptors */
	unsigned int	size;		/* # of descriptors */
	unsigned int	processed;	/* total # of descs HW has processed */
	unsigned int	cleaned;	/* total # of descs SW has reclaimed */
	unsigned int	stop_thres;	/* SW TX queue suspend threshold */
	u16		pidx;		/* producer index (SW) */
	u16		cidx;		/* consumer index (HW) */
	u8		genbit;		/* current generation (=valid) bit */
	u8		sop;		/* is next entry start of packet? */
	struct cmdQ_e  *entries;	/* HW command descriptor Q */
	struct cmdQ_ce *centries;	/* SW command context descriptor Q */
	dma_addr_t	dma_addr;	/* DMA addr HW command descriptor Q */
	spinlock_t	lock;		/* Lock to protect cmdQ enqueuing */
	unsigned int	credits;	/* # of available RX buffers */
	unsigned int	size;		/* free list capacity */
	u16		pidx;		/* producer index (SW) */
	u16		cidx;		/* consumer index (HW) */
	u16		rx_buffer_size;	/* Buffer size on this free list */
	u16		dma_offset;	/* DMA offset to align IP headers */
	u16		recycleq_idx;	/* skb recycle q to use */
	u8		genbit;		/* current generation (=valid) bit */
	struct freelQ_e	 *entries;	/* HW freelist descriptor Q */
	struct freelQ_ce *centries;	/* SW freelist context descriptor Q */
	dma_addr_t	dma_addr;	/* DMA addr HW freelist descriptor Q */
	unsigned int	credits;	/* credits to be returned to SGE */
	unsigned int	size;		/* # of response Q descriptors */
	u16		cidx;		/* consumer index (SW) */
	u8		genbit;		/* current generation (=valid) bit */
	struct respQ_e *entries;	/* HW response descriptor Q */
	dma_addr_t	dma_addr;	/* DMA addr HW response descriptor Q */
/* Bit flags for cmdQ.status */
enum {
	CMDQ_STAT_RUNNING = 1,		/* fetch engine is running */
	CMDQ_STAT_LAST_PKT_DB = 2	/* last packet rung the doorbell */
};
/* T204 TX SW scheduler */

/* Per T204 TX port */
	unsigned int	avail;		/* available bits - quota */
	unsigned int	drain_bits_per_1024ns; /* drain rate */
	unsigned int	speed;		/* drain rate, mbps */
	unsigned int	mtu;		/* mtu size */
	struct sk_buff_head skbq;	/* pending skbs */

/* Per T204 device */
	ktime_t		last_updated;	/* last time quotas were computed */
	unsigned int	max_avail;	/* max bits to be sent to any port */
	unsigned int	port;		/* port index (round robin ports) */
	unsigned int	num;		/* num skbs in per port queues */
	struct sched_port p[MAX_NPORTS];
	struct tasklet_struct sched_tsk; /* tasklet used to run scheduler */
static void restart_sched(unsigned long);
/*
 * Main SGE data structure
 *
 * Interrupts are handled by a single CPU and it is likely that on a MP system
 * the application is migrated to another CPU. In that scenario, we try to
 * separate the RX (in irq context) and TX state in order to decrease memory
 * contention.
 */
	struct adapter	*adapter;	/* adapter backpointer */
	struct net_device *netdev;	/* netdevice backpointer */
	struct freelQ	freelQ[SGE_FREELQ_N]; /* buffer free lists */
	struct respQ	respQ;		/* response Q */
	unsigned long	stopped_tx_queues; /* bitmap of suspended Tx queues */
	unsigned int	rx_pkt_pad;	/* RX padding for L2 packets */
	unsigned int	jumbo_fl;	/* jumbo freelist Q index */
	unsigned int	intrtimer_nres;	/* no-resource interrupt timer */
	unsigned int	fixed_intrtimer; /* non-adaptive interrupt timer */
	struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
	struct timer_list espibug_timer;
	unsigned long	espibug_timeout;
	struct sk_buff	*espibug_skb[MAX_NPORTS];
	u32		sge_control;	/* shadow value of sge control reg */
	struct sge_intr_counts stats;
	struct sge_port_stats *port_stats[MAX_NPORTS];
	struct sched	*tx_sched;
	struct cmdQ	cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp;
/*
 * stop tasklet and free all pending skb's
 */
static void tx_sched_stop(struct sge *sge)
{
	struct sched *s = sge->tx_sched;
	int i;

	tasklet_kill(&s->sched_tsk);

	for (i = 0; i < MAX_NPORTS; i++)
		__skb_queue_purge(&s->p[i].skbq);
}
/*
 * t1_sched_update_parms() is called when the MTU or link speed changes. It
 * re-computes scheduler parameters to cope with the change.
 */
unsigned int t1_sched_update_parms(struct sge *sge, unsigned int port,
				   unsigned int mtu, unsigned int speed)
{
	struct sched *s = sge->tx_sched;
	struct sched_port *p = &s->p[port];
	unsigned int max_avail_segs;

	pr_debug("t1_sched_update_params mtu=%d speed=%d\n", mtu, speed);
	if (speed)
		p->speed = speed;
	if (mtu)
		p->mtu = mtu;

	if (speed || mtu) {
		unsigned long long drain = 1024ULL * p->speed * (p->mtu - 40);
		do_div(drain, (p->mtu + 50) * 1000);
		p->drain_bits_per_1024ns = (unsigned int) drain;

		if (p->speed < 1000)
			p->drain_bits_per_1024ns =
				90 * p->drain_bits_per_1024ns / 100;
	}

	if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) {
		p->drain_bits_per_1024ns -= 16;
		s->max_avail = max(4096U, p->mtu + 16 + 14 + 4);
		max_avail_segs = max(1U, 4096 / (p->mtu - 40));
	} else {
		s->max_avail = 16384;
		max_avail_segs = max(1U, 9000 / (p->mtu - 40));
	}

	pr_debug("t1_sched_update_parms: mtu %u speed %u max_avail %u "
		 "max_avail_segs %u drain_bits_per_1024ns %u\n", p->mtu,
		 p->speed, s->max_avail, max_avail_segs,
		 p->drain_bits_per_1024ns);

	return max_avail_segs * (p->mtu - 40);
}
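
/*
 * Worked example (editorial, illustrative numbers): for a 1000 Mbps port
 * with a 1500-byte MTU the computation above gives
 * drain = 1024 * 1000 * (1500 - 40) / ((1500 + 50) * 1000) ~= 964,
 * i.e. about 964 payload bits drain per 1024 ns. The constants 40 and 50
 * appear to account for the TCP/IP header bytes excluded from payload and
 * for per-frame wire overhead, respectively.
 */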
/*
 * t1_sched_set_max_avail_bytes() tells the scheduler the maximum amount of
 * data that can be pushed per port.
 */
void t1_sched_set_max_avail_bytes(struct sge *sge, unsigned int val)
{
	struct sched *s = sge->tx_sched;
	unsigned int i;

	s->max_avail = val;
	for (i = 0; i < MAX_NPORTS; i++)
		t1_sched_update_parms(sge, i, 0, 0);
}
/*
 * t1_sched_set_drain_bits_per_us() tells the scheduler at which rate a port
 * is draining.
 */
void t1_sched_set_drain_bits_per_us(struct sge *sge, unsigned int port,
				    unsigned int val)
{
	struct sched *s = sge->tx_sched;
	struct sched_port *p = &s->p[port];

	p->drain_bits_per_1024ns = val * 1024 / 1000;
	t1_sched_update_parms(sge, port, 0, 0);
}
/*
 * get_clock() implements a ns clock (see ktime_get)
 */
static inline ktime_t get_clock(void)
{
	struct timespec ts;

	ktime_get_ts(&ts);
	return timespec_to_ktime(ts);
}
/*
 * tx_sched_init() allocates resources and does basic initialization.
 */
static int tx_sched_init(struct sge *sge)
{
	struct sched *s;
	int i;

	s = kzalloc(sizeof (struct sched), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	pr_debug("tx_sched_init\n");
	tasklet_init(&s->sched_tsk, restart_sched, (unsigned long) sge);
	sge->tx_sched = s;

	for (i = 0; i < MAX_NPORTS; i++) {
		skb_queue_head_init(&s->p[i].skbq);
		t1_sched_update_parms(sge, i, 1500, 1000);
	}

	return 0;
}
/*
 * sched_update_avail() computes the delta since the last time it was called
 * and updates the per-port quota (number of bits that can be sent to any
 * port).
 */
static inline int sched_update_avail(struct sge *sge)
{
	struct sched *s = sge->tx_sched;
	ktime_t now = get_clock();
	unsigned int i;
	long long delta_time_ns;

	delta_time_ns = ktime_to_ns(ktime_sub(now, s->last_updated));

	pr_debug("sched_update_avail delta=%lld\n", delta_time_ns);
	if (delta_time_ns < 15000)
		return 0;

	for (i = 0; i < MAX_NPORTS; i++) {
		struct sched_port *p = &s->p[i];
		unsigned int delta_avail;

		delta_avail = (p->drain_bits_per_1024ns * delta_time_ns) >> 13;
		p->avail = min(p->avail + delta_avail, s->max_avail);
	}

	s->last_updated = now;

	return 1;
}
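
/*
 * Editorial note on the '>> 13' above (an interpretation, not original
 * text): drain_bits_per_1024ns * delta_time_ns is 1024 times the bit
 * count, so shifting right by 10 yields bits and by a further 3 yields
 * bytes. The quota therefore effectively accumulates in bytes, matching
 * the skb->len comparison performed in sched_skb().
 */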
/*
 * sched_skb() is called from two different places. In the tx path, any
 * packet generating load on an output port will call sched_skb()
 * (skb != NULL). In addition, sched_skb() is called from the irq/soft irq
 * context (skb == NULL).
 * The scheduler only returns a skb (which will then be sent) if the
 * length of the skb is <= the current quota of the output port.
 */
static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb,
				 unsigned int credits)
{
	struct sched *s = sge->tx_sched;
	struct sk_buff_head *skbq;
	unsigned int i, len, update = 1;

	pr_debug("sched_skb %p\n", skb);
	if (!skb) {
		if (!s->num)
			return NULL;
	} else {
		skbq = &s->p[skb->dev->if_port].skbq;
		__skb_queue_tail(skbq, skb);
		s->num++;
		skb = NULL;
	}

	if (credits < MAX_SKB_FRAGS + 1)
		goto out;

again:
	for (i = 0; i < MAX_NPORTS; i++) {
		s->port = ++s->port & (MAX_NPORTS - 1);
		skbq = &s->p[s->port].skbq;

		skb = skb_peek(skbq);

		if (!skb)
			continue;

		len = skb->len;
		if (len <= s->p[s->port].avail) {
			s->p[s->port].avail -= len;
			s->num--;
			__skb_unlink(skb, skbq);
			goto out;
		}
		skb = NULL;
	}

	if (update-- && sched_update_avail(sge))
		goto again;

out:
	/* If there are more pending skbs, we use the hardware to schedule us
	 * again.
	 */
	if (s->num && !skb) {
		struct cmdQ *q = &sge->cmdQ[0];
		clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
		if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
			set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
			writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
		}
	}
	pr_debug("sched_skb ret %p\n", skb);

	return skb;
}
/*
 * PIO to indicate that memory mapped Q contains valid descriptor(s).
 */
static inline void doorbell_pio(struct adapter *adapter, u32 val)
{
	wmb();
	writel(val, adapter->regs + A_SG_DOORBELL);
}
/*
 * Frees all RX buffers on the freelist Q. The caller must make sure that
 * the SGE is turned off before calling this function.
 */
static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q)
{
	unsigned int cidx = q->cidx;

	while (q->credits--) {
		struct freelQ_ce *ce = &q->centries[cidx];

		pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
				 pci_unmap_len(ce, dma_len),
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb(ce->skb);
		ce->skb = NULL;
		if (++cidx == q->size)
			cidx = 0;
	}
}
/*
 * Free RX free list and response queue resources.
 */
static void free_rx_resources(struct sge *sge)
{
	struct pci_dev *pdev = sge->adapter->pdev;
	unsigned int size, i;

	if (sge->respQ.entries) {
		size = sizeof(struct respQ_e) * sge->respQ.size;
		pci_free_consistent(pdev, size, sge->respQ.entries,
				    sge->respQ.dma_addr);
	}

	for (i = 0; i < SGE_FREELQ_N; i++) {
		struct freelQ *q = &sge->freelQ[i];

		if (q->centries) {
			free_freelQ_buffers(pdev, q);
			kfree(q->centries);
		}
		if (q->entries) {
			size = sizeof(struct freelQ_e) * q->size;
			pci_free_consistent(pdev, size, q->entries,
					    q->dma_addr);
		}
	}
}
/*
 * Allocates basic RX resources, consisting of memory mapped freelist Qs and a
 * response queue.
 */
static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
{
	struct pci_dev *pdev = sge->adapter->pdev;
	unsigned int size, i;

	for (i = 0; i < SGE_FREELQ_N; i++) {
		struct freelQ *q = &sge->freelQ[i];

		q->genbit = 1;
		q->size = p->freelQ_size[i];
		q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN;
		size = sizeof(struct freelQ_e) * q->size;
		q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
		if (!q->entries)
			goto err_no_mem;

		size = sizeof(struct freelQ_ce) * q->size;
		q->centries = kzalloc(size, GFP_KERNEL);
		if (!q->centries)
			goto err_no_mem;
	}

	/*
	 * Calculate the buffer sizes for the two free lists.  FL0 accommodates
	 * regular sized Ethernet frames, FL1 is sized not to exceed 16K,
	 * including all the sk_buff overhead.
	 *
	 * Note: For T2 FL0 and FL1 are reversed.
	 */
	sge->freelQ[!sge->jumbo_fl].rx_buffer_size = SGE_RX_SM_BUF_SIZE +
		sizeof(struct cpl_rx_data) +
		sge->freelQ[!sge->jumbo_fl].dma_offset;

	size = (16 * 1024) -
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	sge->freelQ[sge->jumbo_fl].rx_buffer_size = size;

	/*
	 * Setup which skb recycle Q should be used when recycling buffers from
	 * each free list.
	 */
	sge->freelQ[!sge->jumbo_fl].recycleq_idx = 0;
	sge->freelQ[sge->jumbo_fl].recycleq_idx = 1;

	sge->respQ.genbit = 1;
	sge->respQ.size = SGE_RESPQ_E_N;
	sge->respQ.credits = 0;
	size = sizeof(struct respQ_e) * sge->respQ.size;
	sge->respQ.entries =
		pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr);
	if (!sge->respQ.entries)
		goto err_no_mem;
	return 0;

err_no_mem:
	free_rx_resources(sge);
	return -ENOMEM;
}
/*
 * Reclaims n TX descriptors and frees the buffers associated with them.
 */
static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
{
	struct cmdQ_ce *ce;
	struct pci_dev *pdev = sge->adapter->pdev;
	unsigned int cidx = q->cidx;

	q->in_use -= n;
	ce = &q->centries[cidx];
	while (n--) {
		if (likely(pci_unmap_len(ce, dma_len))) {
			pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
					 pci_unmap_len(ce, dma_len),
					 PCI_DMA_TODEVICE);
			if (q->sop)
				q->sop = 0;
		}
		if (ce->skb) {
			dev_kfree_skb_any(ce->skb);
			ce->skb = NULL;
		}
		ce++;
		if (++cidx == q->size) {
			cidx = 0;
			ce = q->centries;
		}
	}
	q->cidx = cidx;
}
/*
 * Free TX resources.
 *
 * Assumes that SGE is stopped and all interrupts are disabled.
 */
static void free_tx_resources(struct sge *sge)
{
	struct pci_dev *pdev = sge->adapter->pdev;
	unsigned int size, i;

	for (i = 0; i < SGE_CMDQ_N; i++) {
		struct cmdQ *q = &sge->cmdQ[i];

		if (q->centries) {
			if (q->in_use)
				free_cmdQ_buffers(sge, q, q->in_use);
			kfree(q->centries);
		}
		if (q->entries) {
			size = sizeof(struct cmdQ_e) * q->size;
			pci_free_consistent(pdev, size, q->entries,
					    q->dma_addr);
		}
	}
}
/*
 * Allocates basic TX resources, consisting of memory mapped command Qs.
 */
static int alloc_tx_resources(struct sge *sge, struct sge_params *p)
{
	struct pci_dev *pdev = sge->adapter->pdev;
	unsigned int size, i;

	for (i = 0; i < SGE_CMDQ_N; i++) {
		struct cmdQ *q = &sge->cmdQ[i];

		q->genbit = 1;
		q->sop = 1;
		q->size = p->cmdQ_size[i];
		q->in_use = 0;
		q->status = 0;
		q->processed = q->cleaned = 0;
		q->stop_thres = 0;
		spin_lock_init(&q->lock);
		size = sizeof(struct cmdQ_e) * q->size;
		q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
		if (!q->entries)
			goto err_no_mem;

		size = sizeof(struct cmdQ_ce) * q->size;
		q->centries = kzalloc(size, GFP_KERNEL);
		if (!q->centries)
			goto err_no_mem;
	}

	/*
	 * CommandQ 0 handles Ethernet and TOE packets, while queue 1 is TOE
	 * only.  For queue 0 set the stop threshold so we can handle one more
	 * packet from each port, plus reserve an additional 24 entries for
	 * Ethernet packets only.  Queue 1 never suspends nor do we reserve
	 * space for Ethernet packets.
	 */
	sge->cmdQ[0].stop_thres = sge->adapter->params.nports *
		(MAX_SKB_FRAGS + 1);
	return 0;

err_no_mem:
	free_tx_resources(sge);
	return -ENOMEM;
}
static inline void setup_ring_params(struct adapter *adapter, u64 addr,
				     u32 size, int base_reg_lo,
				     int base_reg_hi, int size_reg)
{
	writel((u32)addr, adapter->regs + base_reg_lo);
	writel(addr >> 32, adapter->regs + base_reg_hi);
	writel(size, adapter->regs + size_reg);
}
/*
 * Enable/disable VLAN acceleration.
 */
void t1_set_vlan_accel(struct adapter *adapter, int on_off)
{
	struct sge *sge = adapter->sge;

	sge->sge_control &= ~F_VLAN_XTRACT;
	if (on_off)
		sge->sge_control |= F_VLAN_XTRACT;
	if (adapter->open_device_map) {
		writel(sge->sge_control, adapter->regs + A_SG_CONTROL);
		readl(adapter->regs + A_SG_CONTROL);	/* flush */
	}
}
/*
 * Programs the various SGE registers. However, the engine is not yet enabled,
 * but sge->sge_control is setup and ready to go.
 */
static void configure_sge(struct sge *sge, struct sge_params *p)
{
	struct adapter *ap = sge->adapter;

	writel(0, ap->regs + A_SG_CONTROL);
	setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size,
			  A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE);
	setup_ring_params(ap, sge->cmdQ[1].dma_addr, sge->cmdQ[1].size,
			  A_SG_CMD1BASELWR, A_SG_CMD1BASEUPR, A_SG_CMD1SIZE);
	setup_ring_params(ap, sge->freelQ[0].dma_addr,
			  sge->freelQ[0].size, A_SG_FL0BASELWR,
			  A_SG_FL0BASEUPR, A_SG_FL0SIZE);
	setup_ring_params(ap, sge->freelQ[1].dma_addr,
			  sge->freelQ[1].size, A_SG_FL1BASELWR,
			  A_SG_FL1BASEUPR, A_SG_FL1SIZE);

	/* The threshold comparison uses <. */
	writel(SGE_RX_SM_BUF_SIZE + 1, ap->regs + A_SG_FLTHRESHOLD);

	setup_ring_params(ap, sge->respQ.dma_addr, sge->respQ.size,
			  A_SG_RSPBASELWR, A_SG_RSPBASEUPR, A_SG_RSPSIZE);
	writel((u32)sge->respQ.size - 1, ap->regs + A_SG_RSPQUEUECREDIT);

	sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE |
		F_FL1_ENABLE | F_CPL_ENABLE | F_RESPONSE_QUEUE_ENABLE |
		V_CMDQ_PRIORITY(2) | F_DISABLE_CMDQ1_GTS | F_ISCSI_COALESCE |
		V_RX_PKT_OFFSET(sge->rx_pkt_pad);

#if defined(__BIG_ENDIAN_BITFIELD)
	sge->sge_control |= F_ENABLE_BIG_ENDIAN;
#endif

	/* Initialize no-resource timer */
	sge->intrtimer_nres = SGE_INTRTIMER_NRES * core_ticks_per_usec(ap);

	t1_sge_set_coalesce_params(sge, p);
}
/*
 * Return the payload capacity of the jumbo free-list buffers.
 */
static inline unsigned int jumbo_payload_capacity(const struct sge *sge)
{
	return sge->freelQ[sge->jumbo_fl].rx_buffer_size -
		sge->freelQ[sge->jumbo_fl].dma_offset -
		sizeof(struct cpl_rx_data);
}
/*
 * Frees all SGE related resources and the sge structure itself
 */
void t1_sge_destroy(struct sge *sge)
{
	int i;

	for_each_port(sge->adapter, i)
		free_percpu(sge->port_stats[i]);

	kfree(sge->tx_sched);
	free_tx_resources(sge);
	free_rx_resources(sge);
	kfree(sge);
}
/*
 * Allocates new RX buffers on the freelist Q (and tracks them on the freelist
 * context Q) until the Q is full or alloc_skb fails.
 *
 * It is possible that the generation bits already match, indicating that the
 * buffer is already valid and nothing needs to be done. This happens when we
 * copied a received buffer into a new sk_buff during the interrupt processing.
 *
 * If the SGE doesn't automatically align packets properly (!sge->rx_pkt_pad),
 * we specify a RX_OFFSET in order to make sure that the IP header is 4B
 * aligned.
 */
static void refill_free_list(struct sge *sge, struct freelQ *q)
{
	struct pci_dev *pdev = sge->adapter->pdev;
	struct freelQ_ce *ce = &q->centries[q->pidx];
	struct freelQ_e *e = &q->entries[q->pidx];
	unsigned int dma_len = q->rx_buffer_size - q->dma_offset;

	while (q->credits < q->size) {
		struct sk_buff *skb;
		dma_addr_t mapping;

		skb = alloc_skb(q->rx_buffer_size, GFP_ATOMIC);
		if (!skb)
			break;

		skb_reserve(skb, q->dma_offset);
		mapping = pci_map_single(pdev, skb->data, dma_len,
					 PCI_DMA_FROMDEVICE);
		ce->skb = skb;
		pci_unmap_addr_set(ce, dma_addr, mapping);
		pci_unmap_len_set(ce, dma_len, dma_len);
		e->addr_lo = (u32)mapping;
		e->addr_hi = (u64)mapping >> 32;
		e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit);
		wmb();
		e->gen2 = V_CMD_GEN2(q->genbit);

		e++;
		ce++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			q->genbit ^= 1;
			ce = q->centries;
			e = q->entries;
		}
		q->credits++;
	}
}
/*
 * Calls refill_free_list for both free lists. If we cannot fill at least 1/4
 * of both rings, we go into 'few interrupt mode' in order to give the system
 * time to free up resources.
 */
static void freelQs_empty(struct sge *sge)
{
	struct adapter *adapter = sge->adapter;
	u32 irq_reg = readl(adapter->regs + A_SG_INT_ENABLE);
	u32 irqholdoff_reg;

	refill_free_list(sge, &sge->freelQ[0]);
	refill_free_list(sge, &sge->freelQ[1]);

	if (sge->freelQ[0].credits > (sge->freelQ[0].size >> 2) &&
	    sge->freelQ[1].credits > (sge->freelQ[1].size >> 2)) {
		irq_reg |= F_FL_EXHAUSTED;
		irqholdoff_reg = sge->fixed_intrtimer;
	} else {
		/* Clear the F_FL_EXHAUSTED interrupts for now */
		irq_reg &= ~F_FL_EXHAUSTED;
		irqholdoff_reg = sge->intrtimer_nres;
	}
	writel(irqholdoff_reg, adapter->regs + A_SG_INTRTIMER);
	writel(irq_reg, adapter->regs + A_SG_INT_ENABLE);

	/* We reenable the Qs to force a freelist GTS interrupt later */
	doorbell_pio(adapter, F_FL0_ENABLE | F_FL1_ENABLE);
}
#define SGE_PL_INTR_MASK (F_PL_INTR_SGE_ERR | F_PL_INTR_SGE_DATA)
#define SGE_INT_FATAL (F_RESPQ_OVERFLOW | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
#define SGE_INT_ENABLE (F_RESPQ_EXHAUSTED | F_RESPQ_OVERFLOW | \
			F_FL_EXHAUSTED | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
/*
 * Disable SGE Interrupts
 */
void t1_sge_intr_disable(struct sge *sge)
{
	u32 val = readl(sge->adapter->regs + A_PL_ENABLE);

	writel(val & ~SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
	writel(0, sge->adapter->regs + A_SG_INT_ENABLE);
}
/*
 * Enable SGE interrupts.
 */
void t1_sge_intr_enable(struct sge *sge)
{
	u32 en = SGE_INT_ENABLE;
	u32 val = readl(sge->adapter->regs + A_PL_ENABLE);

	if (sge->adapter->flags & TSO_CAPABLE)
		en &= ~F_PACKET_TOO_BIG;
	writel(en, sge->adapter->regs + A_SG_INT_ENABLE);
	writel(val | SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
}
/*
 * Clear SGE interrupts.
 */
void t1_sge_intr_clear(struct sge *sge)
{
	writel(SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_CAUSE);
	writel(0xffffffff, sge->adapter->regs + A_SG_INT_CAUSE);
}
/*
 * SGE 'Error' interrupt handler
 */
int t1_sge_intr_error_handler(struct sge *sge)
{
	struct adapter *adapter = sge->adapter;
	u32 cause = readl(adapter->regs + A_SG_INT_CAUSE);

	if (adapter->flags & TSO_CAPABLE)
		cause &= ~F_PACKET_TOO_BIG;
	if (cause & F_RESPQ_EXHAUSTED)
		sge->stats.respQ_empty++;
	if (cause & F_RESPQ_OVERFLOW) {
		sge->stats.respQ_overflow++;
		CH_ALERT("%s: SGE response queue overflow\n",
			 adapter->name);
	}
	if (cause & F_FL_EXHAUSTED) {
		sge->stats.freelistQ_empty++;
		freelQs_empty(sge);
	}
	if (cause & F_PACKET_TOO_BIG) {
		sge->stats.pkt_too_big++;
		CH_ALERT("%s: SGE max packet size exceeded\n",
			 adapter->name);
	}
	if (cause & F_PACKET_MISMATCH) {
		sge->stats.pkt_mismatch++;
		CH_ALERT("%s: SGE packet mismatch\n", adapter->name);
	}
	if (cause & SGE_INT_FATAL)
		t1_fatal_err(adapter);

	writel(cause, adapter->regs + A_SG_INT_CAUSE);
	return 0;
}
const struct sge_intr_counts *t1_sge_get_intr_counts(const struct sge *sge)
{
	return &sge->stats;
}
void t1_sge_get_port_stats(const struct sge *sge, int port,
			   struct sge_port_stats *ss)
{
	int cpu;

	memset(ss, 0, sizeof(*ss));
	for_each_possible_cpu(cpu) {
		struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[port], cpu);

		ss->rx_packets += st->rx_packets;
		ss->rx_cso_good += st->rx_cso_good;
		ss->tx_packets += st->tx_packets;
		ss->tx_cso += st->tx_cso;
		ss->tx_tso += st->tx_tso;
		ss->vlan_xtract += st->vlan_xtract;
		ss->vlan_insert += st->vlan_insert;
	}
}
/**
 *	recycle_fl_buf - recycle a free list buffer
 *	@fl: the free list
 *	@idx: index of buffer to recycle
 *
 *	Recycles the specified buffer on the given free list by adding it at
 *	the next available slot on the list.
 */
static void recycle_fl_buf(struct freelQ *fl, int idx)
{
	struct freelQ_e *from = &fl->entries[idx];
	struct freelQ_e *to = &fl->entries[fl->pidx];

	fl->centries[fl->pidx] = fl->centries[idx];
	to->addr_lo = from->addr_lo;
	to->addr_hi = from->addr_hi;
	to->len_gen = G_CMD_LEN(from->len_gen) | V_CMD_GEN1(fl->genbit);
	wmb();
	to->gen2 = V_CMD_GEN2(fl->genbit);
	fl->credits++;

	if (++fl->pidx == fl->size) {
		fl->pidx = 0;
		fl->genbit ^= 1;
	}
}
/**
 *	get_packet - return the next ingress packet buffer
 *	@pdev: the PCI device that received the packet
 *	@fl: the SGE free list holding the packet
 *	@len: the actual packet length, excluding any SGE padding
 *	@dma_pad: padding at beginning of buffer left by SGE DMA
 *	@skb_pad: padding to be used if the packet is copied
 *	@copy_thres: length threshold under which a packet should be copied
 *	@drop_thres: # of remaining buffers before we start dropping packets
 *
 *	Get the next packet from a free list and complete setup of the
 *	sk_buff.  If the packet is small we make a copy and recycle the
 *	original buffer, otherwise we use the original buffer itself.  If a
 *	positive drop threshold is supplied packets are dropped and their
 *	buffers recycled if (a) the number of remaining buffers is under the
 *	threshold and the packet is too big to copy, or (b) the packet should
 *	be copied but there is no memory for the copy.
 */
static inline struct sk_buff *get_packet(struct pci_dev *pdev,
					 struct freelQ *fl, unsigned int len,
					 int dma_pad, int skb_pad,
					 unsigned int copy_thres,
					 unsigned int drop_thres)
{
	struct sk_buff *skb;
	struct freelQ_ce *ce = &fl->centries[fl->cidx];

	if (len < copy_thres) {
		skb = alloc_skb(len + skb_pad, GFP_ATOMIC);
		if (likely(skb != NULL)) {
			skb_reserve(skb, skb_pad);
			skb_put(skb, len);
			pci_dma_sync_single_for_cpu(pdev,
					    pci_unmap_addr(ce, dma_addr),
					    pci_unmap_len(ce, dma_len),
					    PCI_DMA_FROMDEVICE);
			memcpy(skb->data, ce->skb->data + dma_pad, len);
			pci_dma_sync_single_for_device(pdev,
					    pci_unmap_addr(ce, dma_addr),
					    pci_unmap_len(ce, dma_len),
					    PCI_DMA_FROMDEVICE);
		} else if (!drop_thres)
			goto use_orig_buf;

		recycle_fl_buf(fl, fl->cidx);
		return skb;
	}

	if (fl->credits < drop_thres) {
		recycle_fl_buf(fl, fl->cidx);
		return NULL;
	}

use_orig_buf:
	pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
			 pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
	skb = ce->skb;
	skb_reserve(skb, dma_pad);
	skb_put(skb, len);
	return skb;
}
/**
 *	unexpected_offload - handle an unexpected offload packet
 *	@adapter: the adapter
 *	@fl: the free list that received the packet
 *
 *	Called when we receive an unexpected offload packet (e.g., the TOE
 *	function is disabled or the card is a NIC).  Prints a message and
 *	recycles the buffer.
 */
static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
{
	struct freelQ_ce *ce = &fl->centries[fl->cidx];
	struct sk_buff *skb = ce->skb;

	pci_dma_sync_single_for_cpu(adapter->pdev, pci_unmap_addr(ce, dma_addr),
				    pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
	CH_ERR("%s: unexpected offload packet, cmd %u\n",
	       adapter->name, *skb->data);
	recycle_fl_buf(fl, fl->cidx);
}
/*
 * T1/T2 SGE limits the maximum DMA size per TX descriptor to
 * SGE_TX_DESC_MAX_PLEN (16KB). If the PAGE_SIZE is larger than 16KB, the
 * stack might send more than SGE_TX_DESC_MAX_PLEN in a contiguous manner.
 * Note that the *_large_page_tx_descs stuff will be optimized out when
 * PAGE_SIZE <= SGE_TX_DESC_MAX_PLEN.
 *
 * compute_large_page_tx_descs() computes how many additional descriptors are
 * required to break down the stack's request.
 */
static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb)
{
	unsigned int count = 0;

	if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
		unsigned int nfrags = skb_shinfo(skb)->nr_frags;
		unsigned int i, len = skb->len - skb->data_len;

		while (len > SGE_TX_DESC_MAX_PLEN) {
			count++;
			len -= SGE_TX_DESC_MAX_PLEN;
		}
		for (i = 0; nfrags--; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			while (len > SGE_TX_DESC_MAX_PLEN) {
				count++;
				len -= SGE_TX_DESC_MAX_PLEN;
			}
		}
	}
	return count;
}
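
/*
 * Worked example (editorial, hypothetical numbers): with a 64KB PAGE_SIZE a
 * 20000-byte linear area needs one extra descriptor, since
 * 20000 = 16384 + 3616; each 16KB of overflow in the head or in a fragment
 * bumps the count by one.
 */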
/*
 * Write a cmdQ entry.
 *
 * Since this function writes the 'flags' field, it must not be used to
 * write the first cmdQ entry.
 */
static inline void write_tx_desc(struct cmdQ_e *e, dma_addr_t mapping,
				 unsigned int len, unsigned int gen,
				 unsigned int eop)
{
	if (unlikely(len > SGE_TX_DESC_MAX_PLEN))
		BUG();
	e->addr_lo = (u32)mapping;
	e->addr_hi = (u64)mapping >> 32;
	e->len_gen = V_CMD_LEN(len) | V_CMD_GEN1(gen);
	e->flags = F_CMD_DATAVALID | V_CMD_EOP(eop) | V_CMD_GEN2(gen);
}
/*
 * See comment for previous function.
 *
 * write_large_page_tx_descs() writes additional SGE tx descriptors if
 * *desc_len exceeds HW's capability.
 */
static inline unsigned int write_large_page_tx_descs(unsigned int pidx,
						     struct cmdQ_e **e,
						     struct cmdQ_ce **ce,
						     unsigned int *gen,
						     dma_addr_t *desc_mapping,
						     unsigned int *desc_len,
						     unsigned int nfrags,
						     struct cmdQ *q)
{
	if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
		struct cmdQ_e *e1 = *e;
		struct cmdQ_ce *ce1 = *ce;

		while (*desc_len > SGE_TX_DESC_MAX_PLEN) {
			*desc_len -= SGE_TX_DESC_MAX_PLEN;
			write_tx_desc(e1, *desc_mapping, SGE_TX_DESC_MAX_PLEN,
				      *gen, nfrags == 0 && *desc_len == 0);
			ce1->skb = NULL;
			pci_unmap_len_set(ce1, dma_len, 0);
			*desc_mapping += SGE_TX_DESC_MAX_PLEN;
			if (*desc_len) {
				ce1++;
				e1++;
				if (++pidx == q->size) {
					pidx = 0;
					*gen ^= 1;
					ce1 = q->centries;
					e1 = q->entries;
				}
			}
		}
		*e = e1;
		*ce = ce1;
	}
	return pidx;
}
/*
 * Write the command descriptors to transmit the given skb starting at
 * descriptor pidx with the given generation.
 */
static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
				  unsigned int pidx, unsigned int gen,
				  struct cmdQ *q)
{
	dma_addr_t mapping, desc_mapping;
	struct cmdQ_e *e, *e1;
	struct cmdQ_ce *ce;
	unsigned int i, flags, first_desc_len, desc_len,
		     nfrags = skb_shinfo(skb)->nr_frags;

	e = e1 = &q->entries[pidx];
	ce = &q->centries[pidx];

	mapping = pci_map_single(adapter->pdev, skb->data,
				 skb->len - skb->data_len, PCI_DMA_TODEVICE);

	desc_mapping = mapping;
	desc_len = skb->len - skb->data_len;

	flags = F_CMD_DATAVALID | F_CMD_SOP |
		V_CMD_EOP(nfrags == 0 && desc_len <= SGE_TX_DESC_MAX_PLEN) |
		V_CMD_GEN2(gen);
	first_desc_len = (desc_len <= SGE_TX_DESC_MAX_PLEN) ?
		desc_len : SGE_TX_DESC_MAX_PLEN;
	e->addr_lo = (u32)desc_mapping;
	e->addr_hi = (u64)desc_mapping >> 32;
	e->len_gen = V_CMD_LEN(first_desc_len) | V_CMD_GEN1(gen);
	ce->skb = NULL;
	pci_unmap_len_set(ce, dma_len, 0);

	if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN &&
	    desc_len > SGE_TX_DESC_MAX_PLEN) {
		desc_mapping += first_desc_len;
		desc_len -= first_desc_len;
		e1++;
		ce++;
		if (++pidx == q->size) {
			pidx = 0;
			gen ^= 1;
			e1 = q->entries;
			ce = q->centries;
		}
		pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
						 &desc_mapping, &desc_len,
						 nfrags, q);
	}

	if (likely(desc_len))
		write_tx_desc(e1, desc_mapping, desc_len, gen,
			      nfrags == 0);

	ce->skb = NULL;
	pci_unmap_addr_set(ce, dma_addr, mapping);
	pci_unmap_len_set(ce, dma_len, skb->len - skb->data_len);

	for (i = 0; nfrags--; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		e1++;
		ce++;
		if (++pidx == q->size) {
			pidx = 0;
			gen ^= 1;
			e1 = q->entries;
			ce = q->centries;
		}

		mapping = pci_map_page(adapter->pdev, frag->page,
				       frag->page_offset, frag->size,
				       PCI_DMA_TODEVICE);
		desc_mapping = mapping;
		desc_len = frag->size;

		pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
						 &desc_mapping, &desc_len,
						 nfrags, q);
		if (likely(desc_len))
			write_tx_desc(e1, desc_mapping, desc_len, gen,
				      nfrags == 0);
		ce->skb = NULL;
		pci_unmap_addr_set(ce, dma_addr, mapping);
		pci_unmap_len_set(ce, dma_len, frag->size);
	}
	ce->skb = skb;
	wmb();
	e->flags = flags;
}
/*
 * Clean up completed Tx buffers.
 */
static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q)
{
	unsigned int reclaim = q->processed - q->cleaned;

	if (reclaim) {
		pr_debug("reclaim_completed_tx processed:%d cleaned:%d\n",
			 q->processed, q->cleaned);
		free_cmdQ_buffers(sge, q, reclaim);
		q->cleaned += reclaim;
	}
}
/*
 * Called from tasklet. Checks the scheduler for any
 * pending skbs that can be sent.
 */
static void restart_sched(unsigned long arg)
{
	struct sge *sge = (struct sge *) arg;
	struct adapter *adapter = sge->adapter;
	struct cmdQ *q = &sge->cmdQ[0];
	struct sk_buff *skb;
	unsigned int credits, queued_skb = 0;

	spin_lock(&q->lock);
	reclaim_completed_tx(sge, q);

	credits = q->size - q->in_use;
	pr_debug("restart_sched credits=%d\n", credits);
	while ((skb = sched_skb(sge, NULL, credits)) != NULL) {
		unsigned int genbit, pidx, count;
		count = 1 + skb_shinfo(skb)->nr_frags;
		count += compute_large_page_tx_descs(skb);
		q->in_use += count;
		genbit = q->genbit;
		pidx = q->pidx;
		q->pidx += count;
		if (q->pidx >= q->size) {
			q->pidx -= q->size;
			q->genbit ^= 1;
		}
		write_tx_descs(adapter, skb, pidx, genbit, q);
		credits = q->size - q->in_use;
		queued_skb = 1;
	}

	if (queued_skb) {
		clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
		if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
			set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
		}
	}
	spin_unlock(&q->lock);
}
/**
 *	sge_rx - process an ingress ethernet packet
 *	@sge: the sge structure
 *	@fl: the free list that contains the packet buffer
 *	@len: the packet length
 *
 *	Process an ingress ethernet packet and deliver it to the stack.
 */
static int sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
{
	struct sk_buff *skb;
	struct cpl_rx_pkt *p;
	struct adapter *adapter = sge->adapter;
	struct sge_port_stats *st;

	skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad,
			 sge->rx_pkt_pad, 2, SGE_RX_COPY_THRES,
			 SGE_RX_DROP_THRES);
	if (unlikely(!skb)) {
		sge->stats.rx_drops++;
		return 0;
	}

	p = (struct cpl_rx_pkt *)skb->data;
	skb_pull(skb, sizeof(*p));
	if (p->iff >= adapter->params.nports) {
		kfree_skb(skb);
		return 0;
	}

	skb->dev = adapter->port[p->iff].dev;
	skb->dev->last_rx = jiffies;
	st = per_cpu_ptr(sge->port_stats[p->iff], smp_processor_id());
	st->rx_packets++;

	skb->protocol = eth_type_trans(skb, skb->dev);
	if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff &&
	    skb->protocol == htons(ETH_P_IP) &&
	    (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) {
		++st->rx_cso_good;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else
		skb->ip_summed = CHECKSUM_NONE;

	if (unlikely(adapter->vlan_grp && p->vlan_valid)) {
		st->vlan_xtract++;
#ifdef CONFIG_CHELSIO_T1_NAPI
		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
					 ntohs(p->vlan));
#else
		vlan_hwaccel_rx(skb, adapter->vlan_grp,
				ntohs(p->vlan));
#endif
	} else {
#ifdef CONFIG_CHELSIO_T1_NAPI
		netif_receive_skb(skb);
#else
		netif_rx(skb);
#endif
	}
	return 0;
}
/*
 * Returns true if a command queue has enough available descriptors that
 * we can resume Tx operation after temporarily disabling its packet queue.
 */
static inline int enough_free_Tx_descs(const struct cmdQ *q)
{
	unsigned int r = q->processed - q->cleaned;

	return q->in_use - r < (q->size >> 1);
}
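
/*
 * Editorial note (an interpretation of the test above): q->in_use - r is
 * the number of descriptors still genuinely outstanding once completed but
 * not yet reclaimed entries are discounted, so Tx may resume as soon as
 * less than half the ring is truly in flight.
 */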
/*
 * Called when sufficient space has become available in the SGE command queues
 * after the Tx packet schedulers have been suspended; restarts the Tx path.
 */
static void restart_tx_queues(struct sge *sge)
{
	struct adapter *adap = sge->adapter;
	int i;

	if (!enough_free_Tx_descs(&sge->cmdQ[0]))
		return;

	for_each_port(adap, i) {
		struct net_device *nd = adap->port[i].dev;

		if (test_and_clear_bit(nd->if_port, &sge->stopped_tx_queues) &&
		    netif_running(nd)) {
			sge->stats.cmdQ_restarted[2]++;
			netif_wake_queue(nd);
		}
	}
}
/*
 * update_tx_info is called from the interrupt handler/NAPI to return cmdQ0
 * information.
 */
static unsigned int update_tx_info(struct adapter *adapter,
				   unsigned int flags,
				   unsigned int pr0)
{
	struct sge *sge = adapter->sge;
	struct cmdQ *cmdq = &sge->cmdQ[0];

	cmdq->processed += pr0;
	if (flags & (F_FL0_ENABLE | F_FL1_ENABLE)) {
		freelQs_empty(sge);
		flags &= ~(F_FL0_ENABLE | F_FL1_ENABLE);
	}
	if (flags & F_CMDQ0_ENABLE) {
		clear_bit(CMDQ_STAT_RUNNING, &cmdq->status);

		if (cmdq->cleaned + cmdq->in_use != cmdq->processed &&
		    !test_and_set_bit(CMDQ_STAT_LAST_PKT_DB, &cmdq->status)) {
			set_bit(CMDQ_STAT_RUNNING, &cmdq->status);
			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
		}
		if (sge->tx_sched)
			tasklet_hi_schedule(&sge->tx_sched->sched_tsk);

		flags &= ~F_CMDQ0_ENABLE;
	}

	if (unlikely(sge->stopped_tx_queues != 0))
		restart_tx_queues(sge);

	return flags;
}
/*
 * Process SGE responses, up to the supplied budget.  Returns the number of
 * responses processed.  A negative budget is effectively unlimited.
 */
static int process_responses(struct adapter *adapter, int budget)
{
	struct sge *sge = adapter->sge;
	struct respQ *q = &sge->respQ;
	struct respQ_e *e = &q->entries[q->cidx];
	int budget_left = budget;
	unsigned int flags = 0;
	unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};

	while (likely(budget_left && e->GenerationBit == q->genbit)) {
		flags |= e->Qsleeping;

		cmdq_processed[0] += e->Cmdq0CreditReturn;
		cmdq_processed[1] += e->Cmdq1CreditReturn;

		/* We batch updates to the TX side to avoid cacheline
		 * ping-pong of TX state information on MP where the sender
		 * might run on a different CPU than this function...
		 */
		if (unlikely(flags & F_CMDQ0_ENABLE || cmdq_processed[0] > 64)) {
			flags = update_tx_info(adapter, flags, cmdq_processed[0]);
			cmdq_processed[0] = 0;
		}
		if (unlikely(cmdq_processed[1] > 16)) {
			sge->cmdQ[1].processed += cmdq_processed[1];
			cmdq_processed[1] = 0;
		}
		if (likely(e->DataValid)) {
			struct freelQ *fl = &sge->freelQ[e->FreelistQid];

			BUG_ON(!e->Sop || !e->Eop);
			if (unlikely(e->Offload))
				unexpected_offload(adapter, fl);
			else
				sge_rx(sge, fl, e->BufferLength);

			/*
			 * Note: this depends on each packet consuming a
			 * single free-list buffer; cf. the BUG above.
			 */
			if (++fl->cidx == fl->size)
				fl->cidx = 0;
			if (unlikely(--fl->credits <
				     fl->size - SGE_FREEL_REFILL_THRESH))
				refill_free_list(sge, fl);
		} else
			sge->stats.pure_rsps++;

		e++;
		if (unlikely(++q->cidx == q->size)) {
			q->cidx = 0;
			q->genbit ^= 1;
			e = q->entries;
		}
		prefetch(e);

		if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
			writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
			q->credits = 0;
		}
		--budget_left;
	}

	flags = update_tx_info(adapter, flags, cmdq_processed[0]);
	sge->cmdQ[1].processed += cmdq_processed[1];

	budget -= budget_left;
	return budget;
}
#ifdef CONFIG_CHELSIO_T1_NAPI
/*
 * A simpler version of process_responses() that handles only pure (i.e.,
 * non data-carrying) responses.  Such responses are too light-weight to
 * justify calling a softirq when using NAPI, so we handle them specially in
 * hard interrupt context.  The function is called with a pointer to a
 * response, which the caller must ensure is a valid pure response.  Returns
 * 1 if it encounters a valid data-carrying response, 0 otherwise.
 */
static int process_pure_responses(struct adapter *adapter, struct respQ_e *e)
{
	struct sge *sge = adapter->sge;
	struct respQ *q = &sge->respQ;
	unsigned int flags = 0;
	unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};

	do {
		flags |= e->Qsleeping;

		cmdq_processed[0] += e->Cmdq0CreditReturn;
		cmdq_processed[1] += e->Cmdq1CreditReturn;

		e++;
		if (unlikely(++q->cidx == q->size)) {
			q->cidx = 0;
			q->genbit ^= 1;
			e = q->entries;
		}
		prefetch(e);

		if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
			writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
			q->credits = 0;
		}
		sge->stats.pure_rsps++;
	} while (e->GenerationBit == q->genbit && !e->DataValid);

	flags = update_tx_info(adapter, flags, cmdq_processed[0]);
	sge->cmdQ[1].processed += cmdq_processed[1];

	return e->GenerationBit == q->genbit;
}
/*
 * Handler for new data events when using NAPI.  This does not need any locking
 * or protection from interrupts as data interrupts are off at this point and
 * other adapter interrupts do not interfere.
 */
int t1_poll(struct net_device *dev, int *budget)
{
	struct adapter *adapter = dev->priv;
	int effective_budget = min(*budget, dev->quota);
	int work_done = process_responses(adapter, effective_budget);

	*budget -= work_done;
	dev->quota -= work_done;

	if (work_done >= effective_budget)
		return 1;

	spin_lock_irq(&adapter->async_lock);
	__netif_rx_complete(dev);
	writel(adapter->sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
	       adapter->regs + A_PL_ENABLE);
	spin_unlock_irq(&adapter->async_lock);

	return 0;
}
/*
 * NAPI version of the main interrupt handler.
 */
irqreturn_t t1_interrupt(int irq, void *data)
{
	struct adapter *adapter = data;
	struct net_device *dev = adapter->sge->netdev;
	struct sge *sge = adapter->sge;
	u32 cause;
	int handled = 0;

	cause = readl(adapter->regs + A_PL_CAUSE);
	if (cause == 0 || cause == ~0)
		return IRQ_NONE;

	spin_lock(&adapter->async_lock);
	if (cause & F_PL_INTR_SGE_DATA) {
		struct respQ *q = &adapter->sge->respQ;
		struct respQ_e *e = &q->entries[q->cidx];

		handled = 1;
		writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);

		if (e->GenerationBit == q->genbit &&
		    __netif_rx_schedule_prep(dev)) {
			if (e->DataValid || process_pure_responses(adapter, e)) {
				/* mask off data IRQ */
				writel(adapter->slow_intr_mask,
				       adapter->regs + A_PL_ENABLE);
				__netif_rx_schedule(sge->netdev);
				goto unlock;
			}
			/* no data, no NAPI needed */
			netif_poll_enable(dev);
		}
		writel(q->cidx, adapter->regs + A_SG_SLEEPING);
	} else
		handled = t1_slow_intr_handler(adapter);

	if (!handled)
		sge->stats.unhandled_irqs++;
unlock:
	spin_unlock(&adapter->async_lock);
	return IRQ_RETVAL(handled != 0);
}
#else
/*
 * Main interrupt handler, optimized assuming that we took a 'DATA'
 * interrupt.
 *
 * 1. Clear the interrupt
 * 2. Loop while we find valid descriptors and process them; accumulate
 *    information that can be processed after the loop
 * 3. Tell the SGE at which index we stopped processing descriptors
 * 4. Bookkeeping; free TX buffers, ring doorbell if there are any
 *    outstanding TX buffers waiting, replenish RX buffers, potentially
 *    reenable upper layers if they were turned off due to lack of TX
 *    resources which are available again.
 * 5. If we took an interrupt, but no valid respQ descriptors were found, we
 *    let the slow_intr_handler run and do error handling.
 */
irqreturn_t t1_interrupt(int irq, void *cookie)
{
	int work_done;
	struct respQ_e *e;
	struct adapter *adapter = cookie;
	struct respQ *Q = &adapter->sge->respQ;

	spin_lock(&adapter->async_lock);
	e = &Q->entries[Q->cidx];
	prefetch(e);

	writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);

	if (likely(e->GenerationBit == Q->genbit))
		work_done = process_responses(adapter, -1);
	else
		work_done = t1_slow_intr_handler(adapter);

	/*
	 * The unconditional clearing of the PL_CAUSE above may have raced
	 * with DMA completion and the corresponding generation of a response
	 * to cause us to miss the resulting data interrupt. The next write
	 * is also unconditional to recover the missed interrupt and render
	 * this race harmless.
	 */
	writel(Q->cidx, adapter->regs + A_SG_SLEEPING);

	if (!work_done)
		adapter->sge->stats.unhandled_irqs++;
	spin_unlock(&adapter->async_lock);
	return IRQ_RETVAL(work_done != 0);
}
#endif
/*
 * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it.
 *
 * The code figures out how many entries the sk_buff will require in the
 * cmdQ and updates the cmdQ data structure with the state once the enqueue
 * is complete.  Then, it doesn't access the global structure anymore, but
 * uses the corresponding fields on the stack.  In conjunction with a spinlock
 * around that code, we can make the function reentrant without holding the
 * lock when we actually enqueue (which might be expensive, especially on
 * architectures with IO MMUs).
 *
 * This runs with softirqs disabled.
 */
static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
		     unsigned int qid, struct net_device *dev)
{
	struct sge *sge = adapter->sge;
	struct cmdQ *q = &sge->cmdQ[qid];
	unsigned int credits, pidx, genbit, count, use_sched_skb = 0;

	if (!spin_trylock(&q->lock))
		return NETDEV_TX_LOCKED;

	reclaim_completed_tx(sge, q);

	pidx = q->pidx;
	credits = q->size - q->in_use;
	count = 1 + skb_shinfo(skb)->nr_frags;
	count += compute_large_page_tx_descs(skb);

	/* Ethernet packet */
	if (unlikely(credits < count)) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);
			set_bit(dev->if_port, &sge->stopped_tx_queues);
			sge->stats.cmdQ_full[2]++;
			CH_ERR("%s: Tx ring full while queue awake!\n",
			       adapter->name);
		}
		spin_unlock(&q->lock);
		return NETDEV_TX_BUSY;
	}

	if (unlikely(credits - count < q->stop_thres)) {
		netif_stop_queue(dev);
		set_bit(dev->if_port, &sge->stopped_tx_queues);
		sge->stats.cmdQ_full[2]++;
	}

	/* T204 cmdQ0 skbs that are destined for a certain port have to go
	 * through the scheduler.
	 */
	if (sge->tx_sched && !qid && skb->dev) {
use_sched:
		use_sched_skb = 1;
		/* Note that the scheduler might return a different skb than
		 * the one passed in.
		 */
		skb = sched_skb(sge, skb, credits);
		if (!skb) {
			spin_unlock(&q->lock);
			return NETDEV_TX_OK;
		}
		pidx = q->pidx;
		count = 1 + skb_shinfo(skb)->nr_frags;
		count += compute_large_page_tx_descs(skb);
	}

	q->in_use += count;
	genbit = q->genbit;
	pidx = q->pidx;
	q->pidx += count;
	if (q->pidx >= q->size) {
		q->pidx -= q->size;
		q->genbit ^= 1;
	}
	spin_unlock(&q->lock);

	write_tx_descs(adapter, skb, pidx, genbit, q);

	/*
	 * We always ring the doorbell for cmdQ1.  For cmdQ0, we only ring
	 * the doorbell if the Q is asleep. There is a natural race, where
	 * the hardware is going to sleep just after we checked, however,
	 * then the interrupt handler will detect the outstanding TX packet
	 * and ring the doorbell for us.
	 */
	if (qid)
		doorbell_pio(adapter, F_CMDQ1_ENABLE);
	else {
		clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
		if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
			set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
		}
	}

	if (use_sched_skb) {
		if (spin_trylock(&q->lock)) {
			credits = q->size - q->in_use;
			skb = NULL;
			goto use_sched;
		}
	}
	return NETDEV_TX_OK;
}
#define MK_ETH_TYPE_MSS(type, mss) (((mss) & 0x3FFF) | ((type) << 14))
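
/*
 * Worked example (editorial): MK_ETH_TYPE_MSS(CPL_ETH_II, 1460) packs the
 * 1460-byte MSS into the low 14 bits and the Ethernet framing type into
 * the top two bits of the 16-bit eth_type_mss field filled in below.
 */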
/**
 *	eth_hdr_len - return the length of an Ethernet header
 *	@data: pointer to the start of the Ethernet header
 *
 *	Returns the length of an Ethernet header, including optional VLAN tag.
 */
static inline int eth_hdr_len(const void *data)
{
	const struct ethhdr *e = data;

	return e->h_proto == htons(ETH_P_8021Q) ? VLAN_ETH_HLEN : ETH_HLEN;
}
/*
 * Adds the CPL header to the sk_buff and passes it to t1_sge_tx.
 */
int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct adapter *adapter = dev->priv;
	struct sge *sge = adapter->sge;
	struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[dev->if_port],
						smp_processor_id());
	struct cpl_tx_pkt *cpl;
	struct sk_buff *orig_skb = skb;
	int ret;

	if (skb->protocol == htons(ETH_P_CPL5))
		goto send;

	if (skb_shinfo(skb)->gso_size) {
		int eth_type;
		struct cpl_tx_pkt_lso *hdr;

		st->tx_tso++;

		eth_type = skb->nh.raw - skb->data == ETH_HLEN ?
			CPL_ETH_II : CPL_ETH_II_VLAN;

		hdr = (struct cpl_tx_pkt_lso *)skb_push(skb, sizeof(*hdr));
		hdr->opcode = CPL_TX_PKT_LSO;
		hdr->ip_csum_dis = hdr->l4_csum_dis = 0;
		hdr->ip_hdr_words = skb->nh.iph->ihl;
		hdr->tcp_hdr_words = skb->h.th->doff;
		hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
						skb_shinfo(skb)->gso_size));
		hdr->len = htonl(skb->len - sizeof(*hdr));
		cpl = (struct cpl_tx_pkt *)hdr;
	} else {
		/*
		 * Packets shorter than ETH_HLEN can break the MAC, drop them
		 * early.  Also, we may get oversized packets because some
		 * parts of the kernel don't handle our unusual hard_header_len
		 * right, drop those too.
		 */
		if (unlikely(skb->len < ETH_HLEN ||
			     skb->len > dev->mtu + eth_hdr_len(skb->data))) {
			pr_debug("%s: packet size %d hdr %d mtu%d\n", dev->name,
				 skb->len, eth_hdr_len(skb->data), dev->mtu);
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}

		/*
		 * We are using a non-standard hard_header_len and some kernel
		 * components, such as pktgen, do not handle it right.
		 * Complain when this happens but try to fix things up.
		 */
		if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) {
			pr_debug("%s: headroom %d header_len %d\n", dev->name,
				 skb_headroom(skb), dev->hard_header_len);

			if (net_ratelimit())
				printk(KERN_ERR "%s: inadequate headroom in "
				       "Tx packet\n", dev->name);
			skb = skb_realloc_headroom(skb, sizeof(*cpl));
			dev_kfree_skb_any(orig_skb);
			if (!skb)
				return NETDEV_TX_OK;
		}

		if (!(adapter->flags & UDP_CSUM_CAPABLE) &&
		    skb->ip_summed == CHECKSUM_PARTIAL &&
		    skb->nh.iph->protocol == IPPROTO_UDP) {
			if (unlikely(skb_checksum_help(skb))) {
				pr_debug("%s: unable to do udp checksum\n", dev->name);
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
		}

		/* Hmmm, assuming to catch the gratuitous ARP... and we'll use
		 * it to flush out stuck espi packets...
		 */
		if ((unlikely(!adapter->sge->espibug_skb[dev->if_port]))) {
			if (skb->protocol == htons(ETH_P_ARP) &&
			    skb->nh.arph->ar_op == htons(ARPOP_REQUEST)) {
				adapter->sge->espibug_skb[dev->if_port] = skb;
				/* We want to re-use this skb later. We
				 * simply bump the reference count and it
				 * will not be freed...
				 */
				skb = skb_get(skb);
			}
		}

		cpl = (struct cpl_tx_pkt *)__skb_push(skb, sizeof(*cpl));
		cpl->opcode = CPL_TX_PKT;
		cpl->ip_csum_dis = 1;	/* SW calculates IP csum */
		cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_PARTIAL ? 0 : 1;
		/* the length field isn't used so don't bother setting it */

		st->tx_cso += (skb->ip_summed == CHECKSUM_PARTIAL);
	}
	cpl->iff = dev->if_port;

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
	if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
		cpl->vlan_valid = 1;
		cpl->vlan = htons(vlan_tx_tag_get(skb));
		st->vlan_insert++;
	} else
#endif
		cpl->vlan_valid = 0;

send:
	st->tx_packets++;
	dev->trans_start = jiffies;
	ret = t1_sge_tx(skb, adapter, 0, dev);

	/* If transmit busy, and we reallocated skb's due to headroom limit,
	 * then silently discard to avoid leak.
	 */
	if (unlikely(ret != NETDEV_TX_OK && skb != orig_skb)) {
		dev_kfree_skb_any(skb);
		ret = NETDEV_TX_OK;
	}
	return ret;
}
/*
 * Callback for the Tx buffer reclaim timer.  Runs with softirqs disabled.
 */
static void sge_tx_reclaim_cb(unsigned long data)
{
	int i;
	struct sge *sge = (struct sge *)data;

	for (i = 0; i < SGE_CMDQ_N; ++i) {
		struct cmdQ *q = &sge->cmdQ[i];

		if (!spin_trylock(&q->lock))
			continue;

		reclaim_completed_tx(sge, q);
		if (i == 0 && q->in_use) {	/* flush pending credits */
			writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
		}
		spin_unlock(&q->lock);
	}
	mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
}
/*
 * Propagate changes of the SGE coalescing parameters to the HW.
 */
int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p)
{
	sge->fixed_intrtimer = p->rx_coalesce_usecs *
		core_ticks_per_usec(sge->adapter);
	writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER);
	return 0;
}
/*
 * Allocates both RX and TX resources and configures the SGE. However,
 * the hardware is not enabled yet.
 */
int t1_sge_configure(struct sge *sge, struct sge_params *p)
{
	if (alloc_rx_resources(sge, p))
		return -ENOMEM;
	if (alloc_tx_resources(sge, p)) {
		free_rx_resources(sge);
		return -ENOMEM;
	}
	configure_sge(sge, p);

	/*
	 * Now that we have sized the free lists calculate the payload
	 * capacity of the large buffers.  Other parts of the driver use
	 * this to set the max offload coalescing size so that RX packets
	 * do not overflow our large buffers.
	 */
	p->large_buf_capacity = jumbo_payload_capacity(sge);
	return 0;
}
/*
 * Disables the DMA engine.
 */
void t1_sge_stop(struct sge *sge)
{
	int i;

	writel(0, sge->adapter->regs + A_SG_CONTROL);
	readl(sge->adapter->regs + A_SG_CONTROL);	/* flush */

	if (is_T2(sge->adapter))
		del_timer_sync(&sge->espibug_timer);

	del_timer_sync(&sge->tx_reclaim_timer);
	if (sge->tx_sched)
		tx_sched_stop(sge);

	for (i = 0; i < MAX_NPORTS; i++)
		if (sge->espibug_skb[i])
			kfree_skb(sge->espibug_skb[i]);
}
/*
 * Enables the DMA engine.
 */
void t1_sge_start(struct sge *sge)
{
	refill_free_list(sge, &sge->freelQ[0]);
	refill_free_list(sge, &sge->freelQ[1]);

	writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL);
	doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE);
	readl(sge->adapter->regs + A_SG_CONTROL);	/* flush */

	mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);

	if (is_T2(sge->adapter))
		mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
}
/*
 * Callback for the T2 ESPI 'stuck packet feature' workaround
 */
static void espibug_workaround_t204(unsigned long data)
{
	struct adapter *adapter = (struct adapter *)data;
	struct sge *sge = adapter->sge;
	unsigned int nports = adapter->params.nports;
	u32 seop[MAX_NPORTS];

	if (adapter->open_device_map & PORT_MASK) {
		int i;

		if (t1_espi_get_mon_t204(adapter, &(seop[0]), 0) < 0)
			return;

		for (i = 0; i < nports; i++) {
			struct sk_buff *skb = sge->espibug_skb[i];

			if (!netif_running(adapter->port[i].dev) ||
			    netif_queue_stopped(adapter->port[i].dev) ||
			    !seop[i] || ((seop[i] & 0xfff) != 0) || !skb)
				continue;

			if (!skb->cb[0]) {
				u8 ch_mac_addr[ETH_ALEN] = {
					0x0, 0x7, 0x43, 0x0, 0x0, 0x0
				};

				memcpy(skb->data + sizeof(struct cpl_tx_pkt),
				       ch_mac_addr, ETH_ALEN);
				memcpy(skb->data + skb->len - 10,
				       ch_mac_addr, ETH_ALEN);
				skb->cb[0] = 0xff;
			}

			/* bump the reference count to avoid freeing of
			 * the skb once the DMA has completed.
			 */
			skb = skb_get(skb);
			t1_sge_tx(skb, adapter, 0, adapter->port[i].dev);
		}
	}
	mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
}
static void espibug_workaround(unsigned long data)
{
	struct adapter *adapter = (struct adapter *)data;
	struct sge *sge = adapter->sge;

	if (netif_running(adapter->port[0].dev)) {
		struct sk_buff *skb = sge->espibug_skb[0];
		u32 seop = t1_espi_get_mon(adapter, 0x930, 0);

		if ((seop & 0xfff0fff) == 0xfff && skb) {
			if (!skb->cb[0]) {
				u8 ch_mac_addr[ETH_ALEN] =
					{0x0, 0x7, 0x43, 0x0, 0x0, 0x0};
				memcpy(skb->data + sizeof(struct cpl_tx_pkt),
				       ch_mac_addr, ETH_ALEN);
				memcpy(skb->data + skb->len - 10, ch_mac_addr,
				       ETH_ALEN);
				skb->cb[0] = 0xff;
			}

			/* bump the reference count to avoid freeing of the
			 * skb once the DMA has completed.
			 */
			skb = skb_get(skb);
			t1_sge_tx(skb, adapter, 0, adapter->port[0].dev);
		}
	}
	mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
}
/*
 * Creates a t1_sge structure and returns suggested resource parameters.
 */
struct sge * __devinit t1_sge_create(struct adapter *adapter,
				     struct sge_params *p)
{
	struct sge *sge = kzalloc(sizeof(*sge), GFP_KERNEL);
	int i;

	if (!sge)
		return NULL;

	sge->adapter = adapter;
	sge->netdev = adapter->port[0].dev;
	sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2;
	sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;

	for_each_port(adapter, i) {
		sge->port_stats[i] = alloc_percpu(struct sge_port_stats);
		if (!sge->port_stats[i])
			goto nomem_port;
	}

	init_timer(&sge->tx_reclaim_timer);
	sge->tx_reclaim_timer.data = (unsigned long)sge;
	sge->tx_reclaim_timer.function = sge_tx_reclaim_cb;

	if (is_T2(sge->adapter)) {
		init_timer(&sge->espibug_timer);

		if (adapter->params.nports > 1) {
			tx_sched_init(sge);
			sge->espibug_timer.function = espibug_workaround_t204;
		} else
			sge->espibug_timer.function = espibug_workaround;
		sge->espibug_timer.data = (unsigned long)sge->adapter;

		sge->espibug_timeout = 1;
		/* for T204, every 10ms */
		if (adapter->params.nports > 1)
			sge->espibug_timeout = HZ/100;
	}

	p->cmdQ_size[0] = SGE_CMDQ0_E_N;
	p->cmdQ_size[1] = SGE_CMDQ1_E_N;
	p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE;
	p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE;
	if (sge->tx_sched) {
		if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204)
			p->rx_coalesce_usecs = 15;
		else
			p->rx_coalesce_usecs = 50;
	} else
		p->rx_coalesce_usecs = 50;

	p->coalesce_enable = 0;
	p->sample_interval_usecs = 0;

	return sge;
nomem_port:
	while (i >= 0) {
		free_percpu(sge->port_stats[i]);
		--i;
	}
	kfree(sge);
	return NULL;
}
]);