/*
 * Copyright (C) 2005 - 2009 ServerEngines
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 */
#include "be.h"
#include <asm/div64.h>
MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");
static unsigned int rx_frag_size = 2048;
module_param(rx_frag_size, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va)
                pci_free_consistent(adapter->pdev, mem->size,
                        mem->va, mem->dma);
}
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma);
        if (!mem->va)
                return -1;
        memset(mem->va, 0, mem->size);
        return 0;
}
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
        u32 reg = ioread32(addr);
        u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable) {
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        } else if (enabled && !enable) {
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        } else {
                printk(KERN_WARNING DRV_NAME
                        ": bad value in membar_int_ctrl reg=0x%x\n", reg);
                return;
        }
        iowrite32(reg, addr);
}
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}
static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
        iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
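/*
 * Editor's note: the four doorbell helpers above share one pattern: the low
 * bits of the 32-bit value select the ring and the remaining fields are
 * OR'ed in before a single iowrite32().  For example, re-arming CQ id 5
 * after reaping 3 completions writes
 *   (5 & DB_CQ_RING_ID_MASK) | (1 << DB_CQ_REARM_SHIFT) |
 *   (3 << DB_CQ_NUM_POPPED_SHIFT)
 * to adapter->db + DB_CQ_OFFSET.
 */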
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;

        if (netif_running(netdev)) {
                status = be_cmd_pmac_del(adapter, adapter->if_handle,
                                adapter->pmac_id);
                if (status)
                        return status;

                status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                adapter->if_handle, &adapter->pmac_id);
        }

        if (!status)
                memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

        return status;
}
static void netdev_stats_update(struct be_adapter *adapter)
{
        struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats.cmd.va);
        struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats *port_stats =
                        &rxf_stats->port[adapter->port_num];
        struct net_device_stats *dev_stats = &adapter->stats.net_stats;
        struct be_erx_stats *erx_stats = &hw_stats->erx;

        dev_stats->rx_packets = port_stats->rx_total_frames;
        dev_stats->tx_packets = port_stats->tx_unicastframes +
                port_stats->tx_multicastframes + port_stats->tx_broadcastframes;
        dev_stats->rx_bytes = (u64) port_stats->rx_bytes_msd << 32 |
                                (u64) port_stats->rx_bytes_lsd;
        dev_stats->tx_bytes = (u64) port_stats->tx_bytes_msd << 32 |
                                (u64) port_stats->tx_bytes_lsd;

        /* bad pkts received */
        dev_stats->rx_errors = port_stats->rx_crc_errors +
                port_stats->rx_alignment_symbol_errors +
                port_stats->rx_in_range_errors +
                port_stats->rx_out_range_errors +
                port_stats->rx_frame_too_long +
                port_stats->rx_dropped_too_small +
                port_stats->rx_dropped_too_short +
                port_stats->rx_dropped_header_too_small +
                port_stats->rx_dropped_tcp_length +
                port_stats->rx_dropped_runt +
                port_stats->rx_tcp_checksum_errs +
                port_stats->rx_ip_checksum_errs +
                port_stats->rx_udp_checksum_errs;

        /* no space in linux buffers: best possible approximation */
        dev_stats->rx_dropped = erx_stats->rx_drops_no_fragments[0];

        /* detailed rx errors */
        dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
                port_stats->rx_out_range_errors +
                port_stats->rx_frame_too_long;

        /* receive ring buffer overflow */
        dev_stats->rx_over_errors = 0;

        dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

        /* frame alignment errors */
        dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f, it's per BE card */
        dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
                                port_stats->rx_input_fifo_overflow +
                                rxf_stats->rx_drops_no_pbuf;

        /* receiver missed packets */
        dev_stats->rx_missed_errors = 0;

        /* packet transmit problems */
        dev_stats->tx_errors = 0;

        /* no space available in linux */
        dev_stats->tx_dropped = 0;

        dev_stats->multicast = port_stats->tx_multicastframes;
        dev_stats->collisions = 0;

        /* detailed tx_errors */
        dev_stats->tx_aborted_errors = 0;
        dev_stats->tx_carrier_errors = 0;
        dev_stats->tx_fifo_errors = 0;
        dev_stats->tx_heartbeat_errors = 0;
        dev_stats->tx_window_errors = 0;
}
void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
        struct net_device *netdev = adapter->netdev;

        /* If link came up or went down */
        if (adapter->link_up != link_up) {
                if (link_up) {
                        netif_start_queue(netdev);
                        netif_carrier_on(netdev);
                        printk(KERN_INFO "%s: Link up\n", netdev->name);
                } else {
                        netif_stop_queue(netdev);
                        netif_carrier_off(netdev);
                        printk(KERN_INFO "%s: Link down\n", netdev->name);
                }
                adapter->link_up = link_up;
        }
}
/* Update the EQ delay in BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter)
{
        struct be_eq_obj *rx_eq = &adapter->rx_eq;
        struct be_drvr_stats *stats = &adapter->stats.drvr_stats;
        ulong now = jiffies;
        u32 eqd;

        if (!rx_eq->enable_aic)
                return;

        /* Wrapped around */
        if (time_before(now, stats->rx_fps_jiffies)) {
                stats->rx_fps_jiffies = now;
                return;
        }

        /* Update once a second */
        if ((now - stats->rx_fps_jiffies) < HZ)
                return;

        stats->be_rx_fps = (stats->be_rx_frags - stats->be_prev_rx_frags) /
                        ((now - stats->rx_fps_jiffies) / HZ);

        stats->rx_fps_jiffies = now;
        stats->be_prev_rx_frags = stats->be_rx_frags;
        eqd = stats->be_rx_fps / 110000;
        if (eqd > rx_eq->max_eqd)
                eqd = rx_eq->max_eqd;
        if (eqd < rx_eq->min_eqd)
                eqd = rx_eq->min_eqd;
        if (eqd != rx_eq->cur_eqd)
                be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

        rx_eq->cur_eqd = eqd;
}
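/*
 * Editor's note: with the computation as shown, the event-queue delay simply
 * scales with the measured rx frags/sec; e.g. a sustained 220,000 frags/sec
 * gives eqd = 2 from the divisor above, which is then clamped to
 * [min_eqd, max_eqd] and pushed to hardware only when it actually changes.
 */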
static struct net_device_stats *be_get_stats(struct net_device *dev)
{
        struct be_adapter *adapter = netdev_priv(dev);

        return &adapter->stats.net_stats;
}
static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
        u64 rate = bytes;

        do_div(rate, ticks / HZ);
        rate <<= 3;                     /* bytes/sec -> bits/sec */
        do_div(rate, 1000000ul);        /* Mbits/sec */

        return rate;
}
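/*
 * Editor's note: be_calc_rate() averages over whole seconds.  For example,
 * 250,000,000 bytes moved over 2*HZ ticks is 125,000,000 bytes/sec, which
 * becomes 1,000,000,000 bits/sec after the shift and is reported as
 * 1000 Mbits/sec after the final division.
 */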
static void be_tx_rate_update(struct be_adapter *adapter)
{
        struct be_drvr_stats *stats = drvr_stats(adapter);
        ulong now = jiffies;

        /* Wrapped around? */
        if (time_before(now, stats->be_tx_jiffies)) {
                stats->be_tx_jiffies = now;
                return;
        }

        /* Update tx rate once in two seconds */
        if ((now - stats->be_tx_jiffies) > 2 * HZ) {
                stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
                                                  - stats->be_tx_bytes_prev,
                                                 now - stats->be_tx_jiffies);
                stats->be_tx_jiffies = now;
                stats->be_tx_bytes_prev = stats->be_tx_bytes;
        }
}
static void be_tx_stats_update(struct be_adapter *adapter,
                        u32 wrb_cnt, u32 copied, bool stopped)
{
        struct be_drvr_stats *stats = drvr_stats(adapter);

        stats->be_tx_wrbs += wrb_cnt;
        stats->be_tx_bytes += copied;
        if (stopped)
                stats->be_tx_stops++;
}
/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (cnt & 1) {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        } else
                *dummy = false;
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}
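/*
 * Editor's note (worked example): an skb with a linear area and one page
 * fragment needs 1 (linear) + 1 (frag) + 1 (header wrb) = 3 entries, so a
 * dummy wrb is added to round the count up to 4; with two page fragments
 * the count is already even (4) and no dummy is needed.
 */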
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}
static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
                bool vlan, u32 wrb_cnt, u32 len)
{
        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_shinfo(skb)->gso_segs > 1 && skb_shinfo(skb)->gso_size) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (vlan && vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag,
                        hdr, vlan_tx_tag_get(skb));
        }

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
static int make_tx_wrbs(struct be_adapter *adapter,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
        u64 busaddr;
        u32 i, copied = 0;
        struct pci_dev *pdev = adapter->pdev;
        struct sk_buff *first_skb = skb;
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;

        atomic_add(wrb_cnt, &txq->used);
        hdr = queue_head_node(txq);
        queue_head_inc(txq);

        if (skb->len > skb->data_len) {
                int len = skb->len - skb->data_len;
                busaddr = pci_map_single(pdev, skb->data, len,
                                PCI_DMA_TODEVICE);
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = pci_map_page(pdev, frag->page,
                                frag->page_offset,
                                frag->size, PCI_DMA_TODEVICE);
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, frag->size);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += frag->size;
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(hdr, first_skb, adapter->vlan_grp ? true : false,
                wrb_cnt, copied);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
}
static int be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *tx_obj = &adapter->tx_obj;
        struct be_queue_info *txq = &tx_obj->q;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head;
        bool dummy_wrb, stopped = false;

        wrb_cnt = wrb_cnt_for_skb(skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);

        /* record the sent skb in the sent_skb table */
        BUG_ON(tx_obj->sent_skb_list[start]);
        tx_obj->sent_skb_list[start] = skb;

        /* Ensure that txq has space for the next skb; Else stop the queue
         * *BEFORE* ringing the tx doorbell, so that we serialize the
         * tx compls of the current transmit which'll wake up the queue
         */
        if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >= txq->len) {
                netif_stop_queue(netdev);
                stopped = true;
        }

        be_txq_notify(adapter, txq->id, wrb_cnt);

        be_tx_stats_update(adapter, wrb_cnt, copied, stopped);
        return NETDEV_TX_OK;
}
static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > BE_MAX_JUMBO_FRAME_SIZE) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU, BE_MAX_JUMBO_FRAME_SIZE);
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}
/*
 * If there are BE_NUM_VLANS_SUPPORTED or fewer VLANs configured,
 * program them in BE.  If more than BE_NUM_VLANS_SUPPORTED are configured,
 * set the BE in promiscuous VLAN mode.
 */
static void be_vid_config(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        u16 vtag[BE_NUM_VLANS_SUPPORTED];
        u16 ntags = 0, i;

        if (adapter->num_vlans <= BE_NUM_VLANS_SUPPORTED) {
                /* Construct VLAN Table to give to HW */
                for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
                        if (adapter->vlan_tag[i]) {
                                vtag[ntags] = cpu_to_le16(i);
                                ntags++;
                        }
                }
                be_cmd_vlan_config(adapter, adapter->if_handle,
                        vtag, ntags, 1, 0);
        } else {
                be_cmd_vlan_config(adapter, adapter->if_handle,
                        NULL, 0, 1, 1);
        }
}
static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_eq_obj *rx_eq = &adapter->rx_eq;
        struct be_eq_obj *tx_eq = &adapter->tx_eq;

        be_eq_notify(adapter, rx_eq->q.id, false, false, 0);
        be_eq_notify(adapter, tx_eq->q.id, false, false, 0);
        adapter->vlan_grp = grp;
        be_eq_notify(adapter, rx_eq->q.id, true, false, 0);
        be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
}
static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->num_vlans++;
        adapter->vlan_tag[vid] = 1;

        be_vid_config(netdev);
}
static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->num_vlans--;
        adapter->vlan_tag[vid] = 0;

        vlan_group_set_device(adapter->vlan_grp, vid, NULL);
        be_vid_config(netdev);
}
static void be_set_multicast_list(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
        }

        if (netdev->flags & IFF_ALLMULTI) {
                be_cmd_multicast_set(adapter, adapter->if_handle, NULL, 0);
                goto done;
        }

        be_cmd_multicast_set(adapter, adapter->if_handle, netdev->mc_list,
                netdev->mc_count);
done:
        return;
}
static void be_rx_rate_update(struct be_adapter *adapter)
{
        struct be_drvr_stats *stats = drvr_stats(adapter);
        ulong now = jiffies;

        /* Wrapped around */
        if (time_before(now, stats->be_rx_jiffies)) {
                stats->be_rx_jiffies = now;
                return;
        }

        /* Update the rate once in two seconds */
        if ((now - stats->be_rx_jiffies) < 2 * HZ)
                return;

        stats->be_rx_rate = be_calc_rate(stats->be_rx_bytes
                                          - stats->be_rx_bytes_prev,
                                         now - stats->be_rx_jiffies);
        stats->be_rx_jiffies = now;
        stats->be_rx_bytes_prev = stats->be_rx_bytes;
}
static void be_rx_stats_update(struct be_adapter *adapter,
                u32 pktsize, u16 numfrags)
{
        struct be_drvr_stats *stats = drvr_stats(adapter);

        stats->be_rx_compl++;
        stats->be_rx_frags += numfrags;
        stats->be_rx_bytes += pktsize;
}
static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
{
        u8 l4_cksm, ip_version, ipcksm, tcpf = 0, udpf = 0, ipv6_chk;

        l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
        ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
        ip_version = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
        if (ip_version) {
                tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
                udpf = AMAP_GET_BITS(struct amap_eth_rx_compl, udpf, rxcp);
        }
        ipv6_chk = (ip_version && (tcpf || udpf));

        return ((l4_cksm && ipv6_chk && ipcksm) && cso) ? false : true;
}
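/*
 * Editor's note: do_pkt_csum() returns true when the stack must verify the
 * checksum itself, i.e. when rx checksum offload is disabled (cso == false)
 * or the completion does not report good L3/L4 checksums for a TCP/UDP
 * frame; the caller maps true to CHECKSUM_NONE and false to
 * CHECKSUM_UNNECESSARY.
 */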
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
{
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &adapter->rx_obj.q;

        rx_page_info = &adapter->rx_obj.page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user)
                pci_unmap_page(adapter->pdev, pci_unmap_addr(rx_page_info, bus),
                        adapter->big_page_size, PCI_DMA_FROMDEVICE);

        atomic_dec(&rxq->used);
        return rx_page_info;
}
/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
                struct be_eth_rx_compl *rxcp)
{
        struct be_queue_info *rxq = &adapter->rx_obj.q;
        struct be_rx_page_info *page_info;
        u16 rxq_idx, i, num_rcvd;

        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxq_idx);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
                index_inc(&rxq_idx, rxq->len);
        }
}
/* skb_fill_rx_data forms a complete skb for an ether frame */
static void skb_fill_rx_data(struct be_adapter *adapter,
                struct sk_buff *skb, struct be_eth_rx_compl *rxcp)
{
        struct be_queue_info *rxq = &adapter->rx_obj.q;
        struct be_rx_page_info *page_info;
        u16 rxq_idx, i, num_rcvd, j;
        u32 pktsize, hdr_len, curr_frag_len, size;
        u8 *start;

        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
        pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

        page_info = get_rx_page_info(adapter, rxq_idx);

        start = page_address(page_info->page) + page_info->page_offset;

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(pktsize, rx_frag_size);

        /* Copy the header portion into skb_data */
        hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
        memcpy(skb->data, start, hdr_len);
        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->tail += curr_frag_len;
        } else {
                skb_shinfo(skb)->nr_frags = 1;
                skb_shinfo(skb)->frags[0].page = page_info->page;
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
                skb->data_len = curr_frag_len - hdr_len;
                skb->tail += hdr_len;
        }
        memset(page_info, 0, sizeof(*page_info));

        if (pktsize <= rx_frag_size) {
                BUG_ON(num_rcvd != 1);
                goto done;
        }

        /* More frags present for this completion */
        size = pktsize;
        for (i = 1, j = 0; i < num_rcvd; i++) {
                size -= curr_frag_len;
                index_inc(&rxq_idx, rxq->len);
                page_info = get_rx_page_info(adapter, rxq_idx);

                curr_frag_len = min(size, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_shinfo(skb)->frags[j].size += curr_frag_len;
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;

                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(j > MAX_SKB_FRAGS);

done:
        be_rx_stats_update(adapter, pktsize, num_rcvd);
        return;
}
/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
                struct be_eth_rx_compl *rxcp)
{
        struct sk_buff *skb;
        u32 vtp, vid;

        vtp = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);

        skb = netdev_alloc_skb(adapter->netdev, BE_HDR_LEN + NET_IP_ALIGN);
        if (!skb) {
                dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
                be_rx_compl_discard(adapter, rxcp);
                return;
        }

        skb_reserve(skb, NET_IP_ALIGN);

        skb_fill_rx_data(adapter, skb, rxcp);

        if (do_pkt_csum(rxcp, adapter->rx_csum))
                skb->ip_summed = CHECKSUM_NONE;
        else
                skb->ip_summed = CHECKSUM_UNNECESSARY;

        skb->truesize = skb->len + sizeof(struct sk_buff);
        skb->protocol = eth_type_trans(skb, adapter->netdev);
        skb->dev = adapter->netdev;

        if (vtp) {
                if (!adapter->vlan_grp || adapter->num_vlans == 0) {
                        kfree_skb(skb);
                        return;
                }
                vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
                vid = be16_to_cpu(vid);
                vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
        } else {
                netif_receive_skb(skb);
        }

        adapter->netdev->last_rx = jiffies;

        return;
}
/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
                struct be_eth_rx_compl *rxcp)
{
        struct be_rx_page_info *page_info;
        struct sk_buff *skb = NULL;
        struct be_queue_info *rxq = &adapter->rx_obj.q;
        struct be_eq_obj *eq_obj = &adapter->rx_eq;
        u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
        u16 i, rxq_idx = 0, vid, j;

        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
        pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
        vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);

        skb = napi_get_frags(&eq_obj->napi);
        if (!skb) {
                be_rx_compl_discard(adapter, rxcp);
                return;
        }

        remaining = pkt_size;
        for (i = 0, j = -1; i < num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxq_idx);

                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (i == 0 || page_info->page_offset == 0) {
                        /* First frag or Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                } else {
                        put_page(page_info->page);
                }
                skb_shinfo(skb)->frags[j].size += curr_frag_len;

                remaining -= curr_frag_len;
                index_inc(&rxq_idx, rxq->len);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(j > MAX_SKB_FRAGS);

        skb_shinfo(skb)->nr_frags = j + 1;
        skb->len = pkt_size;
        skb->data_len = pkt_size;
        skb->truesize += pkt_size;
        skb->ip_summed = CHECKSUM_UNNECESSARY;

        if (likely(!vlanf)) {
                napi_gro_frags(&eq_obj->napi);
        } else {
                vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
                vid = be16_to_cpu(vid);

                if (!adapter->vlan_grp || adapter->num_vlans == 0)
                        return;

                vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
        }

        be_rx_stats_update(adapter, pkt_size, num_rcvd);
        return;
}
static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
{
        struct be_eth_rx_compl *rxcp = queue_tail_node(&adapter->rx_obj.cq);

        if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
                return NULL;

        be_dws_le_to_cpu(rxcp, sizeof(*rxcp));

        queue_tail_inc(&adapter->rx_obj.cq);
        return rxcp;
}
/* To reset the valid bit, we need to reset the whole word as
 * when walking the queue the valid entries are little-endian
 * and invalid entries are host endian
 */
static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
{
        rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
}
static inline struct page *be_alloc_pages(u32 size)
{
        gfp_t alloc_flags = GFP_ATOMIC;
        u32 order = get_order(size);
        if (order > 0)
                alloc_flags |= __GFP_COMP;
        return alloc_pages(alloc_flags, order);
}
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_adapter *adapter)
{
        struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl;
        struct be_rx_page_info *page_info = NULL;
        struct be_queue_info *rxq = &adapter->rx_obj.q;
        struct page *pagep = NULL;
        struct be_eth_rx_d *rxd;
        u64 page_dmaaddr = 0, frag_dmaaddr;
        u32 posted, page_offset = 0;

        page_info = &page_info_tbl[rxq->head];
        for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
                if (!pagep) {
                        pagep = be_alloc_pages(adapter->big_page_size);
                        if (unlikely(!pagep)) {
                                drvr_stats(adapter)->be_ethrx_post_fail++;
                                break;
                        }
                        page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
                                                adapter->big_page_size,
                                                PCI_DMA_FROMDEVICE);
                        page_info->page_offset = 0;
                } else {
                        get_page(pagep);
                        page_info->page_offset = page_offset + rx_frag_size;
                }
                page_offset = page_info->page_offset;
                page_info->page = pagep;
                pci_unmap_addr_set(page_info, bus, page_dmaaddr);
                frag_dmaaddr = page_dmaaddr + page_info->page_offset;

                rxd = queue_head_node(rxq);
                rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
                rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
                queue_head_inc(rxq);

                /* Any space left in the current big page for another frag? */
                if ((page_offset + rx_frag_size + rx_frag_size) >
                                        adapter->big_page_size) {
                        pagep = NULL;
                        page_info->last_page_user = true;
                }
                page_info = &page_info_tbl[rxq->head];
        }
        if (pagep)
                page_info->last_page_user = true;

        if (posted) {
                atomic_add(posted, &rxq->used);
                be_rxq_notify(adapter, rxq->id, posted);
        } else if (atomic_read(&rxq->used) == 0) {
                /* Let be_worker replenish when memory is available */
                adapter->rx_post_starved = true;
        }

        return;
}
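/*
 * Editor's note: with the default rx_frag_size of 2048 and 4K pages,
 * big_page_size works out to 4096, so each allocated page is carved into
 * two receive fragments; only the page_info for the last fragment of a page
 * gets last_page_user set, which is what makes get_rx_page_info() unmap the
 * page exactly once.
 */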
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
        struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

        if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
                return NULL;

        be_dws_le_to_cpu(txcp, sizeof(*txcp));

        txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

        queue_tail_inc(tx_cq);
        return txcp;
}
static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_wrb *wrb;
        struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
        struct sk_buff *sent_skb;
        u64 busaddr;
        u16 cur_index, num_wrbs = 0;

        cur_index = txq->tail;
        sent_skb = sent_skbs[cur_index];
        sent_skbs[cur_index] = NULL;

        do {
                cur_index = txq->tail;
                wrb = queue_tail_node(txq);
                be_dws_le_to_cpu(wrb, sizeof(*wrb));
                busaddr = ((u64)wrb->frag_pa_hi << 32) | (u64)wrb->frag_pa_lo;
                if (busaddr != 0) {
                        pci_unmap_single(adapter->pdev, busaddr,
                                wrb->frag_len, PCI_DMA_TODEVICE);
                }
                num_wrbs++;
                queue_tail_inc(txq);
        } while (cur_index != last_index);

        atomic_sub(num_wrbs, &txq->used);

        kfree_skb(sent_skb);
}
static void be_rx_q_clean(struct be_adapter *adapter)
{
        struct be_rx_page_info *page_info;
        struct be_queue_info *rxq = &adapter->rx_obj.q;
        struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
        struct be_eth_rx_compl *rxcp;
        u16 tail;

        /* First cleanup pending rx completions */
        while ((rxcp = be_rx_compl_get(adapter)) != NULL) {
                be_rx_compl_discard(adapter, rxcp);
                be_rx_compl_reset(rxcp);
                be_cq_notify(adapter, rx_cq->id, true, 1);
        }

        /* Then free posted rx buffers that were not used */
        tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
        for (; tail != rxq->head; index_inc(&tail, rxq->len)) {
                page_info = get_rx_page_info(adapter, tail);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(atomic_read(&rxq->used));
}
static void be_tx_q_clean(struct be_adapter *adapter)
{
        struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
        struct sk_buff *sent_skb;
        struct be_queue_info *txq = &adapter->tx_obj.q;
        u16 last_index;
        bool dummy_wrb;

        while (atomic_read(&txq->used)) {
                sent_skb = sent_skbs[txq->tail];
                last_index = txq->tail;
                index_adv(&last_index,
                        wrb_cnt_for_skb(sent_skb, &dummy_wrb) - 1, txq->len);
                be_tx_compl_process(adapter, last_index);
        }
}
*adapter
)
1041 struct be_queue_info
*q
;
1043 q
= &adapter
->mcc_obj
.q
;
1045 be_cmd_q_destroy(adapter
, q
, QTYPE_MCCQ
);
1046 be_queue_free(adapter
, q
);
1048 q
= &adapter
->mcc_obj
.cq
;
1050 be_cmd_q_destroy(adapter
, q
, QTYPE_CQ
);
1051 be_queue_free(adapter
, q
);
/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
        struct be_queue_info *q, *cq;

        /* Alloc MCC compl queue */
        cq = &adapter->mcc_obj.cq;
        if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
                        sizeof(struct be_mcc_cq_entry)))
                goto err;

        /* Ask BE to create MCC compl queue; share TX's eq */
        if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
                goto mcc_cq_free;

        /* Alloc MCC queue */
        q = &adapter->mcc_obj.q;
        if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
                goto mcc_cq_destroy;

        /* Ask BE to create MCC queue */
        if (be_cmd_mccq_create(adapter, q, cq))
                goto mcc_q_free;

        return 0;

mcc_q_free:
        be_queue_free(adapter, q);
mcc_cq_destroy:
        be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
        be_queue_free(adapter, cq);
err:
        return -1;
}
*adapter
)
1092 struct be_queue_info
*q
;
1094 q
= &adapter
->tx_obj
.q
;
1096 be_cmd_q_destroy(adapter
, q
, QTYPE_TXQ
);
1098 /* No more tx completions can be rcvd now; clean up if there
1099 * are any pending completions or pending tx requests */
1100 be_tx_q_clean(adapter
);
1102 be_queue_free(adapter
, q
);
1104 q
= &adapter
->tx_obj
.cq
;
1106 be_cmd_q_destroy(adapter
, q
, QTYPE_CQ
);
1107 be_queue_free(adapter
, q
);
1109 q
= &adapter
->tx_eq
.q
;
1111 be_cmd_q_destroy(adapter
, q
, QTYPE_EQ
);
1112 be_queue_free(adapter
, q
);
static int be_tx_queues_create(struct be_adapter *adapter)
{
        struct be_queue_info *eq, *q, *cq;

        adapter->tx_eq.max_eqd = 0;
        adapter->tx_eq.min_eqd = 0;
        adapter->tx_eq.cur_eqd = 96;
        adapter->tx_eq.enable_aic = false;
        /* Alloc Tx Event queue */
        eq = &adapter->tx_eq.q;
        if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
                return -1;

        /* Ask BE to create Tx Event queue */
        if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
                goto tx_eq_free;
        /* Alloc TX eth compl queue */
        cq = &adapter->tx_obj.cq;
        if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
                        sizeof(struct be_eth_tx_compl)))
                goto tx_eq_destroy;

        /* Ask BE to create Tx eth compl queue */
        if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
                goto tx_cq_free;

        /* Alloc TX eth queue */
        q = &adapter->tx_obj.q;
        if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
                goto tx_cq_destroy;

        /* Ask BE to create Tx eth queue */
        if (be_cmd_txq_create(adapter, q, cq))
                goto tx_q_free;
        return 0;

tx_q_free:
        be_queue_free(adapter, q);
tx_cq_destroy:
        be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
tx_cq_free:
        be_queue_free(adapter, cq);
tx_eq_destroy:
        be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
tx_eq_free:
        be_queue_free(adapter, eq);
        return -1;
}
static void be_rx_queues_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;

        q = &adapter->rx_obj.q;
        if (q->created) {
                be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
                be_rx_q_clean(adapter);
        }
        be_queue_free(adapter, q);

        q = &adapter->rx_obj.cq;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_CQ);
        be_queue_free(adapter, q);

        q = &adapter->rx_eq.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_EQ);
        be_queue_free(adapter, q);
}
static int be_rx_queues_create(struct be_adapter *adapter)
{
        struct be_queue_info *eq, *q, *cq;
        int rc;

        adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
        adapter->rx_eq.max_eqd = BE_MAX_EQD;
        adapter->rx_eq.min_eqd = 0;
        adapter->rx_eq.cur_eqd = 0;
        adapter->rx_eq.enable_aic = true;

        /* Alloc Rx Event queue */
        eq = &adapter->rx_eq.q;
        rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
                                sizeof(struct be_eq_entry));
        if (rc)
                return rc;

        /* Ask BE to create Rx Event queue */
        rc = be_cmd_eq_create(adapter, eq, adapter->rx_eq.cur_eqd);
        if (rc)
                goto rx_eq_free;

        /* Alloc RX eth compl queue */
        cq = &adapter->rx_obj.cq;
        rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
                        sizeof(struct be_eth_rx_compl));
        if (rc)
                goto rx_eq_destroy;

        /* Ask BE to create Rx eth compl queue */
        rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
        if (rc)
                goto rx_cq_free;

        /* Alloc RX eth queue */
        q = &adapter->rx_obj.q;
        rc = be_queue_alloc(adapter, q, RX_Q_LEN, sizeof(struct be_eth_rx_d));
        if (rc)
                goto rx_cq_destroy;

        /* Ask BE to create Rx eth queue */
        rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
                BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle, false);
        if (rc)
                goto rx_q_free;

        return 0;
rx_q_free:
        be_queue_free(adapter, q);
rx_cq_destroy:
        be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
rx_cq_free:
        be_queue_free(adapter, cq);
rx_eq_destroy:
        be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
rx_eq_free:
        be_queue_free(adapter, eq);
        return rc;
}
static bool event_get(struct be_eq_obj *eq_obj, u16 *rid)
{
        struct be_eq_entry *entry = queue_tail_node(&eq_obj->q);
        u32 evt = entry->evt;

        if (!evt)
                return false;

        evt = le32_to_cpu(evt);
        *rid = (evt >> EQ_ENTRY_RES_ID_SHIFT) & EQ_ENTRY_RES_ID_MASK;
        entry->evt = 0;
        queue_tail_inc(&eq_obj->q);
        return true;
}
static int event_handle(struct be_adapter *adapter, struct be_eq_obj *eq_obj)
{
        u16 rid = 0, num = 0;

        while (event_get(eq_obj, &rid))
                num++;

        /* We can see an interrupt and no event */
        be_eq_notify(adapter, eq_obj->q.id, true, true, num);
        if (num)
                napi_schedule(&eq_obj->napi);

        return num;
}
static irqreturn_t be_intx(int irq, void *dev)
{
        struct be_adapter *adapter = dev;
        int isr;

        isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
                be_pci_func(adapter) * CEV_ISR_SIZE);
        if (!isr)
                return IRQ_NONE;

        event_handle(adapter, &adapter->tx_eq);
        event_handle(adapter, &adapter->rx_eq);

        return IRQ_HANDLED;
}
static irqreturn_t be_msix_rx(int irq, void *dev)
{
        struct be_adapter *adapter = dev;

        event_handle(adapter, &adapter->rx_eq);

        return IRQ_HANDLED;
}
static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
        struct be_adapter *adapter = dev;

        event_handle(adapter, &adapter->tx_eq);

        return IRQ_HANDLED;
}
static inline bool do_gro(struct be_adapter *adapter,
                struct be_eth_rx_compl *rxcp)
{
        int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
        int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);

        if (err)
                drvr_stats(adapter)->be_rxcp_err++;

        return (tcp_frame && !err) ? true : false;
}
int be_poll_rx(struct napi_struct *napi, int budget)
{
        struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
        struct be_adapter *adapter =
                container_of(rx_eq, struct be_adapter, rx_eq);
        struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
        struct be_eth_rx_compl *rxcp;
        u32 work_done;

        for (work_done = 0; work_done < budget; work_done++) {
                rxcp = be_rx_compl_get(adapter);
                if (!rxcp)
                        break;

                if (do_gro(adapter, rxcp))
                        be_rx_compl_process_gro(adapter, rxcp);
                else
                        be_rx_compl_process(adapter, rxcp);

                be_rx_compl_reset(rxcp);
        }

        /* Refill the queue */
        if (atomic_read(&adapter->rx_obj.q.used) < RX_FRAGS_REFILL_WM)
                be_post_rx_frags(adapter);

        /* All consumed */
        if (work_done < budget) {
                napi_complete(napi);
                be_cq_notify(adapter, rx_cq->id, true, work_done);
        } else {
                /* More to be consumed; continue with interrupts disabled */
                be_cq_notify(adapter, rx_cq->id, false, work_done);
        }
        return work_done;
}
void be_process_tx(struct be_adapter *adapter)
{
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
        struct be_eth_tx_compl *txcp;
        u32 num_cmpl = 0;
        u16 end_idx;

        while ((txcp = be_tx_compl_get(tx_cq))) {
                end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
                                        wrb_index, txcp);
                be_tx_compl_process(adapter, end_idx);
                num_cmpl++;
        }

        if (num_cmpl) {
                be_cq_notify(adapter, tx_cq->id, true, num_cmpl);

                /* As Tx wrbs have been freed up, wake up netdev queue if
                 * it was stopped due to lack of tx wrbs.
                 */
                if (netif_queue_stopped(adapter->netdev) &&
                        atomic_read(&txq->used) < txq->len / 2) {
                        netif_wake_queue(adapter->netdev);
                }

                drvr_stats(adapter)->be_tx_events++;
                drvr_stats(adapter)->be_tx_compl += num_cmpl;
        }
}
/* As TX and MCC share the same EQ, check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
        struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
        struct be_adapter *adapter =
                container_of(tx_eq, struct be_adapter, tx_eq);

        napi_complete(napi);

        be_process_tx(adapter);

        be_process_mcc(adapter);

        return 1;
}
*work
)
1410 struct be_adapter
*adapter
=
1411 container_of(work
, struct be_adapter
, work
.work
);
1415 status
= be_cmd_get_stats(adapter
, &adapter
->stats
.cmd
);
1417 netdev_stats_update(adapter
);
1420 be_rx_eqd_update(adapter
);
1422 be_tx_rate_update(adapter
);
1423 be_rx_rate_update(adapter
);
1425 if (adapter
->rx_post_starved
) {
1426 adapter
->rx_post_starved
= false;
1427 be_post_rx_frags(adapter
);
1430 schedule_delayed_work(&adapter
->work
, msecs_to_jiffies(1000));
static void be_msix_enable(struct be_adapter *adapter)
{
        int i, status;

        for (i = 0; i < BE_NUM_MSIX_VECTORS; i++)
                adapter->msix_entries[i].entry = i;

        status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
                BE_NUM_MSIX_VECTORS);
        if (status == 0)
                adapter->msix_enabled = true;
}
static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
{
        return adapter->msix_entries[eq_id - 8 * be_pci_func(adapter)].vector;
}
static int be_msix_register(struct be_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct be_eq_obj *tx_eq = &adapter->tx_eq;
        struct be_eq_obj *rx_eq = &adapter->rx_eq;
        int status, vec;

        sprintf(tx_eq->desc, "%s-tx", netdev->name);
        vec = be_msix_vec_get(adapter, tx_eq->q.id);
        status = request_irq(vec, be_msix_tx_mcc, 0, tx_eq->desc, adapter);
        if (status)
                goto err;

        sprintf(rx_eq->desc, "%s-rx", netdev->name);
        vec = be_msix_vec_get(adapter, rx_eq->q.id);
        status = request_irq(vec, be_msix_rx, 0, rx_eq->desc, adapter);
        if (status) { /* Free TX IRQ */
                vec = be_msix_vec_get(adapter, tx_eq->q.id);
                free_irq(vec, adapter);
                goto err;
        }
        return 0;
err:
        dev_warn(&adapter->pdev->dev,
                "MSIX Request IRQ failed - err %d\n", status);
        pci_disable_msix(adapter->pdev);
        adapter->msix_enabled = false;
        return status;
}
static int be_irq_register(struct be_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int status;

        if (adapter->msix_enabled) {
                status = be_msix_register(adapter);
                if (status == 0)
                        goto done;
        }

        /* INTx */
        netdev->irq = adapter->pdev->irq;
        status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
                        adapter);
        if (status) {
                dev_err(&adapter->pdev->dev,
                        "INTx request IRQ failed - err %d\n", status);
                return status;
        }
done:
        adapter->isr_registered = true;
        return 0;
}
*adapter
)
1509 struct net_device
*netdev
= adapter
->netdev
;
1512 if (!adapter
->isr_registered
)
1516 if (!adapter
->msix_enabled
) {
1517 free_irq(netdev
->irq
, adapter
);
1522 vec
= be_msix_vec_get(adapter
, adapter
->tx_eq
.q
.id
);
1523 free_irq(vec
, adapter
);
1524 vec
= be_msix_vec_get(adapter
, adapter
->rx_eq
.q
.id
);
1525 free_irq(vec
, adapter
);
1527 adapter
->isr_registered
= false;
static int be_open(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_eq_obj *rx_eq = &adapter->rx_eq;
        struct be_eq_obj *tx_eq = &adapter->tx_eq;
        bool link_up;
        int status;

        /* First time posting */
        be_post_rx_frags(adapter);

        napi_enable(&rx_eq->napi);
        napi_enable(&tx_eq->napi);

        be_irq_register(adapter);

        be_intr_set(adapter, true);

        /* The evt queues are created in unarmed state; arm them */
        be_eq_notify(adapter, rx_eq->q.id, true, false, 0);
        be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

        /* Rx compl queue may be in unarmed state; rearm it */
        be_cq_notify(adapter, adapter->rx_obj.cq.id, true, 0);

        status = be_cmd_link_status_query(adapter, &link_up);
        if (status)
                return status;
        be_link_status_update(adapter, link_up);

        schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
        return 0;
}
static int be_setup(struct be_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        u32 if_flags;
        int status;

        if_flags = BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_PROMISCUOUS |
                BE_IF_FLAGS_MCAST_PROMISCUOUS | BE_IF_FLAGS_UNTAGGED |
                BE_IF_FLAGS_PASS_L3L4_ERRORS;
        status = be_cmd_if_create(adapter, if_flags, netdev->dev_addr,
                        false/* pmac_invalid */, &adapter->if_handle,
                        &adapter->pmac_id);
        if (status != 0)
                goto do_none;

        be_vid_config(netdev);

        status = be_cmd_set_flow_control(adapter, true, true);
        if (status != 0)
                goto if_destroy;

        status = be_tx_queues_create(adapter);
        if (status != 0)
                goto if_destroy;

        status = be_rx_queues_create(adapter);
        if (status != 0)
                goto tx_qs_destroy;

        status = be_mcc_queues_create(adapter);
        if (status != 0)
                goto rx_qs_destroy;

        return 0;

rx_qs_destroy:
        be_rx_queues_destroy(adapter);
tx_qs_destroy:
        be_tx_queues_destroy(adapter);
if_destroy:
        be_cmd_if_destroy(adapter, adapter->if_handle);
do_none:
        return status;
}
*adapter
)
1612 be_rx_queues_destroy(adapter
);
1613 be_tx_queues_destroy(adapter
);
1615 be_cmd_if_destroy(adapter
, adapter
->if_handle
);
1617 be_mcc_queues_destroy(adapter
);
static int be_close(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_eq_obj *rx_eq = &adapter->rx_eq;
        struct be_eq_obj *tx_eq = &adapter->tx_eq;
        int vec;

        cancel_delayed_work_sync(&adapter->work);

        netif_stop_queue(netdev);
        netif_carrier_off(netdev);
        adapter->link_up = false;

        be_intr_set(adapter, false);

        if (adapter->msix_enabled) {
                vec = be_msix_vec_get(adapter, tx_eq->q.id);
                synchronize_irq(vec);
                vec = be_msix_vec_get(adapter, rx_eq->q.id);
                synchronize_irq(vec);
        } else {
                synchronize_irq(netdev->irq);
        }
        be_irq_unregister(adapter);

        napi_disable(&rx_eq->napi);
        napi_disable(&tx_eq->napi);

        return 0;
}
static struct net_device_ops be_netdev_ops = {
        .ndo_open               = be_open,
        .ndo_stop               = be_close,
        .ndo_start_xmit         = be_xmit,
        .ndo_get_stats          = be_get_stats,
        .ndo_set_rx_mode        = be_set_multicast_list,
        .ndo_set_mac_address    = be_mac_addr_set,
        .ndo_change_mtu         = be_change_mtu,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_vlan_rx_register   = be_vlan_register,
        .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
        .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
};
static void be_netdev_init(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
                NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_IP_CSUM |
                NETIF_F_IPV6_CSUM | NETIF_F_GRO;

        netdev->flags |= IFF_MULTICAST;

        adapter->rx_csum = true;

        BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

        SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

        netif_napi_add(netdev, &adapter->rx_eq.napi, be_poll_rx,
                BE_NAPI_WEIGHT);
        netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
                BE_NAPI_WEIGHT);

        netif_carrier_off(netdev);
        netif_stop_queue(netdev);
}
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
        if (adapter->csr)
                iounmap(adapter->csr);
        if (adapter->db)
                iounmap(adapter->db);
        if (adapter->pcicfg)
                iounmap(adapter->pcicfg);
}
*adapter
)
1705 addr
= ioremap_nocache(pci_resource_start(adapter
->pdev
, 2),
1706 pci_resource_len(adapter
->pdev
, 2));
1709 adapter
->csr
= addr
;
1711 addr
= ioremap_nocache(pci_resource_start(adapter
->pdev
, 4),
1717 addr
= ioremap_nocache(pci_resource_start(adapter
->pdev
, 1),
1718 pci_resource_len(adapter
->pdev
, 1));
1721 adapter
->pcicfg
= addr
;
1725 be_unmap_pci_bars(adapter
);
1730 static void be_ctrl_cleanup(struct be_adapter
*adapter
)
1732 struct be_dma_mem
*mem
= &adapter
->mbox_mem_alloced
;
1734 be_unmap_pci_bars(adapter
);
1737 pci_free_consistent(adapter
->pdev
, mem
->size
,
1741 static int be_ctrl_init(struct be_adapter
*adapter
)
1743 struct be_dma_mem
*mbox_mem_alloc
= &adapter
->mbox_mem_alloced
;
1744 struct be_dma_mem
*mbox_mem_align
= &adapter
->mbox_mem
;
1747 status
= be_map_pci_bars(adapter
);
1751 mbox_mem_alloc
->size
= sizeof(struct be_mcc_mailbox
) + 16;
1752 mbox_mem_alloc
->va
= pci_alloc_consistent(adapter
->pdev
,
1753 mbox_mem_alloc
->size
, &mbox_mem_alloc
->dma
);
1754 if (!mbox_mem_alloc
->va
) {
1755 be_unmap_pci_bars(adapter
);
1758 mbox_mem_align
->size
= sizeof(struct be_mcc_mailbox
);
1759 mbox_mem_align
->va
= PTR_ALIGN(mbox_mem_alloc
->va
, 16);
1760 mbox_mem_align
->dma
= PTR_ALIGN(mbox_mem_alloc
->dma
, 16);
1761 memset(mbox_mem_align
->va
, 0, sizeof(struct be_mcc_mailbox
));
1762 spin_lock_init(&adapter
->mbox_lock
);
1763 spin_lock_init(&adapter
->mcc_lock
);
1764 spin_lock_init(&adapter
->mcc_cq_lock
);
static void be_stats_cleanup(struct be_adapter *adapter)
{
        struct be_stats_obj *stats = &adapter->stats;
        struct be_dma_mem *cmd = &stats->cmd;

        if (cmd->va)
                pci_free_consistent(adapter->pdev, cmd->size,
                        cmd->va, cmd->dma);
}
static int be_stats_init(struct be_adapter *adapter)
{
        struct be_stats_obj *stats = &adapter->stats;
        struct be_dma_mem *cmd = &stats->cmd;

        cmd->size = sizeof(struct be_cmd_req_get_stats);
        cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
        if (cmd->va == NULL)
                return -1;
        return 0;
}
static void __devexit be_remove(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);

        if (!adapter)
                return;

        unregister_netdev(adapter->netdev);

        be_clear(adapter);

        be_stats_cleanup(adapter);

        be_ctrl_cleanup(adapter);

        if (adapter->msix_enabled) {
                pci_disable_msix(adapter->pdev);
                adapter->msix_enabled = false;
        }

        pci_set_drvdata(pdev, NULL);
        pci_release_regions(pdev);
        pci_disable_device(pdev);

        free_netdev(adapter->netdev);
}
*adapter
)
1821 status
= be_cmd_POST(adapter
);
1825 status
= be_cmd_get_fw_ver(adapter
, adapter
->fw_ver
);
1829 status
= be_cmd_query_fw_cfg(adapter
, &adapter
->port_num
);
static int __devinit be_probe(struct pci_dev *pdev,
                const struct pci_device_id *pdev_id)
{
        int status = 0;
        struct be_adapter *adapter;
        struct net_device *netdev;
        u8 mac[ETH_ALEN];

        status = pci_enable_device(pdev);
        if (status)
                goto do_none;

        status = pci_request_regions(pdev, DRV_NAME);
        if (status)
                goto disable_dev;
        pci_set_master(pdev);

        netdev = alloc_etherdev(sizeof(struct be_adapter));
        if (netdev == NULL) {
                status = -ENOMEM;
                goto rel_reg;
        }
        adapter = netdev_priv(netdev);
        adapter->pdev = pdev;
        pci_set_drvdata(pdev, adapter);
        adapter->netdev = netdev;

        be_msix_enable(adapter);

        status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
        if (!status) {
                netdev->features |= NETIF_F_HIGHDMA;
        } else {
                status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (status) {
                        dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
                        goto free_netdev;
                }
        }

        status = be_ctrl_init(adapter);
        if (status)
                goto free_netdev;

        status = be_stats_init(adapter);
        if (status)
                goto ctrl_clean;

        status = be_hw_up(adapter);
        if (status)
                goto stats_clean;

        status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
                        true /* permanent */, 0);
        if (status)
                goto stats_clean;
        memcpy(netdev->dev_addr, mac, ETH_ALEN);

        INIT_DELAYED_WORK(&adapter->work, be_worker);
        be_netdev_init(netdev);
        SET_NETDEV_DEV(netdev, &adapter->pdev->dev);

        status = be_setup(adapter);
        if (status)
                goto stats_clean;
        status = register_netdev(netdev);
        if (status != 0)
                goto unsetup;

        dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
        return 0;

unsetup:
        be_clear(adapter);
stats_clean:
        be_stats_cleanup(adapter);
ctrl_clean:
        be_ctrl_cleanup(adapter);
free_netdev:
        free_netdev(adapter->netdev);
rel_reg:
        pci_release_regions(pdev);
disable_dev:
        pci_disable_device(pdev);
do_none:
        dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
        return status;
}
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        netif_device_detach(netdev);
        if (netif_running(netdev)) {
                rtnl_lock();
                be_close(netdev);
                rtnl_unlock();
        }
        be_clear(adapter);

        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, pci_choose_state(pdev, state));
        return 0;
}
*pdev
)
1944 struct be_adapter
*adapter
= pci_get_drvdata(pdev
);
1945 struct net_device
*netdev
= adapter
->netdev
;
1947 netif_device_detach(netdev
);
1949 status
= pci_enable_device(pdev
);
1953 pci_set_power_state(pdev
, 0);
1954 pci_restore_state(pdev
);
1956 if (netif_running(netdev
)) {
1962 netif_device_attach(netdev
);
1966 static struct pci_driver be_driver
= {
1968 .id_table
= be_dev_ids
,
1970 .remove
= be_remove
,
1971 .suspend
= be_suspend
,
static int __init be_init_module(void)
{
        if (rx_frag_size != 8192 && rx_frag_size != 4096
                && rx_frag_size != 2048) {
                printk(KERN_WARNING DRV_NAME
                        " : Module param rx_frag_size must be 2048/4096/8192."
                        " Using 2048\n");
                rx_frag_size = 2048;
        }
        /* Ensure rx_frag_size is aligned to cache line */
        if (SKB_DATA_ALIGN(rx_frag_size) != rx_frag_size) {
                printk(KERN_WARNING DRV_NAME
                        " : Bad module param rx_frag_size. Using 2048\n");
                rx_frag_size = 2048;
        }

        return pci_register_driver(&be_driver);
}
module_init(be_init_module);
static void __exit be_exit_module(void)
{
        pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);