/*
 * Copyright (C) 2005 - 2010 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */

#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");
static unsigned int rx_frag_size = 2048;
module_param(rx_frag_size, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va)
		pci_free_consistent(adapter->pdev, mem->size,
			mem->va, mem->dma);
}
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);
	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	iowrite32(reg, addr);
}
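
/* The be_*q_notify() helpers below write the doorbell registers that tell
 * the adapter how many entries the driver has produced on (or consumed
 * from) a given ring.
 */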
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}
static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
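
/* For both EQs and CQs, num_popped reports how many entries the driver has
 * consumed so the adapter can reclaim them; the arm bit re-enables event
 * generation for the queue.
 */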
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
	if (status)
		return status;

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
			adapter->if_handle, &adapter->pmac_id);
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}
void netdev_stats_update(struct be_adapter *adapter)
{
	struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats.cmd.va);
	struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct net_device_stats *dev_stats = &adapter->netdev->stats;
	struct be_erx_stats *erx_stats = &hw_stats->erx;

	dev_stats->rx_packets = drvr_stats(adapter)->be_rx_pkts;
	dev_stats->tx_packets = drvr_stats(adapter)->be_tx_pkts;
	dev_stats->rx_bytes = drvr_stats(adapter)->be_rx_bytes;
	dev_stats->tx_bytes = drvr_stats(adapter)->be_tx_bytes;

	/* bad pkts received */
	dev_stats->rx_errors = port_stats->rx_crc_errors +
		port_stats->rx_alignment_symbol_errors +
		port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long +
		port_stats->rx_dropped_too_small +
		port_stats->rx_dropped_too_short +
		port_stats->rx_dropped_header_too_small +
		port_stats->rx_dropped_tcp_length +
		port_stats->rx_dropped_runt +
		port_stats->rx_tcp_checksum_errs +
		port_stats->rx_ip_checksum_errs +
		port_stats->rx_udp_checksum_errs;

	/* no space in linux buffers: best possible approximation */
	dev_stats->rx_dropped =
		erx_stats->rx_drops_no_fragments[adapter->rx_obj.q.id];

	/* detailed rx errors */
	dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long;

	/* receive ring buffer overflow */
	dev_stats->rx_over_errors = 0;

	dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

	/* frame alignment errors */
	dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
		port_stats->rx_input_fifo_overflow +
		rxf_stats->rx_drops_no_pbuf;
	/* receiver missed packets */
	dev_stats->rx_missed_errors = 0;

	/* packet transmit problems */
	dev_stats->tx_errors = 0;

	/* no space available in linux */
	dev_stats->tx_dropped = 0;

	dev_stats->multicast = port_stats->rx_multicast_frames;
	dev_stats->collisions = 0;

	/* detailed tx_errors */
	dev_stats->tx_aborted_errors = 0;
	dev_stats->tx_carrier_errors = 0;
	dev_stats->tx_fifo_errors = 0;
	dev_stats->tx_heartbeat_errors = 0;
	dev_stats->tx_window_errors = 0;
}
void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
	struct net_device *netdev = adapter->netdev;

	/* If link came up or went down */
	if (adapter->link_up != link_up) {
		adapter->link_speed = -1;
		if (link_up) {
			netif_start_queue(netdev);
			netif_carrier_on(netdev);
			printk(KERN_INFO "%s: Link up\n", netdev->name);
		} else {
			netif_stop_queue(netdev);
			netif_carrier_off(netdev);
			printk(KERN_INFO "%s: Link down\n", netdev->name);
		}
		adapter->link_up = link_up;
	}
}
/* Update the EQ delay in BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter)
{
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_drvr_stats *stats = &adapter->stats.drvr_stats;
	ulong now = jiffies;
	u32 eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_fps_jiffies)) {
		stats->rx_fps_jiffies = now;
		return;
	}

	/* Update once a second */
	if ((now - stats->rx_fps_jiffies) < HZ)
		return;

	stats->be_rx_fps = (stats->be_rx_frags - stats->be_prev_rx_frags) /
			((now - stats->rx_fps_jiffies) / HZ);

	stats->rx_fps_jiffies = now;
	stats->be_prev_rx_frags = stats->be_rx_frags;
	eqd = stats->be_rx_fps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd)
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

	rx_eq->cur_eqd = eqd;
}
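
/* This is the driver's adaptive interrupt coalescing: the EQ delay is
 * scaled from the RX fragment rate observed over the last second and
 * clamped to the [min_eqd, max_eqd] window set at queue-create time.
 */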
static struct net_device_stats *be_get_stats(struct net_device *dev)
{
	return &dev->stats;
}
static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
	u64 rate = bytes;

	do_div(rate, ticks / HZ);
	rate <<= 3;			/* bytes/sec -> bits/sec */
	do_div(rate, 1000000ul);	/* MB/Sec */

	return rate;
}
static void be_tx_rate_update(struct be_adapter *adapter)
{
	struct be_drvr_stats *stats = drvr_stats(adapter);
	ulong now = jiffies;

	/* Wrapped around? */
	if (time_before(now, stats->be_tx_jiffies)) {
		stats->be_tx_jiffies = now;
		return;
	}

	/* Update tx rate once in two seconds */
	if ((now - stats->be_tx_jiffies) > 2 * HZ) {
		stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
						  - stats->be_tx_bytes_prev,
						 now - stats->be_tx_jiffies);
		stats->be_tx_jiffies = now;
		stats->be_tx_bytes_prev = stats->be_tx_bytes;
	}
}
static void be_tx_stats_update(struct be_adapter *adapter,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_drvr_stats *stats = drvr_stats(adapter);
	stats->be_tx_reqs++;
	stats->be_tx_wrbs += wrb_cnt;
	stats->be_tx_bytes += copied;
	stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->be_tx_stops++;
}
/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (cnt & 1) {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	} else
		*dummy = false;
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}
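
/* The dummy WRB pads each request to an even number of entries, which the
 * queue format evidently requires; wrb_cnt_for_skb() reports via *dummy
 * whether make_tx_wrbs() must append one.
 */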
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}
static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
		bool vlan, u32 wrb_cnt, u32 len)
{
	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_shinfo(skb)->gso_segs > 1 && skb_shinfo(skb)->gso_size) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan && vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag,
			hdr, vlan_tx_tag_get(skb));
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			pci_unmap_single(pdev, dma, wrb->frag_len,
					PCI_DMA_TODEVICE);
		else
			pci_unmap_page(pdev, dma, wrb->frag_len,
					PCI_DMA_TODEVICE);
	}
}
static int make_tx_wrbs(struct be_adapter *adapter,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	u16 map_head;
	struct pci_dev *pdev = adapter->pdev;
	struct sk_buff *first_skb = skb;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb->len - skb->data_len;
		busaddr = pci_map_single(pdev, skb->data, len,
					 PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(pdev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = pci_map_page(pdev, frag->page, frag->page_offset,
				frag->size, PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(pdev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(hdr, first_skb, adapter->vlan_grp ? true : false,
		wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(pdev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *tx_obj = &adapter->tx_obj;
	struct be_queue_info *txq = &tx_obj->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(tx_obj->sent_skb_list[start]);
		tx_obj->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_queue(netdev);
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(adapter, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}
	return status;
}
static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;

	be_eq_notify(adapter, rx_eq->q.id, false, false, 0);
	be_eq_notify(adapter, tx_eq->q.id, false, false, 0);
	adapter->vlan_grp = grp;
	be_eq_notify(adapter, rx_eq->q.id, true, false, 0);
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
}
static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlan_tag[vid] = 1;
	adapter->vlans_added++;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlan_tag[vid] = 0;
	vlan_group_set_device(adapter->vlan_grp, vid, NULL);
	adapter->vlans_added--;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter);
}
static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
				&adapter->mc_cmd_mem);
		goto done;
	}

	be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
		&adapter->mc_cmd_mem);
done:
	return;
}
static void be_rx_rate_update(struct be_adapter *adapter)
{
	struct be_drvr_stats *stats = drvr_stats(adapter);
	ulong now = jiffies;

	/* Wrapped around */
	if (time_before(now, stats->be_rx_jiffies)) {
		stats->be_rx_jiffies = now;
		return;
	}

	/* Update the rate once in two seconds */
	if ((now - stats->be_rx_jiffies) < 2 * HZ)
		return;

	stats->be_rx_rate = be_calc_rate(stats->be_rx_bytes
					  - stats->be_rx_bytes_prev,
					 now - stats->be_rx_jiffies);
	stats->be_rx_jiffies = now;
	stats->be_rx_bytes_prev = stats->be_rx_bytes;
}
static void be_rx_stats_update(struct be_adapter *adapter,
		u32 pktsize, u16 numfrags)
{
	struct be_drvr_stats *stats = drvr_stats(adapter);

	stats->be_rx_compl++;
	stats->be_rx_frags += numfrags;
	stats->be_rx_bytes += pktsize;
	stats->be_rx_pkts++;
}
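
/* do_pkt_csum() returns true when the stack must verify the checksum
 * itself: either rx checksum offload (cso) is off, or the completion flags
 * do not vouch for a valid L3/L4 checksum.
 */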
static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
{
	u8 l4_cksm, ip_version, ipcksm, tcpf = 0, udpf = 0, ipv6_chk;

	l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
	ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
	ip_version = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
	if (ip_version) {
		tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
		udpf = AMAP_GET_BITS(struct amap_eth_rx_compl, udpf, rxcp);
	}
	ipv6_chk = (ip_version && (tcpf || udpf));

	return ((l4_cksm && ipv6_chk && ipcksm) && cso) ? false : true;
}
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &adapter->rx_obj.q;

	rx_page_info = &adapter->rx_obj.page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		pci_unmap_page(adapter->pdev, pci_unmap_addr(rx_page_info, bus),
			adapter->big_page_size, PCI_DMA_FROMDEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}
/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_eth_rx_compl *rxcp)
{
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_rx_page_info *page_info;
	u16 rxq_idx, i, num_rcvd;

	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxq_idx, rxq->len);
	}
}
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter,
			struct sk_buff *skb, struct be_eth_rx_compl *rxcp,
			u16 num_rcvd)
{
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_rx_page_info *page_info;
	u16 rxq_idx, i, j;
	u32 pktsize, hdr_len, curr_frag_len, size;
	u8 *start;

	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);

	page_info = get_rx_page_info(adapter, rxq_idx);

	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(pktsize, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (pktsize <= rx_frag_size) {
		BUG_ON(num_rcvd != 1);
		goto done;
	}

	/* More frags present for this completion */
	size = pktsize;
	for (i = 1, j = 0; i < num_rcvd; i++) {
		size -= curr_frag_len;
		index_inc(&rxq_idx, rxq->len);
		page_info = get_rx_page_info(adapter, rxq_idx);

		curr_frag_len = min(size, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;

		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);

done:
	be_rx_stats_update(adapter, pktsize, num_rcvd);
}
/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_eth_rx_compl *rxcp)
{
	struct sk_buff *skb;
	u32 vlanf, vid;
	u16 num_rcvd;
	u8 vtm;

	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
	/* Is it a flush compl that has no data */
	if (unlikely(num_rcvd == 0))
		return;

	skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		if (net_ratelimit())
			dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
		be_rx_compl_discard(adapter, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, skb, rxcp, num_rcvd);

	if (do_pkt_csum(rxcp, adapter->rx_csum))
		skb->ip_summed = CHECKSUM_NONE;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, adapter->netdev);
	skb->dev = adapter->netdev;

	vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
	vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);

	/* vlanf could be wrongly set in some cards.
	 * ignore if vtm is not set */
	if ((adapter->cap & 0x400) && !vtm)
		vlanf = 0;

	if (unlikely(vlanf)) {
		if (!adapter->vlan_grp || adapter->vlans_added == 0) {
			kfree_skb(skb);
			return;
		}
		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
		vid = be16_to_cpu(vid);
		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
	} else {
		netif_receive_skb(skb);
	}
}
/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
			struct be_eth_rx_compl *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_eq_obj *eq_obj = &adapter->rx_eq;
	u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
	u16 i, rxq_idx = 0, vid, j;
	u8 vtm;

	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
	/* Is it a flush compl that has no data */
	if (unlikely(num_rcvd == 0))
		return;

	pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
	vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);

	/* vlanf could be wrongly set in some cards.
	 * ignore if vtm is not set */
	if ((adapter->cap & 0x400) && !vtm)
		vlanf = 0;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxcp);
		return;
	}

	remaining = pkt_size;
	for (i = 0, j = -1; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
		} else {
			put_page(page_info->page);
		}
		skb_shinfo(skb)->frags[j].size += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = pkt_size;
	skb->data_len = pkt_size;
	skb->truesize += pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (likely(!vlanf)) {
		napi_gro_frags(&eq_obj->napi);
	} else {
		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
		vid = be16_to_cpu(vid);

		if (!adapter->vlan_grp || adapter->vlans_added == 0)
			return;

		vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
	}

	be_rx_stats_update(adapter, pkt_size, num_rcvd);
}
static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
{
	struct be_eth_rx_compl *rxcp = queue_tail_node(&adapter->rx_obj.cq);

	if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
		return NULL;

	be_dws_le_to_cpu(rxcp, sizeof(*rxcp));

	queue_tail_inc(&adapter->rx_obj.cq);
	return rxcp;
}
/* To reset the valid bit, we need to reset the whole word as
 * when walking the queue the valid entries are little-endian
 * and invalid entries are host endian
 */
static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
{
	rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
}
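
/* Receive buffers are carved out of compound pages so that the
 * get_page()/put_page() refcounting done per-fragment operates on the
 * allocation as a single unit.
 */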
static inline struct page *be_alloc_pages(u32 size)
{
	gfp_t alloc_flags = GFP_ATOMIC;
	u32 order = get_order(size);
	if (order > 0)
		alloc_flags |= __GFP_COMP;
	return alloc_pages(alloc_flags, order);
}
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_adapter *adapter)
{
	struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size);
			if (unlikely(!pagep)) {
				drvr_stats(adapter)->be_ethrx_post_fail++;
				break;
			}
			page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
						adapter->big_page_size,
						PCI_DMA_FROMDEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		pci_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		adapter->rx_post_starved = true;
	}
}
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(adapter->pdev, wrb, (unmap_skb_hdr &&
					sent_skb->len > sent_skb->data_len));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	atomic_sub(num_wrbs, &txq->used);

	kfree_skb(sent_skb);
}
static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}
static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	be_eq_notify(adapter, eq_obj->q.id, true, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}
/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}
static void be_rx_q_clean(struct be_adapter *adapter)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
	struct be_eth_rx_compl *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(adapter)) != NULL) {
		be_rx_compl_discard(adapter, rxcp);
		be_rx_compl_reset(rxcp);
		be_cq_notify(adapter, rx_cq->id, true, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
}
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
			be_tx_compl_process(adapter, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			cmpl = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		index_adv(&end_idx,
			wrb_cnt_for_skb(sent_skb, &dummy_wrb) - 1, txq->len);
		be_tx_compl_process(adapter, end_idx);
	}
}
static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}
/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->tx_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
	be_queue_free(adapter, q);

	q = &adapter->tx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}
static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;
	/* Alloc Tx Event queue */
	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
		return -1;

	/* Ask BE to create Tx Event queue */
	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto tx_eq_free;
	/* Alloc TX eth compl queue */
	cq = &adapter->tx_obj.cq;
	if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
		goto tx_eq_destroy;

	/* Ask BE to create Tx eth compl queue */
	if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
		goto tx_cq_free;

	/* Alloc TX eth queue */
	q = &adapter->tx_obj.q;
	if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
		goto tx_cq_destroy;

	/* Ask BE to create Tx eth queue */
	if (be_cmd_txq_create(adapter, q, cq))
		goto tx_q_free;
	return 0;

tx_q_free:
	be_queue_free(adapter, q);
tx_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
tx_cq_free:
	be_queue_free(adapter, cq);
tx_eq_destroy:
	be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
tx_eq_free:
	be_queue_free(adapter, eq);
	return -1;
}
static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->rx_obj.q;
	if (q->created) {
		be_cmd_q_destroy(adapter, q, QTYPE_RXQ);

		/* After the rxq is invalidated, wait for a grace time
		 * of 1ms for all dma to end and the flush compl to arrive
		 */
		mdelay(1);
		be_rx_q_clean(adapter);
	}
	be_queue_free(adapter, q);

	q = &adapter->rx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->rx_eq);

	q = &adapter->rx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}
static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	int rc;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	adapter->rx_eq.max_eqd = BE_MAX_EQD;
	adapter->rx_eq.min_eqd = 0;
	adapter->rx_eq.cur_eqd = 0;
	adapter->rx_eq.enable_aic = true;

	/* Alloc Rx Event queue */
	eq = &adapter->rx_eq.q;
	rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				sizeof(struct be_eq_entry));
	if (rc)
		return rc;

	/* Ask BE to create Rx Event queue */
	rc = be_cmd_eq_create(adapter, eq, adapter->rx_eq.cur_eqd);
	if (rc)
		goto rx_eq_free;

	/* Alloc RX eth compl queue */
	cq = &adapter->rx_obj.cq;
	rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
			sizeof(struct be_eth_rx_compl));
	if (rc)
		goto rx_eq_destroy;

	/* Ask BE to create Rx eth compl queue */
	rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
	if (rc)
		goto rx_cq_free;

	/* Alloc RX eth queue */
	q = &adapter->rx_obj.q;
	rc = be_queue_alloc(adapter, q, RX_Q_LEN, sizeof(struct be_eth_rx_d));
	if (rc)
		goto rx_cq_destroy;

	/* Ask BE to create Rx eth queue */
	rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
		BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle, false);
	if (rc)
		goto rx_q_free;

	return 0;
rx_q_free:
	be_queue_free(adapter, q);
rx_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
rx_cq_free:
	be_queue_free(adapter, cq);
rx_eq_destroy:
	be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
rx_eq_free:
	be_queue_free(adapter, eq);
	return rc;
}
/* There are 8 evt ids per func. Returns the evt id's bit number */
static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id)
{
	return eq_id - 8 * be_pci_func(adapter);
}
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	int isr;

	isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
		(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
	if (!isr)
		return IRQ_NONE;

	event_handle(adapter, &adapter->tx_eq);
	event_handle(adapter, &adapter->rx_eq);

	return IRQ_HANDLED;
}
static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->rx_eq);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq);

	return IRQ_HANDLED;
}
static inline bool do_gro(struct be_adapter *adapter,
			struct be_eth_rx_compl *rxcp)
{
	int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
	int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);

	if (err)
		drvr_stats(adapter)->be_rxcp_err++;

	return (tcp_frame && !err) ? true : false;
}
int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(rx_eq, struct be_adapter, rx_eq);
	struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
	struct be_eth_rx_compl *rxcp;
	int work_done;

	adapter->stats.drvr_stats.be_rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(adapter);
		if (!rxcp)
			break;

		if (do_gro(adapter, rxcp))
			be_rx_compl_process_gro(adapter, rxcp);
		else
			be_rx_compl_process(adapter, rxcp);

		be_rx_compl_reset(rxcp);
	}

	/* Refill the queue */
	if (atomic_read(&adapter->rx_obj.q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(adapter);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		be_cq_notify(adapter, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(adapter, rx_cq->id, false, work_done);
	}
	return work_done;
}
/* As TX and MCC share the same EQ check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_eth_tx_compl *txcp;
	int tx_compl = 0, mcc_compl, status = 0;
	u16 end_idx;

	while ((txcp = be_tx_compl_get(tx_cq))) {
		end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
				wrb_index, txcp);
		be_tx_compl_process(adapter, end_idx);
		tx_compl++;
	}

	mcc_compl = be_process_mcc(adapter, &status);

	napi_complete(napi);

	if (mcc_compl) {
		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	if (tx_compl) {
		be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);

		/* As Tx wrbs have been freed up, wake up netdev queue if
		 * it was stopped due to lack of tx wrbs.
		 */
		if (netif_queue_stopped(adapter->netdev) &&
			atomic_read(&txq->used) < txq->len / 2) {
			netif_wake_queue(adapter->netdev);
		}

		drvr_stats(adapter)->be_tx_events++;
		drvr_stats(adapter)->be_tx_compl += tx_compl;
	}

	return 1;
}
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);

	be_cmd_get_stats(adapter, &adapter->stats.cmd);

	/* Set EQ delay */
	be_rx_eqd_update(adapter);

	be_tx_rate_update(adapter);
	be_rx_rate_update(adapter);

	if (adapter->rx_post_starved) {
		adapter->rx_post_starved = false;
		be_post_rx_frags(adapter);
	}

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
static void be_msix_disable(struct be_adapter *adapter)
{
	if (adapter->msix_enabled) {
		pci_disable_msix(adapter->pdev);
		adapter->msix_enabled = false;
	}
}

static void be_msix_enable(struct be_adapter *adapter)
{
	int i, status;

	for (i = 0; i < BE_NUM_MSIX_VECTORS; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
		BE_NUM_MSIX_VECTORS);
	if (status == 0)
		adapter->msix_enabled = true;
}
static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
{
	return adapter->msix_entries[
		be_evt_bit_get(adapter, eq_id)].vector;
}
static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj->q.id);
	return request_irq(vec, handler, 0, eq_obj->desc, adapter);
}
static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj)
{
	int vec = be_msix_vec_get(adapter, eq_obj->q.id);
	free_irq(vec, adapter);
}
static int be_msix_register(struct be_adapter *adapter)
{
	int status;

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx");
	if (status)
		goto err;

	status = be_request_irq(adapter, &adapter->rx_eq, be_msix_rx, "rx");
	if (status)
		goto free_tx_irq;

	return 0;

free_tx_irq:
	be_free_irq(adapter, &adapter->tx_eq);
err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	pci_disable_msix(adapter->pdev);
	adapter->msix_enabled = false;
	return status;
}
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (adapter->msix_enabled) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!adapter->msix_enabled) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq);
	be_free_irq(adapter, &adapter->rx_eq);
done:
	adapter->isr_registered = false;
}
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	bool link_up;
	int status;
	u8 mac_speed;
	u16 link_speed;

	/* First time posting */
	be_post_rx_frags(adapter);

	napi_enable(&rx_eq->napi);
	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	be_eq_notify(adapter, rx_eq->q.id, true, false, 0);
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Rx compl queue may be in unarmed state; rearm it */
	be_cq_notify(adapter, adapter->rx_obj.cq.id, true, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
			&link_speed);
	if (status)
		goto ret_sts;
	be_link_status_update(adapter, link_up);

	status = be_vid_config(adapter);
	if (status)
		goto ret_sts;

	status = be_cmd_set_flow_control(adapter,
			adapter->tx_fc, adapter->rx_fc);
	if (status)
		goto ret_sts;

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
ret_sts:
	return status;
}
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			pci_free_consistent(adapter->pdev, cmd.size, cmd.va,
					cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
	return status;
}
static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags;
	int status;

	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_MCAST_PROMISCUOUS |
			BE_IF_FLAGS_PROMISCUOUS |
			BE_IF_FLAGS_PASS_L3L4_ERRORS;
	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_PASS_L3L4_ERRORS;

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, false/* pmac_invalid */,
			&adapter->if_handle, &adapter->pmac_id);
	if (status != 0)
		goto do_none;

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto if_destroy;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto tx_qs_destroy;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto rx_qs_destroy;

	adapter->link_speed = -1;

	return 0;

rx_qs_destroy:
	be_rx_queues_destroy(adapter);
tx_qs_destroy:
	be_tx_queues_destroy(adapter);
if_destroy:
	be_cmd_if_destroy(adapter, adapter->if_handle);
do_none:
	return status;
}
static int be_clear(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec;

	cancel_delayed_work_sync(&adapter->work);

	be_async_mcc_disable(adapter);

	netif_stop_queue(netdev);
	netif_carrier_off(netdev);
	adapter->link_up = false;

	be_intr_set(adapter, false);

	if (adapter->msix_enabled) {
		vec = be_msix_vec_get(adapter, tx_eq->q.id);
		synchronize_irq(vec);
		vec = be_msix_vec_get(adapter, rx_eq->q.id);
		synchronize_irq(vec);
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	napi_disable(&rx_eq->napi);
	napi_disable(&tx_eq->napi);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	return 0;
}
#define FW_FILE_HDR_SIGN	"ServerEngines Corp. "
char flash_cookie[2][16] =	{"*** SE FLAS",
				"H DIRECTORY *** "};
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(img_start + image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
		"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/*update redboot only if crc does not match*/
	if (!memcmp(flashed_crc, p, 4))
		return false;

	return true;
}
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	struct flash_comp *pflashcomp;
	int num_comp;

	struct flash_comp gen3_flash_types[9] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3}
	};
	struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = 9;
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = 8;
	}

	for (i = 0; i < num_comp; i++) {
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
				memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size,
			filehdr_size)))
			continue;
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;

			if (!total_bytes)
				flash_op = FLASHROM_OPER_FLASH;
			else
				flash_op = FLASHROM_OPER_SAVE;
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}
static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		return 0;
	if (fhdr->build[0] == '3')
		return BE_GEN3;
	else if (fhdr->build[0] == '2')
		return BE_GEN2;
	else
		return 0;
}
int be_load_fw(struct be_adapter *adapter, u8 *func)
{
	char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
	const struct firmware *fw;
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	int status, i = 0;
	const u8 *p;

	strcpy(fw_file, func);

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;
	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size,
					&flash_cmd.dma);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		for (i = 0; i < fhdr3->num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					i * sizeof(struct image_hdr)));
			if (img_hdr_ptr->imageid == 1) {
				status = be_flash_data(adapter, fw,
						&flash_cmd, fhdr3->num_imgs);
			}
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

fw_exit:
	release_firmware(fw);
	return status;
}
static struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_get_stats		= be_get_stats,
	.ndo_set_rx_mode	= be_set_multicast_list,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_register	= be_vlan_register,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
};
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM |
		NETIF_F_GRO;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM;

	netdev->flags |= IFF_MULTICAST;

	adapter->rx_csum = true;

	/* Default settings for Rx and Tx flow control */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	netif_napi_add(netdev, &adapter->rx_eq.napi, be_poll_rx,
		BE_NAPI_WEIGHT);
	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);

	netif_carrier_off(netdev);
	netif_stop_queue(netdev);
}
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->pcicfg)
		iounmap(adapter->pcicfg);
}
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int pcicfg_reg;

	addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
			pci_resource_len(adapter->pdev, 2));
	if (addr == NULL)
		return -ENOMEM;
	adapter->csr = addr;

	addr = ioremap_nocache(pci_resource_start(adapter->pdev, 4),
			128 * 1024);
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	if (adapter->generation == BE_GEN2)
		pcicfg_reg = 1;
	else
		pcicfg_reg = 0;

	addr = ioremap_nocache(pci_resource_start(adapter->pdev, pcicfg_reg),
			pci_resource_len(adapter->pdev, pcicfg_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->pcicfg = addr;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
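
/* BAR 2 holds the CSR space and BAR 4 the doorbell area; which BAR carries
 * the PCI config shadow differs between chip generations, hence the
 * pcicfg_reg selection above.
 */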
static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		pci_free_consistent(adapter->pdev, mem->size,
			mem->va, mem->dma);

	mem = &adapter->mc_cmd_mem;
	if (mem->va)
		pci_free_consistent(adapter->pdev, mem->size,
			mem->va, mem->dma);
}
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
				mbox_mem_alloc->size, &mbox_mem_alloc->dma);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
	mc_cmd_mem->va = pci_alloc_consistent(adapter->pdev, mc_cmd_mem->size,
			&mc_cmd_mem->dma);
	if (mc_cmd_mem->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);

	spin_lock_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	pci_free_consistent(adapter->pdev, mbox_mem_alloc->size,
		mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_stats_obj *stats = &adapter->stats;
	struct be_dma_mem *cmd = &stats->cmd;

	if (cmd->va)
		pci_free_consistent(adapter->pdev, cmd->size,
			cmd->va, cmd->dma);
}
static int be_stats_init(struct be_adapter *adapter)
{
	struct be_stats_obj *stats = &adapter->stats;
	struct be_dma_mem *cmd = &stats->cmd;

	cmd->size = sizeof(struct be_cmd_req_get_stats);
	cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
	if (cmd->va == NULL)
		return -ENOMEM;
	memset(cmd->va, 0, cmd->size);
	return 0;
}
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
static int be_get_config(struct be_adapter *adapter)
{
	int status;
	u8 mac[ETH_ALEN];

	status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter,
				&adapter->port_num, &adapter->cap);
	if (status)
		return status;

	memset(mac, 0, ETH_ALEN);
	status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, true /* permanent */, 0);
	if (status)
		return status;

	if (!is_valid_ether_addr(mac))
		return -EADDRNOTAVAIL;

	memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
	memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);

	/* With cap bit 0x400 set, only a quarter of the VLAN table is ours */
	if (adapter->cap & 0x400)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	return 0;
}
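/*
 * Worked example for the max_vlans logic above, assuming
 * BE_NUM_VLANS_SUPPORTED is 64 (its value in be.h): with capability bit
 * 0x400 set the function is limited to 64/4 = 16 VLANs, otherwise all 64
 * entries are available to this interface.
 */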
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct be_adapter));
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}

	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	be_netdev_init(netdev);
	SET_NETDEV_DEV(netdev, &pdev->dev);

	be_msix_enable(adapter);

	status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	status = be_cmd_POST(adapter);
	if (status)
		goto ctrl_clean;

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	be_msix_disable(adapter);
	free_netdev(adapter->netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
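/*
 * The error labels in be_probe() unwind strictly in reverse order of
 * acquisition (setup -> stats -> ctrl -> netdev/MSI-X -> regions ->
 * device), so a failure at any step releases exactly the resources
 * acquired before it and nothing more.
 */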
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	/* Cache flow-control settings so be_resume() can restore them */
	be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
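/*
 * be_suspend() and be_resume() are the legacy PCI PM hooks: suspend tears
 * the adapter down after caching the negotiated flow-control state, and
 * resume re-inits the firmware (be_cmd_fw_init) before be_setup() rebuilds
 * the queues and the interface is reopened if it was running.
 */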
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);
	return 0;
}
/*
 * A function level reset (FLR) will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	be_cmd_reset_function(adapter);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	pci_disable_device(pdev);
}
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}
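/*
 * EEH recovery sequence (see Documentation/PCI/pci-error-recovery.txt):
 * the core calls ->error_detected() first, then ->slot_reset() after the
 * slot has been reset, and finally ->resume() once traffic may restart.
 * be_eeh_reset() returns PCI_ERS_RESULT_RECOVERED only after be_cmd_POST()
 * confirms the firmware is ready again.
 */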
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
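/*
 * Usage sketch for the rx_frag_size parameter validated below, assuming
 * the module is built as be2net:
 *
 *	modprobe be2net rx_frag_size=4096
 *
 * Values other than 2048, 4096 or 8192 are rejected and fall back to 2048.
 */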
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
		rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);