/*
 * Copyright (C) 2005 - 2009 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */

#include "be.h"
#include <asm/div64.h>
MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");
static unsigned int rx_frag_size = 2048;
module_param(rx_frag_size, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va)
		pci_free_consistent(adapter->pdev, mem->size,
			mem->va, mem->dma);
}
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);
	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	iowrite32(reg, addr);
}
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}
static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
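
/*
 * Doorbell encoding example (illustrative only): for cq-id 5, with re-arm
 * requested and 3 entries consumed, the word written above is
 * (5 & DB_CQ_RING_ID_MASK) | (1 << DB_CQ_REARM_SHIFT) |
 * (3 << DB_CQ_NUM_POPPED_SHIFT). Writing with arm == false and
 * num_popped == 0 leaves the CQ unarmed without consuming any entries.
 */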
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
	if (status)
		return status;

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
			adapter->if_handle, &adapter->pmac_id);
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}
void netdev_stats_update(struct be_adapter *adapter)
{
	struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats.cmd.va);
	struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct net_device_stats *dev_stats = &adapter->netdev->stats;
	struct be_erx_stats *erx_stats = &hw_stats->erx;

	dev_stats->rx_packets = port_stats->rx_total_frames;
	dev_stats->tx_packets = port_stats->tx_unicastframes +
		port_stats->tx_multicastframes + port_stats->tx_broadcastframes;
	dev_stats->rx_bytes = (u64) port_stats->rx_bytes_msd << 32 |
				(u64) port_stats->rx_bytes_lsd;
	dev_stats->tx_bytes = (u64) port_stats->tx_bytes_msd << 32 |
				(u64) port_stats->tx_bytes_lsd;

	/* bad pkts received */
	dev_stats->rx_errors = port_stats->rx_crc_errors +
		port_stats->rx_alignment_symbol_errors +
		port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long +
		port_stats->rx_dropped_too_small +
		port_stats->rx_dropped_too_short +
		port_stats->rx_dropped_header_too_small +
		port_stats->rx_dropped_tcp_length +
		port_stats->rx_dropped_runt +
		port_stats->rx_tcp_checksum_errs +
		port_stats->rx_ip_checksum_errs +
		port_stats->rx_udp_checksum_errs;

	/* no space in linux buffers: best possible approximation */
	dev_stats->rx_dropped =
		erx_stats->rx_drops_no_fragments[adapter->rx_obj.q.id];

	/* detailed rx errors */
	dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long;

	/* receive ring buffer overflow */
	dev_stats->rx_over_errors = 0;

	dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

	/* frame alignment errors */
	dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
					port_stats->rx_input_fifo_overflow +
					rxf_stats->rx_drops_no_pbuf;
	/* receiver missed packets */
	dev_stats->rx_missed_errors = 0;

	/* packet transmit problems */
	dev_stats->tx_errors = 0;

	/* no space available in linux */
	dev_stats->tx_dropped = 0;

	dev_stats->multicast = port_stats->rx_multicast_frames;
	dev_stats->collisions = 0;

	/* detailed tx_errors */
	dev_stats->tx_aborted_errors = 0;
	dev_stats->tx_carrier_errors = 0;
	dev_stats->tx_fifo_errors = 0;
	dev_stats->tx_heartbeat_errors = 0;
	dev_stats->tx_window_errors = 0;
}
void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
	struct net_device *netdev = adapter->netdev;

	/* If link came up or went down */
	if (adapter->link_up != link_up) {
		adapter->link_speed = -1;
		if (link_up) {
			netif_start_queue(netdev);
			netif_carrier_on(netdev);
			printk(KERN_INFO "%s: Link up\n", netdev->name);
		} else {
			netif_stop_queue(netdev);
			netif_carrier_off(netdev);
			printk(KERN_INFO "%s: Link down\n", netdev->name);
		}
		adapter->link_up = link_up;
	}
}
/* Update the EQ delay in BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter)
{
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_drvr_stats *stats = &adapter->stats.drvr_stats;
	ulong now = jiffies;
	u32 eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_fps_jiffies)) {
		stats->rx_fps_jiffies = now;
		return;
	}

	/* Update once a second */
	if ((now - stats->rx_fps_jiffies) < HZ)
		return;

	stats->be_rx_fps = (stats->be_rx_frags - stats->be_prev_rx_frags) /
			((now - stats->rx_fps_jiffies) / HZ);

	stats->rx_fps_jiffies = now;
	stats->be_prev_rx_frags = stats->be_rx_frags;
	eqd = stats->be_rx_fps / 110000;

	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd)
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

	rx_eq->cur_eqd = eqd;
}
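
/*
 * Example of the AIC heuristic above: ~1,100,000 rx frags/sec yields
 * eqd = 10; anything below 10 is rounded down to 0 (no interrupt delay)
 * and larger values are clamped to [min_eqd, max_eqd]. The mailbox cmd
 * be_cmd_modify_eqd() is issued only when the value actually changes,
 * so a steady traffic rate costs no extra fw commands.
 */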
static struct net_device_stats *be_get_stats(struct net_device *dev)
{
	return &dev->stats;
}
static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
	u64 rate = bytes;

	do_div(rate, ticks / HZ);
	rate <<= 3;			/* bytes/sec -> bits/sec */
	do_div(rate, 1000000ul);	/* Mbits/sec */

	return rate;
}
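
/*
 * Worked example: 250,000,000 bytes in 2 * HZ ticks (2 seconds) gives
 * 125,000,000 bytes/sec; << 3 makes that 1,000,000,000 bits/sec and the
 * final division by 1000000 returns 1000, i.e. the rate is in Mbits/sec.
 * Callers invoke this only after at least 2 * HZ ticks have elapsed, so
 * ticks / HZ is never zero.
 */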
static void be_tx_rate_update(struct be_adapter *adapter)
{
	struct be_drvr_stats *stats = drvr_stats(adapter);
	ulong now = jiffies;

	/* Wrapped around? */
	if (time_before(now, stats->be_tx_jiffies)) {
		stats->be_tx_jiffies = now;
		return;
	}

	/* Update tx rate once in two seconds */
	if ((now - stats->be_tx_jiffies) > 2 * HZ) {
		stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
						  - stats->be_tx_bytes_prev,
						 now - stats->be_tx_jiffies);
		stats->be_tx_jiffies = now;
		stats->be_tx_bytes_prev = stats->be_tx_bytes;
	}
}
static void be_tx_stats_update(struct be_adapter *adapter,
			u32 wrb_cnt, u32 copied, bool stopped)
{
	struct be_drvr_stats *stats = drvr_stats(adapter);

	stats->be_tx_wrbs += wrb_cnt;
	stats->be_tx_bytes += copied;
	if (stopped)
		stats->be_tx_stops++;
}
/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (cnt & 1) {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	} else
		*dummy = false;
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}
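
/*
 * Example: a linear skb with 2 frags needs 1 + 2 + 1 (hdr) = 4 WRBs,
 * already even, so *dummy is false. With 1 frag the count would be 3;
 * a dummy WRB is then appended and *dummy set true, keeping the
 * per-packet WRB count even as the dummy-WRB logic above requires.
 */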
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}
static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
		bool vlan, u32 wrb_cnt, u32 len)
{
	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_shinfo(skb)->gso_segs > 1 && skb_shinfo(skb)->gso_size) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan && vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag,
			hdr, vlan_tx_tag_get(skb));
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
static int make_tx_wrbs(struct be_adapter *adapter,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	u64 busaddr;
	u32 i, copied = 0;
	struct pci_dev *pdev = adapter->pdev;
	struct sk_buff *first_skb = skb;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;

	hdr = queue_head_node(txq);
	atomic_add(wrb_cnt, &txq->used);
	queue_head_inc(txq);

	if (skb->len > skb->data_len) {
		int len = skb->len - skb->data_len;
		busaddr = pci_map_single(pdev, skb->data, len,
					 PCI_DMA_TODEVICE);
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = pci_map_page(pdev, frag->page,
				       frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(hdr, first_skb, adapter->vlan_grp ? true : false,
		wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
}
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *tx_obj = &adapter->tx_obj;
	struct be_queue_info *txq = &tx_obj->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(tx_obj->sent_skb_list[start]);
		tx_obj->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_queue(netdev);
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(adapter, wrb_cnt, copied, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > BE_MAX_JUMBO_FRAME_SIZE) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU, BE_MAX_JUMBO_FRAME_SIZE);
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}
/*
 * If there are BE_NUM_VLANS_SUPPORTED or fewer VLANs configured,
 * program them in BE. If more than BE_NUM_VLANS_SUPPORTED are configured,
 * set the BE in promiscuous VLAN mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;

	if (adapter->num_vlans <= BE_NUM_VLANS_SUPPORTED) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}
	return status;
}
static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;

	be_eq_notify(adapter, rx_eq->q.id, false, false, 0);
	be_eq_notify(adapter, tx_eq->q.id, false, false, 0);
	adapter->vlan_grp = grp;
	be_eq_notify(adapter, rx_eq->q.id, true, false, 0);
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
}
static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->num_vlans++;
	adapter->vlan_tag[vid] = 1;

	be_vid_config(adapter);
}
static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->num_vlans--;
	adapter->vlan_tag[vid] = 0;

	vlan_group_set_device(adapter->vlan_grp, vid, NULL);
	be_vid_config(adapter);
}
static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_multicast_set(adapter, adapter->if_handle, NULL, 0,
				&adapter->mc_cmd_mem);
		goto done;
	}

	be_cmd_multicast_set(adapter, adapter->if_handle, netdev->mc_list,
		netdev_mc_count(netdev), &adapter->mc_cmd_mem);
done:
	return;
}
static void be_rx_rate_update(struct be_adapter *adapter)
{
	struct be_drvr_stats *stats = drvr_stats(adapter);
	ulong now = jiffies;

	/* Wrapped around */
	if (time_before(now, stats->be_rx_jiffies)) {
		stats->be_rx_jiffies = now;
		return;
	}

	/* Update the rate once in two seconds */
	if ((now - stats->be_rx_jiffies) < 2 * HZ)
		return;

	stats->be_rx_rate = be_calc_rate(stats->be_rx_bytes
					  - stats->be_rx_bytes_prev,
					 now - stats->be_rx_jiffies);
	stats->be_rx_jiffies = now;
	stats->be_rx_bytes_prev = stats->be_rx_bytes;
}
static void be_rx_stats_update(struct be_adapter *adapter,
			u32 pktsize, u16 numfrags)
{
	struct be_drvr_stats *stats = drvr_stats(adapter);

	stats->be_rx_compl++;
	stats->be_rx_frags += numfrags;
	stats->be_rx_bytes += pktsize;
}
static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
{
	u8 l4_cksm, ip_version, ipcksm, tcpf = 0, udpf = 0, ipv6_chk;

	l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
	ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
	ip_version = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
	if (ip_version) {
		tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
		udpf = AMAP_GET_BITS(struct amap_eth_rx_compl, udpf, rxcp);
	}
	ipv6_chk = (ip_version && (tcpf || udpf));

	return ((l4_cksm && ipv6_chk && ipcksm) && cso) ? false : true;
}
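
/*
 * A true return above means the stack must verify the checksum itself
 * (CHECKSUM_NONE); false means verification can be skipped
 * (CHECKSUM_UNNECESSARY). The hw l4/ip checksum results are trusted only
 * for TCP/UDP frames and only when rx checksum offload (cso) is enabled.
 */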
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &adapter->rx_obj.q;

	rx_page_info = &adapter->rx_obj.page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		pci_unmap_page(adapter->pdev, pci_unmap_addr(rx_page_info, bus),
			adapter->big_page_size, PCI_DMA_FROMDEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}
/* Throwaway the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
			struct be_eth_rx_compl *rxcp)
{
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_rx_page_info *page_info;
	u16 rxq_idx, i, num_rcvd;

	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxq_idx, rxq->len);
	}
}
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter,
			struct sk_buff *skb, struct be_eth_rx_compl *rxcp)
{
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_rx_page_info *page_info;
	u16 rxq_idx, i, num_rcvd, j;
	u32 pktsize, hdr_len, curr_frag_len, size;
	u8 *start;

	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

	page_info = get_rx_page_info(adapter, rxq_idx);

	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(pktsize, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	memset(page_info, 0, sizeof(*page_info));

	if (pktsize <= rx_frag_size) {
		BUG_ON(num_rcvd != 1);
		goto done;
	}

	/* More frags present for this completion */
	size = pktsize;
	for (i = 1, j = 0; i < num_rcvd; i++) {
		size -= curr_frag_len;
		index_inc(&rxq_idx, rxq->len);
		page_info = get_rx_page_info(adapter, rxq_idx);

		curr_frag_len = min(size, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;

		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

done:
	be_rx_stats_update(adapter, pktsize, num_rcvd);
	return;
}
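
/*
 * Example with the default rx_frag_size of 2048: a 9000-byte jumbo frame
 * arrives as 5 rx frags. The first BE_HDR_LEN bytes are copied into the
 * skb header area; the rest of frag 0 and frags 1..4 are attached as page
 * fragments, with consecutive frags that share a physical page coalesced
 * into a single skb frag slot.
 */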
/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_eth_rx_compl *rxcp)
{
	struct sk_buff *skb;
	u32 vlanf, vid;
	u8 vtm;

	vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
	vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);

	/* vlanf could be wrongly set in some cards.
	 * ignore if vtm is not set */
	if ((adapter->cap & 0x400) && !vtm)
		vlanf = 0;

	skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		if (net_ratelimit())
			dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
		be_rx_compl_discard(adapter, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, skb, rxcp);

	if (do_pkt_csum(rxcp, adapter->rx_csum))
		skb->ip_summed = CHECKSUM_NONE;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, adapter->netdev);
	skb->dev = adapter->netdev;

	if (vlanf) {
		if (!adapter->vlan_grp || adapter->num_vlans == 0) {
			kfree_skb(skb);
			return;
		}
		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
		vid = be16_to_cpu(vid);
		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
	} else {
		netif_receive_skb(skb);
	}
	return;
}
/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
			struct be_eth_rx_compl *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_eq_obj *eq_obj = &adapter->rx_eq;
	u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
	u16 i, rxq_idx = 0, vid, j;
	u8 vtm;

	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
	pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
	vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);

	/* vlanf could be wrongly set in some cards.
	 * ignore if vtm is not set */
	if ((adapter->cap & 0x400) && !vtm)
		vlanf = 0;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxcp);
		return;
	}

	remaining = pkt_size;
	for (i = 0, j = -1; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
		} else {
			put_page(page_info->page);
		}
		skb_shinfo(skb)->frags[j].size += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = pkt_size;
	skb->data_len = pkt_size;
	skb->truesize += pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (likely(!vlanf)) {
		napi_gro_frags(&eq_obj->napi);
	} else {
		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
		vid = be16_to_cpu(vid);

		if (!adapter->vlan_grp || adapter->num_vlans == 0)
			return;

		vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
	}

	be_rx_stats_update(adapter, pkt_size, num_rcvd);
	return;
}
static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
{
	struct be_eth_rx_compl *rxcp = queue_tail_node(&adapter->rx_obj.cq);

	if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
		return NULL;

	be_dws_le_to_cpu(rxcp, sizeof(*rxcp));

	queue_tail_inc(&adapter->rx_obj.cq);
	return rxcp;
}
/* To reset the valid bit, we need to reset the whole word as
 * when walking the queue the valid entries are little-endian
 * and invalid entries are host endian
 */
static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
{
	rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
}
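
/*
 * The valid bit is the producer/consumer handshake on the completion
 * ring: hw sets it when posting an entry, be_rx_compl_get() polls it to
 * find new work, and be_rx_compl_reset() clears the word so the slot
 * reads as empty when the ring wraps around.
 */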
static inline struct page *be_alloc_pages(u32 size)
{
	gfp_t alloc_flags = GFP_ATOMIC;
	u32 order = get_order(size);
	if (order > 0)
		alloc_flags |= __GFP_COMP;
	return alloc_pages(alloc_flags, order);
}
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_adapter *adapter)
{
	struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size);
			if (unlikely(!pagep)) {
				drvr_stats(adapter)->be_ethrx_post_fail++;
				break;
			}
			page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
						adapter->big_page_size,
						PCI_DMA_FROMDEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		pci_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		adapter->rx_post_starved = true;
	}

	return;
}
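
/*
 * Example: with rx_frag_size = 2048 on a 4K-page system, big_page_size
 * (computed in be_rx_queues_create) is 4096, so each page is carved into
 * two 2048-byte frags; get_page() takes an extra reference for the second
 * frag and last_page_user marks the frag whose recycling must also unmap
 * the page.
 */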
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	u64 busaddr;
	u16 cur_index, num_wrbs = 0;

	cur_index = txq->tail;
	sent_skb = sent_skbs[cur_index];
	BUG_ON(!sent_skb);
	sent_skbs[cur_index] = NULL;
	wrb = queue_tail_node(txq);
	be_dws_le_to_cpu(wrb, sizeof(*wrb));
	busaddr = ((u64)wrb->frag_pa_hi << 32) | (u64)wrb->frag_pa_lo;
	if (busaddr != 0) {
		pci_unmap_single(adapter->pdev, busaddr,
				 wrb->frag_len, PCI_DMA_TODEVICE);
	}
	num_wrbs++;
	queue_tail_inc(txq);

	while (cur_index != last_index) {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		be_dws_le_to_cpu(wrb, sizeof(*wrb));
		busaddr = ((u64)wrb->frag_pa_hi << 32) | (u64)wrb->frag_pa_lo;
		if (busaddr != 0) {
			pci_unmap_page(adapter->pdev, busaddr,
				       wrb->frag_len, PCI_DMA_TODEVICE);
		}
		num_wrbs++;
		queue_tail_inc(txq);
	}

	atomic_sub(num_wrbs, &txq->used);

	kfree_skb(sent_skb);
}
static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}
static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	be_eq_notify(adapter, eq_obj->q.id, true, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}
/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}
static void be_rx_q_clean(struct be_adapter *adapter)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
	struct be_eth_rx_compl *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(adapter)) != NULL) {
		be_rx_compl_discard(adapter, rxcp);
		be_rx_compl_reset(rxcp);
		be_cq_notify(adapter, rx_cq->id, true, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
}
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
						wrb_index, txcp);
			be_tx_compl_process(adapter, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			cmpl = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));
}
static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}
/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->tx_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
	be_queue_free(adapter, q);

	q = &adapter->tx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}
static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;
	/* Alloc Tx Event queue */
	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
		return -1;

	/* Ask BE to create Tx Event queue */
	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto tx_eq_free;
	/* Alloc TX eth compl queue */
	cq = &adapter->tx_obj.cq;
	if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
		goto tx_eq_destroy;

	/* Ask BE to create Tx eth compl queue */
	if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
		goto tx_cq_free;

	/* Alloc TX eth queue */
	q = &adapter->tx_obj.q;
	if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
		goto tx_cq_destroy;

	/* Ask BE to create Tx eth queue */
	if (be_cmd_txq_create(adapter, q, cq))
		goto tx_q_free;
	return 0;

tx_q_free:
	be_queue_free(adapter, q);
tx_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
tx_cq_free:
	be_queue_free(adapter, cq);
tx_eq_destroy:
	be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
tx_eq_free:
	be_queue_free(adapter, eq);
	return -1;
}
static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->rx_obj.q;
	if (q->created) {
		be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
		be_rx_q_clean(adapter);
	}
	be_queue_free(adapter, q);

	q = &adapter->rx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->rx_eq);

	q = &adapter->rx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}
static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	int rc;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	adapter->rx_eq.max_eqd = BE_MAX_EQD;
	adapter->rx_eq.min_eqd = 0;
	adapter->rx_eq.cur_eqd = 0;
	adapter->rx_eq.enable_aic = true;

	/* Alloc Rx Event queue */
	eq = &adapter->rx_eq.q;
	rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				sizeof(struct be_eq_entry));
	if (rc)
		return rc;

	/* Ask BE to create Rx Event queue */
	rc = be_cmd_eq_create(adapter, eq, adapter->rx_eq.cur_eqd);
	if (rc)
		goto rx_eq_free;

	/* Alloc RX eth compl queue */
	cq = &adapter->rx_obj.cq;
	rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
			sizeof(struct be_eth_rx_compl));
	if (rc)
		goto rx_eq_destroy;

	/* Ask BE to create Rx eth compl queue */
	rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
	if (rc)
		goto rx_cq_free;

	/* Alloc RX eth queue */
	q = &adapter->rx_obj.q;
	rc = be_queue_alloc(adapter, q, RX_Q_LEN, sizeof(struct be_eth_rx_d));
	if (rc)
		goto rx_cq_destroy;

	/* Ask BE to create Rx eth queue */
	rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
		BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle, false);
	if (rc)
		goto rx_q_free;

	return 0;
rx_q_free:
	be_queue_free(adapter, q);
rx_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
rx_cq_free:
	be_queue_free(adapter, cq);
rx_eq_destroy:
	be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
rx_eq_free:
	be_queue_free(adapter, eq);
	return rc;
}
/* There are 8 evt ids per func. Returns the evt id's bit number */
static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id)
{
	return eq_id - 8 * be_pci_func(adapter);
}
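
/*
 * Example: PCI function 1 owns event-queue ids 8..15, so eq_id 9 maps to
 * bit 1 of that function's ISR word; be_intx() relies on the same
 * 8-ids-per-function layout when selecting which CEV_ISR register to read.
 */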
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	int isr;

	isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
		(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
	if (!isr)
		return IRQ_NONE;

	event_handle(adapter, &adapter->tx_eq);
	event_handle(adapter, &adapter->rx_eq);

	return IRQ_HANDLED;
}
static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->rx_eq);

	return IRQ_HANDLED;
}
static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq);

	return IRQ_HANDLED;
}
static inline bool do_gro(struct be_adapter *adapter,
			struct be_eth_rx_compl *rxcp)
{
	int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
	int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);

	if (err)
		drvr_stats(adapter)->be_rxcp_err++;

	return (tcp_frame && !err) ? true : false;
}
int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(rx_eq, struct be_adapter, rx_eq);
	struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
	struct be_eth_rx_compl *rxcp;
	u32 work_done;

	adapter->stats.drvr_stats.be_rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(adapter);
		if (!rxcp)
			break;

		if (do_gro(adapter, rxcp))
			be_rx_compl_process_gro(adapter, rxcp);
		else
			be_rx_compl_process(adapter, rxcp);

		be_rx_compl_reset(rxcp);
	}

	/* Refill the queue */
	if (atomic_read(&adapter->rx_obj.q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(adapter);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		be_cq_notify(adapter, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(adapter, rx_cq->id, false, work_done);
	}
	return work_done;
}
void be_process_tx(struct be_adapter *adapter)
{
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_eth_tx_compl *txcp;
	u32 num_cmpl = 0;
	u16 end_idx;

	while ((txcp = be_tx_compl_get(tx_cq))) {
		end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
		be_tx_compl_process(adapter, end_idx);
		num_cmpl++;
	}

	if (num_cmpl) {
		be_cq_notify(adapter, tx_cq->id, true, num_cmpl);

		/* As Tx wrbs have been freed up, wake up netdev queue if
		 * it was stopped due to lack of tx wrbs.
		 */
		if (netif_queue_stopped(adapter->netdev) &&
			atomic_read(&txq->used) < txq->len / 2) {
			netif_wake_queue(adapter->netdev);
		}

		drvr_stats(adapter)->be_tx_events++;
		drvr_stats(adapter)->be_tx_compl += num_cmpl;
	}
}
/* As TX and MCC share the same EQ check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);

	napi_complete(napi);

	be_process_tx(adapter);

	be_process_mcc(adapter);

	return 1;
}
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);

	be_cmd_get_stats(adapter, &adapter->stats.cmd);

	/* Set EQ delay */
	be_rx_eqd_update(adapter);

	be_tx_rate_update(adapter);
	be_rx_rate_update(adapter);

	if (adapter->rx_post_starved) {
		adapter->rx_post_starved = false;
		be_post_rx_frags(adapter);
	}

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
static void be_msix_disable(struct be_adapter *adapter)
{
	if (adapter->msix_enabled) {
		pci_disable_msix(adapter->pdev);
		adapter->msix_enabled = false;
	}
}
static void be_msix_enable(struct be_adapter *adapter)
{
	int i, status;

	for (i = 0; i < BE_NUM_MSIX_VECTORS; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
		BE_NUM_MSIX_VECTORS);
	if (status == 0)
		adapter->msix_enabled = true;
	return;
}
static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
{
	return adapter->msix_entries[
			be_evt_bit_get(adapter, eq_id)].vector;
}
static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj->q.id);
	return request_irq(vec, handler, 0, eq_obj->desc, adapter);
}
static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj)
{
	int vec = be_msix_vec_get(adapter, eq_obj->q.id);
	free_irq(vec, adapter);
}
static int be_msix_register(struct be_adapter *adapter)
{
	int status;

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx");
	if (status)
		goto err;

	status = be_request_irq(adapter, &adapter->rx_eq, be_msix_rx, "rx");
	if (status)
		goto free_tx_irq;

	return 0;

free_tx_irq:
	be_free_irq(adapter, &adapter->tx_eq);
err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	pci_disable_msix(adapter->pdev);
	adapter->msix_enabled = false;
	return status;
}
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (adapter->msix_enabled) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!adapter->msix_enabled) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq);
	be_free_irq(adapter, &adapter->rx_eq);
done:
	adapter->isr_registered = false;
}
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	bool link_up;
	int status;
	u8 mac_speed;
	u16 link_speed;

	/* First time posting */
	be_post_rx_frags(adapter);

	napi_enable(&rx_eq->napi);
	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	be_eq_notify(adapter, rx_eq->q.id, true, false, 0);
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Rx compl queue may be in unarmed state; rearm it */
	be_cq_notify(adapter, adapter->rx_obj.cq.id, true, 0);

	status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
			&link_speed);
	if (status)
		goto ret_sts;
	be_link_status_update(adapter, link_up);

	status = be_vid_config(adapter);
	if (status)
		goto ret_sts;

	status = be_cmd_set_flow_control(adapter,
			adapter->tx_fc, adapter->rx_fc);
	if (status)
		goto ret_sts;

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
ret_sts:
	return status;
}
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			pci_free_consistent(adapter->pdev, cmd.size, cmd.va,
					cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
	return status;
}
static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags;
	int status;

	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_MCAST_PROMISCUOUS |
			BE_IF_FLAGS_PROMISCUOUS |
			BE_IF_FLAGS_PASS_L3L4_ERRORS;
	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_PASS_L3L4_ERRORS;

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, false/* pmac_invalid */,
			&adapter->if_handle, &adapter->pmac_id);
	if (status != 0)
		goto do_none;

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto if_destroy;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto tx_qs_destroy;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto rx_qs_destroy;

	adapter->link_speed = -1;

	return 0;

rx_qs_destroy:
	be_rx_queues_destroy(adapter);
tx_qs_destroy:
	be_tx_queues_destroy(adapter);
if_destroy:
	be_cmd_if_destroy(adapter, adapter->if_handle);
do_none:
	return status;
}
static int be_clear(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec;

	cancel_delayed_work_sync(&adapter->work);

	netif_stop_queue(netdev);
	netif_carrier_off(netdev);
	adapter->link_up = false;

	be_intr_set(adapter, false);

	if (adapter->msix_enabled) {
		vec = be_msix_vec_get(adapter, tx_eq->q.id);
		synchronize_irq(vec);
		vec = be_msix_vec_get(adapter, rx_eq->q.id);
		synchronize_irq(vec);
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	napi_disable(&rx_eq->napi);
	napi_disable(&tx_eq->napi);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	return 0;
}
#define FW_FILE_HDR_SIGN	"ServerEngines Corp. "
char flash_cookie[2][16] =	{"*** SE FLAS",
				"H DIRECTORY *** "};
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = FLASH_REDBOOT_START + FLASH_REDBOOT_IMAGE_MAX_SIZE - 4
			+ sizeof(struct flash_file_hdr) - 32*1024;
	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if crc does not match */
	if (!memcmp(flashed_crc, p, 4))
		return false;

	return true;
}
static int be_flash_image(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, u32 flash_type)
{
	int status;
	u32 flash_op, image_offset = 0, total_bytes, image_size = 0;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;

	switch (flash_type) {
	case FLASHROM_TYPE_ISCSI_ACTIVE:
		image_offset = FLASH_iSCSI_PRIMARY_IMAGE_START;
		image_size = FLASH_IMAGE_MAX_SIZE;
		break;
	case FLASHROM_TYPE_ISCSI_BACKUP:
		image_offset = FLASH_iSCSI_BACKUP_IMAGE_START;
		image_size = FLASH_IMAGE_MAX_SIZE;
		break;
	case FLASHROM_TYPE_FCOE_FW_ACTIVE:
		image_offset = FLASH_FCoE_PRIMARY_IMAGE_START;
		image_size = FLASH_IMAGE_MAX_SIZE;
		break;
	case FLASHROM_TYPE_FCOE_FW_BACKUP:
		image_offset = FLASH_FCoE_BACKUP_IMAGE_START;
		image_size = FLASH_IMAGE_MAX_SIZE;
		break;
	case FLASHROM_TYPE_BIOS:
		image_offset = FLASH_iSCSI_BIOS_START;
		image_size = FLASH_BIOS_IMAGE_MAX_SIZE;
		break;
	case FLASHROM_TYPE_FCOE_BIOS:
		image_offset = FLASH_FCoE_BIOS_START;
		image_size = FLASH_BIOS_IMAGE_MAX_SIZE;
		break;
	case FLASHROM_TYPE_PXE_BIOS:
		image_offset = FLASH_PXE_BIOS_START;
		image_size = FLASH_BIOS_IMAGE_MAX_SIZE;
		break;
	case FLASHROM_TYPE_REDBOOT:
		if (!be_flash_redboot(adapter, fw->data))
			return 0;
		image_offset = FLASH_REDBOOT_ISM_START;
		image_size = FLASH_REDBOOT_IMAGE_MAX_SIZE;
		break;
	default:
		return 0;
	}

	p += sizeof(struct flash_file_hdr) + image_offset;
	if (p + image_size > fw->data + fw->size)
		return -1;

	total_bytes = image_size;

	while (total_bytes) {
		if (total_bytes > 32*1024)
			num_bytes = 32*1024;
		else
			num_bytes = total_bytes;
		total_bytes -= num_bytes;

		if (!total_bytes)
			flash_op = FLASHROM_OPER_FLASH;
		else
			flash_op = FLASHROM_OPER_SAVE;
		memcpy(req->params.data_buf, p, num_bytes);
		p += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd,
				flash_type, flash_op, num_bytes);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"cmd to write to flash rom failed. type/op %d/%d\n",
				flash_type, flash_op);
			return -1;
		}
	}
	return 0;
}
int be_load_fw(struct be_adapter *adapter, u8 *func)
{
	char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
	const struct firmware *fw;
	struct flash_file_hdr *fhdr;
	struct flash_section_info *fsec = NULL;
	struct be_dma_mem flash_cmd;
	int status;
	u32 flash_type;
	const u8 *p;
	bool entry_found = false;
	char fw_ver[FW_VER_LEN];
	char fw_cfg;

	status = be_cmd_get_fw_ver(adapter, fw_ver);
	if (status)
		return status;

	fw_cfg = *(fw_ver + 2);
	strcpy(fw_file, func);

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	p = fw->data;
	fhdr = (struct flash_file_hdr *) p;
	if (memcmp(fhdr->sign, FW_FILE_HDR_SIGN, strlen(FW_FILE_HDR_SIGN))) {
		dev_err(&adapter->pdev->dev,
			"Firmware(%s) load error (signature did not match)\n",
				fw_file);
		status = -1;
		goto fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	p += sizeof(struct flash_file_hdr);
	while (p < (fw->data + fw->size)) {
		fsec = (struct flash_section_info *)p;
		if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie))) {
			entry_found = true;
			break;
		}
		p += 32;
	}

	if (!entry_found) {
		status = -1;
		dev_err(&adapter->pdev->dev,
			"Flash cookie not found in firmware image\n");
		goto fw_exit;
	}

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size,
					&flash_cmd.dma);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto fw_exit;
	}

	for (flash_type = FLASHROM_TYPE_ISCSI_ACTIVE;
		flash_type <= FLASHROM_TYPE_FCOE_FW_BACKUP; flash_type++) {
		status = be_flash_image(adapter, fw, &flash_cmd,
				flash_type);
		if (status)
			break;
	}

	pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va,
			flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

fw_exit:
	release_firmware(fw);
	return status;
}
static struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_get_stats		= be_get_stats,
	.ndo_set_rx_mode	= be_set_multicast_list,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_register	= be_vlan_register,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
};
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM |
		NETIF_F_GRO;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM;

	netdev->flags |= IFF_MULTICAST;

	adapter->rx_csum = true;

	/* Default settings for Rx and Tx flow control */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	netif_napi_add(netdev, &adapter->rx_eq.napi, be_poll_rx,
		BE_NAPI_WEIGHT);
	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);

	netif_carrier_off(netdev);
	netif_stop_queue(netdev);
}
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->pcicfg)
		iounmap(adapter->pcicfg);
}
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int pcicfg_reg;

	addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
			pci_resource_len(adapter->pdev, 2));
	if (addr == NULL)
		return -ENOMEM;
	adapter->csr = addr;

	addr = ioremap_nocache(pci_resource_start(adapter->pdev, 4),
			128 * 1024);
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	if (adapter->generation == BE_GEN2)
		pcicfg_reg = 1;
	else
		pcicfg_reg = 0;

	addr = ioremap_nocache(pci_resource_start(adapter->pdev, pcicfg_reg),
			pci_resource_len(adapter->pdev, pcicfg_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->pcicfg = addr;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		pci_free_consistent(adapter->pdev, mem->size,
			mem->va, mem->dma);

	mem = &adapter->mc_cmd_mem;
	if (mem->va)
		pci_free_consistent(adapter->pdev, mem->size,
			mem->va, mem->dma);
}
*adapter
)
2105 struct be_dma_mem
*mbox_mem_alloc
= &adapter
->mbox_mem_alloced
;
2106 struct be_dma_mem
*mbox_mem_align
= &adapter
->mbox_mem
;
2107 struct be_dma_mem
*mc_cmd_mem
= &adapter
->mc_cmd_mem
;
2110 status
= be_map_pci_bars(adapter
);
2114 mbox_mem_alloc
->size
= sizeof(struct be_mcc_mailbox
) + 16;
2115 mbox_mem_alloc
->va
= pci_alloc_consistent(adapter
->pdev
,
2116 mbox_mem_alloc
->size
, &mbox_mem_alloc
->dma
);
2117 if (!mbox_mem_alloc
->va
) {
2119 goto unmap_pci_bars
;
2122 mbox_mem_align
->size
= sizeof(struct be_mcc_mailbox
);
2123 mbox_mem_align
->va
= PTR_ALIGN(mbox_mem_alloc
->va
, 16);
2124 mbox_mem_align
->dma
= PTR_ALIGN(mbox_mem_alloc
->dma
, 16);
2125 memset(mbox_mem_align
->va
, 0, sizeof(struct be_mcc_mailbox
));
2127 mc_cmd_mem
->size
= sizeof(struct be_cmd_req_mcast_mac_config
);
2128 mc_cmd_mem
->va
= pci_alloc_consistent(adapter
->pdev
, mc_cmd_mem
->size
,
2130 if (mc_cmd_mem
->va
== NULL
) {
2134 memset(mc_cmd_mem
->va
, 0, mc_cmd_mem
->size
);
2136 spin_lock_init(&adapter
->mbox_lock
);
2137 spin_lock_init(&adapter
->mcc_lock
);
2138 spin_lock_init(&adapter
->mcc_cq_lock
);
2143 pci_free_consistent(adapter
->pdev
, mbox_mem_alloc
->size
,
2144 mbox_mem_alloc
->va
, mbox_mem_alloc
->dma
);
2147 be_unmap_pci_bars(adapter
);
static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_stats_obj *stats = &adapter->stats;
	struct be_dma_mem *cmd = &stats->cmd;

	if (cmd->va)
		pci_free_consistent(adapter->pdev, cmd->size,
			cmd->va, cmd->dma);
}
*adapter
)
2165 struct be_stats_obj
*stats
= &adapter
->stats
;
2166 struct be_dma_mem
*cmd
= &stats
->cmd
;
2168 cmd
->size
= sizeof(struct be_cmd_req_get_stats
);
2169 cmd
->va
= pci_alloc_consistent(adapter
->pdev
, cmd
->size
, &cmd
->dma
);
2170 if (cmd
->va
== NULL
)
2172 memset(cmd
->va
, 0, cmd
->size
);
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
static int be_get_config(struct be_adapter *adapter)
{
	int status;
	u8 mac[ETH_ALEN];

	status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter,
				&adapter->port_num, &adapter->cap);
	if (status)
		return status;

	memset(mac, 0, ETH_ALEN);
	status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, true /* permanent */, 0);
	if (status)
		return status;

	if (!is_valid_ether_addr(mac))
		return -EADDRNOTAVAIL;

	memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
	memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);

	return 0;
}
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct be_adapter));
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}

	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	be_netdev_init(netdev);
	SET_NETDEV_DEV(netdev, &pdev->dev);

	be_msix_enable(adapter);

	status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	status = be_cmd_POST(adapter);
	if (status)
		goto ctrl_clean;

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	be_msix_disable(adapter);
	free_netdev(adapter->netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);
	return 0;
}
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume
};
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
		rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);
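
/*
 * rx_frag_size is read-only at runtime (S_IRUGO) and can only be chosen
 * at load time, e.g. (assuming the module name that DRV_NAME
 * conventionally expands to, be2net):
 *
 *	modprobe be2net rx_frag_size=4096
 *
 * Invalid values fall back to 2048 with the warning above.
 */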
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);