/*
 * Copyright (C) 2005 - 2011 Emulex
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Costa Mesa, CA 92626
 */
#include <linux/prefetch.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");
static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	/* ... */
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	/* ... */
};
/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter)
{
	return (adapter->function_mode & FLEX10_MODE ||
		adapter->function_mode & VNIC_MODE ||
		adapter->function_mode & UMC_ENABLED);
}
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	if (adapter->eeh_err)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}
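/* Doorbell helpers: each 32-bit doorbell write below packs the ring id into
 * the low bits (via the *_RING_ID_MASK fields) and a count of entries posted
 * or events/completions popped into the upper bits (via the *_SHIFT fields),
 * then writes the value at the ring's offset in the doorbell BAR. */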
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* MAC addr configuration will be done in hardware for VFs
	 * by their corresponding PFs. Just copy to netdev addr here
	 */
	if (!be_physfn(adapter))
		goto netdev_addr;

	status = be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id, 0);
	if (status)
		return status;

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}
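/* For example (hypothetical readings): if *acc holds 0x0001FFF0 and the
 * 16-bit hw counter now reads 0x0005, the counter wrapped (0x0005 < 0xFFF0),
 * so the accumulated value becomes 0x00010000 + 0x0005 + 65536 = 0x00020005. */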
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
}
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
void be_link_status_update(struct be_adapter *adapter, u32 link_status)
{
	struct net_device *netdev = adapter->netdev;

	/* when link status changes, link speed must be re-queried from card */
	adapter->link_speed = -1;
	if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
		netif_carrier_on(netdev);
		dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
	} else {
		netif_carrier_off(netdev);
		dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
	}
}
static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}
/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
							bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}
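/* A worked example (hypothetical skb): a TSO skb with linear data plus two
 * page frags needs 1 + 2 data WRBs and 1 header WRB = 4, already even, so no
 * dummy WRB is added; with only one page frag the count would be 3 and the
 * dummy brings it back to an even 4 on non-Lancer chips. */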
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag = 0;
	u8 vlan_prio;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
						LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
						tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
						udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}
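/* With the be.h definitions assumed here (BE_MIN_MTU 256 and
 * BE_MAX_JUMBO_FRAME_SIZE 9018), the accepted range works out to 256..9000
 * bytes once the 14-byte Ethernet header and 4-byte FCS are subtracted from
 * the jumbo frame size. */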
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}
static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;

	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter, false, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
			netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
done:
	return;
}
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
		status = be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	status = be_cmd_pmac_add(adapter, mac,
				adapter->vf_cfg[vf].vf_if_handle,
				&adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}
static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (vf >= num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

	return 0;
}
static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}
static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (rate < 0))
		return -EINVAL;

	if (rate > 10000)
		rate = 10000;

	adapter->vf_cfg[vf].vf_tx_rate = rate;
	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = rx_stats(rxo);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = stats->rx_pps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;

	if (eqd != rx_eq->cur_eqd) {
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
		rx_eq->cur_eqd = eqd;
	}
}
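/* The adaptive coalescing above maps receive rate to an EQ delay: for
 * example (hypothetical rate), ~1.76M pkts/s gives 1760000 / 110000 = 16,
 * shifted left by 3 to 128, then clamped to the [min_eqd, max_eqd] window
 * before being programmed via be_cmd_modify_eqd(). */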
static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
		(rxcp->ip_csum || rxcp->ipv6);
}
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			dma_unmap_addr(rx_page_info, bus),
			adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			struct be_rx_compl_info *rxcp)
{
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}
/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(&eq_obj->napi);
}
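/* The parse helpers below extract fields from the two Rx completion layouts
 * (v0 for legacy mode, v1 for BE3 native mode) using the AMAP bit-field
 * accessors; be_rx_compl_get() picks the right one based on be3_native. */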
static void be_parse_rx_compl_v1(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(adapter, compl, rxcp);
	else
		be_parse_rx_compl_v0(adapter, compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						0, adapter->big_page_size,
						DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
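/* Sizing note: big_page_size is (1 << get_order(rx_frag_size)) * PAGE_SIZE,
 * so with the default 2048-byte fragments and 4K pages each page is carved
 * into two fragments; last_page_user marks the fragment whose unmap in
 * get_rx_page_info() also releases the page's DMA mapping. */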
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			(unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}

static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj,
			bool rearm)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	if (!num)
		rearm = true;

	be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}
/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}
static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
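/* The tail computation above walks back from the producer index to the
 * oldest posted buffer: with a 256-entry ring, head at 10 and 30 buffers
 * still posted, the first buffer to free sits at (10 + 256 - 30) % 256 = 236. */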
static void be_tx_compl_clean(struct be_adapter *adapter,
				struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_queue_info *txq = &txo->q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
			num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			atomic_sub(num_wrbs, &txq->used);
			cmpl = 0;
			num_wrbs = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		index_adv(&end_idx,
			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			txq->len);
		num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
		atomic_sub(num_wrbs, &txq->used);
	}
}
static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

static int be_num_txqs_want(struct be_adapter *adapter)
{
	if ((num_vfs && adapter->sriov_enabled) ||
		be_is_mc(adapter) ||
		lancer_chip(adapter) || !be_physfn(adapter) ||
		adapter->generation == BE_GEN2)
		return 1;
	else
		return MAX_TX_QS;
}
/* One TX event queue is shared by all TX compl qs */
static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS)
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;

	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
		sizeof(struct be_eq_entry)))
		return -1;

	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto err;
	adapter->tx_eq.eq_idx = adapter->eq_next_idx++;

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
			goto err;

		if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
			goto err;

		q = &txo->q;
		if (be_queue_alloc(adapter, q, TX_Q_LEN,
			sizeof(struct be_eth_wrb)))
			goto err;

		if (be_cmd_txq_create(adapter, q, cq))
			goto err;
	}
	return 0;

err:
	be_tx_queues_destroy(adapter);
	return -1;
}
static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		be_queue_free(adapter, &rxo->q);

		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

		q = &rxo->rx_eq.q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_EQ);
		be_queue_free(adapter, q);
	}
}

static u32 be_num_rxqs_want(struct be_adapter *adapter)
{
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
		!adapter->sriov_enabled && be_physfn(adapter) &&
		!be_is_mc(adapter)) {
		return 1 + MAX_RSS_QS; /* one default non-RSS queue */
	} else {
		dev_warn(&adapter->pdev->dev,
			"No support for multiple RX queues\n");
		return 1;
	}
}
static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
				msix_enabled(adapter) ?
					adapter->num_msix_vec - 1 : 1);
	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_warn(&adapter->pdev->dev,
			"Can create only %d RX queues", adapter->num_rx_qs);

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		rxo->rx_eq.eq_idx = adapter->eq_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;

		/* Rx Q - will be created in be_open() */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;
	}

	return 0;
err:
	be_rx_queues_destroy(adapter);
	return -1;
}
static bool event_peek(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
	if (!eqe->evt)
		return false;
	else
		return true;
}

static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0, rx = 0;

	if (lancer_chip(adapter)) {
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq, false);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq, true);
		}

		if (!(tx || rx))
			return IRQ_NONE;
	} else {
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		if ((1 << adapter->tx_eq.eq_idx & isr))
			event_handle(adapter, &adapter->tx_eq, false);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.eq_idx & isr))
				event_handle(adapter, &rxo->rx_eq, true);
		}
	}

	return IRQ_HANDLED;
}
static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_rx_obj *rxo = dev;
	struct be_adapter *adapter = rxo->adapter;

	event_handle(adapter, &rxo->rx_eq, true);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq, false);

	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return (rxcp->tcpf && !rxcp->err) ? true : false;
}
static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	rx_stats(rxo)->rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(adapter, rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some SKUs
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(adapter, rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(adapter, rxo, rxcp);
		else
			be_rx_compl_process(adapter, rxo, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	/* Refill the queue */
	if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo, GFP_ATOMIC);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		be_cq_notify(adapter, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(adapter, rx_cq->id, false, work_done);
	}
	return work_done;
}
/* As TX and MCC share the same EQ check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_tx_obj *txo;
	struct be_eth_tx_compl *txcp;
	int tx_compl, mcc_compl, status = 0;
	u8 i;
	u16 num_wrbs;

	for_all_tx_queues(adapter, txo, i) {
		tx_compl = 0;
		num_wrbs = 0;
		while ((txcp = be_tx_compl_get(&txo->cq))) {
			num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
			tx_compl++;
		}
		if (tx_compl) {
			be_cq_notify(adapter, txo->cq.id, true, tx_compl);

			atomic_sub(num_wrbs, &txo->q.used);

			/* As Tx wrbs have been freed up, wake up netdev queue
			 * if it was stopped due to lack of tx wrbs. */
			if (__netif_subqueue_stopped(adapter->netdev, i) &&
				atomic_read(&txo->q.used) < txo->q.len / 2) {
				netif_wake_subqueue(adapter->netdev, i);
			}

			u64_stats_update_begin(&tx_stats(txo)->sync_compl);
			tx_stats(txo)->tx_compl += tx_compl;
			u64_stats_update_end(&tx_stats(txo)->sync_compl);
		}
	}

	mcc_compl = be_process_mcc(adapter, &status);

	if (mcc_compl) {
		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	napi_complete(napi);

	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
	adapter->drv_stats.tx_events++;
	return 1;
}
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
	u32 i;

	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_status_lo);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_status_hi);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);

	ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
	ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));

	if (ue_status_lo || ue_status_hi) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev, "UE Detected!!\n");
	}

	if (ue_status_lo) {
		for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
			if (ue_status_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_status_hi) {
		for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
			if (ue_status_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}
}
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->ue_detected && !lancer_chip(adapter))
		be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		int mcc_compl, status = 0;

		mcc_compl = be_process_mcc(adapter, &status);

		if (mcc_compl) {
			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
		}

		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_rx_queues(adapter, rxo, i) {
		be_rx_eqd_update(adapter, rxo);

		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
	}
}

static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS	(1 + 1) /* Rx + Tx */
	int i, status, num_vec;

	num_vec = be_num_rxqs_want(adapter) + 1;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}
	return;
done:
	adapter->num_msix_vec = num_vec;
	return;
}
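/* pci_enable_msix() of this era returns 0 on success or, on failure, the
 * number of vectors that could have been allocated; the retry above shrinks
 * the request to that count as long as it still covers the Rx + Tx minimum. */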
static int be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 nvfs;

		pos = pci_find_ext_capability(adapter->pdev,
						PCI_EXT_CAP_ID_SRIOV);
		pci_read_config_word(adapter->pdev,
					pos + PCI_SRIOV_TOTAL_VF, &nvfs);

		if (num_vfs > nvfs) {
			dev_info(&adapter->pdev->dev,
					"Device supports %d VFs and not %d\n",
					nvfs, num_vfs);
			num_vfs = nvfs;
		}

		status = pci_enable_sriov(adapter->pdev, num_vfs);
		adapter->sriov_enabled = status ? false : true;

		if (adapter->sriov_enabled) {
			adapter->vf_cfg = kcalloc(num_vfs,
						sizeof(struct be_vf_cfg),
						GFP_KERNEL);
			if (!adapter->vf_cfg)
				return -ENOMEM;
		}
	}
#endif
	return 0;
}

static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (adapter->sriov_enabled) {
		pci_disable_sriov(adapter->pdev);
		kfree(adapter->vf_cfg);
		adapter->sriov_enabled = false;
	}
#endif
}
2142 static inline int be_msix_vec_get(struct be_adapter
*adapter
,
2143 struct be_eq_obj
*eq_obj
)
2145 return adapter
->msix_entries
[eq_obj
->eq_idx
].vector
;
static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc, void *context)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj);
	return request_irq(vec, handler, 0, eq_obj->desc, context);
}

static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
			void *context)
{
	int vec = be_msix_vec_get(adapter, eq_obj);
	free_irq(vec, context);
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int status, i;
	char qname[10];

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
				adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i) {
		sprintf(qname, "rxq%d", i);
		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
				qname, rxo);
		if (status)
			goto err_msix;
	}

	return 0;

err_msix:
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	be_msix_disable(adapter);
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for_all_rx_queues(adapter, rxo, i)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

done:
	adapter->isr_registered = false;
}

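/*
 * RX teardown ordering in be_rx_queues_clear() matters: destroy the
 * RXQ first, give the hardware a ~1ms grace period to finish in-flight
 * DMA and post the flush completion, then reclaim the posted buffers,
 * and only afterwards drain residual entries from the event queue.
 */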
static void be_rx_queues_clear(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_q_clean(adapter, rxo);
		}

		/* Clear any residual events */
		q = &rxo->rx_eq.q;
		if (q->created)
			be_eq_clean(adapter, &rxo->rx_eq);
	}
}

static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	be_async_mcc_disable(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	if (lancer_chip(adapter)) {
		be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
		for_all_rx_queues(adapter, rxo, i)
			be_cq_notify(adapter, rxo->cq.id, false, 0);
		for_all_tx_queues(adapter, txo, i)
			be_cq_notify(adapter, txo->cq.id, false, 0);
	}

	if (msix_enabled(adapter)) {
		vec = be_msix_vec_get(adapter, tx_eq);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, &rxo->rx_eq);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	for_all_tx_queues(adapter, txo, i)
		be_tx_compl_clean(adapter, txo);

	be_rx_queues_clear(adapter);
	return 0;
}

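/*
 * RSS layout used below: queue 0 is the default (non-RSS) queue, while
 * queues 1..N are created with RSS enabled. Their hardware rss_id
 * values are collected into rsstable[], which be_cmd_rss_config()
 * programs as the RSS indirection table.
 */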
static int be_rx_queues_setup(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i;
	u8 rsstable[MAX_RSS_QS];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
			rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
			adapter->if_handle,
			(i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		for_all_rss_queues(adapter, rxo, i)
			rsstable[i] = rxo->rss_id;

		rc = be_cmd_rss_config(adapter, rsstable,
			adapter->num_rx_qs - 1);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo, GFP_KERNEL);
		napi_enable(&rxo->rx_eq.napi);
	}
	return 0;
}

static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_rx_obj *rxo;
	int status, i;

	status = be_rx_queues_setup(adapter);
	if (status)
		goto err;

	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	for_all_rx_queues(adapter, rxo, i) {
		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
		be_cq_notify(adapter, rxo->cq.id, true, 0);
	}
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}

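/*
 * Wake-on-LAN: enabling programs the adapter's own MAC address as the
 * magic-packet filter and arms PCI wake for D3hot/D3cold; disabling
 * passes a zeroed MAC and disarms both. The DMA-coherent buffer only
 * lives for the duration of the firmware command.
 */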
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				    GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}

/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];

	be_vf_eth_addr_generate(adapter, mac);

	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_pmac_add(adapter, mac,
					adapter->vf_cfg[vf].vf_if_handle,
					&adapter->vf_cfg[vf].vf_pmac_id,
					vf + 1);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address add failed for VF %d\n", vf);
		else
			memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}

static void be_vf_clear(struct be_adapter *adapter)
{
	u32 vf;

	for (vf = 0; vf < num_vfs; vf++) {
		if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
			be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
	}

	for (vf = 0; vf < num_vfs; vf++)
		if (adapter->vf_cfg[vf].vf_if_handle)
			be_cmd_if_destroy(adapter,
				adapter->vf_cfg[vf].vf_if_handle, vf + 1);
}

static int be_clear(struct be_adapter *adapter)
{
	if (be_physfn(adapter) && adapter->sriov_enabled)
		be_vf_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);
	adapter->eq_next_idx = 0;

	adapter->be3_native = false;
	adapter->promiscuous = false;

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}

static int be_vf_setup(struct be_adapter *adapter)
{
	u32 cap_flags, en_flags, vf;
	u16 lnk_speed;
	int status;

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
					&adapter->vf_cfg[vf].vf_if_handle,
					NULL, vf + 1);
		if (status)
			goto err;
		adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
	}

	if (!lancer_chip(adapter)) {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
				vf + 1);
		if (status)
			goto err;
		adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
	}
	return 0;
err:
	return status;
}

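/*
 * be_setup() is the one bring-up path shared by probe, resume and EEH
 * recovery: create the TX/RX/MCC queues, query the permanent MAC,
 * create the interface with the desired capability/enable flags
 * (adding RSS when the function supports it), then sync VLAN, RX-mode
 * and flow-control state, and finally set up VFs when SR-IOV is
 * active. Any failure unwinds through be_clear().
 */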
static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags;
	u32 tx_fc, rx_fc;
	int status;
	u8 mac[ETH_ALEN];

	/* Allow all priorities by default. A GRP5 evt may modify this */
	adapter->vlan_prio_bmap = 0xff;
	adapter->link_speed = -1;

	be_cmd_req_native_mode(adapter);

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto err;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto err;

	memset(mac, 0, ETH_ALEN);
	status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
			true /*permanent */, 0);
	if (status)
		return status;
	memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
	memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
			BE_IF_FLAGS_PROMISCUOUS;
	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
		cap_flags |= BE_IF_FLAGS_RSS;
		en_flags |= BE_IF_FLAGS_RSS;
	}
	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, &adapter->if_handle,
			&adapter->pmac_id, 0);
	if (status != 0)
		goto err;

	/* For BEx, the VF's permanent mac queried from card is incorrect.
	 * Query the mac configued by the PF using if_handle
	 */
	if (!be_physfn(adapter) && !lancer_chip(adapter)) {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);

	status = be_vid_config(adapter, false, 0);
	if (status)
		goto err;

	be_set_rx_mode(adapter->netdev);

	status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
	if (status)
		goto err;
	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
		status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);
		if (status)
			goto err;
	}

	pcie_set_readrq(adapter->pdev, 4096);

	if (be_physfn(adapter) && adapter->sriov_enabled) {
		status = be_vf_setup(adapter);
		if (status)
			goto err;
	}

	return 0;
err:
	be_clear(adapter);
	return status;
}

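/*
 * Firmware flashing for BE2/BE3: a UFI image is a file header followed
 * by per-image headers and payloads. be_flash_data() walks a
 * generation-specific table of component offsets/sizes and writes each
 * applicable component in 32KB chunks, issuing SAVE operations for
 * intermediate chunks and a final FLASH operation for the last chunk
 * of a component.
 */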
#define FW_FILE_HDR_SIGN	"ServerEngines Corp. "
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
		"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/*update redboot only if crc does not match*/
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}

static bool phy_flashing_required(struct be_adapter *adapter)
{
	int status = 0;
	struct be_phy_info phy_info;

	status = be_cmd_get_phy_info(adapter, &phy_info);
	if (status)
		return false;
	if ((phy_info.phy_type == TN_8022) &&
		(phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
		return true;
	}
	return false;
}

static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	static const struct flash_comp gen3_flash_types[10] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3},
		{ FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
				memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
			if (!phy_flashing_required(adapter))
				continue;
		}
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
			(num_of_images * sizeof(struct image_hdr)))))
			continue;
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;
			if (!total_bytes) {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_FLASH;
				else
					flash_op = FLASHROM_OPER_FLASH;
			} else {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_SAVE;
				else
					flash_op = FLASHROM_OPER_SAVE;
			}
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				if ((status == ILLEGAL_IOCTL_REQ) &&
					(pflashcomp[i].optype ==
						IMG_TYPE_PHY_FW))
					break;
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}

static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		return 0;
	if (fhdr->build[0] == '3')
		return BE_GEN3;
	else if (fhdr->build[0] == '2')
		return BE_GEN2;
	else
		return 0;
}

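/*
 * Lancer uses a different download model than BE2/BE3: the raw image
 * is streamed in 32KB chunks to the "/prg" object through
 * lancer_cmd_write_object() and then committed with a zero-length
 * write. Images whose length is not 4-byte aligned are rejected before
 * any chunk is sent.
 */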
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);
		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
					0, offset, LANCER_FW_DOWNLOAD_LOCATION,
					&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}

static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}

int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

fw_exit:
	release_firmware(fw);
	return status;
}

static struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
				BE_NAPI_WEIGHT);

	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
}

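/*
 * BAR layout differs by family: Lancer exposes its doorbells through
 * BAR 0 only; on BE2/BE3 the physical function maps CSR space from
 * BAR 2, and the doorbell BAR number then depends on the generation
 * and on whether this is a PF or a VF (see the db_reg selection
 * below).
 */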
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int db_reg;

	if (lancer_chip(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
			pci_resource_len(adapter->pdev, 0));
		if (addr == NULL)
			return -ENOMEM;
		adapter->db = addr;
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		db_reg = 4;
	} else {
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
				pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
			cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (adapter->generation == BE_GEN2) {
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	} else {
		if (lancer_chip(adapter))
			cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
		else
			cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	}
	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				     GFP_KERNEL);
	if (cmd->va == NULL)
		return -1;
	memset(cmd->va, 0, cmd->size);
	return 0;
}

static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static int be_get_config(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	if (adapter->function_mode & FLEX10_MODE)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	status = be_cmd_get_cntl_attributes(adapter);

	return status;
}

static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
						SLI_INTF_IF_TYPE_SHIFT;

		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					 SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}
	return 0;
}

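/*
 * Lancer readiness: lancer_wait_ready() polls the SLIPORT status
 * register until the RDY bit is set, giving up after
 * SLIPORT_READY_TIMEOUT iterations. If the status also reports an
 * error with "reset needed", lancer_test_and_set_rdy_state() writes
 * the initiate-physical-reset bit to the SLIPORT control register and
 * polls again.
 */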
static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 500
	u32 sliport_status;
	int status = 0, i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			break;

		msleep(20);
	}

	if (i == SLIPORT_READY_TIMEOUT)
		status = -1;

	return status;
}

static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;

	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			status = -1;
		}
	}
	return status;
}

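/*
 * Probe ordering is significant: PCI enable/regions, netdev
 * allocation, family detection, DMA mask, SR-IOV, control structures
 * (BARs and mailbox), firmware POST/init/reset, stats buffers, config
 * query, MSI-X, and only then be_setup() and netdev registration. The
 * error labels at the bottom unwind in exactly the reverse order.
 */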
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = be_sriov_enable(adapter);
	if (status)
		goto free_netdev;

	status = be_ctrl_init(adapter);
	if (status)
		goto disable_sriov;

	if (lancer_chip(adapter)) {
		status = lancer_test_and_set_rdy_state(adapter);
		if (status) {
			dev_err(&pdev->dev, "Adapter in non recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	be_msix_enable(adapter);

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
disable_sriov:
	be_sriov_disable(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	cancel_delayed_work_sync(&adapter->work);
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	be_msix_disable(adapter);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	be_msix_enable(adapter);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

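/*
 * Module usage sketch (the parameter values are examples, not
 * defaults):
 *
 *	modprobe be2net rx_frag_size=4096 num_vfs=4
 *
 * rx_frag_size must be 2048, 4096 or 8192; any other value is reset to
 * 2048 with a warning at load time. num_vfs only takes effect on a
 * physical function in a kernel built with CONFIG_PCI_IOV.
 */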
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);