be2net: use stats-sync to read/write 64-bit stats
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / drivers/net/benet/be_main.c
blob 9f2f66c66be67872b319b55a1bb609ed3fe68d11
1 /*
2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
10 * Contact Information:
11 * linux-drivers@emulex.com
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
18 #include <linux/prefetch.h>
19 #include "be.h"
20 #include "be_cmds.h"
21 #include <asm/div64.h>
23 MODULE_VERSION(DRV_VER);
24 MODULE_DEVICE_TABLE(pci, be_dev_ids);
25 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
26 MODULE_AUTHOR("ServerEngines Corporation");
27 MODULE_LICENSE("GPL");
29 static ushort rx_frag_size = 2048;
30 static unsigned int num_vfs;
31 module_param(rx_frag_size, ushort, S_IRUGO);
32 module_param(num_vfs, uint, S_IRUGO);
33 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
34 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
36 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
37 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
38 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
39 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
40 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
41 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
42 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
43 { 0 }
45 MODULE_DEVICE_TABLE(pci, be_dev_ids);
46 /* UE Status Low CSR */
47 static const char * const ue_status_low_desc[] = {
48 "CEV",
49 "CTX",
50 "DBUF",
51 "ERX",
52 "Host",
53 "MPU",
54 "NDMA",
55 "PTC ",
56 "RDMA ",
57 "RXF ",
58 "RXIPS ",
59 "RXULP0 ",
60 "RXULP1 ",
61 "RXULP2 ",
62 "TIM ",
63 "TPOST ",
64 "TPRE ",
65 "TXIPS ",
66 "TXULP0 ",
67 "TXULP1 ",
68 "UC ",
69 "WDMA ",
70 "TXULP2 ",
71 "HOST1 ",
72 "P0_OB_LINK ",
73 "P1_OB_LINK ",
74 "HOST_GPIO ",
75 "MBOX ",
76 "AXGMAC0",
77 "AXGMAC1",
78 "JTAG",
79 "MPU_INTPEND"
81 /* UE Status High CSR */
82 static const char * const ue_status_hi_desc[] = {
83 "LPCMEMHOST",
84 "MGMT_MAC",
85 "PCS0ONLINE",
86 "MPU_IRAM",
87 "PCS1ONLINE",
88 "PCTL0",
89 "PCTL1",
90 "PMEM",
91 "RR",
92 "TXPB",
93 "RXPP",
94 "XAUI",
95 "TXP",
96 "ARM",
97 "IPC",
98 "HOST2",
99 "HOST3",
100 "HOST4",
101 "HOST5",
102 "HOST6",
103 "HOST7",
104 "HOST8",
105 "HOST9",
106 "NETC",
107 "Unknown",
108 "Unknown",
109 "Unknown",
110 "Unknown",
111 "Unknown",
112 "Unknown",
113 "Unknown",
114 "Unknown"
117 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
119 struct be_dma_mem *mem = &q->dma_mem;
120 if (mem->va)
121 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
122 mem->dma);
125 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
126 u16 len, u16 entry_size)
128 struct be_dma_mem *mem = &q->dma_mem;
130 memset(q, 0, sizeof(*q));
131 q->len = len;
132 q->entry_size = entry_size;
133 mem->size = len * entry_size;
134 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
135 GFP_KERNEL);
136 if (!mem->va)
137 return -1;
138 memset(mem->va, 0, mem->size);
139 return 0;
142 static void be_intr_set(struct be_adapter *adapter, bool enable)
144 u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
145 u32 reg = ioread32(addr);
146 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
148 if (adapter->eeh_err)
149 return;
151 if (!enabled && enable)
152 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
153 else if (enabled && !enable)
154 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
155 else
156 return;
158 iowrite32(reg, addr);
161 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
163 u32 val = 0;
164 val |= qid & DB_RQ_RING_ID_MASK;
165 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
167 wmb();
168 iowrite32(val, adapter->db + DB_RQ_OFFSET);
171 static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
173 u32 val = 0;
174 val |= qid & DB_TXULP_RING_ID_MASK;
175 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
177 wmb();
178 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
181 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
182 bool arm, bool clear_int, u16 num_popped)
184 u32 val = 0;
185 val |= qid & DB_EQ_RING_ID_MASK;
186 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
187 DB_EQ_RING_ID_EXT_MASK_SHIFT);
189 if (adapter->eeh_err)
190 return;
192 if (arm)
193 val |= 1 << DB_EQ_REARM_SHIFT;
194 if (clear_int)
195 val |= 1 << DB_EQ_CLR_SHIFT;
196 val |= 1 << DB_EQ_EVNT_SHIFT;
197 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
198 iowrite32(val, adapter->db + DB_EQ_OFFSET);
201 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
203 u32 val = 0;
204 val |= qid & DB_CQ_RING_ID_MASK;
205 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
206 DB_CQ_RING_ID_EXT_MASK_SHIFT);
208 if (adapter->eeh_err)
209 return;
211 if (arm)
212 val |= 1 << DB_CQ_REARM_SHIFT;
213 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
214 iowrite32(val, adapter->db + DB_CQ_OFFSET);
217 static int be_mac_addr_set(struct net_device *netdev, void *p)
219 struct be_adapter *adapter = netdev_priv(netdev);
220 struct sockaddr *addr = p;
221 int status = 0;
223 if (!is_valid_ether_addr(addr->sa_data))
224 return -EADDRNOTAVAIL;
226 /* MAC addr configuration will be done in hardware for VFs
227 * by their corresponding PFs. Just copy to netdev addr here
229 if (!be_physfn(adapter))
230 goto netdev_addr;
232 status = be_cmd_pmac_del(adapter, adapter->if_handle,
233 adapter->pmac_id, 0);
234 if (status)
235 return status;
237 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
238 adapter->if_handle, &adapter->pmac_id, 0);
239 netdev_addr:
240 if (!status)
241 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
243 return status;
246 static void populate_be2_stats(struct be_adapter *adapter)
248 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
249 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
250 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
251 struct be_port_rxf_stats_v0 *port_stats =
252 &rxf_stats->port[adapter->port_num];
253 struct be_drv_stats *drvs = &adapter->drv_stats;
255 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
256 drvs->rx_pause_frames = port_stats->rx_pause_frames;
257 drvs->rx_crc_errors = port_stats->rx_crc_errors;
258 drvs->rx_control_frames = port_stats->rx_control_frames;
259 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
260 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
261 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
262 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
263 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
264 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
265 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
266 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
267 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
268 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
269 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
270 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
271 drvs->rx_dropped_header_too_small =
272 port_stats->rx_dropped_header_too_small;
273 drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
274 drvs->rx_alignment_symbol_errors =
275 port_stats->rx_alignment_symbol_errors;
277 drvs->tx_pauseframes = port_stats->tx_pauseframes;
278 drvs->tx_controlframes = port_stats->tx_controlframes;
280 if (adapter->port_num)
281 drvs->jabber_events = rxf_stats->port1_jabber_events;
282 else
283 drvs->jabber_events = rxf_stats->port0_jabber_events;
284 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
285 drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
286 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
287 drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
288 drvs->forwarded_packets = rxf_stats->forwarded_packets;
289 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
290 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
291 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
292 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
295 static void populate_be3_stats(struct be_adapter *adapter)
297 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
298 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
299 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
300 struct be_port_rxf_stats_v1 *port_stats =
301 &rxf_stats->port[adapter->port_num];
302 struct be_drv_stats *drvs = &adapter->drv_stats;
304 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
305 drvs->rx_pause_frames = port_stats->rx_pause_frames;
306 drvs->rx_crc_errors = port_stats->rx_crc_errors;
307 drvs->rx_control_frames = port_stats->rx_control_frames;
308 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
309 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
310 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
311 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
312 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
313 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
314 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
315 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
316 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
317 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
318 drvs->rx_dropped_header_too_small =
319 port_stats->rx_dropped_header_too_small;
320 drvs->rx_input_fifo_overflow_drop =
321 port_stats->rx_input_fifo_overflow_drop;
322 drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
323 drvs->rx_alignment_symbol_errors =
324 port_stats->rx_alignment_symbol_errors;
325 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
326 drvs->tx_pauseframes = port_stats->tx_pauseframes;
327 drvs->tx_controlframes = port_stats->tx_controlframes;
328 drvs->jabber_events = port_stats->jabber_events;
329 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
330 drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
331 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
332 drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
333 drvs->forwarded_packets = rxf_stats->forwarded_packets;
334 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
335 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
336 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
337 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
340 static void populate_lancer_stats(struct be_adapter *adapter)
343 struct be_drv_stats *drvs = &adapter->drv_stats;
344 struct lancer_pport_stats *pport_stats =
345 pport_stats_from_cmd(adapter);
347 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
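/* Descriptive note on the code below: Lancer reports 64-bit port counters,
 * and only their low 32 bits (the *_lo words) are folded into drv_stats. */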
348 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
349 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
350 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
351 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
352 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
353 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
354 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
355 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
356 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
357 drvs->rx_dropped_tcp_length =
358 pport_stats->rx_dropped_invalid_tcp_length;
359 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
360 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
361 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
362 drvs->rx_dropped_header_too_small =
363 pport_stats->rx_dropped_header_too_small;
364 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
365 drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
366 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
367 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
368 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
369 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
370 drvs->jabber_events = pport_stats->rx_jabbers;
371 drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
372 drvs->forwarded_packets = pport_stats->num_forwards_lo;
373 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
374 drvs->rx_drops_too_many_frags =
375 pport_stats->rx_drops_too_many_frags_lo;
378 void be_parse_stats(struct be_adapter *adapter)
380 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
381 struct be_rx_obj *rxo;
382 int i;
384 if (adapter->generation == BE_GEN3) {
385 if (lancer_chip(adapter))
386 populate_lancer_stats(adapter);
387 else
388 populate_be3_stats(adapter);
389 } else {
390 populate_be2_stats(adapter);
393 /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
394 for_all_rx_queues(adapter, rxo, i)
395 rx_stats(rxo)->rx_drops_no_frags =
396 erx->rx_drops_no_fragments[rxo->q.id];
399 static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
400 struct rtnl_link_stats64 *stats)
402 struct be_adapter *adapter = netdev_priv(netdev);
403 struct be_drv_stats *drvs = &adapter->drv_stats;
404 struct be_rx_obj *rxo;
405 struct be_tx_obj *txo;
406 u64 pkts, bytes;
407 unsigned int start;
408 int i;
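/* Reader side of the u64_stats_sync scheme named in the commit subject:
 * on 32-bit hosts the fetch_begin_bh/fetch_retry_bh pair rereads the
 * counters if a writer raced with us, so the 64-bit rx/tx packet and byte
 * totals are never observed torn; on 64-bit hosts the helpers are no-ops. */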
410 for_all_rx_queues(adapter, rxo, i) {
411 const struct be_rx_stats *rx_stats = rx_stats(rxo);
412 do {
413 start = u64_stats_fetch_begin_bh(&rx_stats->sync);
414 pkts = rx_stats(rxo)->rx_pkts;
415 bytes = rx_stats(rxo)->rx_bytes;
416 } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
417 stats->rx_packets += pkts;
418 stats->rx_bytes += bytes;
419 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
420 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
421 rx_stats(rxo)->rx_drops_no_frags;
424 for_all_tx_queues(adapter, txo, i) {
425 const struct be_tx_stats *tx_stats = tx_stats(txo);
426 do {
427 start = u64_stats_fetch_begin_bh(&tx_stats->sync);
428 pkts = tx_stats(txo)->tx_pkts;
429 bytes = tx_stats(txo)->tx_bytes;
430 } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
431 stats->tx_packets += pkts;
432 stats->tx_bytes += bytes;
435 /* bad pkts received */
436 stats->rx_errors = drvs->rx_crc_errors +
437 drvs->rx_alignment_symbol_errors +
438 drvs->rx_in_range_errors +
439 drvs->rx_out_range_errors +
440 drvs->rx_frame_too_long +
441 drvs->rx_dropped_too_small +
442 drvs->rx_dropped_too_short +
443 drvs->rx_dropped_header_too_small +
444 drvs->rx_dropped_tcp_length +
445 drvs->rx_dropped_runt;
447 /* detailed rx errors */
448 stats->rx_length_errors = drvs->rx_in_range_errors +
449 drvs->rx_out_range_errors +
450 drvs->rx_frame_too_long;
452 stats->rx_crc_errors = drvs->rx_crc_errors;
454 /* frame alignment errors */
455 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
457 /* receiver fifo overrun */
458 /* drops_no_pbuf is not per i/f, it's per BE card */
459 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
460 drvs->rx_input_fifo_overflow_drop +
461 drvs->rx_drops_no_pbuf;
462 return stats;
465 void be_link_status_update(struct be_adapter *adapter, bool link_up)
467 struct net_device *netdev = adapter->netdev;
469 /* If link came up or went down */
470 if (adapter->link_up != link_up) {
471 adapter->link_speed = -1;
472 if (link_up) {
473 netif_carrier_on(netdev);
474 printk(KERN_INFO "%s: Link up\n", netdev->name);
475 } else {
476 netif_carrier_off(netdev);
477 printk(KERN_INFO "%s: Link down\n", netdev->name);
479 adapter->link_up = link_up;
483 static void be_tx_stats_update(struct be_tx_obj *txo,
484 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
486 struct be_tx_stats *stats = tx_stats(txo);
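/* Writer side of the stats-sync scheme: bracket the 64-bit counter updates
 * so a concurrent reader in be_get_stats64() cannot see a half-written
 * value on 32-bit hosts. */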
488 u64_stats_update_begin(&stats->sync);
489 stats->tx_reqs++;
490 stats->tx_wrbs += wrb_cnt;
491 stats->tx_bytes += copied;
492 stats->tx_pkts += (gso_segs ? gso_segs : 1);
493 if (stopped)
494 stats->tx_stops++;
495 u64_stats_update_end(&stats->sync);
498 /* Determine number of WRB entries needed to xmit data in an skb */
499 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
500 bool *dummy)
502 int cnt = (skb->len > skb->data_len);
504 cnt += skb_shinfo(skb)->nr_frags;
506 /* to account for hdr wrb */
507 cnt++;
508 if (lancer_chip(adapter) || !(cnt & 1)) {
509 *dummy = false;
510 } else {
511 /* add a dummy to make it an even num */
512 cnt++;
513 *dummy = true;
515 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
516 return cnt;
519 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
521 wrb->frag_pa_hi = upper_32_bits(addr);
522 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
523 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
526 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
527 struct sk_buff *skb, u32 wrb_cnt, u32 len)
529 u8 vlan_prio = 0;
530 u16 vlan_tag = 0;
532 memset(hdr, 0, sizeof(*hdr));
534 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
536 if (skb_is_gso(skb)) {
537 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
538 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
539 hdr, skb_shinfo(skb)->gso_size);
540 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
541 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
542 if (lancer_chip(adapter) && adapter->sli_family ==
543 LANCER_A0_SLI_FAMILY) {
544 AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
545 if (is_tcp_pkt(skb))
546 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
547 tcpcs, hdr, 1);
548 else if (is_udp_pkt(skb))
549 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
550 udpcs, hdr, 1);
552 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
553 if (is_tcp_pkt(skb))
554 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
555 else if (is_udp_pkt(skb))
556 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
559 if (vlan_tx_tag_present(skb)) {
560 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
561 vlan_tag = vlan_tx_tag_get(skb);
562 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
563 /* If vlan priority provided by OS is NOT in available bmap */
564 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
565 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
566 adapter->recommended_prio;
567 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
570 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
571 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
572 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
573 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
576 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
577 bool unmap_single)
579 dma_addr_t dma;
581 be_dws_le_to_cpu(wrb, sizeof(*wrb));
583 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
584 if (wrb->frag_len) {
585 if (unmap_single)
586 dma_unmap_single(dev, dma, wrb->frag_len,
587 DMA_TO_DEVICE);
588 else
589 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
593 static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
594 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
596 dma_addr_t busaddr;
597 int i, copied = 0;
598 struct device *dev = &adapter->pdev->dev;
599 struct sk_buff *first_skb = skb;
600 struct be_eth_wrb *wrb;
601 struct be_eth_hdr_wrb *hdr;
602 bool map_single = false;
603 u16 map_head;
605 hdr = queue_head_node(txq);
606 queue_head_inc(txq);
607 map_head = txq->head;
609 if (skb->len > skb->data_len) {
610 int len = skb_headlen(skb);
611 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
612 if (dma_mapping_error(dev, busaddr))
613 goto dma_err;
614 map_single = true;
615 wrb = queue_head_node(txq);
616 wrb_fill(wrb, busaddr, len);
617 be_dws_cpu_to_le(wrb, sizeof(*wrb));
618 queue_head_inc(txq);
619 copied += len;
622 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
623 struct skb_frag_struct *frag =
624 &skb_shinfo(skb)->frags[i];
625 busaddr = dma_map_page(dev, frag->page, frag->page_offset,
626 frag->size, DMA_TO_DEVICE);
627 if (dma_mapping_error(dev, busaddr))
628 goto dma_err;
629 wrb = queue_head_node(txq);
630 wrb_fill(wrb, busaddr, frag->size);
631 be_dws_cpu_to_le(wrb, sizeof(*wrb));
632 queue_head_inc(txq);
633 copied += frag->size;
636 if (dummy_wrb) {
637 wrb = queue_head_node(txq);
638 wrb_fill(wrb, 0, 0);
639 be_dws_cpu_to_le(wrb, sizeof(*wrb));
640 queue_head_inc(txq);
643 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
644 be_dws_cpu_to_le(hdr, sizeof(*hdr));
646 return copied;
647 dma_err:
648 txq->head = map_head;
649 while (copied) {
650 wrb = queue_head_node(txq);
651 unmap_tx_frag(dev, wrb, map_single);
652 map_single = false;
653 copied -= wrb->frag_len;
654 queue_head_inc(txq);
656 return 0;
659 static netdev_tx_t be_xmit(struct sk_buff *skb,
660 struct net_device *netdev)
662 struct be_adapter *adapter = netdev_priv(netdev);
663 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
664 struct be_queue_info *txq = &txo->q;
665 u32 wrb_cnt = 0, copied = 0;
666 u32 start = txq->head;
667 bool dummy_wrb, stopped = false;
669 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
671 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
672 if (copied) {
673 /* record the sent skb in the sent_skb table */
674 BUG_ON(txo->sent_skb_list[start]);
675 txo->sent_skb_list[start] = skb;
677 /* Ensure txq has space for the next skb; Else stop the queue
678 * *BEFORE* ringing the tx doorbell, so that we serialze the
679 * tx compls of the current transmit which'll wake up the queue
681 atomic_add(wrb_cnt, &txq->used);
682 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
683 txq->len) {
684 netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
685 stopped = true;
688 be_txq_notify(adapter, txq->id, wrb_cnt);
690 be_tx_stats_update(txo, wrb_cnt, copied,
691 skb_shinfo(skb)->gso_segs, stopped);
692 } else {
693 txq->head = start;
694 dev_kfree_skb_any(skb);
696 return NETDEV_TX_OK;
699 static int be_change_mtu(struct net_device *netdev, int new_mtu)
701 struct be_adapter *adapter = netdev_priv(netdev);
702 if (new_mtu < BE_MIN_MTU ||
703 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
704 (ETH_HLEN + ETH_FCS_LEN))) {
705 dev_info(&adapter->pdev->dev,
706 "MTU must be between %d and %d bytes\n",
707 BE_MIN_MTU,
708 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
709 return -EINVAL;
711 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
712 netdev->mtu, new_mtu);
713 netdev->mtu = new_mtu;
714 return 0;
718 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
719 * If the user configures more, place BE in vlan promiscuous mode.
721 static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
723 u16 vtag[BE_NUM_VLANS_SUPPORTED];
724 u16 ntags = 0, i;
725 int status = 0;
726 u32 if_handle;
728 if (vf) {
729 if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
730 vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
731 status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
734 if (adapter->vlans_added <= adapter->max_vlans) {
735 /* Construct VLAN Table to give to HW */
736 for (i = 0; i < VLAN_N_VID; i++) {
737 if (adapter->vlan_tag[i]) {
738 vtag[ntags] = cpu_to_le16(i);
739 ntags++;
742 status = be_cmd_vlan_config(adapter, adapter->if_handle,
743 vtag, ntags, 1, 0);
744 } else {
745 status = be_cmd_vlan_config(adapter, adapter->if_handle,
746 NULL, 0, 1, 1);
749 return status;
752 static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
754 struct be_adapter *adapter = netdev_priv(netdev);
756 adapter->vlans_added++;
757 if (!be_physfn(adapter))
758 return;
760 adapter->vlan_tag[vid] = 1;
761 if (adapter->vlans_added <= (adapter->max_vlans + 1))
762 be_vid_config(adapter, false, 0);
765 static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
767 struct be_adapter *adapter = netdev_priv(netdev);
769 adapter->vlans_added--;
771 if (!be_physfn(adapter))
772 return;
774 adapter->vlan_tag[vid] = 0;
775 if (adapter->vlans_added <= adapter->max_vlans)
776 be_vid_config(adapter, false, 0);
779 static void be_set_multicast_list(struct net_device *netdev)
781 struct be_adapter *adapter = netdev_priv(netdev);
783 if (netdev->flags & IFF_PROMISC) {
784 be_cmd_promiscuous_config(adapter, true);
785 adapter->promiscuous = true;
786 goto done;
789 /* BE was previously in promiscuous mode; disable it */
790 if (adapter->promiscuous) {
791 adapter->promiscuous = false;
792 be_cmd_promiscuous_config(adapter, false);
795 /* Enable multicast promisc if num configured exceeds what we support */
796 if (netdev->flags & IFF_ALLMULTI ||
797 netdev_mc_count(netdev) > BE_MAX_MC) {
798 be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
799 &adapter->mc_cmd_mem);
800 goto done;
803 be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
804 &adapter->mc_cmd_mem);
805 done:
806 return;
809 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
811 struct be_adapter *adapter = netdev_priv(netdev);
812 int status;
814 if (!adapter->sriov_enabled)
815 return -EPERM;
817 if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
818 return -EINVAL;
820 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
821 status = be_cmd_pmac_del(adapter,
822 adapter->vf_cfg[vf].vf_if_handle,
823 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
825 status = be_cmd_pmac_add(adapter, mac,
826 adapter->vf_cfg[vf].vf_if_handle,
827 &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
829 if (status)
830 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
831 mac, vf);
832 else
833 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
835 return status;
838 static int be_get_vf_config(struct net_device *netdev, int vf,
839 struct ifla_vf_info *vi)
841 struct be_adapter *adapter = netdev_priv(netdev);
843 if (!adapter->sriov_enabled)
844 return -EPERM;
846 if (vf >= num_vfs)
847 return -EINVAL;
849 vi->vf = vf;
850 vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
851 vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
852 vi->qos = 0;
853 memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
855 return 0;
858 static int be_set_vf_vlan(struct net_device *netdev,
859 int vf, u16 vlan, u8 qos)
861 struct be_adapter *adapter = netdev_priv(netdev);
862 int status = 0;
864 if (!adapter->sriov_enabled)
865 return -EPERM;
867 if ((vf >= num_vfs) || (vlan > 4095))
868 return -EINVAL;
870 if (vlan) {
871 adapter->vf_cfg[vf].vf_vlan_tag = vlan;
872 adapter->vlans_added++;
873 } else {
874 adapter->vf_cfg[vf].vf_vlan_tag = 0;
875 adapter->vlans_added--;
878 status = be_vid_config(adapter, true, vf);
880 if (status)
881 dev_info(&adapter->pdev->dev,
882 "VLAN %d config on VF %d failed\n", vlan, vf);
883 return status;
886 static int be_set_vf_tx_rate(struct net_device *netdev,
887 int vf, int rate)
889 struct be_adapter *adapter = netdev_priv(netdev);
890 int status = 0;
892 if (!adapter->sriov_enabled)
893 return -EPERM;
895 if ((vf >= num_vfs) || (rate < 0))
896 return -EINVAL;
898 if (rate > 10000)
899 rate = 10000;
901 adapter->vf_cfg[vf].vf_tx_rate = rate;
902 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
904 if (status)
905 dev_info(&adapter->pdev->dev,
906 "tx rate %d on VF %d failed\n", rate, vf);
907 return status;
910 static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
912 struct be_eq_obj *rx_eq = &rxo->rx_eq;
913 struct be_rx_stats *stats = rx_stats(rxo);
914 ulong now = jiffies;
915 ulong delta = now - stats->rx_jiffies;
916 u64 pkts;
917 unsigned int start, eqd;
919 if (!rx_eq->enable_aic)
920 return;
922 /* Wrapped around */
923 if (time_before(now, stats->rx_jiffies)) {
924 stats->rx_jiffies = now;
925 return;
928 /* Update once a second */
929 if (delta < HZ)
930 return;
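/* Snapshot rx_pkts under the same stats-sync seqcount used by the Rx
 * completion path, so the packets-per-second estimate below is based on a
 * consistent 64-bit value. */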
932 do {
933 start = u64_stats_fetch_begin_bh(&stats->sync);
934 pkts = stats->rx_pkts;
935 } while (u64_stats_fetch_retry_bh(&stats->sync, start));
937 stats->rx_pps = (pkts - stats->rx_pkts_prev) / (delta / HZ);
938 stats->rx_pkts_prev = pkts;
939 stats->rx_jiffies = now;
940 eqd = stats->rx_pps / 110000;
941 eqd = eqd << 3;
942 if (eqd > rx_eq->max_eqd)
943 eqd = rx_eq->max_eqd;
944 if (eqd < rx_eq->min_eqd)
945 eqd = rx_eq->min_eqd;
946 if (eqd < 10)
947 eqd = 0;
948 if (eqd != rx_eq->cur_eqd) {
949 be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
950 rx_eq->cur_eqd = eqd;
954 static void be_rx_stats_update(struct be_rx_obj *rxo,
955 struct be_rx_compl_info *rxcp)
957 struct be_rx_stats *stats = rx_stats(rxo);
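/* Rx writer side: this runs in NAPI (BH) context, which is why the readers
 * above use the _bh fetch variants. */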
959 u64_stats_update_begin(&stats->sync);
960 stats->rx_compl++;
961 stats->rx_bytes += rxcp->pkt_size;
962 stats->rx_pkts++;
963 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
964 stats->rx_mcast_pkts++;
965 if (rxcp->err)
966 stats->rx_compl_err++;
967 u64_stats_update_end(&stats->sync);
970 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
972 /* L4 checksum is not reliable for non TCP/UDP packets.
973 * Also ignore ipcksm for ipv6 pkts */
974 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
975 (rxcp->ip_csum || rxcp->ipv6);
978 static struct be_rx_page_info *
979 get_rx_page_info(struct be_adapter *adapter,
980 struct be_rx_obj *rxo,
981 u16 frag_idx)
983 struct be_rx_page_info *rx_page_info;
984 struct be_queue_info *rxq = &rxo->q;
986 rx_page_info = &rxo->page_info_tbl[frag_idx];
987 BUG_ON(!rx_page_info->page);
989 if (rx_page_info->last_page_user) {
990 dma_unmap_page(&adapter->pdev->dev,
991 dma_unmap_addr(rx_page_info, bus),
992 adapter->big_page_size, DMA_FROM_DEVICE);
993 rx_page_info->last_page_user = false;
996 atomic_dec(&rxq->used);
997 return rx_page_info;
1000 /* Throw away the data in the Rx completion */
1001 static void be_rx_compl_discard(struct be_adapter *adapter,
1002 struct be_rx_obj *rxo,
1003 struct be_rx_compl_info *rxcp)
1005 struct be_queue_info *rxq = &rxo->q;
1006 struct be_rx_page_info *page_info;
1007 u16 i, num_rcvd = rxcp->num_rcvd;
1009 for (i = 0; i < num_rcvd; i++) {
1010 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1011 put_page(page_info->page);
1012 memset(page_info, 0, sizeof(*page_info));
1013 index_inc(&rxcp->rxq_idx, rxq->len);
1018 * skb_fill_rx_data forms a complete skb for an ether frame
1019 * indicated by rxcp.
1021 static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
1022 struct sk_buff *skb, struct be_rx_compl_info *rxcp)
1024 struct be_queue_info *rxq = &rxo->q;
1025 struct be_rx_page_info *page_info;
1026 u16 i, j;
1027 u16 hdr_len, curr_frag_len, remaining;
1028 u8 *start;
1030 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1031 start = page_address(page_info->page) + page_info->page_offset;
1032 prefetch(start);
1034 /* Copy data in the first descriptor of this completion */
1035 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1037 /* Copy the header portion into skb_data */
1038 hdr_len = min(BE_HDR_LEN, curr_frag_len);
1039 memcpy(skb->data, start, hdr_len);
1040 skb->len = curr_frag_len;
1041 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1042 /* Complete packet has now been moved to data */
1043 put_page(page_info->page);
1044 skb->data_len = 0;
1045 skb->tail += curr_frag_len;
1046 } else {
1047 skb_shinfo(skb)->nr_frags = 1;
1048 skb_shinfo(skb)->frags[0].page = page_info->page;
1049 skb_shinfo(skb)->frags[0].page_offset =
1050 page_info->page_offset + hdr_len;
1051 skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
1052 skb->data_len = curr_frag_len - hdr_len;
1053 skb->tail += hdr_len;
1055 page_info->page = NULL;
1057 if (rxcp->pkt_size <= rx_frag_size) {
1058 BUG_ON(rxcp->num_rcvd != 1);
1059 return;
1062 /* More frags present for this completion */
1063 index_inc(&rxcp->rxq_idx, rxq->len);
1064 remaining = rxcp->pkt_size - curr_frag_len;
1065 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1066 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1067 curr_frag_len = min(remaining, rx_frag_size);
1069 /* Coalesce all frags from the same physical page in one slot */
1070 if (page_info->page_offset == 0) {
1071 /* Fresh page */
1072 j++;
1073 skb_shinfo(skb)->frags[j].page = page_info->page;
1074 skb_shinfo(skb)->frags[j].page_offset =
1075 page_info->page_offset;
1076 skb_shinfo(skb)->frags[j].size = 0;
1077 skb_shinfo(skb)->nr_frags++;
1078 } else {
1079 put_page(page_info->page);
1082 skb_shinfo(skb)->frags[j].size += curr_frag_len;
1083 skb->len += curr_frag_len;
1084 skb->data_len += curr_frag_len;
1086 remaining -= curr_frag_len;
1087 index_inc(&rxcp->rxq_idx, rxq->len);
1088 page_info->page = NULL;
1090 BUG_ON(j > MAX_SKB_FRAGS);
1093 /* Process the RX completion indicated by rxcp when GRO is disabled */
1094 static void be_rx_compl_process(struct be_adapter *adapter,
1095 struct be_rx_obj *rxo,
1096 struct be_rx_compl_info *rxcp)
1098 struct net_device *netdev = adapter->netdev;
1099 struct sk_buff *skb;
1101 skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
1102 if (unlikely(!skb)) {
1103 rx_stats(rxo)->rx_drops_no_skbs++;
1104 be_rx_compl_discard(adapter, rxo, rxcp);
1105 return;
1108 skb_fill_rx_data(adapter, rxo, skb, rxcp);
1110 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1111 skb->ip_summed = CHECKSUM_UNNECESSARY;
1112 else
1113 skb_checksum_none_assert(skb);
1115 skb->truesize = skb->len + sizeof(struct sk_buff);
1116 skb->protocol = eth_type_trans(skb, netdev);
1117 if (adapter->netdev->features & NETIF_F_RXHASH)
1118 skb->rxhash = rxcp->rss_hash;
1121 if (unlikely(rxcp->vlanf))
1122 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1124 netif_receive_skb(skb);
1127 /* Process the RX completion indicated by rxcp when GRO is enabled */
1128 static void be_rx_compl_process_gro(struct be_adapter *adapter,
1129 struct be_rx_obj *rxo,
1130 struct be_rx_compl_info *rxcp)
1132 struct be_rx_page_info *page_info;
1133 struct sk_buff *skb = NULL;
1134 struct be_queue_info *rxq = &rxo->q;
1135 struct be_eq_obj *eq_obj = &rxo->rx_eq;
1136 u16 remaining, curr_frag_len;
1137 u16 i, j;
1139 skb = napi_get_frags(&eq_obj->napi);
1140 if (!skb) {
1141 be_rx_compl_discard(adapter, rxo, rxcp);
1142 return;
1145 remaining = rxcp->pkt_size;
1146 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1147 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1149 curr_frag_len = min(remaining, rx_frag_size);
1151 /* Coalesce all frags from the same physical page in one slot */
1152 if (i == 0 || page_info->page_offset == 0) {
1153 /* First frag or Fresh page */
1154 j++;
1155 skb_shinfo(skb)->frags[j].page = page_info->page;
1156 skb_shinfo(skb)->frags[j].page_offset =
1157 page_info->page_offset;
1158 skb_shinfo(skb)->frags[j].size = 0;
1159 } else {
1160 put_page(page_info->page);
1162 skb_shinfo(skb)->frags[j].size += curr_frag_len;
1164 remaining -= curr_frag_len;
1165 index_inc(&rxcp->rxq_idx, rxq->len);
1166 memset(page_info, 0, sizeof(*page_info));
1168 BUG_ON(j > MAX_SKB_FRAGS);
1170 skb_shinfo(skb)->nr_frags = j + 1;
1171 skb->len = rxcp->pkt_size;
1172 skb->data_len = rxcp->pkt_size;
1173 skb->truesize += rxcp->pkt_size;
1174 skb->ip_summed = CHECKSUM_UNNECESSARY;
1175 if (adapter->netdev->features & NETIF_F_RXHASH)
1176 skb->rxhash = rxcp->rss_hash;
1178 if (unlikely(rxcp->vlanf))
1179 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1181 napi_gro_frags(&eq_obj->napi);
1184 static void be_parse_rx_compl_v1(struct be_adapter *adapter,
1185 struct be_eth_rx_compl *compl,
1186 struct be_rx_compl_info *rxcp)
1188 rxcp->pkt_size =
1189 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1190 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1191 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1192 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1193 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1194 rxcp->ip_csum =
1195 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1196 rxcp->l4_csum =
1197 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1198 rxcp->ipv6 =
1199 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1200 rxcp->rxq_idx =
1201 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1202 rxcp->num_rcvd =
1203 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1204 rxcp->pkt_type =
1205 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1206 rxcp->rss_hash =
1207 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, rxcp);
1208 if (rxcp->vlanf) {
1209 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1210 compl);
1211 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1212 compl);
1216 static void be_parse_rx_compl_v0(struct be_adapter *adapter,
1217 struct be_eth_rx_compl *compl,
1218 struct be_rx_compl_info *rxcp)
1220 rxcp->pkt_size =
1221 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1222 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1223 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1224 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1225 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1226 rxcp->ip_csum =
1227 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1228 rxcp->l4_csum =
1229 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1230 rxcp->ipv6 =
1231 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1232 rxcp->rxq_idx =
1233 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1234 rxcp->num_rcvd =
1235 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1236 rxcp->pkt_type =
1237 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1238 rxcp->rss_hash =
1239 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, rxcp);
1240 if (rxcp->vlanf) {
1241 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1242 compl);
1243 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1244 compl);
1248 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1250 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1251 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1252 struct be_adapter *adapter = rxo->adapter;
1254 /* For checking the valid bit it is Ok to use either definition as the
1255 * valid bit is at the same position in both v0 and v1 Rx compl */
1256 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1257 return NULL;
1259 rmb();
1260 be_dws_le_to_cpu(compl, sizeof(*compl));
1262 if (adapter->be3_native)
1263 be_parse_rx_compl_v1(adapter, compl, rxcp);
1264 else
1265 be_parse_rx_compl_v0(adapter, compl, rxcp);
1267 if (rxcp->vlanf) {
1268 /* vlanf could be wrongly set in some cards.
1269 * ignore if vtm is not set */
1270 if ((adapter->function_mode & 0x400) && !rxcp->vtm)
1271 rxcp->vlanf = 0;
1273 if (!lancer_chip(adapter))
1274 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1276 if (((adapter->pvid & VLAN_VID_MASK) ==
1277 (rxcp->vlan_tag & VLAN_VID_MASK)) &&
1278 !adapter->vlan_tag[rxcp->vlan_tag])
1279 rxcp->vlanf = 0;
1282 /* As the compl has been parsed, reset it; we wont touch it again */
1283 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1285 queue_tail_inc(&rxo->cq);
1286 return rxcp;
1289 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1291 u32 order = get_order(size);
1293 if (order > 0)
1294 gfp |= __GFP_COMP;
1295 return alloc_pages(gfp, order);
1299 * Allocate a page, split it to fragments of size rx_frag_size and post as
1300 * receive buffers to BE
1302 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1304 struct be_adapter *adapter = rxo->adapter;
1305 struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
1306 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1307 struct be_queue_info *rxq = &rxo->q;
1308 struct page *pagep = NULL;
1309 struct be_eth_rx_d *rxd;
1310 u64 page_dmaaddr = 0, frag_dmaaddr;
1311 u32 posted, page_offset = 0;
1313 page_info = &rxo->page_info_tbl[rxq->head];
1314 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1315 if (!pagep) {
1316 pagep = be_alloc_pages(adapter->big_page_size, gfp);
1317 if (unlikely(!pagep)) {
1318 rx_stats(rxo)->rx_post_fail++;
1319 break;
1321 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1322 0, adapter->big_page_size,
1323 DMA_FROM_DEVICE);
1324 page_info->page_offset = 0;
1325 } else {
1326 get_page(pagep);
1327 page_info->page_offset = page_offset + rx_frag_size;
1329 page_offset = page_info->page_offset;
1330 page_info->page = pagep;
1331 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1332 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1334 rxd = queue_head_node(rxq);
1335 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1336 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1338 /* Any space left in the current big page for another frag? */
1339 if ((page_offset + rx_frag_size + rx_frag_size) >
1340 adapter->big_page_size) {
1341 pagep = NULL;
1342 page_info->last_page_user = true;
1345 prev_page_info = page_info;
1346 queue_head_inc(rxq);
1347 page_info = &page_info_tbl[rxq->head];
1349 if (pagep)
1350 prev_page_info->last_page_user = true;
1352 if (posted) {
1353 atomic_add(posted, &rxq->used);
1354 be_rxq_notify(adapter, rxq->id, posted);
1355 } else if (atomic_read(&rxq->used) == 0) {
1356 /* Let be_worker replenish when memory is available */
1357 rxo->rx_post_starved = true;
1361 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1363 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1365 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1366 return NULL;
1368 rmb();
1369 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1371 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1373 queue_tail_inc(tx_cq);
1374 return txcp;
1377 static u16 be_tx_compl_process(struct be_adapter *adapter,
1378 struct be_tx_obj *txo, u16 last_index)
1380 struct be_queue_info *txq = &txo->q;
1381 struct be_eth_wrb *wrb;
1382 struct sk_buff **sent_skbs = txo->sent_skb_list;
1383 struct sk_buff *sent_skb;
1384 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1385 bool unmap_skb_hdr = true;
1387 sent_skb = sent_skbs[txq->tail];
1388 BUG_ON(!sent_skb);
1389 sent_skbs[txq->tail] = NULL;
1391 /* skip header wrb */
1392 queue_tail_inc(txq);
1394 do {
1395 cur_index = txq->tail;
1396 wrb = queue_tail_node(txq);
1397 unmap_tx_frag(&adapter->pdev->dev, wrb,
1398 (unmap_skb_hdr && skb_headlen(sent_skb)));
1399 unmap_skb_hdr = false;
1401 num_wrbs++;
1402 queue_tail_inc(txq);
1403 } while (cur_index != last_index);
1405 kfree_skb(sent_skb);
1406 return num_wrbs;
1409 static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1411 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1413 if (!eqe->evt)
1414 return NULL;
1416 rmb();
1417 eqe->evt = le32_to_cpu(eqe->evt);
1418 queue_tail_inc(&eq_obj->q);
1419 return eqe;
1422 static int event_handle(struct be_adapter *adapter,
1423 struct be_eq_obj *eq_obj,
1424 bool rearm)
1426 struct be_eq_entry *eqe;
1427 u16 num = 0;
1429 while ((eqe = event_get(eq_obj)) != NULL) {
1430 eqe->evt = 0;
1431 num++;
1434 /* Deal with any spurious interrupts that come
1435 * without events
1437 if (!num)
1438 rearm = true;
1440 be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
1441 if (num)
1442 napi_schedule(&eq_obj->napi);
1444 return num;
1447 /* Just read and notify events without processing them.
1448 * Used at the time of destroying event queues */
1449 static void be_eq_clean(struct be_adapter *adapter,
1450 struct be_eq_obj *eq_obj)
1452 struct be_eq_entry *eqe;
1453 u16 num = 0;
1455 while ((eqe = event_get(eq_obj)) != NULL) {
1456 eqe->evt = 0;
1457 num++;
1460 if (num)
1461 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1464 static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
1466 struct be_rx_page_info *page_info;
1467 struct be_queue_info *rxq = &rxo->q;
1468 struct be_queue_info *rx_cq = &rxo->cq;
1469 struct be_rx_compl_info *rxcp;
1470 u16 tail;
1472 /* First cleanup pending rx completions */
1473 while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1474 be_rx_compl_discard(adapter, rxo, rxcp);
1475 be_cq_notify(adapter, rx_cq->id, false, 1);
1478 /* Then free posted rx buffers that were not used */
1479 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1480 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1481 page_info = get_rx_page_info(adapter, rxo, tail);
1482 put_page(page_info->page);
1483 memset(page_info, 0, sizeof(*page_info));
1485 BUG_ON(atomic_read(&rxq->used));
1486 rxq->tail = rxq->head = 0;
1489 static void be_tx_compl_clean(struct be_adapter *adapter,
1490 struct be_tx_obj *txo)
1492 struct be_queue_info *tx_cq = &txo->cq;
1493 struct be_queue_info *txq = &txo->q;
1494 struct be_eth_tx_compl *txcp;
1495 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1496 struct sk_buff **sent_skbs = txo->sent_skb_list;
1497 struct sk_buff *sent_skb;
1498 bool dummy_wrb;
1500 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1501 do {
1502 while ((txcp = be_tx_compl_get(tx_cq))) {
1503 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1504 wrb_index, txcp);
1505 num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
1506 cmpl++;
1508 if (cmpl) {
1509 be_cq_notify(adapter, tx_cq->id, false, cmpl);
1510 atomic_sub(num_wrbs, &txq->used);
1511 cmpl = 0;
1512 num_wrbs = 0;
1515 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1516 break;
1518 mdelay(1);
1519 } while (true);
1521 if (atomic_read(&txq->used))
1522 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1523 atomic_read(&txq->used));
1525 /* free posted tx for which compls will never arrive */
1526 while (atomic_read(&txq->used)) {
1527 sent_skb = sent_skbs[txq->tail];
1528 end_idx = txq->tail;
1529 index_adv(&end_idx,
1530 wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1531 txq->len);
1532 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1533 atomic_sub(num_wrbs, &txq->used);
1537 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1539 struct be_queue_info *q;
1541 q = &adapter->mcc_obj.q;
1542 if (q->created)
1543 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1544 be_queue_free(adapter, q);
1546 q = &adapter->mcc_obj.cq;
1547 if (q->created)
1548 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1549 be_queue_free(adapter, q);
1552 /* Must be called only after TX qs are created as MCC shares TX EQ */
1553 static int be_mcc_queues_create(struct be_adapter *adapter)
1555 struct be_queue_info *q, *cq;
1557 /* Alloc MCC compl queue */
1558 cq = &adapter->mcc_obj.cq;
1559 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1560 sizeof(struct be_mcc_compl)))
1561 goto err;
1563 /* Ask BE to create MCC compl queue; share TX's eq */
1564 if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
1565 goto mcc_cq_free;
1567 /* Alloc MCC queue */
1568 q = &adapter->mcc_obj.q;
1569 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1570 goto mcc_cq_destroy;
1572 /* Ask BE to create MCC queue */
1573 if (be_cmd_mccq_create(adapter, q, cq))
1574 goto mcc_q_free;
1576 return 0;
1578 mcc_q_free:
1579 be_queue_free(adapter, q);
1580 mcc_cq_destroy:
1581 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1582 mcc_cq_free:
1583 be_queue_free(adapter, cq);
1584 err:
1585 return -1;
1588 static void be_tx_queues_destroy(struct be_adapter *adapter)
1590 struct be_queue_info *q;
1591 struct be_tx_obj *txo;
1592 u8 i;
1594 for_all_tx_queues(adapter, txo, i) {
1595 q = &txo->q;
1596 if (q->created)
1597 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1598 be_queue_free(adapter, q);
1600 q = &txo->cq;
1601 if (q->created)
1602 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1603 be_queue_free(adapter, q);
1606 /* Clear any residual events */
1607 be_eq_clean(adapter, &adapter->tx_eq);
1609 q = &adapter->tx_eq.q;
1610 if (q->created)
1611 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1612 be_queue_free(adapter, q);
1615 /* One TX event queue is shared by all TX compl qs */
1616 static int be_tx_queues_create(struct be_adapter *adapter)
1618 struct be_queue_info *eq, *q, *cq;
1619 struct be_tx_obj *txo;
1620 u8 i;
1622 adapter->tx_eq.max_eqd = 0;
1623 adapter->tx_eq.min_eqd = 0;
1624 adapter->tx_eq.cur_eqd = 96;
1625 adapter->tx_eq.enable_aic = false;
1627 eq = &adapter->tx_eq.q;
1628 if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1629 sizeof(struct be_eq_entry)))
1630 return -1;
1632 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1633 goto err;
1634 adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
1636 for_all_tx_queues(adapter, txo, i) {
1637 cq = &txo->cq;
1638 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1639 sizeof(struct be_eth_tx_compl)))
1640 goto err;
1642 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1643 goto err;
1645 q = &txo->q;
1646 if (be_queue_alloc(adapter, q, TX_Q_LEN,
1647 sizeof(struct be_eth_wrb)))
1648 goto err;
1650 if (be_cmd_txq_create(adapter, q, cq))
1651 goto err;
1653 return 0;
1655 err:
1656 be_tx_queues_destroy(adapter);
1657 return -1;
1660 static void be_rx_queues_destroy(struct be_adapter *adapter)
1662 struct be_queue_info *q;
1663 struct be_rx_obj *rxo;
1664 int i;
1666 for_all_rx_queues(adapter, rxo, i) {
1667 be_queue_free(adapter, &rxo->q);
1669 q = &rxo->cq;
1670 if (q->created)
1671 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1672 be_queue_free(adapter, q);
1674 q = &rxo->rx_eq.q;
1675 if (q->created)
1676 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1677 be_queue_free(adapter, q);
1681 static u32 be_num_rxqs_want(struct be_adapter *adapter)
1683 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1684 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1685 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1686 } else {
1687 dev_warn(&adapter->pdev->dev,
1688 "No support for multiple RX queues\n");
1689 return 1;
1693 static int be_rx_queues_create(struct be_adapter *adapter)
1695 struct be_queue_info *eq, *q, *cq;
1696 struct be_rx_obj *rxo;
1697 int rc, i;
1699 adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1700 msix_enabled(adapter) ?
1701 adapter->num_msix_vec - 1 : 1);
1702 if (adapter->num_rx_qs != MAX_RX_QS)
1703 dev_warn(&adapter->pdev->dev,
1704 "Can create only %d RX queues", adapter->num_rx_qs);
1706 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1707 for_all_rx_queues(adapter, rxo, i) {
1708 rxo->adapter = adapter;
1709 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1710 rxo->rx_eq.enable_aic = true;
1712 /* EQ */
1713 eq = &rxo->rx_eq.q;
1714 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1715 sizeof(struct be_eq_entry));
1716 if (rc)
1717 goto err;
1719 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1720 if (rc)
1721 goto err;
1723 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
1725 /* CQ */
1726 cq = &rxo->cq;
1727 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1728 sizeof(struct be_eth_rx_compl));
1729 if (rc)
1730 goto err;
1732 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1733 if (rc)
1734 goto err;
1736 /* Rx Q - will be created in be_open() */
1737 q = &rxo->q;
1738 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1739 sizeof(struct be_eth_rx_d));
1740 if (rc)
1741 goto err;
1745 return 0;
1746 err:
1747 be_rx_queues_destroy(adapter);
1748 return -1;
1751 static bool event_peek(struct be_eq_obj *eq_obj)
1753 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1754 if (!eqe->evt)
1755 return false;
1756 else
1757 return true;
1760 static irqreturn_t be_intx(int irq, void *dev)
1762 struct be_adapter *adapter = dev;
1763 struct be_rx_obj *rxo;
1764 int isr, i, tx = 0 , rx = 0;
1766 if (lancer_chip(adapter)) {
1767 if (event_peek(&adapter->tx_eq))
1768 tx = event_handle(adapter, &adapter->tx_eq, false);
1769 for_all_rx_queues(adapter, rxo, i) {
1770 if (event_peek(&rxo->rx_eq))
1771 rx |= event_handle(adapter, &rxo->rx_eq, true);
1774 if (!(tx || rx))
1775 return IRQ_NONE;
1777 } else {
1778 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1779 (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1780 if (!isr)
1781 return IRQ_NONE;
1783 if ((1 << adapter->tx_eq.eq_idx & isr))
1784 event_handle(adapter, &adapter->tx_eq, false);
1786 for_all_rx_queues(adapter, rxo, i) {
1787 if ((1 << rxo->rx_eq.eq_idx & isr))
1788 event_handle(adapter, &rxo->rx_eq, true);
1792 return IRQ_HANDLED;
1795 static irqreturn_t be_msix_rx(int irq, void *dev)
1797 struct be_rx_obj *rxo = dev;
1798 struct be_adapter *adapter = rxo->adapter;
1800 event_handle(adapter, &rxo->rx_eq, true);
1802 return IRQ_HANDLED;
1805 static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1807 struct be_adapter *adapter = dev;
1809 event_handle(adapter, &adapter->tx_eq, false);
1811 return IRQ_HANDLED;
1814 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1816 return (rxcp->tcpf && !rxcp->err) ? true : false;
1819 static int be_poll_rx(struct napi_struct *napi, int budget)
1821 struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1822 struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1823 struct be_adapter *adapter = rxo->adapter;
1824 struct be_queue_info *rx_cq = &rxo->cq;
1825 struct be_rx_compl_info *rxcp;
1826 u32 work_done;
1828 rx_stats(rxo)->rx_polls++;
1829 for (work_done = 0; work_done < budget; work_done++) {
1830 rxcp = be_rx_compl_get(rxo);
1831 if (!rxcp)
1832 break;
1834 /* Ignore flush completions */
1835 if (rxcp->num_rcvd && rxcp->pkt_size) {
1836 if (do_gro(rxcp))
1837 be_rx_compl_process_gro(adapter, rxo, rxcp);
1838 else
1839 be_rx_compl_process(adapter, rxo, rxcp);
1840 } else if (rxcp->pkt_size == 0) {
1841 be_rx_compl_discard(adapter, rxo, rxcp);
1844 be_rx_stats_update(rxo, rxcp);
1847 /* Refill the queue */
1848 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1849 be_post_rx_frags(rxo, GFP_ATOMIC);
1851 /* All consumed */
1852 if (work_done < budget) {
1853 napi_complete(napi);
1854 be_cq_notify(adapter, rx_cq->id, true, work_done);
1855 } else {
1856 /* More to be consumed; continue with interrupts disabled */
1857 be_cq_notify(adapter, rx_cq->id, false, work_done);
1859 return work_done;
1862 /* As TX and MCC share the same EQ check for both TX and MCC completions.
1863 * For TX/MCC we don't honour budget; consume everything
1865 static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1867 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1868 struct be_adapter *adapter =
1869 container_of(tx_eq, struct be_adapter, tx_eq);
1870 struct be_tx_obj *txo;
1871 struct be_eth_tx_compl *txcp;
1872 int tx_compl, mcc_compl, status = 0;
1873 u8 i;
1874 u16 num_wrbs;
1876 for_all_tx_queues(adapter, txo, i) {
1877 tx_compl = 0;
1878 num_wrbs = 0;
1879 while ((txcp = be_tx_compl_get(&txo->cq))) {
1880 num_wrbs += be_tx_compl_process(adapter, txo,
1881 AMAP_GET_BITS(struct amap_eth_tx_compl,
1882 wrb_index, txcp));
1883 tx_compl++;
1885 if (tx_compl) {
1886 be_cq_notify(adapter, txo->cq.id, true, tx_compl);
1888 atomic_sub(num_wrbs, &txo->q.used);
1890 /* As Tx wrbs have been freed up, wake up netdev queue
1891 * if it was stopped due to lack of tx wrbs. */
1892 if (__netif_subqueue_stopped(adapter->netdev, i) &&
1893 atomic_read(&txo->q.used) < txo->q.len / 2) {
1894 netif_wake_subqueue(adapter->netdev, i);
1897 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
1898 tx_stats(txo)->tx_compl += tx_compl;
1899 u64_stats_update_end(&tx_stats(txo)->sync_compl);
1903 mcc_compl = be_process_mcc(adapter, &status);
1905 if (mcc_compl) {
1906 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1907 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1910 napi_complete(napi);
1912 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
1913 adapter->drv_stats.tx_events++;
1914 return 1;
1917 void be_detect_dump_ue(struct be_adapter *adapter)
1919 u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1920 u32 i;
1922 pci_read_config_dword(adapter->pdev,
1923 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1924 pci_read_config_dword(adapter->pdev,
1925 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1926 pci_read_config_dword(adapter->pdev,
1927 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1928 pci_read_config_dword(adapter->pdev,
1929 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1931 ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
1932 ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
1934 if (ue_status_lo || ue_status_hi) {
1935 adapter->ue_detected = true;
1936 adapter->eeh_err = true;
1937 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1940 if (ue_status_lo) {
1941 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1942 if (ue_status_lo & 1)
1943 dev_err(&adapter->pdev->dev,
1944 "UE: %s bit set\n", ue_status_low_desc[i]);
1947 if (ue_status_hi) {
1948 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
1949 if (ue_status_hi & 1)
1950 dev_err(&adapter->pdev->dev,
1951 "UE: %s bit set\n", ue_status_hi_desc[i]);
1957 static void be_worker(struct work_struct *work)
1959 struct be_adapter *adapter =
1960 container_of(work, struct be_adapter, work.work);
1961 struct be_rx_obj *rxo;
1962 int i;
1964 if (!adapter->ue_detected && !lancer_chip(adapter))
1965 be_detect_dump_ue(adapter);
1967 /* when interrupts are not yet enabled, just reap any pending
1968 * mcc completions */
1969 if (!netif_running(adapter->netdev)) {
1970 int mcc_compl, status = 0;
1972 mcc_compl = be_process_mcc(adapter, &status);
1974 if (mcc_compl) {
1975 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1976 be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
1979 goto reschedule;
1982 if (!adapter->stats_cmd_sent) {
1983 if (lancer_chip(adapter))
1984 lancer_cmd_get_pport_stats(adapter,
1985 &adapter->stats_cmd);
1986 else
1987 be_cmd_get_stats(adapter, &adapter->stats_cmd);
1990 for_all_rx_queues(adapter, rxo, i) {
1991 be_rx_eqd_update(adapter, rxo);
1993 if (rxo->rx_post_starved) {
1994 rxo->rx_post_starved = false;
1995 be_post_rx_frags(rxo, GFP_KERNEL);
1999 reschedule:
2000 adapter->work_counter++;
2001 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2004 static void be_msix_disable(struct be_adapter *adapter)
2006 if (msix_enabled(adapter)) {
2007 pci_disable_msix(adapter->pdev);
2008 adapter->num_msix_vec = 0;
2012 static void be_msix_enable(struct be_adapter *adapter)
2014 #define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
2015 int i, status, num_vec;
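/* Ask for one vector per desired RX queue plus one shared by the TX/MCC event queue */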
2017 num_vec = be_num_rxqs_want(adapter) + 1;
2019 for (i = 0; i < num_vec; i++)
2020 adapter->msix_entries[i].entry = i;
2022 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2023 if (status == 0) {
2024 goto done;
2025 } else if (status >= BE_MIN_MSIX_VECTORS) {
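/* A positive return value is the number of vectors the device can support; retry with that many */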
2026 num_vec = status;
2027 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2028 num_vec) == 0)
2029 goto done;
2031 return;
2032 done:
2033 adapter->num_msix_vec = num_vec;
2034 return;
2037 static void be_sriov_enable(struct be_adapter *adapter)
2039 be_check_sriov_fn_type(adapter);
2040 #ifdef CONFIG_PCI_IOV
2041 if (be_physfn(adapter) && num_vfs) {
2042 int status, pos;
2043 u16 nvfs;
2045 pos = pci_find_ext_capability(adapter->pdev,
2046 PCI_EXT_CAP_ID_SRIOV);
2047 pci_read_config_word(adapter->pdev,
2048 pos + PCI_SRIOV_TOTAL_VF, &nvfs);
2050 if (num_vfs > nvfs) {
2051 dev_info(&adapter->pdev->dev,
2052 "Device supports %d VFs and not %d\n",
2053 nvfs, num_vfs);
2054 num_vfs = nvfs;
2057 status = pci_enable_sriov(adapter->pdev, num_vfs);
2058 adapter->sriov_enabled = status ? false : true;
2060 #endif
2063 static void be_sriov_disable(struct be_adapter *adapter)
2065 #ifdef CONFIG_PCI_IOV
2066 if (adapter->sriov_enabled) {
2067 pci_disable_sriov(adapter->pdev);
2068 adapter->sriov_enabled = false;
2070 #endif
2073 static inline int be_msix_vec_get(struct be_adapter *adapter,
2074 struct be_eq_obj *eq_obj)
2076 return adapter->msix_entries[eq_obj->eq_idx].vector;
2079 static int be_request_irq(struct be_adapter *adapter,
2080 struct be_eq_obj *eq_obj,
2081 void *handler, char *desc, void *context)
2083 struct net_device *netdev = adapter->netdev;
2084 int vec;
2086 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
2087 vec = be_msix_vec_get(adapter, eq_obj);
2088 return request_irq(vec, handler, 0, eq_obj->desc, context);
2091 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2092 void *context)
2094 int vec = be_msix_vec_get(adapter, eq_obj);
2095 free_irq(vec, context);
2098 static int be_msix_register(struct be_adapter *adapter)
2100 struct be_rx_obj *rxo;
2101 int status, i;
2102 char qname[10];
2104 status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2105 adapter);
2106 if (status)
2107 goto err;
2109 for_all_rx_queues(adapter, rxo, i) {
2110 sprintf(qname, "rxq%d", i);
2111 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2112 qname, rxo);
2113 if (status)
2114 goto err_msix;
2117 return 0;
2119 err_msix:
2120 be_free_irq(adapter, &adapter->tx_eq, adapter);
2122 for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2123 be_free_irq(adapter, &rxo->rx_eq, rxo);
2125 err:
2126 dev_warn(&adapter->pdev->dev,
2127 "MSIX Request IRQ failed - err %d\n", status);
2128 be_msix_disable(adapter);
2129 return status;
2132 static int be_irq_register(struct be_adapter *adapter)
2134 struct net_device *netdev = adapter->netdev;
2135 int status;
2137 if (msix_enabled(adapter)) {
2138 status = be_msix_register(adapter);
2139 if (status == 0)
2140 goto done;
2141 /* INTx is not supported for VF */
2142 if (!be_physfn(adapter))
2143 return status;
2146 /* INTx */
2147 netdev->irq = adapter->pdev->irq;
2148 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2149 adapter);
2150 if (status) {
2151 dev_err(&adapter->pdev->dev,
2152 "INTx request IRQ failed - err %d\n", status);
2153 return status;
2155 done:
2156 adapter->isr_registered = true;
2157 return 0;
2160 static void be_irq_unregister(struct be_adapter *adapter)
2162 struct net_device *netdev = adapter->netdev;
2163 struct be_rx_obj *rxo;
2164 int i;
2166 if (!adapter->isr_registered)
2167 return;
2169 /* INTx */
2170 if (!msix_enabled(adapter)) {
2171 free_irq(netdev->irq, adapter);
2172 goto done;
2175 /* MSIx */
2176 be_free_irq(adapter, &adapter->tx_eq, adapter);
2178 for_all_rx_queues(adapter, rxo, i)
2179 be_free_irq(adapter, &rxo->rx_eq, rxo);
2181 done:
2182 adapter->isr_registered = false;
2185 static void be_rx_queues_clear(struct be_adapter *adapter)
2187 struct be_queue_info *q;
2188 struct be_rx_obj *rxo;
2189 int i;
2191 for_all_rx_queues(adapter, rxo, i) {
2192 q = &rxo->q;
2193 if (q->created) {
2194 be_cmd_rxq_destroy(adapter, q);
2195 /* After the rxq is invalidated, wait for a grace time
2196 * of 1ms for all dma to end and the flush compl to
2197 * arrive
2198 */
2199 mdelay(1);
2200 be_rx_q_clean(adapter, rxo);
2203 /* Clear any residual events */
2204 q = &rxo->rx_eq.q;
2205 if (q->created)
2206 be_eq_clean(adapter, &rxo->rx_eq);
2210 static int be_close(struct net_device *netdev)
2212 struct be_adapter *adapter = netdev_priv(netdev);
2213 struct be_rx_obj *rxo;
2214 struct be_tx_obj *txo;
2215 struct be_eq_obj *tx_eq = &adapter->tx_eq;
2216 int vec, i;
2218 be_async_mcc_disable(adapter);
2220 netif_carrier_off(netdev);
2221 adapter->link_up = false;
2223 if (!lancer_chip(adapter))
2224 be_intr_set(adapter, false);
2226 for_all_rx_queues(adapter, rxo, i)
2227 napi_disable(&rxo->rx_eq.napi);
2229 napi_disable(&tx_eq->napi);
2231 if (lancer_chip(adapter)) {
2232 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2233 for_all_rx_queues(adapter, rxo, i)
2234 be_cq_notify(adapter, rxo->cq.id, false, 0);
2235 for_all_tx_queues(adapter, txo, i)
2236 be_cq_notify(adapter, txo->cq.id, false, 0);
2239 if (msix_enabled(adapter)) {
2240 vec = be_msix_vec_get(adapter, tx_eq);
2241 synchronize_irq(vec);
2243 for_all_rx_queues(adapter, rxo, i) {
2244 vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2245 synchronize_irq(vec);
2247 } else {
2248 synchronize_irq(netdev->irq);
2250 be_irq_unregister(adapter);
2252 /* Wait for all pending tx completions to arrive so that
2253 * all tx skbs are freed.
2254 */
2255 for_all_tx_queues(adapter, txo, i)
2256 be_tx_compl_clean(adapter, txo);
2258 be_rx_queues_clear(adapter);
2259 return 0;
2262 static int be_rx_queues_setup(struct be_adapter *adapter)
2264 struct be_rx_obj *rxo;
2265 int rc, i;
2266 u8 rsstable[MAX_RSS_QS];
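/* Create the RX rings; RSS is enabled on every ring except the first (default) one */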
2268 for_all_rx_queues(adapter, rxo, i) {
2269 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2270 rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
2271 adapter->if_handle,
2272 (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
2273 if (rc)
2274 return rc;
2277 if (be_multi_rxq(adapter)) {
2278 for_all_rss_queues(adapter, rxo, i)
2279 rsstable[i] = rxo->rss_id;
2281 rc = be_cmd_rss_config(adapter, rsstable,
2282 adapter->num_rx_qs - 1);
2283 if (rc)
2284 return rc;
2287 /* First time posting */
2288 for_all_rx_queues(adapter, rxo, i) {
2289 be_post_rx_frags(rxo, GFP_KERNEL);
2290 napi_enable(&rxo->rx_eq.napi);
2292 return 0;
2295 static int be_open(struct net_device *netdev)
2297 struct be_adapter *adapter = netdev_priv(netdev);
2298 struct be_eq_obj *tx_eq = &adapter->tx_eq;
2299 struct be_rx_obj *rxo;
2300 bool link_up;
2301 int status, i;
2302 u8 mac_speed;
2303 u16 link_speed;
2305 status = be_rx_queues_setup(adapter);
2306 if (status)
2307 goto err;
2309 napi_enable(&tx_eq->napi);
2311 be_irq_register(adapter);
2313 if (!lancer_chip(adapter))
2314 be_intr_set(adapter, true);
2316 /* The evt queues are created in unarmed state; arm them */
2317 for_all_rx_queues(adapter, rxo, i) {
2318 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2319 be_cq_notify(adapter, rxo->cq.id, true, 0);
2321 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2323 /* Now that interrupts are on we can process async mcc */
2324 be_async_mcc_enable(adapter);
2326 status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
2327 &link_speed, 0);
2328 if (status)
2329 goto err;
2330 be_link_status_update(adapter, link_up);
2332 if (be_physfn(adapter)) {
2333 status = be_vid_config(adapter, false, 0);
2334 if (status)
2335 goto err;
2337 status = be_cmd_set_flow_control(adapter,
2338 adapter->tx_fc, adapter->rx_fc);
2339 if (status)
2340 goto err;
2343 return 0;
2344 err:
2345 be_close(adapter->netdev);
2346 return -EIO;
2349 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2351 struct be_dma_mem cmd;
2352 int status = 0;
2353 u8 mac[ETH_ALEN];
2355 memset(mac, 0, ETH_ALEN);
2357 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2358 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2359 GFP_KERNEL);
2360 if (cmd.va == NULL)
2361 return -1;
2362 memset(cmd.va, 0, cmd.size);
2364 if (enable) {
2365 status = pci_write_config_dword(adapter->pdev,
2366 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2367 if (status) {
2368 dev_err(&adapter->pdev->dev,
2369 "Could not enable Wake-on-lan\n");
2370 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2371 cmd.dma);
2372 return status;
2374 status = be_cmd_enable_magic_wol(adapter,
2375 adapter->netdev->dev_addr, &cmd);
2376 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2377 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2378 } else {
2379 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2380 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2381 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2384 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2385 return status;
2388 /*
2389 * Generate a seed MAC address from the PF MAC address using jhash.
2390 * MAC addresses for VFs are assigned incrementally, starting from the seed.
2391 * These addresses are programmed in the ASIC by the PF and the VF driver
2392 * queries for the MAC address during its probe.
2393 */
2394 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2396 u32 vf = 0;
2397 int status = 0;
2398 u8 mac[ETH_ALEN];
2400 be_vf_eth_addr_generate(adapter, mac);
2402 for (vf = 0; vf < num_vfs; vf++) {
2403 status = be_cmd_pmac_add(adapter, mac,
2404 adapter->vf_cfg[vf].vf_if_handle,
2405 &adapter->vf_cfg[vf].vf_pmac_id,
2406 vf + 1);
2407 if (status)
2408 dev_err(&adapter->pdev->dev,
2409 "Mac address add failed for VF %d\n", vf);
2410 else
2411 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2413 mac[5] += 1;
2415 return status;
2418 static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2420 u32 vf;
2422 for (vf = 0; vf < num_vfs; vf++) {
2423 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2424 be_cmd_pmac_del(adapter,
2425 adapter->vf_cfg[vf].vf_if_handle,
2426 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
2430 static int be_setup(struct be_adapter *adapter)
2432 struct net_device *netdev = adapter->netdev;
2433 u32 cap_flags, en_flags, vf = 0;
2434 int status;
2435 u8 mac[ETH_ALEN];
2437 be_cmd_req_native_mode(adapter);
2439 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2440 BE_IF_FLAGS_BROADCAST |
2441 BE_IF_FLAGS_MULTICAST;
2443 if (be_physfn(adapter)) {
2444 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2445 BE_IF_FLAGS_PROMISCUOUS |
2446 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2447 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
2449 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2450 cap_flags |= BE_IF_FLAGS_RSS;
2451 en_flags |= BE_IF_FLAGS_RSS;
2455 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2456 netdev->dev_addr, false/* pmac_invalid */,
2457 &adapter->if_handle, &adapter->pmac_id, 0);
2458 if (status != 0)
2459 goto do_none;
2461 if (be_physfn(adapter)) {
2462 if (adapter->sriov_enabled) {
2463 while (vf < num_vfs) {
2464 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2465 BE_IF_FLAGS_BROADCAST;
2466 status = be_cmd_if_create(adapter, cap_flags,
2467 en_flags, mac, true,
2468 &adapter->vf_cfg[vf].vf_if_handle,
2469 NULL, vf+1);
2470 if (status) {
2471 dev_err(&adapter->pdev->dev,
2472 "Interface Create failed for VF %d\n",
2473 vf);
2474 goto if_destroy;
2476 adapter->vf_cfg[vf].vf_pmac_id =
2477 BE_INVALID_PMAC_ID;
2478 vf++;
2481 } else {
2482 status = be_cmd_mac_addr_query(adapter, mac,
2483 MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2484 if (!status) {
2485 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2486 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2490 status = be_tx_queues_create(adapter);
2491 if (status != 0)
2492 goto if_destroy;
2494 status = be_rx_queues_create(adapter);
2495 if (status != 0)
2496 goto tx_qs_destroy;
2498 /* Allow all priorities by default. A GRP5 evt may modify this */
2499 adapter->vlan_prio_bmap = 0xff;
2501 status = be_mcc_queues_create(adapter);
2502 if (status != 0)
2503 goto rx_qs_destroy;
2505 adapter->link_speed = -1;
2507 return 0;
2509 rx_qs_destroy:
2510 be_rx_queues_destroy(adapter);
2511 tx_qs_destroy:
2512 be_tx_queues_destroy(adapter);
2513 if_destroy:
2514 if (be_physfn(adapter) && adapter->sriov_enabled)
2515 for (vf = 0; vf < num_vfs; vf++)
2516 if (adapter->vf_cfg[vf].vf_if_handle)
2517 be_cmd_if_destroy(adapter,
2518 adapter->vf_cfg[vf].vf_if_handle,
2519 vf + 1);
2520 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2521 do_none:
2522 return status;
2525 static int be_clear(struct be_adapter *adapter)
2527 int vf;
2529 if (be_physfn(adapter) && adapter->sriov_enabled)
2530 be_vf_eth_addr_rem(adapter);
2532 be_mcc_queues_destroy(adapter);
2533 be_rx_queues_destroy(adapter);
2534 be_tx_queues_destroy(adapter);
2535 adapter->eq_next_idx = 0;
2537 if (be_physfn(adapter) && adapter->sriov_enabled)
2538 for (vf = 0; vf < num_vfs; vf++)
2539 if (adapter->vf_cfg[vf].vf_if_handle)
2540 be_cmd_if_destroy(adapter,
2541 adapter->vf_cfg[vf].vf_if_handle,
2542 vf + 1);
2544 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2546 adapter->be3_native = 0;
2548 /* tell fw we're done with firing cmds */
2549 be_cmd_fw_clean(adapter);
2550 return 0;
2554 #define FW_FILE_HDR_SIGN "ServerEngines Corp. "
2555 static bool be_flash_redboot(struct be_adapter *adapter,
2556 const u8 *p, u32 img_start, int image_size,
2557 int hdr_size)
2559 u32 crc_offset;
2560 u8 flashed_crc[4];
2561 int status;
2563 crc_offset = hdr_size + img_start + image_size - 4;
2565 p += crc_offset;
2567 status = be_cmd_get_flash_crc(adapter, flashed_crc,
2568 (image_size - 4));
2569 if (status) {
2570 dev_err(&adapter->pdev->dev,
2571 "could not get crc from flash, not flashing redboot\n");
2572 return false;
2575 /* update redboot only if crc does not match */
2576 if (!memcmp(flashed_crc, p, 4))
2577 return false;
2578 else
2579 return true;
2582 static int be_flash_data(struct be_adapter *adapter,
2583 const struct firmware *fw,
2584 struct be_dma_mem *flash_cmd, int num_of_images)
2587 int status = 0, i, filehdr_size = 0;
2588 u32 total_bytes = 0, flash_op;
2589 int num_bytes;
2590 const u8 *p = fw->data;
2591 struct be_cmd_write_flashrom *req = flash_cmd->va;
2592 const struct flash_comp *pflashcomp;
2593 int num_comp;
2595 static const struct flash_comp gen3_flash_types[9] = {
2596 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2597 FLASH_IMAGE_MAX_SIZE_g3},
2598 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2599 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2600 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2601 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2602 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2603 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2604 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2605 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2606 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2607 FLASH_IMAGE_MAX_SIZE_g3},
2608 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2609 FLASH_IMAGE_MAX_SIZE_g3},
2610 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2611 FLASH_IMAGE_MAX_SIZE_g3},
2612 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2613 FLASH_NCSI_IMAGE_MAX_SIZE_g3}
2615 static const struct flash_comp gen2_flash_types[8] = {
2616 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2617 FLASH_IMAGE_MAX_SIZE_g2},
2618 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2619 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2620 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2621 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2622 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2623 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2624 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2625 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2626 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2627 FLASH_IMAGE_MAX_SIZE_g2},
2628 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2629 FLASH_IMAGE_MAX_SIZE_g2},
2630 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2631 FLASH_IMAGE_MAX_SIZE_g2}
2634 if (adapter->generation == BE_GEN3) {
2635 pflashcomp = gen3_flash_types;
2636 filehdr_size = sizeof(struct flash_file_hdr_g3);
2637 num_comp = ARRAY_SIZE(gen3_flash_types);
2638 } else {
2639 pflashcomp = gen2_flash_types;
2640 filehdr_size = sizeof(struct flash_file_hdr_g2);
2641 num_comp = ARRAY_SIZE(gen2_flash_types);
2643 for (i = 0; i < num_comp; i++) {
2644 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2645 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2646 continue;
2647 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2648 (!be_flash_redboot(adapter, fw->data,
2649 pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2650 (num_of_images * sizeof(struct image_hdr)))))
2651 continue;
2652 p = fw->data;
2653 p += filehdr_size + pflashcomp[i].offset
2654 + (num_of_images * sizeof(struct image_hdr));
2655 if (p + pflashcomp[i].size > fw->data + fw->size)
2656 return -1;
2657 total_bytes = pflashcomp[i].size;
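/* Write the component in 32KB chunks: SAVE for intermediate chunks, FLASH for the final one */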
2658 while (total_bytes) {
2659 if (total_bytes > 32*1024)
2660 num_bytes = 32*1024;
2661 else
2662 num_bytes = total_bytes;
2663 total_bytes -= num_bytes;
2665 if (!total_bytes)
2666 flash_op = FLASHROM_OPER_FLASH;
2667 else
2668 flash_op = FLASHROM_OPER_SAVE;
2669 memcpy(req->params.data_buf, p, num_bytes);
2670 p += num_bytes;
2671 status = be_cmd_write_flashrom(adapter, flash_cmd,
2672 pflashcomp[i].optype, flash_op, num_bytes);
2673 if (status) {
2674 dev_err(&adapter->pdev->dev,
2675 "cmd to write to flash rom failed.\n");
2676 return -1;
2680 return 0;
2683 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2685 if (fhdr == NULL)
2686 return 0;
2687 if (fhdr->build[0] == '3')
2688 return BE_GEN3;
2689 else if (fhdr->build[0] == '2')
2690 return BE_GEN2;
2691 else
2692 return 0;
2695 static int lancer_fw_download(struct be_adapter *adapter,
2696 const struct firmware *fw)
2698 #define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
2699 #define LANCER_FW_DOWNLOAD_LOCATION "/prg"
2700 struct be_dma_mem flash_cmd;
2701 const u8 *data_ptr = NULL;
2702 u8 *dest_image_ptr = NULL;
2703 size_t image_size = 0;
2704 u32 chunk_size = 0;
2705 u32 data_written = 0;
2706 u32 offset = 0;
2707 int status = 0;
2708 u8 add_status = 0;
2710 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
2711 dev_err(&adapter->pdev->dev,
2712 "FW Image not properly aligned. "
2713 "Length must be 4 byte aligned.\n");
2714 status = -EINVAL;
2715 goto lancer_fw_exit;
2718 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2719 + LANCER_FW_DOWNLOAD_CHUNK;
2720 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2721 &flash_cmd.dma, GFP_KERNEL);
2722 if (!flash_cmd.va) {
2723 status = -ENOMEM;
2724 dev_err(&adapter->pdev->dev,
2725 "Memory allocation failure while flashing\n");
2726 goto lancer_fw_exit;
2729 dest_image_ptr = flash_cmd.va +
2730 sizeof(struct lancer_cmd_req_write_object);
2731 image_size = fw->size;
2732 data_ptr = fw->data;
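/* Stream the image to the adapter in 32KB chunks; the zero-length write below commits it */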
2734 while (image_size) {
2735 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
2737 /* Copy the image chunk content. */
2738 memcpy(dest_image_ptr, data_ptr, chunk_size);
2740 status = lancer_cmd_write_object(adapter, &flash_cmd,
2741 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
2742 &data_written, &add_status);
2744 if (status)
2745 break;
2747 offset += data_written;
2748 data_ptr += data_written;
2749 image_size -= data_written;
2752 if (!status) {
2753 /* Commit the FW written */
2754 status = lancer_cmd_write_object(adapter, &flash_cmd,
2755 0, offset, LANCER_FW_DOWNLOAD_LOCATION,
2756 &data_written, &add_status);
2759 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2760 flash_cmd.dma);
2761 if (status) {
2762 dev_err(&adapter->pdev->dev,
2763 "Firmware load error. "
2764 "Status code: 0x%x Additional Status: 0x%x\n",
2765 status, add_status);
2766 goto lancer_fw_exit;
2769 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2770 lancer_fw_exit:
2771 return status;
2774 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
2776 struct flash_file_hdr_g2 *fhdr;
2777 struct flash_file_hdr_g3 *fhdr3;
2778 struct image_hdr *img_hdr_ptr = NULL;
2779 struct be_dma_mem flash_cmd;
2780 const u8 *p;
2781 int status = 0, i = 0, num_imgs = 0;
2783 p = fw->data;
2784 fhdr = (struct flash_file_hdr_g2 *) p;
2786 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2787 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2788 &flash_cmd.dma, GFP_KERNEL);
2789 if (!flash_cmd.va) {
2790 status = -ENOMEM;
2791 dev_err(&adapter->pdev->dev,
2792 "Memory allocation failure while flashing\n");
2793 goto be_fw_exit;
2796 if ((adapter->generation == BE_GEN3) &&
2797 (get_ufigen_type(fhdr) == BE_GEN3)) {
2798 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2799 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2800 for (i = 0; i < num_imgs; i++) {
2801 img_hdr_ptr = (struct image_hdr *) (fw->data +
2802 (sizeof(struct flash_file_hdr_g3) +
2803 i * sizeof(struct image_hdr)));
2804 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2805 status = be_flash_data(adapter, fw, &flash_cmd,
2806 num_imgs);
2808 } else if ((adapter->generation == BE_GEN2) &&
2809 (get_ufigen_type(fhdr) == BE_GEN2)) {
2810 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2811 } else {
2812 dev_err(&adapter->pdev->dev,
2813 "UFI and Interface are not compatible for flashing\n");
2814 status = -1;
2817 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2818 flash_cmd.dma);
2819 if (status) {
2820 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2821 goto be_fw_exit;
2824 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2826 be_fw_exit:
2827 return status;
2830 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
2832 const struct firmware *fw;
2833 int status;
2835 if (!netif_running(adapter->netdev)) {
2836 dev_err(&adapter->pdev->dev,
2837 "Firmware load not allowed (interface is down)\n");
2838 return -1;
2841 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2842 if (status)
2843 goto fw_exit;
2845 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2847 if (lancer_chip(adapter))
2848 status = lancer_fw_download(adapter, fw);
2849 else
2850 status = be_fw_download(adapter, fw);
2852 fw_exit:
2853 release_firmware(fw);
2854 return status;
2857 static struct net_device_ops be_netdev_ops = {
2858 .ndo_open = be_open,
2859 .ndo_stop = be_close,
2860 .ndo_start_xmit = be_xmit,
2861 .ndo_set_rx_mode = be_set_multicast_list,
2862 .ndo_set_mac_address = be_mac_addr_set,
2863 .ndo_change_mtu = be_change_mtu,
2864 .ndo_get_stats64 = be_get_stats64,
2865 .ndo_validate_addr = eth_validate_addr,
2866 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
2867 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
2868 .ndo_set_vf_mac = be_set_vf_mac,
2869 .ndo_set_vf_vlan = be_set_vf_vlan,
2870 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
2871 .ndo_get_vf_config = be_get_vf_config
2874 static void be_netdev_init(struct net_device *netdev)
2876 struct be_adapter *adapter = netdev_priv(netdev);
2877 struct be_rx_obj *rxo;
2878 int i;
2880 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
2881 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
2882 NETIF_F_HW_VLAN_TX;
2883 if (be_multi_rxq(adapter))
2884 netdev->hw_features |= NETIF_F_RXHASH;
2886 netdev->features |= netdev->hw_features |
2887 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
2889 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
2890 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2892 netdev->flags |= IFF_MULTICAST;
2894 /* Default settings for Rx and Tx flow control */
2895 adapter->rx_fc = true;
2896 adapter->tx_fc = true;
2898 netif_set_gso_max_size(netdev, 65535);
2900 BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2902 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2904 for_all_rx_queues(adapter, rxo, i)
2905 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2906 BE_NAPI_WEIGHT);
2908 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
2909 BE_NAPI_WEIGHT);
2912 static void be_unmap_pci_bars(struct be_adapter *adapter)
2914 if (adapter->csr)
2915 iounmap(adapter->csr);
2916 if (adapter->db)
2917 iounmap(adapter->db);
2918 if (adapter->pcicfg && be_physfn(adapter))
2919 iounmap(adapter->pcicfg);
2922 static int be_map_pci_bars(struct be_adapter *adapter)
2924 u8 __iomem *addr;
2925 int pcicfg_reg, db_reg;
2927 if (lancer_chip(adapter)) {
2928 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
2929 pci_resource_len(adapter->pdev, 0));
2930 if (addr == NULL)
2931 return -ENOMEM;
2932 adapter->db = addr;
2933 return 0;
2936 if (be_physfn(adapter)) {
2937 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2938 pci_resource_len(adapter->pdev, 2));
2939 if (addr == NULL)
2940 return -ENOMEM;
2941 adapter->csr = addr;
2944 if (adapter->generation == BE_GEN2) {
2945 pcicfg_reg = 1;
2946 db_reg = 4;
2947 } else {
2948 pcicfg_reg = 0;
2949 if (be_physfn(adapter))
2950 db_reg = 4;
2951 else
2952 db_reg = 0;
2954 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
2955 pci_resource_len(adapter->pdev, db_reg));
2956 if (addr == NULL)
2957 goto pci_map_err;
2958 adapter->db = addr;
2960 if (be_physfn(adapter)) {
2961 addr = ioremap_nocache(
2962 pci_resource_start(adapter->pdev, pcicfg_reg),
2963 pci_resource_len(adapter->pdev, pcicfg_reg));
2964 if (addr == NULL)
2965 goto pci_map_err;
2966 adapter->pcicfg = addr;
2967 } else
2968 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
2970 return 0;
2971 pci_map_err:
2972 be_unmap_pci_bars(adapter);
2973 return -ENOMEM;
2977 static void be_ctrl_cleanup(struct be_adapter *adapter)
2979 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
2981 be_unmap_pci_bars(adapter);
2983 if (mem->va)
2984 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2985 mem->dma);
2987 mem = &adapter->mc_cmd_mem;
2988 if (mem->va)
2989 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2990 mem->dma);
2993 static int be_ctrl_init(struct be_adapter *adapter)
2995 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
2996 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
2997 struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
2998 int status;
3000 status = be_map_pci_bars(adapter);
3001 if (status)
3002 goto done;
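/* The mailbox must be 16-byte aligned; over-allocate by 16 bytes and align va/dma below */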
3004 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3005 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3006 mbox_mem_alloc->size,
3007 &mbox_mem_alloc->dma,
3008 GFP_KERNEL);
3009 if (!mbox_mem_alloc->va) {
3010 status = -ENOMEM;
3011 goto unmap_pci_bars;
3014 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3015 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3016 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3017 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3019 mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
3020 mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
3021 mc_cmd_mem->size, &mc_cmd_mem->dma,
3022 GFP_KERNEL);
3023 if (mc_cmd_mem->va == NULL) {
3024 status = -ENOMEM;
3025 goto free_mbox;
3027 memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
3029 mutex_init(&adapter->mbox_lock);
3030 spin_lock_init(&adapter->mcc_lock);
3031 spin_lock_init(&adapter->mcc_cq_lock);
3033 init_completion(&adapter->flash_compl);
3034 pci_save_state(adapter->pdev);
3035 return 0;
3037 free_mbox:
3038 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3039 mbox_mem_alloc->va, mbox_mem_alloc->dma);
3041 unmap_pci_bars:
3042 be_unmap_pci_bars(adapter);
3044 done:
3045 return status;
3048 static void be_stats_cleanup(struct be_adapter *adapter)
3050 struct be_dma_mem *cmd = &adapter->stats_cmd;
3052 if (cmd->va)
3053 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3054 cmd->va, cmd->dma);
3057 static int be_stats_init(struct be_adapter *adapter)
3059 struct be_dma_mem *cmd = &adapter->stats_cmd;
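/* Stats command size depends on the chip: v0 for BE2, pport stats for Lancer, v1 otherwise */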
3061 if (adapter->generation == BE_GEN2) {
3062 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3063 } else {
3064 if (lancer_chip(adapter))
3065 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3066 else
3067 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3069 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3070 GFP_KERNEL);
3071 if (cmd->va == NULL)
3072 return -1;
3073 memset(cmd->va, 0, cmd->size);
3074 return 0;
3077 static void __devexit be_remove(struct pci_dev *pdev)
3079 struct be_adapter *adapter = pci_get_drvdata(pdev);
3081 if (!adapter)
3082 return;
3084 cancel_delayed_work_sync(&adapter->work);
3086 unregister_netdev(adapter->netdev);
3088 be_clear(adapter);
3090 be_stats_cleanup(adapter);
3092 be_ctrl_cleanup(adapter);
3094 kfree(adapter->vf_cfg);
3095 be_sriov_disable(adapter);
3097 be_msix_disable(adapter);
3099 pci_set_drvdata(pdev, NULL);
3100 pci_release_regions(pdev);
3101 pci_disable_device(pdev);
3103 free_netdev(adapter->netdev);
3106 static int be_get_config(struct be_adapter *adapter)
3108 int status;
3109 u8 mac[ETH_ALEN];
3111 status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
3112 if (status)
3113 return status;
3115 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3116 &adapter->function_mode, &adapter->function_caps);
3117 if (status)
3118 return status;
3120 memset(mac, 0, ETH_ALEN);
3122 /* A default permanent address is given to each VF for Lancer */
3123 if (be_physfn(adapter) || lancer_chip(adapter)) {
3124 status = be_cmd_mac_addr_query(adapter, mac,
3125 MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
3127 if (status)
3128 return status;
3130 if (!is_valid_ether_addr(mac))
3131 return -EADDRNOTAVAIL;
3133 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3134 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3137 if (adapter->function_mode & 0x400)
3138 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3139 else
3140 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3142 status = be_cmd_get_cntl_attributes(adapter);
3143 if (status)
3144 return status;
3146 if ((num_vfs && adapter->sriov_enabled) ||
3147 (adapter->function_mode & 0x400) ||
3148 lancer_chip(adapter) || !be_physfn(adapter)) {
3149 adapter->num_tx_qs = 1;
3150 netif_set_real_num_tx_queues(adapter->netdev,
3151 adapter->num_tx_qs);
3152 } else {
3153 adapter->num_tx_qs = MAX_TX_QS;
3156 return 0;
3159 static int be_dev_family_check(struct be_adapter *adapter)
3161 struct pci_dev *pdev = adapter->pdev;
3162 u32 sli_intf = 0, if_type;
3164 switch (pdev->device) {
3165 case BE_DEVICE_ID1:
3166 case OC_DEVICE_ID1:
3167 adapter->generation = BE_GEN2;
3168 break;
3169 case BE_DEVICE_ID2:
3170 case OC_DEVICE_ID2:
3171 adapter->generation = BE_GEN3;
3172 break;
3173 case OC_DEVICE_ID3:
3174 case OC_DEVICE_ID4:
3175 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3176 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3177 SLI_INTF_IF_TYPE_SHIFT;
3179 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3180 if_type != 0x02) {
3181 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3182 return -EINVAL;
3184 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3185 SLI_INTF_FAMILY_SHIFT);
3186 adapter->generation = BE_GEN3;
3187 break;
3188 default:
3189 adapter->generation = 0;
3191 return 0;
3194 static int lancer_wait_ready(struct be_adapter *adapter)
3196 #define SLIPORT_READY_TIMEOUT 500
3197 u32 sliport_status;
3198 int status = 0, i;
3200 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3201 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3202 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3203 break;
3205 msleep(20);
3208 if (i == SLIPORT_READY_TIMEOUT)
3209 status = -1;
3211 return status;
3214 static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3216 int status;
3217 u32 sliport_status, err, reset_needed;
3218 status = lancer_wait_ready(adapter);
3219 if (!status) {
3220 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3221 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3222 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3223 if (err && reset_needed) {
3224 iowrite32(SLI_PORT_CONTROL_IP_MASK,
3225 adapter->db + SLIPORT_CONTROL_OFFSET);
3227 /* check adapter has corrected the error */
3228 status = lancer_wait_ready(adapter);
3229 sliport_status = ioread32(adapter->db +
3230 SLIPORT_STATUS_OFFSET);
3231 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3232 SLIPORT_STATUS_RN_MASK);
3233 if (status || sliport_status)
3234 status = -1;
3235 } else if (err || reset_needed) {
3236 status = -1;
3239 return status;
3242 static int __devinit be_probe(struct pci_dev *pdev,
3243 const struct pci_device_id *pdev_id)
3245 int status = 0;
3246 struct be_adapter *adapter;
3247 struct net_device *netdev;
3249 status = pci_enable_device(pdev);
3250 if (status)
3251 goto do_none;
3253 status = pci_request_regions(pdev, DRV_NAME);
3254 if (status)
3255 goto disable_dev;
3256 pci_set_master(pdev);
3258 netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
3259 if (netdev == NULL) {
3260 status = -ENOMEM;
3261 goto rel_reg;
3263 adapter = netdev_priv(netdev);
3264 adapter->pdev = pdev;
3265 pci_set_drvdata(pdev, adapter);
3267 status = be_dev_family_check(adapter);
3268 if (status)
3269 goto free_netdev;
3271 adapter->netdev = netdev;
3272 SET_NETDEV_DEV(netdev, &pdev->dev);
3274 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
3275 if (!status) {
3276 netdev->features |= NETIF_F_HIGHDMA;
3277 } else {
3278 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3279 if (status) {
3280 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3281 goto free_netdev;
3285 be_sriov_enable(adapter);
3286 if (adapter->sriov_enabled) {
3287 adapter->vf_cfg = kcalloc(num_vfs,
3288 sizeof(struct be_vf_cfg), GFP_KERNEL);
3290 if (!adapter->vf_cfg)
3291 goto free_netdev;
3294 status = be_ctrl_init(adapter);
3295 if (status)
3296 goto free_vf_cfg;
3298 if (lancer_chip(adapter)) {
3299 status = lancer_test_and_set_rdy_state(adapter);
3300 if (status) {
3301 dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
3302 goto ctrl_clean;
3306 /* sync up with fw's ready state */
3307 if (be_physfn(adapter)) {
3308 status = be_cmd_POST(adapter);
3309 if (status)
3310 goto ctrl_clean;
3313 /* tell fw we're ready to fire cmds */
3314 status = be_cmd_fw_init(adapter);
3315 if (status)
3316 goto ctrl_clean;
3318 status = be_cmd_reset_function(adapter);
3319 if (status)
3320 goto ctrl_clean;
3322 status = be_stats_init(adapter);
3323 if (status)
3324 goto ctrl_clean;
3326 status = be_get_config(adapter);
3327 if (status)
3328 goto stats_clean;
3330 /* The INTR bit may be set in the card when probed by a kdump kernel
3331 * after a crash.
3332 */
3333 if (!lancer_chip(adapter))
3334 be_intr_set(adapter, false);
3336 be_msix_enable(adapter);
3338 INIT_DELAYED_WORK(&adapter->work, be_worker);
3340 status = be_setup(adapter);
3341 if (status)
3342 goto msix_disable;
3344 be_netdev_init(netdev);
3345 status = register_netdev(netdev);
3346 if (status != 0)
3347 goto unsetup;
3348 netif_carrier_off(netdev);
3350 if (be_physfn(adapter) && adapter->sriov_enabled) {
3351 u8 mac_speed;
3352 bool link_up;
3353 u16 vf, lnk_speed;
3355 if (!lancer_chip(adapter)) {
3356 status = be_vf_eth_addr_config(adapter);
3357 if (status)
3358 goto unreg_netdev;
3361 for (vf = 0; vf < num_vfs; vf++) {
3362 status = be_cmd_link_status_query(adapter, &link_up,
3363 &mac_speed, &lnk_speed, vf + 1);
3364 if (!status)
3365 adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
3366 else
3367 goto unreg_netdev;
3371 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
3373 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3374 return 0;
3376 unreg_netdev:
3377 unregister_netdev(netdev);
3378 unsetup:
3379 be_clear(adapter);
3380 msix_disable:
3381 be_msix_disable(adapter);
3382 stats_clean:
3383 be_stats_cleanup(adapter);
3384 ctrl_clean:
3385 be_ctrl_cleanup(adapter);
3386 free_vf_cfg:
3387 kfree(adapter->vf_cfg);
3388 free_netdev:
3389 be_sriov_disable(adapter);
3390 free_netdev(netdev);
3391 pci_set_drvdata(pdev, NULL);
3392 rel_reg:
3393 pci_release_regions(pdev);
3394 disable_dev:
3395 pci_disable_device(pdev);
3396 do_none:
3397 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3398 return status;
3401 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3403 struct be_adapter *adapter = pci_get_drvdata(pdev);
3404 struct net_device *netdev = adapter->netdev;
3406 cancel_delayed_work_sync(&adapter->work);
3407 if (adapter->wol)
3408 be_setup_wol(adapter, true);
3410 netif_device_detach(netdev);
3411 if (netif_running(netdev)) {
3412 rtnl_lock();
3413 be_close(netdev);
3414 rtnl_unlock();
3416 be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
3417 be_clear(adapter);
3419 be_msix_disable(adapter);
3420 pci_save_state(pdev);
3421 pci_disable_device(pdev);
3422 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3423 return 0;
3426 static int be_resume(struct pci_dev *pdev)
3428 int status = 0;
3429 struct be_adapter *adapter = pci_get_drvdata(pdev);
3430 struct net_device *netdev = adapter->netdev;
3432 netif_device_detach(netdev);
3434 status = pci_enable_device(pdev);
3435 if (status)
3436 return status;
3438 pci_set_power_state(pdev, 0);
3439 pci_restore_state(pdev);
3441 be_msix_enable(adapter);
3442 /* tell fw we're ready to fire cmds */
3443 status = be_cmd_fw_init(adapter);
3444 if (status)
3445 return status;
3447 be_setup(adapter);
3448 if (netif_running(netdev)) {
3449 rtnl_lock();
3450 be_open(netdev);
3451 rtnl_unlock();
3453 netif_device_attach(netdev);
3455 if (adapter->wol)
3456 be_setup_wol(adapter, false);
3458 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3459 return 0;
3462 /*
3463 * An FLR will stop BE from DMAing any data.
3464 */
3465 static void be_shutdown(struct pci_dev *pdev)
3467 struct be_adapter *adapter = pci_get_drvdata(pdev);
3469 if (!adapter)
3470 return;
3472 cancel_delayed_work_sync(&adapter->work);
3474 netif_device_detach(adapter->netdev);
3476 if (adapter->wol)
3477 be_setup_wol(adapter, true);
3479 be_cmd_reset_function(adapter);
3481 pci_disable_device(pdev);
3484 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3485 pci_channel_state_t state)
3487 struct be_adapter *adapter = pci_get_drvdata(pdev);
3488 struct net_device *netdev = adapter->netdev;
3490 dev_err(&adapter->pdev->dev, "EEH error detected\n");
3492 adapter->eeh_err = true;
3494 netif_device_detach(netdev);
3496 if (netif_running(netdev)) {
3497 rtnl_lock();
3498 be_close(netdev);
3499 rtnl_unlock();
3501 be_clear(adapter);
3503 if (state == pci_channel_io_perm_failure)
3504 return PCI_ERS_RESULT_DISCONNECT;
3506 pci_disable_device(pdev);
3508 return PCI_ERS_RESULT_NEED_RESET;
3511 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3513 struct be_adapter *adapter = pci_get_drvdata(pdev);
3514 int status;
3516 dev_info(&adapter->pdev->dev, "EEH reset\n");
3517 adapter->eeh_err = false;
3519 status = pci_enable_device(pdev);
3520 if (status)
3521 return PCI_ERS_RESULT_DISCONNECT;
3523 pci_set_master(pdev);
3524 pci_set_power_state(pdev, 0);
3525 pci_restore_state(pdev);
3527 /* Check if card is ok and fw is ready */
3528 status = be_cmd_POST(adapter);
3529 if (status)
3530 return PCI_ERS_RESULT_DISCONNECT;
3532 return PCI_ERS_RESULT_RECOVERED;
3535 static void be_eeh_resume(struct pci_dev *pdev)
3537 int status = 0;
3538 struct be_adapter *adapter = pci_get_drvdata(pdev);
3539 struct net_device *netdev = adapter->netdev;
3541 dev_info(&adapter->pdev->dev, "EEH resume\n");
3543 pci_save_state(pdev);
3545 /* tell fw we're ready to fire cmds */
3546 status = be_cmd_fw_init(adapter);
3547 if (status)
3548 goto err;
3550 status = be_setup(adapter);
3551 if (status)
3552 goto err;
3554 if (netif_running(netdev)) {
3555 status = be_open(netdev);
3556 if (status)
3557 goto err;
3559 netif_device_attach(netdev);
3560 return;
3561 err:
3562 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
3565 static struct pci_error_handlers be_eeh_handlers = {
3566 .error_detected = be_eeh_err_detected,
3567 .slot_reset = be_eeh_reset,
3568 .resume = be_eeh_resume,
3571 static struct pci_driver be_driver = {
3572 .name = DRV_NAME,
3573 .id_table = be_dev_ids,
3574 .probe = be_probe,
3575 .remove = be_remove,
3576 .suspend = be_suspend,
3577 .resume = be_resume,
3578 .shutdown = be_shutdown,
3579 .err_handler = &be_eeh_handlers
3582 static int __init be_init_module(void)
3584 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3585 rx_frag_size != 2048) {
3586 printk(KERN_WARNING DRV_NAME
3587 " : Module param rx_frag_size must be 2048/4096/8192."
3588 " Using 2048\n");
3589 rx_frag_size = 2048;
3592 return pci_register_driver(&be_driver);
3594 module_init(be_init_module);
3596 static void __exit be_exit_module(void)
3598 pci_unregister_driver(&be_driver);
3600 module_exit(be_exit_module);