be2net: Show newly flashed FW ver in ethtool
drivers/net/ethernet/emulex/benet/be_main.c
/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	if (adapter->eeh_err)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* MAC addr configuration will be done in hardware for VFs
	 * by their corresponding PFs. Just copy to netdev addr here
	 */
	if (!be_physfn(adapter))
		goto netdev_addr;

	status = be_cmd_pmac_del(adapter, adapter->if_handle,
				 adapter->pmac_id, 0);
	if (status)
		return status;

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
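/* The ERX per-queue drop counter consumed below is only 16 bits wide in
 * hardware. accumulate_16bit_val() folds each 16-bit reading into a 32-bit
 * software value: the low half tracks the hardware counter and the high
 * half accumulates wrap-arounds. E.g. with an accumulator of 0x0001FFF0, a
 * new reading of 0x0005 is below the stored low half, so one extra 65536
 * is added, yielding 0x00020005.
 */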
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)		(x & 0xFFFF)
#define hi(x)		(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
}
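/* Reader side of the u64_stats sync used by the per-queue counters: the
 * fetch_begin/fetch_retry pairs below take a consistent snapshot of the
 * 64-bit packet/byte counters without locking out the datapath; on 32-bit
 * hosts the loop retries if a writer updated the counters mid-read.
 */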
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
void be_link_status_update(struct be_adapter *adapter, u32 link_status)
{
	struct net_device *netdev = adapter->netdev;

	/* when link status changes, link speed must be re-queried from card */
	adapter->link_speed = -1;
	if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
		netif_carrier_on(netdev);
		dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
	} else {
		netif_carrier_off(netdev);
		dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
	}
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}
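/* BE2/BE3 appear to require an even number of WRBs per transmit request,
 * hence the dummy WRB added below when the count would otherwise be odd;
 * the lancer_chip() check skips this on Lancer, which seems to have no
 * such restriction (inferred from the logic below, not from a datasheet).
 */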
/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
								bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
							LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   frag->size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;

	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}

static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter, false, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
			netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
done:
	return;
}
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
		status = be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	status = be_cmd_pmac_add(adapter, mac,
			adapter->vf_cfg[vf].vf_if_handle,
			&adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (vf >= num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (rate < 0))
		return -EINVAL;

	if (rate > 10000)
		rate = 10000;

	adapter->vf_cfg[vf].vf_tx_rate = rate;
	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}
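/* Adaptive interrupt coalescing (AIC) for RX: once a second the handler
 * below converts the measured packet rate into an EQ delay, roughly one
 * step per 110K pkts/s scaled by 8 (eqd = (pps / 110000) << 3), clamps it
 * to the [min_eqd, max_eqd] window, and disables coalescing entirely at
 * low rates (eqd < 10 becomes 0).
 */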
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = rx_stats(rxo);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = stats->rx_pps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd) {
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
		rx_eq->cur_eqd = eqd;
	}
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			struct be_rx_compl_info *rxcp)
{
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, netdev);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
		} else {
			put_page(page_info->page);
		}
		skb_shinfo(skb)->frags[j].size += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->truesize += rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(&eq_obj->napi);
}
static void be_parse_rx_compl_v1(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}
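/* The completion consumers below all follow the same pattern: peek at the
 * 'valid' bit of the entry at the CQ tail, and only after seeing it set
 * issue an rmb() so the rest of the entry is not read before the DMA'd
 * valid bit. The valid bit sits at the same offset in the v0 and v1
 * completion layouts, so either definition works for the check.
 */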
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(adapter, compl, rxcp);
	else
		be_parse_rx_compl_v0(adapter, compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & 0x400) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}

static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj,
			bool rearm)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	if (!num)
		rearm = true;

	be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
static void be_tx_compl_clean(struct be_adapter *adapter,
				struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_queue_info *txq = &txo->q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
			num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			atomic_sub(num_wrbs, &txq->used);
			cmpl = 0;
			num_wrbs = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		index_adv(&end_idx,
			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			txq->len);
		num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
		atomic_sub(num_wrbs, &txq->used);
	}
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}
/* One TX event queue is shared by all TX compl qs */
static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_tx_obj *txo;
	u8 i;

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;

	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
		sizeof(struct be_eq_entry)))
		return -1;

	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto err;
	adapter->tx_eq.eq_idx = adapter->eq_next_idx++;

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
			goto err;

		if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
			goto err;

		q = &txo->q;
		if (be_queue_alloc(adapter, q, TX_Q_LEN,
			sizeof(struct be_eth_wrb)))
			goto err;

		if (be_cmd_txq_create(adapter, q, cq))
			goto err;
	}
	return 0;

err:
	be_tx_queues_destroy(adapter);
	return -1;
}

static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		be_queue_free(adapter, &rxo->q);

		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

		q = &rxo->rx_eq.q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_EQ);
		be_queue_free(adapter, q);
	}
}

static u32 be_num_rxqs_want(struct be_adapter *adapter)
{
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
		!adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
		return 1 + MAX_RSS_QS; /* one default non-RSS queue */
	} else {
		dev_warn(&adapter->pdev->dev,
			"No support for multiple RX queues\n");
		return 1;
	}
}

static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
				msix_enabled(adapter) ?
					adapter->num_msix_vec - 1 : 1);
	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_warn(&adapter->pdev->dev,
			"Can create only %d RX queues", adapter->num_rx_qs);

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		rxo->rx_eq.eq_idx = adapter->eq_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;

		/* Rx Q - will be created in be_open() */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;
	}

	return 0;
err:
	be_rx_queues_destroy(adapter);
	return -1;
}
static bool event_peek(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
	if (!eqe->evt)
		return false;
	else
		return true;
}

static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0, rx = 0;

	if (lancer_chip(adapter)) {
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq, false);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq, true);
		}

		if (!(tx || rx))
			return IRQ_NONE;

	} else {
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		if ((1 << adapter->tx_eq.eq_idx & isr))
			event_handle(adapter, &adapter->tx_eq, false);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.eq_idx & isr))
				event_handle(adapter, &rxo->rx_eq, true);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_rx_obj *rxo = dev;
	struct be_adapter *adapter = rxo->adapter;

	event_handle(adapter, &rxo->rx_eq, true);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq, false);

	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return (rxcp->tcpf && !rxcp->err) ? true : false;
}
static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	rx_stats(rxo)->rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(adapter, rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some SKUs
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(adapter, rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(adapter, rxo, rxcp);
		else
			be_rx_compl_process(adapter, rxo, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	/* Refill the queue */
	if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo, GFP_ATOMIC);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		be_cq_notify(adapter, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(adapter, rx_cq->id, false, work_done);
	}
	return work_done;
}
1898 /* As TX and MCC share the same EQ check for both TX and MCC completions.
1899 * For TX/MCC we don't honour budget; consume everything
1901 static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1903 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1904 struct be_adapter *adapter =
1905 container_of(tx_eq, struct be_adapter, tx_eq);
1906 struct be_tx_obj *txo;
1907 struct be_eth_tx_compl *txcp;
1908 int tx_compl, mcc_compl, status = 0;
1909 u8 i;
1910 u16 num_wrbs;
1912 for_all_tx_queues(adapter, txo, i) {
1913 tx_compl = 0;
1914 num_wrbs = 0;
1915 while ((txcp = be_tx_compl_get(&txo->cq))) {
1916 num_wrbs += be_tx_compl_process(adapter, txo,
1917 AMAP_GET_BITS(struct amap_eth_tx_compl,
1918 wrb_index, txcp));
1919 tx_compl++;
1921 if (tx_compl) {
1922 be_cq_notify(adapter, txo->cq.id, true, tx_compl);
1924 atomic_sub(num_wrbs, &txo->q.used);
1926 /* As Tx wrbs have been freed up, wake up netdev queue
1927 * if it was stopped due to lack of tx wrbs. */
1928 if (__netif_subqueue_stopped(adapter->netdev, i) &&
1929 atomic_read(&txo->q.used) < txo->q.len / 2) {
1930 netif_wake_subqueue(adapter->netdev, i);
1933 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
1934 tx_stats(txo)->tx_compl += tx_compl;
1935 u64_stats_update_end(&tx_stats(txo)->sync_compl);
1939 mcc_compl = be_process_mcc(adapter, &status);
1941 if (mcc_compl) {
1942 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1943 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1946 napi_complete(napi);
1948 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
1949 adapter->drv_stats.tx_events++;
1950 return 1;
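/*
 * Unlike be_poll_rx(), this handler drains all TX and MCC completions
 * regardless of budget, so it can unconditionally napi_complete() and
 * re-arm the event queue; the returned value of 1 is just a nominal work
 * count below budget, telling the NAPI core that polling is finished.
 */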
1953 void be_detect_dump_ue(struct be_adapter *adapter)
1955 u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1956 u32 i;
1958 pci_read_config_dword(adapter->pdev,
1959 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1960 pci_read_config_dword(adapter->pdev,
1961 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1962 pci_read_config_dword(adapter->pdev,
1963 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1964 pci_read_config_dword(adapter->pdev,
1965 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1967 ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
1968 ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
1970 if (ue_status_lo || ue_status_hi) {
1971 adapter->ue_detected = true;
1972 adapter->eeh_err = true;
1973 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1976 if (ue_status_lo) {
1977 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1978 if (ue_status_lo & 1)
1979 dev_err(&adapter->pdev->dev,
1980 "UE: %s bit set\n", ue_status_low_desc[i]);
1983 if (ue_status_hi) {
1984 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
1985 if (ue_status_hi & 1)
1986 dev_err(&adapter->pdev->dev,
1987 "UE: %s bit set\n", ue_status_hi_desc[i]);
1993 static void be_worker(struct work_struct *work)
1995 struct be_adapter *adapter =
1996 container_of(work, struct be_adapter, work.work);
1997 struct be_rx_obj *rxo;
1998 int i;
2000 if (!adapter->ue_detected && !lancer_chip(adapter))
2001 be_detect_dump_ue(adapter);
2003 /* when interrupts are not yet enabled, just reap any pending
2004 * mcc completions */
2005 if (!netif_running(adapter->netdev)) {
2006 int mcc_compl, status = 0;
2008 mcc_compl = be_process_mcc(adapter, &status);
2010 if (mcc_compl) {
2011 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
2012 be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
2015 goto reschedule;
2018 if (!adapter->stats_cmd_sent) {
2019 if (lancer_chip(adapter))
2020 lancer_cmd_get_pport_stats(adapter,
2021 &adapter->stats_cmd);
2022 else
2023 be_cmd_get_stats(adapter, &adapter->stats_cmd);
2026 for_all_rx_queues(adapter, rxo, i) {
2027 be_rx_eqd_update(adapter, rxo);
2029 if (rxo->rx_post_starved) {
2030 rxo->rx_post_starved = false;
2031 be_post_rx_frags(rxo, GFP_KERNEL);
2035 reschedule:
2036 adapter->work_counter++;
2037 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
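/*
 * be_worker() is the driver's one-second heartbeat: it checks for
 * unrecoverable errors, keeps at most one stats command in flight
 * (stats_cmd_sent is set when the command is issued and cleared when its
 * completion is processed), adapts the RX EQ delay, and reposts RX buffers
 * if an earlier posting attempt was starved of memory.
 */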
2040 static void be_msix_disable(struct be_adapter *adapter)
2042 if (msix_enabled(adapter)) {
2043 pci_disable_msix(adapter->pdev);
2044 adapter->num_msix_vec = 0;
2048 static void be_msix_enable(struct be_adapter *adapter)
2050 #define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
2051 int i, status, num_vec;
2053 num_vec = be_num_rxqs_want(adapter) + 1;
2055 for (i = 0; i < num_vec; i++)
2056 adapter->msix_entries[i].entry = i;
2058 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2059 if (status == 0) {
2060 goto done;
2061 } else if (status >= BE_MIN_MSIX_VECTORS) {
2062 num_vec = status;
2063 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2064 num_vec) == 0)
2065 goto done;
2067 return;
2068 done:
2069 adapter->num_msix_vec = num_vec;
2070 return;
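/*
 * pci_enable_msix() returns 0 on success, a negative errno on failure, or
 * a positive count of the vectors actually available when fewer than
 * requested exist; in the latter case the request is retried with the
 * smaller count, provided it still covers BE_MIN_MSIX_VECTORS. On any
 * failure the driver silently falls back to INTx.
 */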
2073 static void be_sriov_enable(struct be_adapter *adapter)
2075 be_check_sriov_fn_type(adapter);
2076 #ifdef CONFIG_PCI_IOV
2077 if (be_physfn(adapter) && num_vfs) {
2078 int status, pos;
2079 u16 nvfs;
2081 pos = pci_find_ext_capability(adapter->pdev,
2082 PCI_EXT_CAP_ID_SRIOV);
2083 pci_read_config_word(adapter->pdev,
2084 pos + PCI_SRIOV_TOTAL_VF, &nvfs);
2086 if (num_vfs > nvfs) {
2087 dev_info(&adapter->pdev->dev,
2088 "Device supports %d VFs and not %d\n",
2089 nvfs, num_vfs);
2090 num_vfs = nvfs;
2093 status = pci_enable_sriov(adapter->pdev, num_vfs);
2094 		adapter->sriov_enabled = !status;
2096 #endif
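/*
 * num_vfs is taken from the module parameter and clamped to the TotalVFs
 * count read from the SR-IOV capability, so over-asking is safe. An
 * illustrative invocation on an SR-IOV capable PF:
 *
 *   # modprobe be2net num_vfs=4
 */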
2099 static void be_sriov_disable(struct be_adapter *adapter)
2101 #ifdef CONFIG_PCI_IOV
2102 if (adapter->sriov_enabled) {
2103 pci_disable_sriov(adapter->pdev);
2104 adapter->sriov_enabled = false;
2106 #endif
2109 static inline int be_msix_vec_get(struct be_adapter *adapter,
2110 struct be_eq_obj *eq_obj)
2112 return adapter->msix_entries[eq_obj->eq_idx].vector;
2115 static int be_request_irq(struct be_adapter *adapter,
2116 struct be_eq_obj *eq_obj,
2117 void *handler, char *desc, void *context)
2119 struct net_device *netdev = adapter->netdev;
2120 int vec;
2122 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
2123 vec = be_msix_vec_get(adapter, eq_obj);
2124 return request_irq(vec, handler, 0, eq_obj->desc, context);
2127 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2128 void *context)
2130 int vec = be_msix_vec_get(adapter, eq_obj);
2131 free_irq(vec, context);
2134 static int be_msix_register(struct be_adapter *adapter)
2136 struct be_rx_obj *rxo;
2137 int status, i;
2138 char qname[10];
2140 status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2141 adapter);
2142 if (status)
2143 goto err;
2145 for_all_rx_queues(adapter, rxo, i) {
2146 sprintf(qname, "rxq%d", i);
2147 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2148 qname, rxo);
2149 if (status)
2150 goto err_msix;
2153 return 0;
2155 err_msix:
2156 be_free_irq(adapter, &adapter->tx_eq, adapter);
2158 for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2159 be_free_irq(adapter, &rxo->rx_eq, rxo);
2161 err:
2162 dev_warn(&adapter->pdev->dev,
2163 "MSIX Request IRQ failed - err %d\n", status);
2164 be_msix_disable(adapter);
2165 return status;
2168 static int be_irq_register(struct be_adapter *adapter)
2170 struct net_device *netdev = adapter->netdev;
2171 int status;
2173 if (msix_enabled(adapter)) {
2174 status = be_msix_register(adapter);
2175 if (status == 0)
2176 goto done;
2177 /* INTx is not supported for VF */
2178 if (!be_physfn(adapter))
2179 return status;
2182 /* INTx */
2183 netdev->irq = adapter->pdev->irq;
2184 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2185 adapter);
2186 if (status) {
2187 dev_err(&adapter->pdev->dev,
2188 "INTx request IRQ failed - err %d\n", status);
2189 return status;
2191 done:
2192 adapter->isr_registered = true;
2193 return 0;
2196 static void be_irq_unregister(struct be_adapter *adapter)
2198 struct net_device *netdev = adapter->netdev;
2199 struct be_rx_obj *rxo;
2200 int i;
2202 if (!adapter->isr_registered)
2203 return;
2205 /* INTx */
2206 if (!msix_enabled(adapter)) {
2207 free_irq(netdev->irq, adapter);
2208 goto done;
2211 /* MSIx */
2212 be_free_irq(adapter, &adapter->tx_eq, adapter);
2214 for_all_rx_queues(adapter, rxo, i)
2215 be_free_irq(adapter, &rxo->rx_eq, rxo);
2217 done:
2218 adapter->isr_registered = false;
2221 static void be_rx_queues_clear(struct be_adapter *adapter)
2223 struct be_queue_info *q;
2224 struct be_rx_obj *rxo;
2225 int i;
2227 for_all_rx_queues(adapter, rxo, i) {
2228 q = &rxo->q;
2229 if (q->created) {
2230 be_cmd_rxq_destroy(adapter, q);
2231 /* After the rxq is invalidated, wait for a grace time
2232 * of 1ms for all dma to end and the flush compl to
2233 * arrive
2235 mdelay(1);
2236 be_rx_q_clean(adapter, rxo);
2239 /* Clear any residual events */
2240 q = &rxo->rx_eq.q;
2241 if (q->created)
2242 be_eq_clean(adapter, &rxo->rx_eq);
2246 static int be_close(struct net_device *netdev)
2248 struct be_adapter *adapter = netdev_priv(netdev);
2249 struct be_rx_obj *rxo;
2250 struct be_tx_obj *txo;
2251 struct be_eq_obj *tx_eq = &adapter->tx_eq;
2252 int vec, i;
2254 be_async_mcc_disable(adapter);
2256 if (!lancer_chip(adapter))
2257 be_intr_set(adapter, false);
2259 for_all_rx_queues(adapter, rxo, i)
2260 napi_disable(&rxo->rx_eq.napi);
2262 napi_disable(&tx_eq->napi);
2264 if (lancer_chip(adapter)) {
2265 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2266 for_all_rx_queues(adapter, rxo, i)
2267 be_cq_notify(adapter, rxo->cq.id, false, 0);
2268 for_all_tx_queues(adapter, txo, i)
2269 be_cq_notify(adapter, txo->cq.id, false, 0);
2272 if (msix_enabled(adapter)) {
2273 vec = be_msix_vec_get(adapter, tx_eq);
2274 synchronize_irq(vec);
2276 for_all_rx_queues(adapter, rxo, i) {
2277 vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2278 synchronize_irq(vec);
2280 } else {
2281 synchronize_irq(netdev->irq);
2283 be_irq_unregister(adapter);
2285 /* Wait for all pending tx completions to arrive so that
2286 * all tx skbs are freed.
2288 for_all_tx_queues(adapter, txo, i)
2289 be_tx_compl_clean(adapter, txo);
2291 be_rx_queues_clear(adapter);
2292 return 0;
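/*
 * Teardown ordering in be_close(): async MCC processing is stopped first,
 * then interrupts and NAPI, then IRQs are synchronized and freed, and only
 * then are TX completions drained and RX queues cleared, so no completion
 * can race with the cleanup. On Lancer, where be_intr_set() is skipped,
 * the CQs are instead explicitly notified with rearm=false.
 */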
2295 static int be_rx_queues_setup(struct be_adapter *adapter)
2297 struct be_rx_obj *rxo;
2298 int rc, i;
2299 u8 rsstable[MAX_RSS_QS];
2301 for_all_rx_queues(adapter, rxo, i) {
2302 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2303 rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
2304 adapter->if_handle,
2305 			(i > 0) ? 1 : 0 /* rss enable */, &rxo->rss_id);
2306 if (rc)
2307 return rc;
2310 if (be_multi_rxq(adapter)) {
2311 for_all_rss_queues(adapter, rxo, i)
2312 rsstable[i] = rxo->rss_id;
2314 rc = be_cmd_rss_config(adapter, rsstable,
2315 adapter->num_rx_qs - 1);
2316 if (rc)
2317 return rc;
2320 /* First time posting */
2321 for_all_rx_queues(adapter, rxo, i) {
2322 be_post_rx_frags(rxo, GFP_KERNEL);
2323 napi_enable(&rxo->rx_eq.napi);
2325 return 0;
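/*
 * RX queue 0 is created with RSS disabled and serves as the default
 * (non-RSS) queue; only queues 1..N-1 receive an rss_id and are entered
 * into the indirection table given to be_cmd_rss_config(), which is why
 * the table covers adapter->num_rx_qs - 1 entries.
 */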
2328 static int be_open(struct net_device *netdev)
2330 struct be_adapter *adapter = netdev_priv(netdev);
2331 struct be_eq_obj *tx_eq = &adapter->tx_eq;
2332 struct be_rx_obj *rxo;
2333 int status, i;
2335 status = be_rx_queues_setup(adapter);
2336 if (status)
2337 goto err;
2339 napi_enable(&tx_eq->napi);
2341 be_irq_register(adapter);
2343 if (!lancer_chip(adapter))
2344 be_intr_set(adapter, true);
2346 /* The evt queues are created in unarmed state; arm them */
2347 for_all_rx_queues(adapter, rxo, i) {
2348 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2349 be_cq_notify(adapter, rxo->cq.id, true, 0);
2351 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2353 /* Now that interrupts are on we can process async mcc */
2354 be_async_mcc_enable(adapter);
2356 if (be_physfn(adapter)) {
2357 status = be_vid_config(adapter, false, 0);
2358 if (status)
2359 goto err;
2361 status = be_cmd_set_flow_control(adapter,
2362 adapter->tx_fc, adapter->rx_fc);
2363 if (status)
2364 goto err;
2367 return 0;
2368 err:
2369 be_close(adapter->netdev);
2370 return -EIO;
2373 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2375 struct be_dma_mem cmd;
2376 int status = 0;
2377 u8 mac[ETH_ALEN];
2379 memset(mac, 0, ETH_ALEN);
2381 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2382 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2383 GFP_KERNEL);
2384 	if (cmd.va == NULL)
2385 		return -ENOMEM;
2386 memset(cmd.va, 0, cmd.size);
2388 if (enable) {
2389 status = pci_write_config_dword(adapter->pdev,
2390 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2391 if (status) {
2392 dev_err(&adapter->pdev->dev,
2393 "Could not enable Wake-on-lan\n");
2394 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2395 cmd.dma);
2396 return status;
2398 status = be_cmd_enable_magic_wol(adapter,
2399 adapter->netdev->dev_addr, &cmd);
2400 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2401 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2402 } else {
2403 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2404 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2405 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2408 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2409 return status;
2413 * Generate a seed MAC address from the PF MAC Address using jhash.
2414 * MAC Address for VFs are assigned incrementally starting from the seed.
2415 * These addresses are programmed in the ASIC by the PF and the VF driver
2416 * queries for the MAC address during its probe.
2418 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2420 u32 vf = 0;
2421 int status = 0;
2422 u8 mac[ETH_ALEN];
2424 be_vf_eth_addr_generate(adapter, mac);
2426 for (vf = 0; vf < num_vfs; vf++) {
2427 status = be_cmd_pmac_add(adapter, mac,
2428 adapter->vf_cfg[vf].vf_if_handle,
2429 &adapter->vf_cfg[vf].vf_pmac_id,
2430 vf + 1);
2431 if (status)
2432 dev_err(&adapter->pdev->dev,
2433 "Mac address add failed for VF %d\n", vf);
2434 else
2435 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2437 mac[5] += 1;
2439 return status;
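/*
 * Illustration (hypothetical values): if the jhash-derived seed is
 * 02:aa:bb:cc:dd:00, VF0 is assigned ...dd:00, VF1 ...dd:01, and so on.
 * Only mac[5] is incremented, so the scheme would wrap past 256 VFs; the
 * num_vfs values this HW supports stay far below that.
 */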
2442 static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2444 u32 vf;
2446 for (vf = 0; vf < num_vfs; vf++) {
2447 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2448 be_cmd_pmac_del(adapter,
2449 adapter->vf_cfg[vf].vf_if_handle,
2450 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
2454 static int be_setup(struct be_adapter *adapter)
2456 struct net_device *netdev = adapter->netdev;
2457 u32 cap_flags, en_flags, vf = 0;
2458 int status;
2459 u8 mac[ETH_ALEN];
2461 be_cmd_req_native_mode(adapter);
2463 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2464 BE_IF_FLAGS_BROADCAST |
2465 BE_IF_FLAGS_MULTICAST;
2467 if (be_physfn(adapter)) {
2468 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2469 BE_IF_FLAGS_PROMISCUOUS |
2470 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2471 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
2473 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2474 cap_flags |= BE_IF_FLAGS_RSS;
2475 en_flags |= BE_IF_FLAGS_RSS;
2479 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2480 netdev->dev_addr, false/* pmac_invalid */,
2481 &adapter->if_handle, &adapter->pmac_id, 0);
2482 if (status != 0)
2483 goto do_none;
2485 if (be_physfn(adapter)) {
2486 if (adapter->sriov_enabled) {
2487 while (vf < num_vfs) {
2488 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2489 BE_IF_FLAGS_BROADCAST;
2490 status = be_cmd_if_create(adapter, cap_flags,
2491 en_flags, mac, true,
2492 &adapter->vf_cfg[vf].vf_if_handle,
2493 NULL, vf+1);
2494 if (status) {
2495 dev_err(&adapter->pdev->dev,
2496 "Interface Create failed for VF %d\n",
2497 vf);
2498 goto if_destroy;
2500 adapter->vf_cfg[vf].vf_pmac_id =
2501 BE_INVALID_PMAC_ID;
2502 vf++;
2505 } else {
2506 status = be_cmd_mac_addr_query(adapter, mac,
2507 MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2508 if (!status) {
2509 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2510 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2514 status = be_tx_queues_create(adapter);
2515 if (status != 0)
2516 goto if_destroy;
2518 status = be_rx_queues_create(adapter);
2519 if (status != 0)
2520 goto tx_qs_destroy;
2522 /* Allow all priorities by default. A GRP5 evt may modify this */
2523 adapter->vlan_prio_bmap = 0xff;
2525 status = be_mcc_queues_create(adapter);
2526 if (status != 0)
2527 goto rx_qs_destroy;
2529 adapter->link_speed = -1;
2531 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
2532 return 0;
2534 rx_qs_destroy:
2535 be_rx_queues_destroy(adapter);
2536 tx_qs_destroy:
2537 be_tx_queues_destroy(adapter);
2538 if_destroy:
2539 if (be_physfn(adapter) && adapter->sriov_enabled)
2540 for (vf = 0; vf < num_vfs; vf++)
2541 if (adapter->vf_cfg[vf].vf_if_handle)
2542 be_cmd_if_destroy(adapter,
2543 adapter->vf_cfg[vf].vf_if_handle,
2544 vf + 1);
2545 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2546 do_none:
2547 return status;
2550 static int be_clear(struct be_adapter *adapter)
2552 int vf;
2554 if (be_physfn(adapter) && adapter->sriov_enabled)
2555 be_vf_eth_addr_rem(adapter);
2557 be_mcc_queues_destroy(adapter);
2558 be_rx_queues_destroy(adapter);
2559 be_tx_queues_destroy(adapter);
2560 adapter->eq_next_idx = 0;
2562 if (be_physfn(adapter) && adapter->sriov_enabled)
2563 for (vf = 0; vf < num_vfs; vf++)
2564 if (adapter->vf_cfg[vf].vf_if_handle)
2565 be_cmd_if_destroy(adapter,
2566 adapter->vf_cfg[vf].vf_if_handle,
2567 vf + 1);
2569 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2571 adapter->be3_native = 0;
2573 /* tell fw we're done with firing cmds */
2574 be_cmd_fw_clean(adapter);
2575 return 0;
2579 #define FW_FILE_HDR_SIGN "ServerEngines Corp. "
2580 static bool be_flash_redboot(struct be_adapter *adapter,
2581 const u8 *p, u32 img_start, int image_size,
2582 int hdr_size)
2584 u32 crc_offset;
2585 u8 flashed_crc[4];
2586 int status;
2588 crc_offset = hdr_size + img_start + image_size - 4;
2590 p += crc_offset;
2592 status = be_cmd_get_flash_crc(adapter, flashed_crc,
2593 (image_size - 4));
2594 if (status) {
2595 dev_err(&adapter->pdev->dev,
2596 "could not get crc from flash, not flashing redboot\n");
2597 return false;
2600 	/* update redboot only if crc does not match */
2601 	return memcmp(flashed_crc, p, 4) != 0;
2607 static bool phy_flashing_required(struct be_adapter *adapter)
2609 int status = 0;
2610 struct be_phy_info phy_info;
2612 status = be_cmd_get_phy_info(adapter, &phy_info);
2613 if (status)
2614 return false;
2615 if ((phy_info.phy_type == TN_8022) &&
2616 (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
2617 return true;
2619 return false;
2622 static int be_flash_data(struct be_adapter *adapter,
2623 const struct firmware *fw,
2624 struct be_dma_mem *flash_cmd, int num_of_images)
2627 int status = 0, i, filehdr_size = 0;
2628 u32 total_bytes = 0, flash_op;
2629 int num_bytes;
2630 const u8 *p = fw->data;
2631 struct be_cmd_write_flashrom *req = flash_cmd->va;
2632 const struct flash_comp *pflashcomp;
2633 int num_comp;
2635 static const struct flash_comp gen3_flash_types[10] = {
2636 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2637 FLASH_IMAGE_MAX_SIZE_g3},
2638 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2639 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2640 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2641 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2642 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2643 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2644 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2645 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2646 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2647 FLASH_IMAGE_MAX_SIZE_g3},
2648 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2649 FLASH_IMAGE_MAX_SIZE_g3},
2650 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2651 FLASH_IMAGE_MAX_SIZE_g3},
2652 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2653 FLASH_NCSI_IMAGE_MAX_SIZE_g3},
2654 { FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
2655 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
2657 static const struct flash_comp gen2_flash_types[8] = {
2658 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2659 FLASH_IMAGE_MAX_SIZE_g2},
2660 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2661 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2662 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2663 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2664 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2665 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2666 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2667 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2668 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2669 FLASH_IMAGE_MAX_SIZE_g2},
2670 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2671 FLASH_IMAGE_MAX_SIZE_g2},
2672 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2673 FLASH_IMAGE_MAX_SIZE_g2}
2676 if (adapter->generation == BE_GEN3) {
2677 pflashcomp = gen3_flash_types;
2678 filehdr_size = sizeof(struct flash_file_hdr_g3);
2679 num_comp = ARRAY_SIZE(gen3_flash_types);
2680 } else {
2681 pflashcomp = gen2_flash_types;
2682 filehdr_size = sizeof(struct flash_file_hdr_g2);
2683 num_comp = ARRAY_SIZE(gen2_flash_types);
2685 for (i = 0; i < num_comp; i++) {
2686 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2687 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2688 continue;
2689 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
2690 if (!phy_flashing_required(adapter))
2691 continue;
2693 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2694 (!be_flash_redboot(adapter, fw->data,
2695 pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2696 (num_of_images * sizeof(struct image_hdr)))))
2697 continue;
2698 p = fw->data;
2699 p += filehdr_size + pflashcomp[i].offset
2700 + (num_of_images * sizeof(struct image_hdr));
2701 if (p + pflashcomp[i].size > fw->data + fw->size)
2702 return -1;
2703 total_bytes = pflashcomp[i].size;
2704 while (total_bytes) {
2705 if (total_bytes > 32*1024)
2706 num_bytes = 32*1024;
2707 else
2708 num_bytes = total_bytes;
2709 total_bytes -= num_bytes;
2710 if (!total_bytes) {
2711 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2712 flash_op = FLASHROM_OPER_PHY_FLASH;
2713 else
2714 flash_op = FLASHROM_OPER_FLASH;
2715 } else {
2716 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2717 flash_op = FLASHROM_OPER_PHY_SAVE;
2718 else
2719 flash_op = FLASHROM_OPER_SAVE;
2721 memcpy(req->params.data_buf, p, num_bytes);
2722 p += num_bytes;
2723 status = be_cmd_write_flashrom(adapter, flash_cmd,
2724 pflashcomp[i].optype, flash_op, num_bytes);
2725 if (status) {
2726 if ((status == ILLEGAL_IOCTL_REQ) &&
2727 (pflashcomp[i].optype ==
2728 IMG_TYPE_PHY_FW))
2729 break;
2730 dev_err(&adapter->pdev->dev,
2731 "cmd to write to flash rom failed.\n");
2732 return -1;
2736 return 0;
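/*
 * Each component is written in 32KB chunks: intermediate chunks use a SAVE
 * opcode (accumulated by the FW) and only the final chunk uses a FLASH
 * opcode that commits the image. E.g. a 100KB component is sent as
 * SAVE(32K), SAVE(32K), SAVE(32K), FLASH(4K).
 */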
2739 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2741 if (fhdr == NULL)
2742 return 0;
2743 if (fhdr->build[0] == '3')
2744 return BE_GEN3;
2745 else if (fhdr->build[0] == '2')
2746 return BE_GEN2;
2747 else
2748 return 0;
2751 static int lancer_fw_download(struct be_adapter *adapter,
2752 const struct firmware *fw)
2754 #define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
2755 #define LANCER_FW_DOWNLOAD_LOCATION "/prg"
2756 struct be_dma_mem flash_cmd;
2757 const u8 *data_ptr = NULL;
2758 u8 *dest_image_ptr = NULL;
2759 size_t image_size = 0;
2760 u32 chunk_size = 0;
2761 u32 data_written = 0;
2762 u32 offset = 0;
2763 int status = 0;
2764 u8 add_status = 0;
2766 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
2767 dev_err(&adapter->pdev->dev,
2768 "FW Image not properly aligned. "
2769 "Length must be 4 byte aligned.\n");
2770 status = -EINVAL;
2771 goto lancer_fw_exit;
2774 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2775 + LANCER_FW_DOWNLOAD_CHUNK;
2776 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2777 &flash_cmd.dma, GFP_KERNEL);
2778 if (!flash_cmd.va) {
2779 status = -ENOMEM;
2780 dev_err(&adapter->pdev->dev,
2781 "Memory allocation failure while flashing\n");
2782 goto lancer_fw_exit;
2785 dest_image_ptr = flash_cmd.va +
2786 sizeof(struct lancer_cmd_req_write_object);
2787 image_size = fw->size;
2788 data_ptr = fw->data;
2790 while (image_size) {
2791 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
2793 /* Copy the image chunk content. */
2794 memcpy(dest_image_ptr, data_ptr, chunk_size);
2796 status = lancer_cmd_write_object(adapter, &flash_cmd,
2797 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
2798 &data_written, &add_status);
2800 if (status)
2801 break;
2803 offset += data_written;
2804 data_ptr += data_written;
2805 image_size -= data_written;
2808 if (!status) {
2809 /* Commit the FW written */
2810 status = lancer_cmd_write_object(adapter, &flash_cmd,
2811 0, offset, LANCER_FW_DOWNLOAD_LOCATION,
2812 &data_written, &add_status);
2815 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2816 flash_cmd.dma);
2817 if (status) {
2818 dev_err(&adapter->pdev->dev,
2819 "Firmware load error. "
2820 "Status code: 0x%x Additional Status: 0x%x\n",
2821 status, add_status);
2822 goto lancer_fw_exit;
2825 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2826 lancer_fw_exit:
2827 return status;
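/*
 * lancer_cmd_write_object() may accept less than chunk_size per call,
 * which is why the loop advances by data_written rather than chunk_size.
 * The trailing zero-length write at the final offset is the commit that
 * makes the downloaded image effective.
 */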
2830 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
2832 struct flash_file_hdr_g2 *fhdr;
2833 struct flash_file_hdr_g3 *fhdr3;
2834 struct image_hdr *img_hdr_ptr = NULL;
2835 struct be_dma_mem flash_cmd;
2836 const u8 *p;
2837 int status = 0, i = 0, num_imgs = 0;
2839 p = fw->data;
2840 fhdr = (struct flash_file_hdr_g2 *) p;
2842 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2843 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2844 &flash_cmd.dma, GFP_KERNEL);
2845 if (!flash_cmd.va) {
2846 status = -ENOMEM;
2847 dev_err(&adapter->pdev->dev,
2848 "Memory allocation failure while flashing\n");
2849 goto be_fw_exit;
2852 if ((adapter->generation == BE_GEN3) &&
2853 (get_ufigen_type(fhdr) == BE_GEN3)) {
2854 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2855 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2856 for (i = 0; i < num_imgs; i++) {
2857 img_hdr_ptr = (struct image_hdr *) (fw->data +
2858 (sizeof(struct flash_file_hdr_g3) +
2859 i * sizeof(struct image_hdr)));
2860 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2861 status = be_flash_data(adapter, fw, &flash_cmd,
2862 num_imgs);
2864 } else if ((adapter->generation == BE_GEN2) &&
2865 (get_ufigen_type(fhdr) == BE_GEN2)) {
2866 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2867 } else {
2868 dev_err(&adapter->pdev->dev,
2869 "UFI and Interface are not compatible for flashing\n");
2870 status = -1;
2873 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2874 flash_cmd.dma);
2875 if (status) {
2876 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2877 goto be_fw_exit;
2880 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2882 be_fw_exit:
2883 return status;
2886 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
2888 const struct firmware *fw;
2889 int status;
2891 if (!netif_running(adapter->netdev)) {
2892 dev_err(&adapter->pdev->dev,
2893 "Firmware load not allowed (interface is down)\n");
2894 return -1;
2897 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2898 if (status)
2899 goto fw_exit;
2901 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2903 if (lancer_chip(adapter))
2904 status = lancer_fw_download(adapter, fw);
2905 else
2906 status = be_fw_download(adapter, fw);
2908 fw_exit:
2909 release_firmware(fw);
2910 return status;
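/*
 * This entry point is reached via ethtool's flash-device operation, e.g.
 * (interface and file names are illustrative):
 *
 *   # ethtool -f eth0 be3_fw.ufi
 *
 * The netif_running() check above refuses to flash while the interface
 * is down.
 */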
2913 static const struct net_device_ops be_netdev_ops = {
2914 .ndo_open = be_open,
2915 .ndo_stop = be_close,
2916 .ndo_start_xmit = be_xmit,
2917 .ndo_set_rx_mode = be_set_multicast_list,
2918 .ndo_set_mac_address = be_mac_addr_set,
2919 .ndo_change_mtu = be_change_mtu,
2920 .ndo_get_stats64 = be_get_stats64,
2921 .ndo_validate_addr = eth_validate_addr,
2922 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
2923 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
2924 .ndo_set_vf_mac = be_set_vf_mac,
2925 .ndo_set_vf_vlan = be_set_vf_vlan,
2926 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
2927 .ndo_get_vf_config = be_get_vf_config
2930 static void be_netdev_init(struct net_device *netdev)
2932 struct be_adapter *adapter = netdev_priv(netdev);
2933 struct be_rx_obj *rxo;
2934 int i;
2936 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
2937 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
2938 NETIF_F_HW_VLAN_TX;
2939 if (be_multi_rxq(adapter))
2940 netdev->hw_features |= NETIF_F_RXHASH;
2942 netdev->features |= netdev->hw_features |
2943 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
2945 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
2946 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2948 netdev->flags |= IFF_MULTICAST;
2950 /* Default settings for Rx and Tx flow control */
2951 adapter->rx_fc = true;
2952 adapter->tx_fc = true;
2954 netif_set_gso_max_size(netdev, 65535);
2956 BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2958 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2960 for_all_rx_queues(adapter, rxo, i)
2961 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2962 BE_NAPI_WEIGHT);
2964 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
2965 BE_NAPI_WEIGHT);
2968 static void be_unmap_pci_bars(struct be_adapter *adapter)
2970 if (adapter->csr)
2971 iounmap(adapter->csr);
2972 if (adapter->db)
2973 iounmap(adapter->db);
2976 static int be_map_pci_bars(struct be_adapter *adapter)
2978 u8 __iomem *addr;
2979 int db_reg;
2981 if (lancer_chip(adapter)) {
2982 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
2983 pci_resource_len(adapter->pdev, 0));
2984 if (addr == NULL)
2985 return -ENOMEM;
2986 adapter->db = addr;
2987 return 0;
2990 if (be_physfn(adapter)) {
2991 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2992 pci_resource_len(adapter->pdev, 2));
2993 if (addr == NULL)
2994 return -ENOMEM;
2995 adapter->csr = addr;
2998 if (adapter->generation == BE_GEN2) {
2999 db_reg = 4;
3000 } else {
3001 if (be_physfn(adapter))
3002 db_reg = 4;
3003 else
3004 db_reg = 0;
3006 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3007 pci_resource_len(adapter->pdev, db_reg));
3008 if (addr == NULL)
3009 goto pci_map_err;
3010 adapter->db = addr;
3012 return 0;
3013 pci_map_err:
3014 be_unmap_pci_bars(adapter);
3015 return -ENOMEM;
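/*
 * BAR usage in brief: Lancer maps everything, doorbells included, from
 * BAR 0. On BE2 the doorbell region is always BAR 4; on BE3 the PF uses
 * BAR 4 and a VF uses BAR 0. CSR space (BAR 2) is mapped only for the PF.
 */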
3019 static void be_ctrl_cleanup(struct be_adapter *adapter)
3021 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3023 be_unmap_pci_bars(adapter);
3025 if (mem->va)
3026 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3027 mem->dma);
3029 mem = &adapter->rx_filter;
3030 if (mem->va)
3031 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3032 mem->dma);
3035 static int be_ctrl_init(struct be_adapter *adapter)
3037 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3038 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3039 struct be_dma_mem *rx_filter = &adapter->rx_filter;
3040 int status;
3042 status = be_map_pci_bars(adapter);
3043 if (status)
3044 goto done;
3046 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3047 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3048 mbox_mem_alloc->size,
3049 &mbox_mem_alloc->dma,
3050 GFP_KERNEL);
3051 if (!mbox_mem_alloc->va) {
3052 status = -ENOMEM;
3053 goto unmap_pci_bars;
3055 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3056 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3057 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3058 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3060 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3061 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3062 &rx_filter->dma, GFP_KERNEL);
3063 if (rx_filter->va == NULL) {
3064 status = -ENOMEM;
3065 goto free_mbox;
3067 memset(rx_filter->va, 0, rx_filter->size);
3069 mutex_init(&adapter->mbox_lock);
3070 spin_lock_init(&adapter->mcc_lock);
3071 spin_lock_init(&adapter->mcc_cq_lock);
3073 init_completion(&adapter->flash_compl);
3074 pci_save_state(adapter->pdev);
3075 return 0;
3077 free_mbox:
3078 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3079 mbox_mem_alloc->va, mbox_mem_alloc->dma);
3081 unmap_pci_bars:
3082 be_unmap_pci_bars(adapter);
3084 done:
3085 return status;
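/*
 * The mailbox buffer is over-allocated by 16 bytes so that PTR_ALIGN()
 * can round both the virtual and DMA addresses up to the 16-byte
 * boundary the mailbox protocol requires.
 */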
3088 static void be_stats_cleanup(struct be_adapter *adapter)
3090 struct be_dma_mem *cmd = &adapter->stats_cmd;
3092 if (cmd->va)
3093 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3094 cmd->va, cmd->dma);
3097 static int be_stats_init(struct be_adapter *adapter)
3099 struct be_dma_mem *cmd = &adapter->stats_cmd;
3101 if (adapter->generation == BE_GEN2) {
3102 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3103 } else {
3104 if (lancer_chip(adapter))
3105 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3106 else
3107 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3109 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3110 GFP_KERNEL);
3111 	if (cmd->va == NULL)
3112 		return -ENOMEM;
3113 memset(cmd->va, 0, cmd->size);
3114 return 0;
3117 static void __devexit be_remove(struct pci_dev *pdev)
3119 struct be_adapter *adapter = pci_get_drvdata(pdev);
3121 if (!adapter)
3122 return;
3124 cancel_delayed_work_sync(&adapter->work);
3126 unregister_netdev(adapter->netdev);
3128 be_clear(adapter);
3130 be_stats_cleanup(adapter);
3132 be_ctrl_cleanup(adapter);
3134 kfree(adapter->vf_cfg);
3135 be_sriov_disable(adapter);
3137 be_msix_disable(adapter);
3139 pci_set_drvdata(pdev, NULL);
3140 pci_release_regions(pdev);
3141 pci_disable_device(pdev);
3143 free_netdev(adapter->netdev);
3146 static int be_get_config(struct be_adapter *adapter)
3148 int status;
3149 u8 mac[ETH_ALEN];
3151 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3152 &adapter->function_mode, &adapter->function_caps);
3153 if (status)
3154 return status;
3156 memset(mac, 0, ETH_ALEN);
3158 	/* A default permanent address is given to each VF for Lancer */
3159 if (be_physfn(adapter) || lancer_chip(adapter)) {
3160 status = be_cmd_mac_addr_query(adapter, mac,
3161 MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
3163 if (status)
3164 return status;
3166 if (!is_valid_ether_addr(mac))
3167 return -EADDRNOTAVAIL;
3169 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3170 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3173 	if (adapter->function_mode & 0x400) /* FLEX10 mode */
3174 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3175 else
3176 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3178 status = be_cmd_get_cntl_attributes(adapter);
3179 if (status)
3180 return status;
3182 if ((num_vfs && adapter->sriov_enabled) ||
3183 (adapter->function_mode & 0x400) ||
3184 lancer_chip(adapter) || !be_physfn(adapter)) {
3185 adapter->num_tx_qs = 1;
3186 netif_set_real_num_tx_queues(adapter->netdev,
3187 adapter->num_tx_qs);
3188 } else {
3189 adapter->num_tx_qs = MAX_TX_QS;
3192 return 0;
3195 static int be_dev_family_check(struct be_adapter *adapter)
3197 struct pci_dev *pdev = adapter->pdev;
3198 u32 sli_intf = 0, if_type;
3200 switch (pdev->device) {
3201 case BE_DEVICE_ID1:
3202 case OC_DEVICE_ID1:
3203 adapter->generation = BE_GEN2;
3204 break;
3205 case BE_DEVICE_ID2:
3206 case OC_DEVICE_ID2:
3207 adapter->generation = BE_GEN3;
3208 break;
3209 case OC_DEVICE_ID3:
3210 case OC_DEVICE_ID4:
3211 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3212 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3213 SLI_INTF_IF_TYPE_SHIFT;
3215 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3216 if_type != 0x02) {
3217 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3218 return -EINVAL;
3220 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3221 SLI_INTF_FAMILY_SHIFT);
3222 adapter->generation = BE_GEN3;
3223 break;
3224 default:
3225 adapter->generation = 0;
3227 return 0;
3230 static int lancer_wait_ready(struct be_adapter *adapter)
3232 #define SLIPORT_READY_TIMEOUT 500
3233 u32 sliport_status;
3234 int status = 0, i;
3236 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3237 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3238 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3239 break;
3241 msleep(20);
3244 if (i == SLIPORT_READY_TIMEOUT)
3245 status = -1;
3247 return status;
3250 static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3252 int status;
3253 u32 sliport_status, err, reset_needed;
3254 status = lancer_wait_ready(adapter);
3255 if (!status) {
3256 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3257 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3258 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3259 if (err && reset_needed) {
3260 iowrite32(SLI_PORT_CONTROL_IP_MASK,
3261 adapter->db + SLIPORT_CONTROL_OFFSET);
3263 /* check adapter has corrected the error */
3264 status = lancer_wait_ready(adapter);
3265 sliport_status = ioread32(adapter->db +
3266 SLIPORT_STATUS_OFFSET);
3267 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3268 SLIPORT_STATUS_RN_MASK);
3269 if (status || sliport_status)
3270 status = -1;
3271 } else if (err || reset_needed) {
3272 status = -1;
3275 return status;
3278 static int __devinit be_probe(struct pci_dev *pdev,
3279 const struct pci_device_id *pdev_id)
3281 int status = 0;
3282 struct be_adapter *adapter;
3283 struct net_device *netdev;
3285 status = pci_enable_device(pdev);
3286 if (status)
3287 goto do_none;
3289 status = pci_request_regions(pdev, DRV_NAME);
3290 if (status)
3291 goto disable_dev;
3292 pci_set_master(pdev);
3294 netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
3295 if (netdev == NULL) {
3296 status = -ENOMEM;
3297 goto rel_reg;
3299 adapter = netdev_priv(netdev);
3300 adapter->pdev = pdev;
3301 pci_set_drvdata(pdev, adapter);
3303 status = be_dev_family_check(adapter);
3304 if (status)
3305 goto free_netdev;
3307 adapter->netdev = netdev;
3308 SET_NETDEV_DEV(netdev, &pdev->dev);
3310 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
3311 if (!status) {
3312 netdev->features |= NETIF_F_HIGHDMA;
3313 } else {
3314 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3315 if (status) {
3316 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3317 goto free_netdev;
3321 be_sriov_enable(adapter);
3322 if (adapter->sriov_enabled) {
3323 adapter->vf_cfg = kcalloc(num_vfs,
3324 sizeof(struct be_vf_cfg), GFP_KERNEL);
3326 		if (!adapter->vf_cfg) {
3327 			status = -ENOMEM;
3328 			goto free_netdev;
3330 status = be_ctrl_init(adapter);
3331 if (status)
3332 goto free_vf_cfg;
3334 if (lancer_chip(adapter)) {
3335 status = lancer_test_and_set_rdy_state(adapter);
3336 if (status) {
3337 dev_err(&pdev->dev, "Adapter in non recoverable error\n");
3338 goto ctrl_clean;
3342 /* sync up with fw's ready state */
3343 if (be_physfn(adapter)) {
3344 status = be_cmd_POST(adapter);
3345 if (status)
3346 goto ctrl_clean;
3349 /* tell fw we're ready to fire cmds */
3350 status = be_cmd_fw_init(adapter);
3351 if (status)
3352 goto ctrl_clean;
3354 status = be_cmd_reset_function(adapter);
3355 if (status)
3356 goto ctrl_clean;
3358 status = be_stats_init(adapter);
3359 if (status)
3360 goto ctrl_clean;
3362 status = be_get_config(adapter);
3363 if (status)
3364 goto stats_clean;
3366 /* The INTR bit may be set in the card when probed by a kdump kernel
3367 * after a crash.
3369 if (!lancer_chip(adapter))
3370 be_intr_set(adapter, false);
3372 be_msix_enable(adapter);
3374 INIT_DELAYED_WORK(&adapter->work, be_worker);
3376 status = be_setup(adapter);
3377 if (status)
3378 goto msix_disable;
3380 be_netdev_init(netdev);
3381 status = register_netdev(netdev);
3382 if (status != 0)
3383 goto unsetup;
3385 if (be_physfn(adapter) && adapter->sriov_enabled) {
3386 u8 mac_speed;
3387 u16 vf, lnk_speed;
3389 if (!lancer_chip(adapter)) {
3390 status = be_vf_eth_addr_config(adapter);
3391 if (status)
3392 goto unreg_netdev;
3395 for (vf = 0; vf < num_vfs; vf++) {
3396 status = be_cmd_link_status_query(adapter, &mac_speed,
3397 &lnk_speed, vf + 1);
3398 if (!status)
3399 adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
3400 else
3401 goto unreg_netdev;
3405 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
3407 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3408 return 0;
3410 unreg_netdev:
3411 unregister_netdev(netdev);
3412 unsetup:
3413 be_clear(adapter);
3414 msix_disable:
3415 be_msix_disable(adapter);
3416 stats_clean:
3417 be_stats_cleanup(adapter);
3418 ctrl_clean:
3419 be_ctrl_cleanup(adapter);
3420 free_vf_cfg:
3421 kfree(adapter->vf_cfg);
3422 free_netdev:
3423 be_sriov_disable(adapter);
3424 free_netdev(netdev);
3425 pci_set_drvdata(pdev, NULL);
3426 rel_reg:
3427 pci_release_regions(pdev);
3428 disable_dev:
3429 pci_disable_device(pdev);
3430 do_none:
3431 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3432 return status;
3435 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3437 struct be_adapter *adapter = pci_get_drvdata(pdev);
3438 struct net_device *netdev = adapter->netdev;
3440 cancel_delayed_work_sync(&adapter->work);
3441 if (adapter->wol)
3442 be_setup_wol(adapter, true);
3444 netif_device_detach(netdev);
3445 if (netif_running(netdev)) {
3446 rtnl_lock();
3447 be_close(netdev);
3448 rtnl_unlock();
3450 be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
3451 be_clear(adapter);
3453 be_msix_disable(adapter);
3454 pci_save_state(pdev);
3455 pci_disable_device(pdev);
3456 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3457 return 0;
3460 static int be_resume(struct pci_dev *pdev)
3462 int status = 0;
3463 struct be_adapter *adapter = pci_get_drvdata(pdev);
3464 struct net_device *netdev = adapter->netdev;
3466 netif_device_detach(netdev);
3468 status = pci_enable_device(pdev);
3469 if (status)
3470 return status;
3472 	pci_set_power_state(pdev, PCI_D0);
3473 pci_restore_state(pdev);
3475 be_msix_enable(adapter);
3476 /* tell fw we're ready to fire cmds */
3477 status = be_cmd_fw_init(adapter);
3478 if (status)
3479 return status;
3481 be_setup(adapter);
3482 if (netif_running(netdev)) {
3483 rtnl_lock();
3484 be_open(netdev);
3485 rtnl_unlock();
3487 netif_device_attach(netdev);
3489 if (adapter->wol)
3490 be_setup_wol(adapter, false);
3492 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3493 return 0;
3497 * An FLR will stop BE from DMAing any data.
3499 static void be_shutdown(struct pci_dev *pdev)
3501 struct be_adapter *adapter = pci_get_drvdata(pdev);
3503 if (!adapter)
3504 return;
3506 cancel_delayed_work_sync(&adapter->work);
3508 netif_device_detach(adapter->netdev);
3510 if (adapter->wol)
3511 be_setup_wol(adapter, true);
3513 be_cmd_reset_function(adapter);
3515 pci_disable_device(pdev);
3518 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3519 pci_channel_state_t state)
3521 struct be_adapter *adapter = pci_get_drvdata(pdev);
3522 struct net_device *netdev = adapter->netdev;
3524 dev_err(&adapter->pdev->dev, "EEH error detected\n");
3526 adapter->eeh_err = true;
3528 netif_device_detach(netdev);
3530 if (netif_running(netdev)) {
3531 rtnl_lock();
3532 be_close(netdev);
3533 rtnl_unlock();
3535 be_clear(adapter);
3537 if (state == pci_channel_io_perm_failure)
3538 return PCI_ERS_RESULT_DISCONNECT;
3540 pci_disable_device(pdev);
3542 return PCI_ERS_RESULT_NEED_RESET;
3545 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3547 struct be_adapter *adapter = pci_get_drvdata(pdev);
3548 int status;
3550 dev_info(&adapter->pdev->dev, "EEH reset\n");
3551 adapter->eeh_err = false;
3553 status = pci_enable_device(pdev);
3554 if (status)
3555 return PCI_ERS_RESULT_DISCONNECT;
3557 pci_set_master(pdev);
3558 	pci_set_power_state(pdev, PCI_D0);
3559 pci_restore_state(pdev);
3561 /* Check if card is ok and fw is ready */
3562 status = be_cmd_POST(adapter);
3563 if (status)
3564 return PCI_ERS_RESULT_DISCONNECT;
3566 return PCI_ERS_RESULT_RECOVERED;
3569 static void be_eeh_resume(struct pci_dev *pdev)
3571 int status = 0;
3572 struct be_adapter *adapter = pci_get_drvdata(pdev);
3573 struct net_device *netdev = adapter->netdev;
3575 dev_info(&adapter->pdev->dev, "EEH resume\n");
3577 pci_save_state(pdev);
3579 /* tell fw we're ready to fire cmds */
3580 status = be_cmd_fw_init(adapter);
3581 if (status)
3582 goto err;
3584 status = be_setup(adapter);
3585 if (status)
3586 goto err;
3588 if (netif_running(netdev)) {
3589 status = be_open(netdev);
3590 if (status)
3591 goto err;
3593 netif_device_attach(netdev);
3594 return;
3595 err:
3596 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
3599 static struct pci_error_handlers be_eeh_handlers = {
3600 .error_detected = be_eeh_err_detected,
3601 .slot_reset = be_eeh_reset,
3602 .resume = be_eeh_resume,
3605 static struct pci_driver be_driver = {
3606 .name = DRV_NAME,
3607 .id_table = be_dev_ids,
3608 .probe = be_probe,
3609 .remove = be_remove,
3610 .suspend = be_suspend,
3611 .resume = be_resume,
3612 .shutdown = be_shutdown,
3613 .err_handler = &be_eeh_handlers
3616 static int __init be_init_module(void)
3618 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3619 rx_frag_size != 2048) {
3620 printk(KERN_WARNING DRV_NAME
3621 " : Module param rx_frag_size must be 2048/4096/8192."
3622 " Using 2048\n");
3623 rx_frag_size = 2048;
3626 return pci_register_driver(&be_driver);
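/*
 * Module parameters are validated here rather than at point of use: an
 * unsupported rx_frag_size is coerced to 2048 with a warning. Typical
 * override (illustrative):
 *
 *   # modprobe be2net rx_frag_size=4096
 */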
3628 module_init(be_init_module);
3630 static void __exit be_exit_module(void)
3632 pci_unregister_driver(&be_driver);
3634 module_exit(be_exit_module);