be2net: don't create multiple TXQs in BE2
drivers/net/ethernet/emulex/benet/be_main.c
1 /*
2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
10 * Contact Information:
11 * linux-drivers@emulex.com
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
18 #include <linux/prefetch.h>
19 #include "be.h"
20 #include "be_cmds.h"
21 #include <asm/div64.h>
23 MODULE_VERSION(DRV_VER);
24 MODULE_DEVICE_TABLE(pci, be_dev_ids);
25 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
26 MODULE_AUTHOR("ServerEngines Corporation");
27 MODULE_LICENSE("GPL");
29 static ushort rx_frag_size = 2048;
30 static unsigned int num_vfs;
31 module_param(rx_frag_size, ushort, S_IRUGO);
32 module_param(num_vfs, uint, S_IRUGO);
33 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
34 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
36 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
37 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
38 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
39 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
40 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
41 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
42 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
43 { 0 }
45 MODULE_DEVICE_TABLE(pci, be_dev_ids);
46 /* UE Status Low CSR */
47 static const char * const ue_status_low_desc[] = {
48 "CEV",
49 "CTX",
50 "DBUF",
51 "ERX",
52 "Host",
53 "MPU",
54 "NDMA",
55 "PTC ",
56 "RDMA ",
57 "RXF ",
58 "RXIPS ",
59 "RXULP0 ",
60 "RXULP1 ",
61 "RXULP2 ",
62 "TIM ",
63 "TPOST ",
64 "TPRE ",
65 "TXIPS ",
66 "TXULP0 ",
67 "TXULP1 ",
68 "UC ",
69 "WDMA ",
70 "TXULP2 ",
71 "HOST1 ",
72 "P0_OB_LINK ",
73 "P1_OB_LINK ",
74 "HOST_GPIO ",
75 "MBOX ",
76 "AXGMAC0",
77 "AXGMAC1",
78 "JTAG",
79 "MPU_INTPEND"
81 /* UE Status High CSR */
82 static const char * const ue_status_hi_desc[] = {
83 "LPCMEMHOST",
84 "MGMT_MAC",
85 "PCS0ONLINE",
86 "MPU_IRAM",
87 "PCS1ONLINE",
88 "PCTL0",
89 "PCTL1",
90 "PMEM",
91 "RR",
92 "TXPB",
93 "RXPP",
94 "XAUI",
95 "TXP",
96 "ARM",
97 "IPC",
98 "HOST2",
99 "HOST3",
100 "HOST4",
101 "HOST5",
102 "HOST6",
103 "HOST7",
104 "HOST8",
105 "HOST9",
106 "NETC",
107 "Unknown",
108 "Unknown",
109 "Unknown",
110 "Unknown",
111 "Unknown",
112 "Unknown",
113 "Unknown",
114 "Unknown"
117 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
119 struct be_dma_mem *mem = &q->dma_mem;
120 if (mem->va)
121 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
122 mem->dma);
125 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
126 u16 len, u16 entry_size)
128 struct be_dma_mem *mem = &q->dma_mem;
130 memset(q, 0, sizeof(*q));
131 q->len = len;
132 q->entry_size = entry_size;
133 mem->size = len * entry_size;
134 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
135 GFP_KERNEL);
136 if (!mem->va)
137 return -1;
138 memset(mem->va, 0, mem->size);
139 return 0;
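/*
 * Standalone sketch of the ring sizing above (illustration only, not driver
 * code): the DMA-coherent buffer backing a queue is simply entry count times
 * entry size. The 2048-entry/16-byte figures in the comment are assumptions
 * for the example, not values read from be.h.
 */
static unsigned int example_ring_mem_size(unsigned short len,
					  unsigned short entry_size)
{
	/* e.g. a 2048-entry ring of 16-byte WRBs needs 2048 * 16 = 32768 bytes */
	return (unsigned int)len * entry_size;
}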
142 static void be_intr_set(struct be_adapter *adapter, bool enable)
144 u32 reg, enabled;
146 if (adapter->eeh_err)
147 return;
149 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
150 &reg);
151 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
153 if (!enabled && enable)
154 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
155 else if (enabled && !enable)
156 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
157 else
158 return;
160 pci_write_config_dword(adapter->pdev,
161 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
164 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
166 u32 val = 0;
167 val |= qid & DB_RQ_RING_ID_MASK;
168 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
170 wmb();
171 iowrite32(val, adapter->db + DB_RQ_OFFSET);
174 static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
176 u32 val = 0;
177 val |= qid & DB_TXULP_RING_ID_MASK;
178 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
180 wmb();
181 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
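/*
 * Illustrative sketch (not part of the driver) of how the TX doorbell value
 * above is packed: the ring id occupies the low bits and the count of newly
 * posted WRBs sits in its own bit-field before the single iowrite32(). The
 * mask and shift below are stand-ins for the DB_TXULP_* constants in be.h.
 */
static inline unsigned int example_pack_txq_doorbell(unsigned short qid,
						     unsigned short posted)
{
	unsigned int val = 0;

	val |= qid & 0x3FF;				/* ring id in the low bits (assumed mask) */
	val |= (unsigned int)(posted & 0x3FFF) << 16;	/* posted count field (assumed shift) */
	return val;
}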
184 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
185 bool arm, bool clear_int, u16 num_popped)
187 u32 val = 0;
188 val |= qid & DB_EQ_RING_ID_MASK;
189 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
190 DB_EQ_RING_ID_EXT_MASK_SHIFT);
192 if (adapter->eeh_err)
193 return;
195 if (arm)
196 val |= 1 << DB_EQ_REARM_SHIFT;
197 if (clear_int)
198 val |= 1 << DB_EQ_CLR_SHIFT;
199 val |= 1 << DB_EQ_EVNT_SHIFT;
200 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
201 iowrite32(val, adapter->db + DB_EQ_OFFSET);
204 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
206 u32 val = 0;
207 val |= qid & DB_CQ_RING_ID_MASK;
208 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
209 DB_CQ_RING_ID_EXT_MASK_SHIFT);
211 if (adapter->eeh_err)
212 return;
214 if (arm)
215 val |= 1 << DB_CQ_REARM_SHIFT;
216 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
217 iowrite32(val, adapter->db + DB_CQ_OFFSET);
220 static int be_mac_addr_set(struct net_device *netdev, void *p)
222 struct be_adapter *adapter = netdev_priv(netdev);
223 struct sockaddr *addr = p;
224 int status = 0;
226 if (!is_valid_ether_addr(addr->sa_data))
227 return -EADDRNOTAVAIL;
229 /* MAC addr configuration will be done in hardware for VFs
230 * by their corresponding PFs. Just copy to netdev addr here
232 if (!be_physfn(adapter))
233 goto netdev_addr;
235 status = be_cmd_pmac_del(adapter, adapter->if_handle,
236 adapter->pmac_id, 0);
237 if (status)
238 return status;
240 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
241 adapter->if_handle, &adapter->pmac_id, 0);
242 netdev_addr:
243 if (!status)
244 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
246 return status;
249 static void populate_be2_stats(struct be_adapter *adapter)
251 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
252 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
253 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
254 struct be_port_rxf_stats_v0 *port_stats =
255 &rxf_stats->port[adapter->port_num];
256 struct be_drv_stats *drvs = &adapter->drv_stats;
258 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
259 drvs->rx_pause_frames = port_stats->rx_pause_frames;
260 drvs->rx_crc_errors = port_stats->rx_crc_errors;
261 drvs->rx_control_frames = port_stats->rx_control_frames;
262 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
263 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
264 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
265 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
266 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
267 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
268 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
269 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
270 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
271 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
272 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
273 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
274 drvs->rx_dropped_header_too_small =
275 port_stats->rx_dropped_header_too_small;
276 drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
277 drvs->rx_alignment_symbol_errors =
278 port_stats->rx_alignment_symbol_errors;
280 drvs->tx_pauseframes = port_stats->tx_pauseframes;
281 drvs->tx_controlframes = port_stats->tx_controlframes;
283 if (adapter->port_num)
284 drvs->jabber_events = rxf_stats->port1_jabber_events;
285 else
286 drvs->jabber_events = rxf_stats->port0_jabber_events;
287 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
288 drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
289 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
290 drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
291 drvs->forwarded_packets = rxf_stats->forwarded_packets;
292 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
293 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
294 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
295 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
298 static void populate_be3_stats(struct be_adapter *adapter)
300 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
301 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
302 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
303 struct be_port_rxf_stats_v1 *port_stats =
304 &rxf_stats->port[adapter->port_num];
305 struct be_drv_stats *drvs = &adapter->drv_stats;
307 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
308 drvs->rx_pause_frames = port_stats->rx_pause_frames;
309 drvs->rx_crc_errors = port_stats->rx_crc_errors;
310 drvs->rx_control_frames = port_stats->rx_control_frames;
311 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
312 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
313 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
314 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
315 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
316 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
317 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
318 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
319 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
320 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
321 drvs->rx_dropped_header_too_small =
322 port_stats->rx_dropped_header_too_small;
323 drvs->rx_input_fifo_overflow_drop =
324 port_stats->rx_input_fifo_overflow_drop;
325 drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
326 drvs->rx_alignment_symbol_errors =
327 port_stats->rx_alignment_symbol_errors;
328 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
329 drvs->tx_pauseframes = port_stats->tx_pauseframes;
330 drvs->tx_controlframes = port_stats->tx_controlframes;
331 drvs->jabber_events = port_stats->jabber_events;
332 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
333 drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
334 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
335 drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
336 drvs->forwarded_packets = rxf_stats->forwarded_packets;
337 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
338 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
339 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
340 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
343 static void populate_lancer_stats(struct be_adapter *adapter)
346 struct be_drv_stats *drvs = &adapter->drv_stats;
347 struct lancer_pport_stats *pport_stats =
348 pport_stats_from_cmd(adapter);
350 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
351 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
352 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
353 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
354 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
355 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
356 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
357 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
358 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
359 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
360 drvs->rx_dropped_tcp_length =
361 pport_stats->rx_dropped_invalid_tcp_length;
362 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
363 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
364 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
365 drvs->rx_dropped_header_too_small =
366 pport_stats->rx_dropped_header_too_small;
367 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
368 drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
369 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
370 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
371 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
372 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
373 drvs->jabber_events = pport_stats->rx_jabbers;
374 drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
375 drvs->forwarded_packets = pport_stats->num_forwards_lo;
376 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
377 drvs->rx_drops_too_many_frags =
378 pport_stats->rx_drops_too_many_frags_lo;
381 static void accumulate_16bit_val(u32 *acc, u16 val)
383 #define lo(x) (x & 0xFFFF)
384 #define hi(x) (x & 0xFFFF0000)
385 bool wrapped = val < lo(*acc);
386 u32 newacc = hi(*acc) + val;
388 if (wrapped)
389 newacc += 65536;
390 ACCESS_ONCE(*acc) = newacc;
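/*
 * Standalone restatement of the wrap handling above (illustration only): the
 * HW counter is 16 bits wide, so when the newly read value is smaller than
 * the low half of the accumulator the counter must have wrapped, and one
 * full 2^16 span is added back in.
 */
static unsigned int example_accumulate_16bit(unsigned int acc, unsigned short val)
{
	unsigned int lo16 = acc & 0xFFFF;	/* last raw HW reading */
	unsigned int hi16 = acc & 0xFFFF0000;	/* accumulated wrap portion */

	if (val < lo16)				/* counter wrapped past 65535 */
		hi16 += 0x10000;
	return hi16 + val;			/* e.g. acc=0x0000FFF0, val=5 -> 0x00010005 */
}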
393 void be_parse_stats(struct be_adapter *adapter)
395 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
396 struct be_rx_obj *rxo;
397 int i;
399 if (adapter->generation == BE_GEN3) {
400 if (lancer_chip(adapter))
401 populate_lancer_stats(adapter);
402 else
403 populate_be3_stats(adapter);
404 } else {
405 populate_be2_stats(adapter);
408 /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
409 for_all_rx_queues(adapter, rxo, i) {
410 /* below erx HW counter can actually wrap around after
411 * 65535. Driver accumulates a 32-bit value
413 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
414 (u16)erx->rx_drops_no_fragments[rxo->q.id]);
418 static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
419 struct rtnl_link_stats64 *stats)
421 struct be_adapter *adapter = netdev_priv(netdev);
422 struct be_drv_stats *drvs = &adapter->drv_stats;
423 struct be_rx_obj *rxo;
424 struct be_tx_obj *txo;
425 u64 pkts, bytes;
426 unsigned int start;
427 int i;
429 for_all_rx_queues(adapter, rxo, i) {
430 const struct be_rx_stats *rx_stats = rx_stats(rxo);
431 do {
432 start = u64_stats_fetch_begin_bh(&rx_stats->sync);
433 pkts = rx_stats(rxo)->rx_pkts;
434 bytes = rx_stats(rxo)->rx_bytes;
435 } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
436 stats->rx_packets += pkts;
437 stats->rx_bytes += bytes;
438 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
439 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
440 rx_stats(rxo)->rx_drops_no_frags;
443 for_all_tx_queues(adapter, txo, i) {
444 const struct be_tx_stats *tx_stats = tx_stats(txo);
445 do {
446 start = u64_stats_fetch_begin_bh(&tx_stats->sync);
447 pkts = tx_stats(txo)->tx_pkts;
448 bytes = tx_stats(txo)->tx_bytes;
449 } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
450 stats->tx_packets += pkts;
451 stats->tx_bytes += bytes;
454 /* bad pkts received */
455 stats->rx_errors = drvs->rx_crc_errors +
456 drvs->rx_alignment_symbol_errors +
457 drvs->rx_in_range_errors +
458 drvs->rx_out_range_errors +
459 drvs->rx_frame_too_long +
460 drvs->rx_dropped_too_small +
461 drvs->rx_dropped_too_short +
462 drvs->rx_dropped_header_too_small +
463 drvs->rx_dropped_tcp_length +
464 drvs->rx_dropped_runt;
466 /* detailed rx errors */
467 stats->rx_length_errors = drvs->rx_in_range_errors +
468 drvs->rx_out_range_errors +
469 drvs->rx_frame_too_long;
471 stats->rx_crc_errors = drvs->rx_crc_errors;
473 /* frame alignment errors */
474 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
476 /* receiver fifo overrun */
477 /* drops_no_pbuf is not per i/f, it's per BE card */
478 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
479 drvs->rx_input_fifo_overflow_drop +
480 drvs->rx_drops_no_pbuf;
481 return stats;
484 void be_link_status_update(struct be_adapter *adapter, u32 link_status)
486 struct net_device *netdev = adapter->netdev;
488 /* when link status changes, link speed must be re-queried from card */
489 adapter->link_speed = -1;
490 if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
491 netif_carrier_on(netdev);
492 dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
493 } else {
494 netif_carrier_off(netdev);
495 dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
499 static void be_tx_stats_update(struct be_tx_obj *txo,
500 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
502 struct be_tx_stats *stats = tx_stats(txo);
504 u64_stats_update_begin(&stats->sync);
505 stats->tx_reqs++;
506 stats->tx_wrbs += wrb_cnt;
507 stats->tx_bytes += copied;
508 stats->tx_pkts += (gso_segs ? gso_segs : 1);
509 if (stopped)
510 stats->tx_stops++;
511 u64_stats_update_end(&stats->sync);
514 /* Determine number of WRB entries needed to xmit data in an skb */
515 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
516 bool *dummy)
518 int cnt = (skb->len > skb->data_len);
520 cnt += skb_shinfo(skb)->nr_frags;
522 /* to account for hdr wrb */
523 cnt++;
524 if (lancer_chip(adapter) || !(cnt & 1)) {
525 *dummy = false;
526 } else {
527 /* add a dummy to make it an even num */
528 cnt++;
529 *dummy = true;
531 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
532 return cnt;
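/*
 * Illustrative helper (not used by the driver) showing the WRB accounting
 * above: one WRB for the header, one for the linear data area if present,
 * one per page fragment, and on BEx (non-Lancer) a dummy WRB to round the
 * total up to an even number.
 */
static unsigned int example_wrb_count(unsigned int has_linear_data,
				      unsigned int nr_frags,
				      int is_lancer, int *needs_dummy)
{
	unsigned int cnt = 1 + has_linear_data + nr_frags;	/* hdr + data WRBs */

	*needs_dummy = 0;
	if (!is_lancer && (cnt & 1)) {		/* pad odd counts on BEx */
		cnt++;
		*needs_dummy = 1;
	}
	return cnt;	/* e.g. linear data + 2 frags on BEx -> 4 WRBs, no dummy */
}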
535 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
537 wrb->frag_pa_hi = upper_32_bits(addr);
538 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
539 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
542 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
543 struct sk_buff *skb, u32 wrb_cnt, u32 len)
545 u8 vlan_prio = 0;
546 u16 vlan_tag = 0;
548 memset(hdr, 0, sizeof(*hdr));
550 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
552 if (skb_is_gso(skb)) {
553 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
554 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
555 hdr, skb_shinfo(skb)->gso_size);
556 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
557 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
558 if (lancer_chip(adapter) && adapter->sli_family ==
559 LANCER_A0_SLI_FAMILY) {
560 AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
561 if (is_tcp_pkt(skb))
562 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
563 tcpcs, hdr, 1);
564 else if (is_udp_pkt(skb))
565 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
566 udpcs, hdr, 1);
568 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
569 if (is_tcp_pkt(skb))
570 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
571 else if (is_udp_pkt(skb))
572 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
575 if (vlan_tx_tag_present(skb)) {
576 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
577 vlan_tag = vlan_tx_tag_get(skb);
578 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
579 /* If vlan priority provided by OS is NOT in available bmap */
580 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
581 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
582 adapter->recommended_prio;
583 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
586 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
587 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
588 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
589 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
592 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
593 bool unmap_single)
595 dma_addr_t dma;
597 be_dws_le_to_cpu(wrb, sizeof(*wrb));
599 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
600 if (wrb->frag_len) {
601 if (unmap_single)
602 dma_unmap_single(dev, dma, wrb->frag_len,
603 DMA_TO_DEVICE);
604 else
605 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
609 static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
610 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
612 dma_addr_t busaddr;
613 int i, copied = 0;
614 struct device *dev = &adapter->pdev->dev;
615 struct sk_buff *first_skb = skb;
616 struct be_eth_wrb *wrb;
617 struct be_eth_hdr_wrb *hdr;
618 bool map_single = false;
619 u16 map_head;
621 hdr = queue_head_node(txq);
622 queue_head_inc(txq);
623 map_head = txq->head;
625 if (skb->len > skb->data_len) {
626 int len = skb_headlen(skb);
627 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
628 if (dma_mapping_error(dev, busaddr))
629 goto dma_err;
630 map_single = true;
631 wrb = queue_head_node(txq);
632 wrb_fill(wrb, busaddr, len);
633 be_dws_cpu_to_le(wrb, sizeof(*wrb));
634 queue_head_inc(txq);
635 copied += len;
638 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
639 const struct skb_frag_struct *frag =
640 &skb_shinfo(skb)->frags[i];
641 busaddr = skb_frag_dma_map(dev, frag, 0,
642 skb_frag_size(frag), DMA_TO_DEVICE);
643 if (dma_mapping_error(dev, busaddr))
644 goto dma_err;
645 wrb = queue_head_node(txq);
646 wrb_fill(wrb, busaddr, skb_frag_size(frag));
647 be_dws_cpu_to_le(wrb, sizeof(*wrb));
648 queue_head_inc(txq);
649 copied += skb_frag_size(frag);
652 if (dummy_wrb) {
653 wrb = queue_head_node(txq);
654 wrb_fill(wrb, 0, 0);
655 be_dws_cpu_to_le(wrb, sizeof(*wrb));
656 queue_head_inc(txq);
659 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
660 be_dws_cpu_to_le(hdr, sizeof(*hdr));
662 return copied;
663 dma_err:
664 txq->head = map_head;
665 while (copied) {
666 wrb = queue_head_node(txq);
667 unmap_tx_frag(dev, wrb, map_single);
668 map_single = false;
669 copied -= wrb->frag_len;
670 queue_head_inc(txq);
672 return 0;
675 static netdev_tx_t be_xmit(struct sk_buff *skb,
676 struct net_device *netdev)
678 struct be_adapter *adapter = netdev_priv(netdev);
679 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
680 struct be_queue_info *txq = &txo->q;
681 u32 wrb_cnt = 0, copied = 0;
682 u32 start = txq->head;
683 bool dummy_wrb, stopped = false;
685 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
687 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
688 if (copied) {
689 /* record the sent skb in the sent_skb table */
690 BUG_ON(txo->sent_skb_list[start]);
691 txo->sent_skb_list[start] = skb;
693 /* Ensure txq has space for the next skb; Else stop the queue
694 * *BEFORE* ringing the tx doorbell, so that we serialize the
695 * tx compls of the current transmit which'll wake up the queue
697 atomic_add(wrb_cnt, &txq->used);
698 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
699 txq->len) {
700 netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
701 stopped = true;
704 be_txq_notify(adapter, txq->id, wrb_cnt);
706 be_tx_stats_update(txo, wrb_cnt, copied,
707 skb_shinfo(skb)->gso_segs, stopped);
708 } else {
709 txq->head = start;
710 dev_kfree_skb_any(skb);
712 return NETDEV_TX_OK;
715 static int be_change_mtu(struct net_device *netdev, int new_mtu)
717 struct be_adapter *adapter = netdev_priv(netdev);
718 if (new_mtu < BE_MIN_MTU ||
719 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
720 (ETH_HLEN + ETH_FCS_LEN))) {
721 dev_info(&adapter->pdev->dev,
722 "MTU must be between %d and %d bytes\n",
723 BE_MIN_MTU,
724 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
725 return -EINVAL;
727 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
728 netdev->mtu, new_mtu);
729 netdev->mtu = new_mtu;
730 return 0;
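/*
 * Sketch of the MTU bounds check above with example numbers (illustration
 * only): assuming a 9018-byte maximum jumbo frame, the largest MTU is
 * 9018 - 14 (Ethernet header) - 4 (FCS) = 9000 bytes. The constants below
 * are stand-ins for BE_MIN_MTU and BE_MAX_JUMBO_FRAME_SIZE, not values taken
 * from be.h.
 */
static int example_mtu_is_valid(int new_mtu)
{
	const int min_mtu = 256;			/* assumed BE_MIN_MTU */
	const int max_frame = 9018;			/* assumed max jumbo frame */
	const int max_mtu = max_frame - (14 + 4);	/* minus ETH_HLEN + ETH_FCS_LEN */

	return new_mtu >= min_mtu && new_mtu <= max_mtu;
}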
734 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
735 * If the user configures more, place BE in vlan promiscuous mode.
737 static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
739 u16 vtag[BE_NUM_VLANS_SUPPORTED];
740 u16 ntags = 0, i;
741 int status = 0;
742 u32 if_handle;
744 if (vf) {
745 if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
746 vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
747 status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
750 /* No need to further configure vids if in promiscuous mode */
751 if (adapter->promiscuous)
752 return 0;
754 if (adapter->vlans_added <= adapter->max_vlans) {
755 /* Construct VLAN Table to give to HW */
756 for (i = 0; i < VLAN_N_VID; i++) {
757 if (adapter->vlan_tag[i]) {
758 vtag[ntags] = cpu_to_le16(i);
759 ntags++;
762 status = be_cmd_vlan_config(adapter, adapter->if_handle,
763 vtag, ntags, 1, 0);
764 } else {
765 status = be_cmd_vlan_config(adapter, adapter->if_handle,
766 NULL, 0, 1, 1);
769 return status;
772 static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
774 struct be_adapter *adapter = netdev_priv(netdev);
776 adapter->vlans_added++;
777 if (!be_physfn(adapter))
778 return;
780 adapter->vlan_tag[vid] = 1;
781 if (adapter->vlans_added <= (adapter->max_vlans + 1))
782 be_vid_config(adapter, false, 0);
785 static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
787 struct be_adapter *adapter = netdev_priv(netdev);
789 adapter->vlans_added--;
791 if (!be_physfn(adapter))
792 return;
794 adapter->vlan_tag[vid] = 0;
795 if (adapter->vlans_added <= adapter->max_vlans)
796 be_vid_config(adapter, false, 0);
799 static void be_set_rx_mode(struct net_device *netdev)
801 struct be_adapter *adapter = netdev_priv(netdev);
803 if (netdev->flags & IFF_PROMISC) {
804 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
805 adapter->promiscuous = true;
806 goto done;
809 /* BE was previously in promiscuous mode; disable it */
810 if (adapter->promiscuous) {
811 adapter->promiscuous = false;
812 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
814 if (adapter->vlans_added)
815 be_vid_config(adapter, false, 0);
818 /* Enable multicast promisc if num configured exceeds what we support */
819 if (netdev->flags & IFF_ALLMULTI ||
820 netdev_mc_count(netdev) > BE_MAX_MC) {
821 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
822 goto done;
825 be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
826 done:
827 return;
830 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
832 struct be_adapter *adapter = netdev_priv(netdev);
833 int status;
835 if (!adapter->sriov_enabled)
836 return -EPERM;
838 if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
839 return -EINVAL;
841 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
842 status = be_cmd_pmac_del(adapter,
843 adapter->vf_cfg[vf].vf_if_handle,
844 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
846 status = be_cmd_pmac_add(adapter, mac,
847 adapter->vf_cfg[vf].vf_if_handle,
848 &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
850 if (status)
851 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
852 mac, vf);
853 else
854 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
856 return status;
859 static int be_get_vf_config(struct net_device *netdev, int vf,
860 struct ifla_vf_info *vi)
862 struct be_adapter *adapter = netdev_priv(netdev);
864 if (!adapter->sriov_enabled)
865 return -EPERM;
867 if (vf >= num_vfs)
868 return -EINVAL;
870 vi->vf = vf;
871 vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
872 vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
873 vi->qos = 0;
874 memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
876 return 0;
879 static int be_set_vf_vlan(struct net_device *netdev,
880 int vf, u16 vlan, u8 qos)
882 struct be_adapter *adapter = netdev_priv(netdev);
883 int status = 0;
885 if (!adapter->sriov_enabled)
886 return -EPERM;
888 if ((vf >= num_vfs) || (vlan > 4095))
889 return -EINVAL;
891 if (vlan) {
892 adapter->vf_cfg[vf].vf_vlan_tag = vlan;
893 adapter->vlans_added++;
894 } else {
895 adapter->vf_cfg[vf].vf_vlan_tag = 0;
896 adapter->vlans_added--;
899 status = be_vid_config(adapter, true, vf);
901 if (status)
902 dev_info(&adapter->pdev->dev,
903 "VLAN %d config on VF %d failed\n", vlan, vf);
904 return status;
907 static int be_set_vf_tx_rate(struct net_device *netdev,
908 int vf, int rate)
910 struct be_adapter *adapter = netdev_priv(netdev);
911 int status = 0;
913 if (!adapter->sriov_enabled)
914 return -EPERM;
916 if ((vf >= num_vfs) || (rate < 0))
917 return -EINVAL;
919 if (rate > 10000)
920 rate = 10000;
922 adapter->vf_cfg[vf].vf_tx_rate = rate;
923 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
925 if (status)
926 dev_info(&adapter->pdev->dev,
927 "tx rate %d on VF %d failed\n", rate, vf);
928 return status;
931 static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
933 struct be_eq_obj *rx_eq = &rxo->rx_eq;
934 struct be_rx_stats *stats = rx_stats(rxo);
935 ulong now = jiffies;
936 ulong delta = now - stats->rx_jiffies;
937 u64 pkts;
938 unsigned int start, eqd;
940 if (!rx_eq->enable_aic)
941 return;
943 /* Wrapped around */
944 if (time_before(now, stats->rx_jiffies)) {
945 stats->rx_jiffies = now;
946 return;
949 /* Update once a second */
950 if (delta < HZ)
951 return;
953 do {
954 start = u64_stats_fetch_begin_bh(&stats->sync);
955 pkts = stats->rx_pkts;
956 } while (u64_stats_fetch_retry_bh(&stats->sync, start));
958 stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
959 stats->rx_pkts_prev = pkts;
960 stats->rx_jiffies = now;
961 eqd = stats->rx_pps / 110000;
962 eqd = eqd << 3;
963 if (eqd > rx_eq->max_eqd)
964 eqd = rx_eq->max_eqd;
965 if (eqd < rx_eq->min_eqd)
966 eqd = rx_eq->min_eqd;
967 if (eqd < 10)
968 eqd = 0;
969 if (eqd != rx_eq->cur_eqd) {
970 be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
971 rx_eq->cur_eqd = eqd;
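/*
 * Standalone sketch of the adaptive interrupt-coalescing math above
 * (illustration only): the measured packets-per-second rate is scaled into
 * an event-queue delay, clamped to the EQ's configured min/max, and switched
 * off entirely at very low rates.
 */
static unsigned int example_rx_eqd(unsigned long pkts_per_sec,
				   unsigned int min_eqd, unsigned int max_eqd)
{
	unsigned int eqd = (pkts_per_sec / 110000) << 3;	/* scale rate to delay */

	if (eqd > max_eqd)
		eqd = max_eqd;
	if (eqd < min_eqd)
		eqd = min_eqd;
	if (eqd < 10)		/* coalescing not worthwhile at low rates */
		eqd = 0;
	return eqd;		/* e.g. 1,500,000 pps with max_eqd=120 -> 104 */
}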
975 static void be_rx_stats_update(struct be_rx_obj *rxo,
976 struct be_rx_compl_info *rxcp)
978 struct be_rx_stats *stats = rx_stats(rxo);
980 u64_stats_update_begin(&stats->sync);
981 stats->rx_compl++;
982 stats->rx_bytes += rxcp->pkt_size;
983 stats->rx_pkts++;
984 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
985 stats->rx_mcast_pkts++;
986 if (rxcp->err)
987 stats->rx_compl_err++;
988 u64_stats_update_end(&stats->sync);
991 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
993 /* L4 checksum is not reliable for non-TCP/UDP packets.
994 * Also ignore ipcksm for ipv6 pkts */
995 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
996 (rxcp->ip_csum || rxcp->ipv6);
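/*
 * Standalone restatement of the checksum predicate above (illustration
 * only): the L4 checksum result is trusted only for TCP/UDP frames, and the
 * IP checksum result is ignored for IPv6, which carries no header checksum.
 */
static int example_csum_passed(int tcpf, int udpf, int l4_ok, int ip_ok, int ipv6)
{
	return (tcpf || udpf) && l4_ok && (ip_ok || ipv6);
}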
999 static struct be_rx_page_info *
1000 get_rx_page_info(struct be_adapter *adapter,
1001 struct be_rx_obj *rxo,
1002 u16 frag_idx)
1004 struct be_rx_page_info *rx_page_info;
1005 struct be_queue_info *rxq = &rxo->q;
1007 rx_page_info = &rxo->page_info_tbl[frag_idx];
1008 BUG_ON(!rx_page_info->page);
1010 if (rx_page_info->last_page_user) {
1011 dma_unmap_page(&adapter->pdev->dev,
1012 dma_unmap_addr(rx_page_info, bus),
1013 adapter->big_page_size, DMA_FROM_DEVICE);
1014 rx_page_info->last_page_user = false;
1017 atomic_dec(&rxq->used);
1018 return rx_page_info;
1021 /* Throw away the data in the Rx completion */
1022 static void be_rx_compl_discard(struct be_adapter *adapter,
1023 struct be_rx_obj *rxo,
1024 struct be_rx_compl_info *rxcp)
1026 struct be_queue_info *rxq = &rxo->q;
1027 struct be_rx_page_info *page_info;
1028 u16 i, num_rcvd = rxcp->num_rcvd;
1030 for (i = 0; i < num_rcvd; i++) {
1031 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1032 put_page(page_info->page);
1033 memset(page_info, 0, sizeof(*page_info));
1034 index_inc(&rxcp->rxq_idx, rxq->len);
1039 * skb_fill_rx_data forms a complete skb for an ether frame
1040 * indicated by rxcp.
1042 static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
1043 struct sk_buff *skb, struct be_rx_compl_info *rxcp)
1045 struct be_queue_info *rxq = &rxo->q;
1046 struct be_rx_page_info *page_info;
1047 u16 i, j;
1048 u16 hdr_len, curr_frag_len, remaining;
1049 u8 *start;
1051 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1052 start = page_address(page_info->page) + page_info->page_offset;
1053 prefetch(start);
1055 /* Copy data in the first descriptor of this completion */
1056 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1058 /* Copy the header portion into skb_data */
1059 hdr_len = min(BE_HDR_LEN, curr_frag_len);
1060 memcpy(skb->data, start, hdr_len);
1061 skb->len = curr_frag_len;
1062 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1063 /* Complete packet has now been moved to data */
1064 put_page(page_info->page);
1065 skb->data_len = 0;
1066 skb->tail += curr_frag_len;
1067 } else {
1068 skb_shinfo(skb)->nr_frags = 1;
1069 skb_frag_set_page(skb, 0, page_info->page);
1070 skb_shinfo(skb)->frags[0].page_offset =
1071 page_info->page_offset + hdr_len;
1072 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1073 skb->data_len = curr_frag_len - hdr_len;
1074 skb->truesize += rx_frag_size;
1075 skb->tail += hdr_len;
1077 page_info->page = NULL;
1079 if (rxcp->pkt_size <= rx_frag_size) {
1080 BUG_ON(rxcp->num_rcvd != 1);
1081 return;
1084 /* More frags present for this completion */
1085 index_inc(&rxcp->rxq_idx, rxq->len);
1086 remaining = rxcp->pkt_size - curr_frag_len;
1087 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1088 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1089 curr_frag_len = min(remaining, rx_frag_size);
1091 /* Coalesce all frags from the same physical page in one slot */
1092 if (page_info->page_offset == 0) {
1093 /* Fresh page */
1094 j++;
1095 skb_frag_set_page(skb, j, page_info->page);
1096 skb_shinfo(skb)->frags[j].page_offset =
1097 page_info->page_offset;
1098 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1099 skb_shinfo(skb)->nr_frags++;
1100 } else {
1101 put_page(page_info->page);
1104 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1105 skb->len += curr_frag_len;
1106 skb->data_len += curr_frag_len;
1107 skb->truesize += rx_frag_size;
1108 remaining -= curr_frag_len;
1109 index_inc(&rxcp->rxq_idx, rxq->len);
1110 page_info->page = NULL;
1112 BUG_ON(j > MAX_SKB_FRAGS);
1115 /* Process the RX completion indicated by rxcp when GRO is disabled */
1116 static void be_rx_compl_process(struct be_adapter *adapter,
1117 struct be_rx_obj *rxo,
1118 struct be_rx_compl_info *rxcp)
1120 struct net_device *netdev = adapter->netdev;
1121 struct sk_buff *skb;
1123 skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
1124 if (unlikely(!skb)) {
1125 rx_stats(rxo)->rx_drops_no_skbs++;
1126 be_rx_compl_discard(adapter, rxo, rxcp);
1127 return;
1130 skb_fill_rx_data(adapter, rxo, skb, rxcp);
1132 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1133 skb->ip_summed = CHECKSUM_UNNECESSARY;
1134 else
1135 skb_checksum_none_assert(skb);
1137 skb->protocol = eth_type_trans(skb, netdev);
1138 if (adapter->netdev->features & NETIF_F_RXHASH)
1139 skb->rxhash = rxcp->rss_hash;
1142 if (rxcp->vlanf)
1143 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1145 netif_receive_skb(skb);
1148 /* Process the RX completion indicated by rxcp when GRO is enabled */
1149 static void be_rx_compl_process_gro(struct be_adapter *adapter,
1150 struct be_rx_obj *rxo,
1151 struct be_rx_compl_info *rxcp)
1153 struct be_rx_page_info *page_info;
1154 struct sk_buff *skb = NULL;
1155 struct be_queue_info *rxq = &rxo->q;
1156 struct be_eq_obj *eq_obj = &rxo->rx_eq;
1157 u16 remaining, curr_frag_len;
1158 u16 i, j;
1160 skb = napi_get_frags(&eq_obj->napi);
1161 if (!skb) {
1162 be_rx_compl_discard(adapter, rxo, rxcp);
1163 return;
1166 remaining = rxcp->pkt_size;
1167 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1168 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1170 curr_frag_len = min(remaining, rx_frag_size);
1172 /* Coalesce all frags from the same physical page in one slot */
1173 if (i == 0 || page_info->page_offset == 0) {
1174 /* First frag or Fresh page */
1175 j++;
1176 skb_frag_set_page(skb, j, page_info->page);
1177 skb_shinfo(skb)->frags[j].page_offset =
1178 page_info->page_offset;
1179 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1180 } else {
1181 put_page(page_info->page);
1183 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1184 skb->truesize += rx_frag_size;
1185 remaining -= curr_frag_len;
1186 index_inc(&rxcp->rxq_idx, rxq->len);
1187 memset(page_info, 0, sizeof(*page_info));
1189 BUG_ON(j > MAX_SKB_FRAGS);
1191 skb_shinfo(skb)->nr_frags = j + 1;
1192 skb->len = rxcp->pkt_size;
1193 skb->data_len = rxcp->pkt_size;
1194 skb->ip_summed = CHECKSUM_UNNECESSARY;
1195 if (adapter->netdev->features & NETIF_F_RXHASH)
1196 skb->rxhash = rxcp->rss_hash;
1198 if (rxcp->vlanf)
1199 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1201 napi_gro_frags(&eq_obj->napi);
1204 static void be_parse_rx_compl_v1(struct be_adapter *adapter,
1205 struct be_eth_rx_compl *compl,
1206 struct be_rx_compl_info *rxcp)
1208 rxcp->pkt_size =
1209 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1210 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1211 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1212 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1213 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1214 rxcp->ip_csum =
1215 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1216 rxcp->l4_csum =
1217 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1218 rxcp->ipv6 =
1219 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1220 rxcp->rxq_idx =
1221 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1222 rxcp->num_rcvd =
1223 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1224 rxcp->pkt_type =
1225 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1226 rxcp->rss_hash =
1227 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1228 if (rxcp->vlanf) {
1229 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1230 compl);
1231 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1232 compl);
1234 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1237 static void be_parse_rx_compl_v0(struct be_adapter *adapter,
1238 struct be_eth_rx_compl *compl,
1239 struct be_rx_compl_info *rxcp)
1241 rxcp->pkt_size =
1242 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1243 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1244 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1245 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1246 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1247 rxcp->ip_csum =
1248 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1249 rxcp->l4_csum =
1250 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1251 rxcp->ipv6 =
1252 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1253 rxcp->rxq_idx =
1254 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1255 rxcp->num_rcvd =
1256 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1257 rxcp->pkt_type =
1258 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1259 rxcp->rss_hash =
1260 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1261 if (rxcp->vlanf) {
1262 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1263 compl);
1264 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1265 compl);
1267 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1270 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1272 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1273 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1274 struct be_adapter *adapter = rxo->adapter;
1276 /* For checking the valid bit it is Ok to use either definition as the
1277 * valid bit is at the same position in both v0 and v1 Rx compl */
1278 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1279 return NULL;
1281 rmb();
1282 be_dws_le_to_cpu(compl, sizeof(*compl));
1284 if (adapter->be3_native)
1285 be_parse_rx_compl_v1(adapter, compl, rxcp);
1286 else
1287 be_parse_rx_compl_v0(adapter, compl, rxcp);
1289 if (rxcp->vlanf) {
1290 /* vlanf could be wrongly set in some cards.
1291 * ignore if vtm is not set */
1292 if ((adapter->function_mode & 0x400) && !rxcp->vtm)
1293 rxcp->vlanf = 0;
1295 if (!lancer_chip(adapter))
1296 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1298 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1299 !adapter->vlan_tag[rxcp->vlan_tag])
1300 rxcp->vlanf = 0;
1303 /* As the compl has been parsed, reset it; we won't touch it again */
1304 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1306 queue_tail_inc(&rxo->cq);
1307 return rxcp;
1310 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1312 u32 order = get_order(size);
1314 if (order > 0)
1315 gfp |= __GFP_COMP;
1316 return alloc_pages(gfp, order);
1320 * Allocate a page, split it into fragments of size rx_frag_size and post as
1321 * receive buffers to BE
1323 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1325 struct be_adapter *adapter = rxo->adapter;
1326 struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
1327 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1328 struct be_queue_info *rxq = &rxo->q;
1329 struct page *pagep = NULL;
1330 struct be_eth_rx_d *rxd;
1331 u64 page_dmaaddr = 0, frag_dmaaddr;
1332 u32 posted, page_offset = 0;
1334 page_info = &rxo->page_info_tbl[rxq->head];
1335 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1336 if (!pagep) {
1337 pagep = be_alloc_pages(adapter->big_page_size, gfp);
1338 if (unlikely(!pagep)) {
1339 rx_stats(rxo)->rx_post_fail++;
1340 break;
1342 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1343 0, adapter->big_page_size,
1344 DMA_FROM_DEVICE);
1345 page_info->page_offset = 0;
1346 } else {
1347 get_page(pagep);
1348 page_info->page_offset = page_offset + rx_frag_size;
1350 page_offset = page_info->page_offset;
1351 page_info->page = pagep;
1352 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1353 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1355 rxd = queue_head_node(rxq);
1356 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1357 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1359 /* Any space left in the current big page for another frag? */
1360 if ((page_offset + rx_frag_size + rx_frag_size) >
1361 adapter->big_page_size) {
1362 pagep = NULL;
1363 page_info->last_page_user = true;
1366 prev_page_info = page_info;
1367 queue_head_inc(rxq);
1368 page_info = &page_info_tbl[rxq->head];
1370 if (pagep)
1371 prev_page_info->last_page_user = true;
1373 if (posted) {
1374 atomic_add(posted, &rxq->used);
1375 be_rxq_notify(adapter, rxq->id, posted);
1376 } else if (atomic_read(&rxq->used) == 0) {
1377 /* Let be_worker replenish when memory is available */
1378 rxo->rx_post_starved = true;
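/*
 * Illustrative arithmetic for the page splitting above (not driver code):
 * a "big page" of big_page_size bytes is carved into rx_frag_size-byte
 * receive fragments, and the fragment that exhausts the page sets
 * last_page_user so the DMA mapping is released only once.
 */
static unsigned int example_frags_per_big_page(unsigned int big_page_size,
					       unsigned int frag_size)
{
	/* e.g. with 4K pages and the default rx_frag_size of 2048: 4096 / 2048 = 2 */
	return big_page_size / frag_size;
}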
1382 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1384 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1386 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1387 return NULL;
1389 rmb();
1390 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1392 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1394 queue_tail_inc(tx_cq);
1395 return txcp;
1398 static u16 be_tx_compl_process(struct be_adapter *adapter,
1399 struct be_tx_obj *txo, u16 last_index)
1401 struct be_queue_info *txq = &txo->q;
1402 struct be_eth_wrb *wrb;
1403 struct sk_buff **sent_skbs = txo->sent_skb_list;
1404 struct sk_buff *sent_skb;
1405 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1406 bool unmap_skb_hdr = true;
1408 sent_skb = sent_skbs[txq->tail];
1409 BUG_ON(!sent_skb);
1410 sent_skbs[txq->tail] = NULL;
1412 /* skip header wrb */
1413 queue_tail_inc(txq);
1415 do {
1416 cur_index = txq->tail;
1417 wrb = queue_tail_node(txq);
1418 unmap_tx_frag(&adapter->pdev->dev, wrb,
1419 (unmap_skb_hdr && skb_headlen(sent_skb)));
1420 unmap_skb_hdr = false;
1422 num_wrbs++;
1423 queue_tail_inc(txq);
1424 } while (cur_index != last_index);
1426 kfree_skb(sent_skb);
1427 return num_wrbs;
1430 static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1432 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1434 if (!eqe->evt)
1435 return NULL;
1437 rmb();
1438 eqe->evt = le32_to_cpu(eqe->evt);
1439 queue_tail_inc(&eq_obj->q);
1440 return eqe;
1443 static int event_handle(struct be_adapter *adapter,
1444 struct be_eq_obj *eq_obj,
1445 bool rearm)
1447 struct be_eq_entry *eqe;
1448 u16 num = 0;
1450 while ((eqe = event_get(eq_obj)) != NULL) {
1451 eqe->evt = 0;
1452 num++;
1455 /* Deal with any spurious interrupts that come
1456 * without events
1458 if (!num)
1459 rearm = true;
1461 be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
1462 if (num)
1463 napi_schedule(&eq_obj->napi);
1465 return num;
1468 /* Just read and notify events without processing them.
1469 * Used at the time of destroying event queues */
1470 static void be_eq_clean(struct be_adapter *adapter,
1471 struct be_eq_obj *eq_obj)
1473 struct be_eq_entry *eqe;
1474 u16 num = 0;
1476 while ((eqe = event_get(eq_obj)) != NULL) {
1477 eqe->evt = 0;
1478 num++;
1481 if (num)
1482 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1485 static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
1487 struct be_rx_page_info *page_info;
1488 struct be_queue_info *rxq = &rxo->q;
1489 struct be_queue_info *rx_cq = &rxo->cq;
1490 struct be_rx_compl_info *rxcp;
1491 u16 tail;
1493 /* First cleanup pending rx completions */
1494 while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1495 be_rx_compl_discard(adapter, rxo, rxcp);
1496 be_cq_notify(adapter, rx_cq->id, false, 1);
1499 /* Then free posted rx buffers that were not used */
1500 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1501 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1502 page_info = get_rx_page_info(adapter, rxo, tail);
1503 put_page(page_info->page);
1504 memset(page_info, 0, sizeof(*page_info));
1506 BUG_ON(atomic_read(&rxq->used));
1507 rxq->tail = rxq->head = 0;
1510 static void be_tx_compl_clean(struct be_adapter *adapter,
1511 struct be_tx_obj *txo)
1513 struct be_queue_info *tx_cq = &txo->cq;
1514 struct be_queue_info *txq = &txo->q;
1515 struct be_eth_tx_compl *txcp;
1516 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1517 struct sk_buff **sent_skbs = txo->sent_skb_list;
1518 struct sk_buff *sent_skb;
1519 bool dummy_wrb;
1521 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1522 do {
1523 while ((txcp = be_tx_compl_get(tx_cq))) {
1524 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1525 wrb_index, txcp);
1526 num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
1527 cmpl++;
1529 if (cmpl) {
1530 be_cq_notify(adapter, tx_cq->id, false, cmpl);
1531 atomic_sub(num_wrbs, &txq->used);
1532 cmpl = 0;
1533 num_wrbs = 0;
1536 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1537 break;
1539 mdelay(1);
1540 } while (true);
1542 if (atomic_read(&txq->used))
1543 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1544 atomic_read(&txq->used));
1546 /* free posted tx for which compls will never arrive */
1547 while (atomic_read(&txq->used)) {
1548 sent_skb = sent_skbs[txq->tail];
1549 end_idx = txq->tail;
1550 index_adv(&end_idx,
1551 wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1552 txq->len);
1553 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1554 atomic_sub(num_wrbs, &txq->used);
1558 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1560 struct be_queue_info *q;
1562 q = &adapter->mcc_obj.q;
1563 if (q->created)
1564 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1565 be_queue_free(adapter, q);
1567 q = &adapter->mcc_obj.cq;
1568 if (q->created)
1569 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1570 be_queue_free(adapter, q);
1573 /* Must be called only after TX qs are created as MCC shares TX EQ */
1574 static int be_mcc_queues_create(struct be_adapter *adapter)
1576 struct be_queue_info *q, *cq;
1578 /* Alloc MCC compl queue */
1579 cq = &adapter->mcc_obj.cq;
1580 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1581 sizeof(struct be_mcc_compl)))
1582 goto err;
1584 /* Ask BE to create MCC compl queue; share TX's eq */
1585 if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
1586 goto mcc_cq_free;
1588 /* Alloc MCC queue */
1589 q = &adapter->mcc_obj.q;
1590 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1591 goto mcc_cq_destroy;
1593 /* Ask BE to create MCC queue */
1594 if (be_cmd_mccq_create(adapter, q, cq))
1595 goto mcc_q_free;
1597 return 0;
1599 mcc_q_free:
1600 be_queue_free(adapter, q);
1601 mcc_cq_destroy:
1602 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1603 mcc_cq_free:
1604 be_queue_free(adapter, cq);
1605 err:
1606 return -1;
1609 static void be_tx_queues_destroy(struct be_adapter *adapter)
1611 struct be_queue_info *q;
1612 struct be_tx_obj *txo;
1613 u8 i;
1615 for_all_tx_queues(adapter, txo, i) {
1616 q = &txo->q;
1617 if (q->created)
1618 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1619 be_queue_free(adapter, q);
1621 q = &txo->cq;
1622 if (q->created)
1623 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1624 be_queue_free(adapter, q);
1627 /* Clear any residual events */
1628 be_eq_clean(adapter, &adapter->tx_eq);
1630 q = &adapter->tx_eq.q;
1631 if (q->created)
1632 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1633 be_queue_free(adapter, q);
1636 static int be_num_txqs_want(struct be_adapter *adapter)
1638 if ((num_vfs && adapter->sriov_enabled) ||
1639 (adapter->function_mode & 0x400) ||
1640 lancer_chip(adapter) || !be_physfn(adapter) ||
1641 adapter->generation == BE_GEN2)
1642 return 1;
1643 else
1644 return MAX_TX_QS;
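/*
 * Standalone restatement of the TX-queue policy above (illustration only,
 * matching the commit title "don't create multiple TXQs in BE2"): multiple
 * TX queues are requested only for a BE3-class physical function that is not
 * running SR-IOV, is not in the 0x400 function mode, and is not a Lancer
 * chip; everything else, including BE2 (GEN2), gets a single TX queue.
 */
static int example_num_txqs(int sriov_on, int mode_0x400, int is_lancer,
			    int is_physfn, int is_gen2, int max_tx_qs)
{
	if (sriov_on || mode_0x400 || is_lancer || !is_physfn || is_gen2)
		return 1;
	return max_tx_qs;
}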
1647 /* One TX event queue is shared by all TX compl qs */
1648 static int be_tx_queues_create(struct be_adapter *adapter)
1650 struct be_queue_info *eq, *q, *cq;
1651 struct be_tx_obj *txo;
1652 u8 i;
1654 adapter->num_tx_qs = be_num_txqs_want(adapter);
1655 if (adapter->num_tx_qs != MAX_TX_QS)
1656 netif_set_real_num_tx_queues(adapter->netdev,
1657 adapter->num_tx_qs);
1659 adapter->tx_eq.max_eqd = 0;
1660 adapter->tx_eq.min_eqd = 0;
1661 adapter->tx_eq.cur_eqd = 96;
1662 adapter->tx_eq.enable_aic = false;
1664 eq = &adapter->tx_eq.q;
1665 if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1666 sizeof(struct be_eq_entry)))
1667 return -1;
1669 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1670 goto err;
1671 adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
1673 for_all_tx_queues(adapter, txo, i) {
1674 cq = &txo->cq;
1675 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1676 sizeof(struct be_eth_tx_compl)))
1677 goto err;
1679 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1680 goto err;
1682 q = &txo->q;
1683 if (be_queue_alloc(adapter, q, TX_Q_LEN,
1684 sizeof(struct be_eth_wrb)))
1685 goto err;
1687 if (be_cmd_txq_create(adapter, q, cq))
1688 goto err;
1690 return 0;
1692 err:
1693 be_tx_queues_destroy(adapter);
1694 return -1;
1697 static void be_rx_queues_destroy(struct be_adapter *adapter)
1699 struct be_queue_info *q;
1700 struct be_rx_obj *rxo;
1701 int i;
1703 for_all_rx_queues(adapter, rxo, i) {
1704 be_queue_free(adapter, &rxo->q);
1706 q = &rxo->cq;
1707 if (q->created)
1708 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1709 be_queue_free(adapter, q);
1711 q = &rxo->rx_eq.q;
1712 if (q->created)
1713 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1714 be_queue_free(adapter, q);
1718 static u32 be_num_rxqs_want(struct be_adapter *adapter)
1720 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1721 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1722 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1723 } else {
1724 dev_warn(&adapter->pdev->dev,
1725 "No support for multiple RX queues\n");
1726 return 1;
1730 static int be_rx_queues_create(struct be_adapter *adapter)
1732 struct be_queue_info *eq, *q, *cq;
1733 struct be_rx_obj *rxo;
1734 int rc, i;
1736 adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1737 msix_enabled(adapter) ?
1738 adapter->num_msix_vec - 1 : 1);
1739 if (adapter->num_rx_qs != MAX_RX_QS)
1740 dev_warn(&adapter->pdev->dev,
1741 "Can create only %d RX queues", adapter->num_rx_qs);
1743 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1744 for_all_rx_queues(adapter, rxo, i) {
1745 rxo->adapter = adapter;
1746 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1747 rxo->rx_eq.enable_aic = true;
1749 /* EQ */
1750 eq = &rxo->rx_eq.q;
1751 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1752 sizeof(struct be_eq_entry));
1753 if (rc)
1754 goto err;
1756 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1757 if (rc)
1758 goto err;
1760 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
1762 /* CQ */
1763 cq = &rxo->cq;
1764 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1765 sizeof(struct be_eth_rx_compl));
1766 if (rc)
1767 goto err;
1769 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1770 if (rc)
1771 goto err;
1773 /* Rx Q - will be created in be_open() */
1774 q = &rxo->q;
1775 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1776 sizeof(struct be_eth_rx_d));
1777 if (rc)
1778 goto err;
1782 return 0;
1783 err:
1784 be_rx_queues_destroy(adapter);
1785 return -1;
1788 static bool event_peek(struct be_eq_obj *eq_obj)
1790 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1791 if (!eqe->evt)
1792 return false;
1793 else
1794 return true;
1797 static irqreturn_t be_intx(int irq, void *dev)
1799 struct be_adapter *adapter = dev;
1800 struct be_rx_obj *rxo;
1801 int isr, i, tx = 0 , rx = 0;
1803 if (lancer_chip(adapter)) {
1804 if (event_peek(&adapter->tx_eq))
1805 tx = event_handle(adapter, &adapter->tx_eq, false);
1806 for_all_rx_queues(adapter, rxo, i) {
1807 if (event_peek(&rxo->rx_eq))
1808 rx |= event_handle(adapter, &rxo->rx_eq, true);
1811 if (!(tx || rx))
1812 return IRQ_NONE;
1814 } else {
1815 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1816 (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1817 if (!isr)
1818 return IRQ_NONE;
1820 if ((1 << adapter->tx_eq.eq_idx & isr))
1821 event_handle(adapter, &adapter->tx_eq, false);
1823 for_all_rx_queues(adapter, rxo, i) {
1824 if ((1 << rxo->rx_eq.eq_idx & isr))
1825 event_handle(adapter, &rxo->rx_eq, true);
1829 return IRQ_HANDLED;
1832 static irqreturn_t be_msix_rx(int irq, void *dev)
1834 struct be_rx_obj *rxo = dev;
1835 struct be_adapter *adapter = rxo->adapter;
1837 event_handle(adapter, &rxo->rx_eq, true);
1839 return IRQ_HANDLED;
1842 static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1844 struct be_adapter *adapter = dev;
1846 event_handle(adapter, &adapter->tx_eq, false);
1848 return IRQ_HANDLED;
1851 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1853 return (rxcp->tcpf && !rxcp->err) ? true : false;
1856 static int be_poll_rx(struct napi_struct *napi, int budget)
1858 struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1859 struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1860 struct be_adapter *adapter = rxo->adapter;
1861 struct be_queue_info *rx_cq = &rxo->cq;
1862 struct be_rx_compl_info *rxcp;
1863 u32 work_done;
1865 rx_stats(rxo)->rx_polls++;
1866 for (work_done = 0; work_done < budget; work_done++) {
1867 rxcp = be_rx_compl_get(rxo);
1868 if (!rxcp)
1869 break;
1871 /* Is it a flush compl that has no data */
1872 if (unlikely(rxcp->num_rcvd == 0))
1873 goto loop_continue;
1875 /* Discard compl with partial DMA Lancer B0 */
1876 if (unlikely(!rxcp->pkt_size)) {
1877 be_rx_compl_discard(adapter, rxo, rxcp);
1878 goto loop_continue;
1881 /* On BE drop pkts that arrive due to imperfect filtering in
1882 * promiscuous mode on some SKUs
1884 if (unlikely(rxcp->port != adapter->port_num &&
1885 !lancer_chip(adapter))) {
1886 be_rx_compl_discard(adapter, rxo, rxcp);
1887 goto loop_continue;
1890 if (do_gro(rxcp))
1891 be_rx_compl_process_gro(adapter, rxo, rxcp);
1892 else
1893 be_rx_compl_process(adapter, rxo, rxcp);
1894 loop_continue:
1895 be_rx_stats_update(rxo, rxcp);
1898 /* Refill the queue */
1899 if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1900 be_post_rx_frags(rxo, GFP_ATOMIC);
1902 /* All consumed */
1903 if (work_done < budget) {
1904 napi_complete(napi);
1905 be_cq_notify(adapter, rx_cq->id, true, work_done);
1906 } else {
1907 /* More to be consumed; continue with interrupts disabled */
1908 be_cq_notify(adapter, rx_cq->id, false, work_done);
1910 return work_done;
1913 /* As TX and MCC share the same EQ check for both TX and MCC completions.
1914 * For TX/MCC we don't honour budget; consume everything
1916 static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1918 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1919 struct be_adapter *adapter =
1920 container_of(tx_eq, struct be_adapter, tx_eq);
1921 struct be_tx_obj *txo;
1922 struct be_eth_tx_compl *txcp;
1923 int tx_compl, mcc_compl, status = 0;
1924 u8 i;
1925 u16 num_wrbs;
1927 for_all_tx_queues(adapter, txo, i) {
1928 tx_compl = 0;
1929 num_wrbs = 0;
1930 while ((txcp = be_tx_compl_get(&txo->cq))) {
1931 num_wrbs += be_tx_compl_process(adapter, txo,
1932 AMAP_GET_BITS(struct amap_eth_tx_compl,
1933 wrb_index, txcp));
1934 tx_compl++;
1936 if (tx_compl) {
1937 be_cq_notify(adapter, txo->cq.id, true, tx_compl);
1939 atomic_sub(num_wrbs, &txo->q.used);
1941 /* As Tx wrbs have been freed up, wake up netdev queue
1942 * if it was stopped due to lack of tx wrbs. */
1943 if (__netif_subqueue_stopped(adapter->netdev, i) &&
1944 atomic_read(&txo->q.used) < txo->q.len / 2) {
1945 netif_wake_subqueue(adapter->netdev, i);
1948 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
1949 tx_stats(txo)->tx_compl += tx_compl;
1950 u64_stats_update_end(&tx_stats(txo)->sync_compl);
1954 mcc_compl = be_process_mcc(adapter, &status);
1956 if (mcc_compl) {
1957 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1958 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1961 napi_complete(napi);
1963 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
1964 adapter->drv_stats.tx_events++;
1965 return 1;
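/* Read the UE (unrecoverable error) status registers, mask off the bits
 * the firmware has masked, and log the name of every asserted bit using
 * the ue_status_low/hi descriptor tables. Any asserted bit marks the
 * adapter as errored (ue_detected/eeh_err).
 */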
1968 void be_detect_dump_ue(struct be_adapter *adapter)
1970 u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1971 u32 i;
1973 pci_read_config_dword(adapter->pdev,
1974 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1975 pci_read_config_dword(adapter->pdev,
1976 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1977 pci_read_config_dword(adapter->pdev,
1978 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1979 pci_read_config_dword(adapter->pdev,
1980 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1982 ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
1983 ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
1985 if (ue_status_lo || ue_status_hi) {
1986 adapter->ue_detected = true;
1987 adapter->eeh_err = true;
1988 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1991 if (ue_status_lo) {
1992 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1993 if (ue_status_lo & 1)
1994 dev_err(&adapter->pdev->dev,
1995 "UE: %s bit set\n", ue_status_low_desc[i]);
1998 if (ue_status_hi) {
1999 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
2000 if (ue_status_hi & 1)
2001 dev_err(&adapter->pdev->dev,
2002 "UE: %s bit set\n", ue_status_hi_desc[i]);
2008 static void be_worker(struct work_struct *work)
2010 struct be_adapter *adapter =
2011 container_of(work, struct be_adapter, work.work);
2012 struct be_rx_obj *rxo;
2013 int i;
2015 if (!adapter->ue_detected && !lancer_chip(adapter))
2016 be_detect_dump_ue(adapter);
2018 /* when interrupts are not yet enabled, just reap any pending
2019 * mcc completions */
2020 if (!netif_running(adapter->netdev)) {
2021 int mcc_compl, status = 0;
2023 mcc_compl = be_process_mcc(adapter, &status);
2025 if (mcc_compl) {
2026 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
2027 be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
2030 goto reschedule;
2033 if (!adapter->stats_cmd_sent) {
2034 if (lancer_chip(adapter))
2035 lancer_cmd_get_pport_stats(adapter,
2036 &adapter->stats_cmd);
2037 else
2038 be_cmd_get_stats(adapter, &adapter->stats_cmd);
2041 for_all_rx_queues(adapter, rxo, i) {
2042 be_rx_eqd_update(adapter, rxo);
2044 if (rxo->rx_post_starved) {
2045 rxo->rx_post_starved = false;
2046 be_post_rx_frags(rxo, GFP_KERNEL);
2050 reschedule:
2051 adapter->work_counter++;
2052 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2055 static void be_msix_disable(struct be_adapter *adapter)
2057 if (msix_enabled(adapter)) {
2058 pci_disable_msix(adapter->pdev);
2059 adapter->num_msix_vec = 0;
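/* Ask for one MSI-X vector per desired RX queue plus one for TX/MCC.
 * If fewer vectors are available, retry with the count reported by the
 * first pci_enable_msix() call, provided it meets the minimum;
 * otherwise fall back to INTx (num_msix_vec stays 0).
 */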
2063 static void be_msix_enable(struct be_adapter *adapter)
2065 #define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
2066 int i, status, num_vec;
2068 num_vec = be_num_rxqs_want(adapter) + 1;
2070 for (i = 0; i < num_vec; i++)
2071 adapter->msix_entries[i].entry = i;
2073 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2074 if (status == 0) {
2075 goto done;
2076 } else if (status >= BE_MIN_MSIX_VECTORS) {
2077 num_vec = status;
2078 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2079 num_vec) == 0)
2080 goto done;
2082 return;
2083 done:
2084 adapter->num_msix_vec = num_vec;
2085 return;
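/* If this is a PF and the num_vfs module param is set, clamp num_vfs to
 * the TotalVFs advertised in the SR-IOV capability, enable SR-IOV and
 * allocate the per-VF config array.
 */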
2088 static int be_sriov_enable(struct be_adapter *adapter)
2090 be_check_sriov_fn_type(adapter);
2091 #ifdef CONFIG_PCI_IOV
2092 if (be_physfn(adapter) && num_vfs) {
2093 int status, pos;
2094 u16 nvfs;
2096 pos = pci_find_ext_capability(adapter->pdev,
2097 PCI_EXT_CAP_ID_SRIOV);
2098 pci_read_config_word(adapter->pdev,
2099 pos + PCI_SRIOV_TOTAL_VF, &nvfs);
2101 if (num_vfs > nvfs) {
2102 dev_info(&adapter->pdev->dev,
2103 "Device supports %d VFs and not %d\n",
2104 nvfs, num_vfs);
2105 num_vfs = nvfs;
2108 status = pci_enable_sriov(adapter->pdev, num_vfs);
2109 adapter->sriov_enabled = !status;
2111 if (adapter->sriov_enabled) {
2112 adapter->vf_cfg = kcalloc(num_vfs,
2113 sizeof(struct be_vf_cfg),
2114 GFP_KERNEL);
2115 if (!adapter->vf_cfg)
2116 return -ENOMEM;
2119 #endif
2120 return 0;
2123 static void be_sriov_disable(struct be_adapter *adapter)
2125 #ifdef CONFIG_PCI_IOV
2126 if (adapter->sriov_enabled) {
2127 pci_disable_sriov(adapter->pdev);
2128 kfree(adapter->vf_cfg);
2129 adapter->sriov_enabled = false;
2131 #endif
2134 static inline int be_msix_vec_get(struct be_adapter *adapter,
2135 struct be_eq_obj *eq_obj)
2137 return adapter->msix_entries[eq_obj->eq_idx].vector;
2140 static int be_request_irq(struct be_adapter *adapter,
2141 struct be_eq_obj *eq_obj,
2142 void *handler, char *desc, void *context)
2144 struct net_device *netdev = adapter->netdev;
2145 int vec;
2147 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
2148 vec = be_msix_vec_get(adapter, eq_obj);
2149 return request_irq(vec, handler, 0, eq_obj->desc, context);
2152 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2153 void *context)
2155 int vec = be_msix_vec_get(adapter, eq_obj);
2156 free_irq(vec, context);
2159 static int be_msix_register(struct be_adapter *adapter)
2161 struct be_rx_obj *rxo;
2162 int status, i;
2163 char qname[10];
2165 status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2166 adapter);
2167 if (status)
2168 goto err;
2170 for_all_rx_queues(adapter, rxo, i) {
2171 sprintf(qname, "rxq%d", i);
2172 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2173 qname, rxo);
2174 if (status)
2175 goto err_msix;
2178 return 0;
2180 err_msix:
2181 be_free_irq(adapter, &adapter->tx_eq, adapter);
2183 for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2184 be_free_irq(adapter, &rxo->rx_eq, rxo);
2186 err:
2187 dev_warn(&adapter->pdev->dev,
2188 "MSIX Request IRQ failed - err %d\n", status);
2189 be_msix_disable(adapter);
2190 return status;
2193 static int be_irq_register(struct be_adapter *adapter)
2195 struct net_device *netdev = adapter->netdev;
2196 int status;
2198 if (msix_enabled(adapter)) {
2199 status = be_msix_register(adapter);
2200 if (status == 0)
2201 goto done;
2202 /* INTx is not supported for VF */
2203 if (!be_physfn(adapter))
2204 return status;
2207 /* INTx */
2208 netdev->irq = adapter->pdev->irq;
2209 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2210 adapter);
2211 if (status) {
2212 dev_err(&adapter->pdev->dev,
2213 "INTx request IRQ failed - err %d\n", status);
2214 return status;
2216 done:
2217 adapter->isr_registered = true;
2218 return 0;
2221 static void be_irq_unregister(struct be_adapter *adapter)
2223 struct net_device *netdev = adapter->netdev;
2224 struct be_rx_obj *rxo;
2225 int i;
2227 if (!adapter->isr_registered)
2228 return;
2230 /* INTx */
2231 if (!msix_enabled(adapter)) {
2232 free_irq(netdev->irq, adapter);
2233 goto done;
2236 /* MSIx */
2237 be_free_irq(adapter, &adapter->tx_eq, adapter);
2239 for_all_rx_queues(adapter, rxo, i)
2240 be_free_irq(adapter, &rxo->rx_eq, rxo);
2242 done:
2243 adapter->isr_registered = false;
2246 static void be_rx_queues_clear(struct be_adapter *adapter)
2248 struct be_queue_info *q;
2249 struct be_rx_obj *rxo;
2250 int i;
2252 for_all_rx_queues(adapter, rxo, i) {
2253 q = &rxo->q;
2254 if (q->created) {
2255 be_cmd_rxq_destroy(adapter, q);
2256 /* After the rxq is invalidated, wait for a grace time
2257 * of 1ms for all dma to end and the flush compl to
2258 * arrive
2260 mdelay(1);
2261 be_rx_q_clean(adapter, rxo);
2264 /* Clear any residual events */
2265 q = &rxo->rx_eq.q;
2266 if (q->created)
2267 be_eq_clean(adapter, &rxo->rx_eq);
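/* Teardown order: stop async MCC processing, mask the host interrupt on
 * non-Lancer chips, disable NAPI, quiesce the CQs on Lancer, synchronize
 * and free the IRQs, drain pending TX completions so all skbs are freed,
 * and finally destroy and clean the RX queues.
 */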
2271 static int be_close(struct net_device *netdev)
2273 struct be_adapter *adapter = netdev_priv(netdev);
2274 struct be_rx_obj *rxo;
2275 struct be_tx_obj *txo;
2276 struct be_eq_obj *tx_eq = &adapter->tx_eq;
2277 int vec, i;
2279 be_async_mcc_disable(adapter);
2281 if (!lancer_chip(adapter))
2282 be_intr_set(adapter, false);
2284 for_all_rx_queues(adapter, rxo, i)
2285 napi_disable(&rxo->rx_eq.napi);
2287 napi_disable(&tx_eq->napi);
2289 if (lancer_chip(adapter)) {
2290 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2291 for_all_rx_queues(adapter, rxo, i)
2292 be_cq_notify(adapter, rxo->cq.id, false, 0);
2293 for_all_tx_queues(adapter, txo, i)
2294 be_cq_notify(adapter, txo->cq.id, false, 0);
2297 if (msix_enabled(adapter)) {
2298 vec = be_msix_vec_get(adapter, tx_eq);
2299 synchronize_irq(vec);
2301 for_all_rx_queues(adapter, rxo, i) {
2302 vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2303 synchronize_irq(vec);
2305 } else {
2306 synchronize_irq(netdev->irq);
2308 be_irq_unregister(adapter);
2310 /* Wait for all pending tx completions to arrive so that
2311 * all tx skbs are freed.
2313 for_all_tx_queues(adapter, txo, i)
2314 be_tx_compl_clean(adapter, txo);
2316 be_rx_queues_clear(adapter);
2317 return 0;
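/* Create an RXQ for every rx_obj (RSS is enabled on all but the first,
 * default queue), program the RSS indirection table when multiple RX
 * queues exist, then post the initial RX buffers and enable NAPI.
 */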
2320 static int be_rx_queues_setup(struct be_adapter *adapter)
2322 struct be_rx_obj *rxo;
2323 int rc, i;
2324 u8 rsstable[MAX_RSS_QS];
2326 for_all_rx_queues(adapter, rxo, i) {
2327 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2328 rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
2329 adapter->if_handle,
2330 (i > 0) ? 1 : 0 /* rss enable */, &rxo->rss_id);
2331 if (rc)
2332 return rc;
2335 if (be_multi_rxq(adapter)) {
2336 for_all_rss_queues(adapter, rxo, i)
2337 rsstable[i] = rxo->rss_id;
2339 rc = be_cmd_rss_config(adapter, rsstable,
2340 adapter->num_rx_qs - 1);
2341 if (rc)
2342 return rc;
2345 /* First time posting */
2346 for_all_rx_queues(adapter, rxo, i) {
2347 be_post_rx_frags(rxo, GFP_KERNEL);
2348 napi_enable(&rxo->rx_eq.napi);
2350 return 0;
2353 static int be_open(struct net_device *netdev)
2355 struct be_adapter *adapter = netdev_priv(netdev);
2356 struct be_eq_obj *tx_eq = &adapter->tx_eq;
2357 struct be_rx_obj *rxo;
2358 int status, i;
2360 status = be_rx_queues_setup(adapter);
2361 if (status)
2362 goto err;
2364 napi_enable(&tx_eq->napi);
2366 be_irq_register(adapter);
2368 if (!lancer_chip(adapter))
2369 be_intr_set(adapter, true);
2371 /* The evt queues are created in unarmed state; arm them */
2372 for_all_rx_queues(adapter, rxo, i) {
2373 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2374 be_cq_notify(adapter, rxo->cq.id, true, 0);
2376 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2378 /* Now that interrupts are on we can process async mcc */
2379 be_async_mcc_enable(adapter);
2381 return 0;
2382 err:
2383 be_close(adapter->netdev);
2384 return -EIO;
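/* Program (or clear) magic-packet Wake-on-LAN in the firmware and set
 * the PCI wake capability for D3hot/D3cold accordingly. A zeroed MAC is
 * used to disarm WoL.
 */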
2387 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2389 struct be_dma_mem cmd;
2390 int status = 0;
2391 u8 mac[ETH_ALEN];
2393 memset(mac, 0, ETH_ALEN);
2395 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2396 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2397 GFP_KERNEL);
2398 if (cmd.va == NULL)
2399 return -1;
2400 memset(cmd.va, 0, cmd.size);
2402 if (enable) {
2403 status = pci_write_config_dword(adapter->pdev,
2404 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2405 if (status) {
2406 dev_err(&adapter->pdev->dev,
2407 "Could not enable Wake-on-lan\n");
2408 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2409 cmd.dma);
2410 return status;
2412 status = be_cmd_enable_magic_wol(adapter,
2413 adapter->netdev->dev_addr, &cmd);
2414 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2415 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2416 } else {
2417 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2418 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2419 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2422 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2423 return status;
2427 * Generate a seed MAC address from the PF MAC address using jhash.
2428 * MAC addresses for the VFs are assigned incrementally, starting from the seed.
2429 * These addresses are programmed into the ASIC by the PF; the VF driver
2430 * queries for its MAC address during probe.
2432 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2434 u32 vf;
2435 int status = 0;
2436 u8 mac[ETH_ALEN];
2438 be_vf_eth_addr_generate(adapter, mac);
2440 for (vf = 0; vf < num_vfs; vf++) {
2441 status = be_cmd_pmac_add(adapter, mac,
2442 adapter->vf_cfg[vf].vf_if_handle,
2443 &adapter->vf_cfg[vf].vf_pmac_id,
2444 vf + 1);
2445 if (status)
2446 dev_err(&adapter->pdev->dev,
2447 "Mac address add failed for VF %d\n", vf);
2448 else
2449 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2451 mac[5] += 1;
2453 return status;
2456 static void be_vf_clear(struct be_adapter *adapter)
2458 u32 vf;
2460 for (vf = 0; vf < num_vfs; vf++) {
2461 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2462 be_cmd_pmac_del(adapter,
2463 adapter->vf_cfg[vf].vf_if_handle,
2464 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
2467 for (vf = 0; vf < num_vfs; vf++)
2468 if (adapter->vf_cfg[vf].vf_if_handle)
2469 be_cmd_if_destroy(adapter,
2470 adapter->vf_cfg[vf].vf_if_handle, vf + 1);
2473 static int be_clear(struct be_adapter *adapter)
2475 if (be_physfn(adapter) && adapter->sriov_enabled)
2476 be_vf_clear(adapter);
2478 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2480 be_mcc_queues_destroy(adapter);
2481 be_rx_queues_destroy(adapter);
2482 be_tx_queues_destroy(adapter);
2483 adapter->eq_next_idx = 0;
2485 adapter->be3_native = false;
2486 adapter->promiscuous = false;
2488 /* tell fw we're done with firing cmds */
2489 be_cmd_fw_clean(adapter);
2490 return 0;
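/* Create an interface for each VF (untagged + broadcast only), program
 * the VF MAC addresses (skipped on Lancer chips) and record each VF's
 * link speed as its default tx_rate.
 */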
2493 static int be_vf_setup(struct be_adapter *adapter)
2495 u32 cap_flags, en_flags, vf;
2496 u16 lnk_speed;
2497 int status;
2499 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
2500 for (vf = 0; vf < num_vfs; vf++) {
2501 status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
2502 &adapter->vf_cfg[vf].vf_if_handle,
2503 NULL, vf+1);
2504 if (status)
2505 goto err;
2506 adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
2509 if (!lancer_chip(adapter)) {
2510 status = be_vf_eth_addr_config(adapter);
2511 if (status)
2512 goto err;
2515 for (vf = 0; vf < num_vfs; vf++) {
2516 status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
2517 vf + 1);
2518 if (status)
2519 goto err;
2520 adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
2522 return 0;
2523 err:
2524 return status;
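/* One-time HW/SW setup: create the TX, RX and MCC queues, query the
 * permanent MAC, create the interface (with RSS if the function supports
 * it), restore VLAN, rx-mode and flow-control settings and, on a PF with
 * SR-IOV enabled, configure the VFs.
 */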
2527 static int be_setup(struct be_adapter *adapter)
2529 struct net_device *netdev = adapter->netdev;
2530 u32 cap_flags, en_flags;
2531 u32 tx_fc, rx_fc;
2532 int status;
2533 u8 mac[ETH_ALEN];
2535 /* Allow all priorities by default. A GRP5 evt may modify this */
2536 adapter->vlan_prio_bmap = 0xff;
2537 adapter->link_speed = -1;
2539 be_cmd_req_native_mode(adapter);
2541 status = be_tx_queues_create(adapter);
2542 if (status != 0)
2543 goto err;
2545 status = be_rx_queues_create(adapter);
2546 if (status != 0)
2547 goto err;
2549 status = be_mcc_queues_create(adapter);
2550 if (status != 0)
2551 goto err;
2553 memset(mac, 0, ETH_ALEN);
2554 status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
2555 true /*permanent */, 0);
2556 if (status)
2557 return status;
2558 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2559 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2561 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2562 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2563 cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
2564 BE_IF_FLAGS_PROMISCUOUS;
2565 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2566 cap_flags |= BE_IF_FLAGS_RSS;
2567 en_flags |= BE_IF_FLAGS_RSS;
2569 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2570 netdev->dev_addr, &adapter->if_handle,
2571 &adapter->pmac_id, 0);
2572 if (status != 0)
2573 goto err;
2575 /* For BEx, the VF's permanent MAC queried from the card is incorrect.
2576 * Query the MAC configured by the PF using the if_handle
2578 if (!be_physfn(adapter) && !lancer_chip(adapter)) {
2579 status = be_cmd_mac_addr_query(adapter, mac,
2580 MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2581 if (!status) {
2582 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2583 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2587 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
2589 status = be_vid_config(adapter, false, 0);
2590 if (status)
2591 goto err;
2593 be_set_rx_mode(adapter->netdev);
2595 status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
2596 if (status)
2597 goto err;
2598 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
2599 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
2600 adapter->rx_fc);
2601 if (status)
2602 goto err;
2605 pcie_set_readrq(adapter->pdev, 4096);
2607 if (be_physfn(adapter) && adapter->sriov_enabled) {
2608 status = be_vf_setup(adapter);
2609 if (status)
2610 goto err;
2613 return 0;
2614 err:
2615 be_clear(adapter);
2616 return status;
2619 #define FW_FILE_HDR_SIGN "ServerEngines Corp. "
2620 static bool be_flash_redboot(struct be_adapter *adapter,
2621 const u8 *p, u32 img_start, int image_size,
2622 int hdr_size)
2624 u32 crc_offset;
2625 u8 flashed_crc[4];
2626 int status;
2628 crc_offset = hdr_size + img_start + image_size - 4;
2630 p += crc_offset;
2632 status = be_cmd_get_flash_crc(adapter, flashed_crc,
2633 (image_size - 4));
2634 if (status) {
2635 dev_err(&adapter->pdev->dev,
2636 "could not get crc from flash, not flashing redboot\n");
2637 return false;
2640 /* Update redboot only if the crc does not match */
2641 if (!memcmp(flashed_crc, p, 4))
2642 return false;
2643 else
2644 return true;
2647 static bool phy_flashing_required(struct be_adapter *adapter)
2649 int status = 0;
2650 struct be_phy_info phy_info;
2652 status = be_cmd_get_phy_info(adapter, &phy_info);
2653 if (status)
2654 return false;
2655 if ((phy_info.phy_type == TN_8022) &&
2656 (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
2657 return true;
2659 return false;
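/* Walk the per-generation flash component table and write each section
 * of the UFI image to its flash region in 32KB chunks. Components that
 * don't apply are skipped (NCSI on older firmware, PHY firmware when no
 * flashable PHY is present, redboot when its CRC already matches). All
 * but the last chunk use a SAVE op; the final chunk triggers the FLASH
 * (or PHY_FLASH) op.
 */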
2662 static int be_flash_data(struct be_adapter *adapter,
2663 const struct firmware *fw,
2664 struct be_dma_mem *flash_cmd, int num_of_images)
2667 int status = 0, i, filehdr_size = 0;
2668 u32 total_bytes = 0, flash_op;
2669 int num_bytes;
2670 const u8 *p = fw->data;
2671 struct be_cmd_write_flashrom *req = flash_cmd->va;
2672 const struct flash_comp *pflashcomp;
2673 int num_comp;
2675 static const struct flash_comp gen3_flash_types[10] = {
2676 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2677 FLASH_IMAGE_MAX_SIZE_g3},
2678 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2679 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2680 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2681 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2682 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2683 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2684 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2685 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2686 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2687 FLASH_IMAGE_MAX_SIZE_g3},
2688 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2689 FLASH_IMAGE_MAX_SIZE_g3},
2690 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2691 FLASH_IMAGE_MAX_SIZE_g3},
2692 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2693 FLASH_NCSI_IMAGE_MAX_SIZE_g3},
2694 { FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
2695 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
2697 static const struct flash_comp gen2_flash_types[8] = {
2698 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2699 FLASH_IMAGE_MAX_SIZE_g2},
2700 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2701 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2702 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2703 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2704 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2705 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2706 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2707 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2708 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2709 FLASH_IMAGE_MAX_SIZE_g2},
2710 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2711 FLASH_IMAGE_MAX_SIZE_g2},
2712 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2713 FLASH_IMAGE_MAX_SIZE_g2}
2716 if (adapter->generation == BE_GEN3) {
2717 pflashcomp = gen3_flash_types;
2718 filehdr_size = sizeof(struct flash_file_hdr_g3);
2719 num_comp = ARRAY_SIZE(gen3_flash_types);
2720 } else {
2721 pflashcomp = gen2_flash_types;
2722 filehdr_size = sizeof(struct flash_file_hdr_g2);
2723 num_comp = ARRAY_SIZE(gen2_flash_types);
2725 for (i = 0; i < num_comp; i++) {
2726 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2727 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2728 continue;
2729 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
2730 if (!phy_flashing_required(adapter))
2731 continue;
2733 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2734 (!be_flash_redboot(adapter, fw->data,
2735 pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2736 (num_of_images * sizeof(struct image_hdr)))))
2737 continue;
2738 p = fw->data;
2739 p += filehdr_size + pflashcomp[i].offset
2740 + (num_of_images * sizeof(struct image_hdr));
2741 if (p + pflashcomp[i].size > fw->data + fw->size)
2742 return -1;
2743 total_bytes = pflashcomp[i].size;
2744 while (total_bytes) {
2745 if (total_bytes > 32*1024)
2746 num_bytes = 32*1024;
2747 else
2748 num_bytes = total_bytes;
2749 total_bytes -= num_bytes;
2750 if (!total_bytes) {
2751 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2752 flash_op = FLASHROM_OPER_PHY_FLASH;
2753 else
2754 flash_op = FLASHROM_OPER_FLASH;
2755 } else {
2756 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2757 flash_op = FLASHROM_OPER_PHY_SAVE;
2758 else
2759 flash_op = FLASHROM_OPER_SAVE;
2761 memcpy(req->params.data_buf, p, num_bytes);
2762 p += num_bytes;
2763 status = be_cmd_write_flashrom(adapter, flash_cmd,
2764 pflashcomp[i].optype, flash_op, num_bytes);
2765 if (status) {
2766 if ((status == ILLEGAL_IOCTL_REQ) &&
2767 (pflashcomp[i].optype ==
2768 IMG_TYPE_PHY_FW))
2769 break;
2770 dev_err(&adapter->pdev->dev,
2771 "cmd to write to flash rom failed.\n");
2772 return -1;
2776 return 0;
2779 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2781 if (fhdr == NULL)
2782 return 0;
2783 if (fhdr->build[0] == '3')
2784 return BE_GEN3;
2785 else if (fhdr->build[0] == '2')
2786 return BE_GEN2;
2787 else
2788 return 0;
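/* Lancer firmware update: stream the image to the "/prg" object in 32KB
 * chunks via write-object commands, then issue a zero-length write at
 * the final offset to commit the downloaded firmware.
 */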
2791 static int lancer_fw_download(struct be_adapter *adapter,
2792 const struct firmware *fw)
2794 #define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
2795 #define LANCER_FW_DOWNLOAD_LOCATION "/prg"
2796 struct be_dma_mem flash_cmd;
2797 const u8 *data_ptr = NULL;
2798 u8 *dest_image_ptr = NULL;
2799 size_t image_size = 0;
2800 u32 chunk_size = 0;
2801 u32 data_written = 0;
2802 u32 offset = 0;
2803 int status = 0;
2804 u8 add_status = 0;
2806 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
2807 dev_err(&adapter->pdev->dev,
2808 "FW Image not properly aligned. "
2809 "Length must be 4 byte aligned.\n");
2810 status = -EINVAL;
2811 goto lancer_fw_exit;
2814 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2815 + LANCER_FW_DOWNLOAD_CHUNK;
2816 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2817 &flash_cmd.dma, GFP_KERNEL);
2818 if (!flash_cmd.va) {
2819 status = -ENOMEM;
2820 dev_err(&adapter->pdev->dev,
2821 "Memory allocation failure while flashing\n");
2822 goto lancer_fw_exit;
2825 dest_image_ptr = flash_cmd.va +
2826 sizeof(struct lancer_cmd_req_write_object);
2827 image_size = fw->size;
2828 data_ptr = fw->data;
2830 while (image_size) {
2831 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
2833 /* Copy the image chunk content. */
2834 memcpy(dest_image_ptr, data_ptr, chunk_size);
2836 status = lancer_cmd_write_object(adapter, &flash_cmd,
2837 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
2838 &data_written, &add_status);
2840 if (status)
2841 break;
2843 offset += data_written;
2844 data_ptr += data_written;
2845 image_size -= data_written;
2848 if (!status) {
2849 /* Commit the FW written */
2850 status = lancer_cmd_write_object(adapter, &flash_cmd,
2851 0, offset, LANCER_FW_DOWNLOAD_LOCATION,
2852 &data_written, &add_status);
2855 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2856 flash_cmd.dma);
2857 if (status) {
2858 dev_err(&adapter->pdev->dev,
2859 "Firmware load error. "
2860 "Status code: 0x%x Additional Status: 0x%x\n",
2861 status, add_status);
2862 goto lancer_fw_exit;
2865 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2866 lancer_fw_exit:
2867 return status;
2870 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
2872 struct flash_file_hdr_g2 *fhdr;
2873 struct flash_file_hdr_g3 *fhdr3;
2874 struct image_hdr *img_hdr_ptr = NULL;
2875 struct be_dma_mem flash_cmd;
2876 const u8 *p;
2877 int status = 0, i = 0, num_imgs = 0;
2879 p = fw->data;
2880 fhdr = (struct flash_file_hdr_g2 *) p;
2882 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2883 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2884 &flash_cmd.dma, GFP_KERNEL);
2885 if (!flash_cmd.va) {
2886 status = -ENOMEM;
2887 dev_err(&adapter->pdev->dev,
2888 "Memory allocation failure while flashing\n");
2889 goto be_fw_exit;
2892 if ((adapter->generation == BE_GEN3) &&
2893 (get_ufigen_type(fhdr) == BE_GEN3)) {
2894 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2895 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2896 for (i = 0; i < num_imgs; i++) {
2897 img_hdr_ptr = (struct image_hdr *) (fw->data +
2898 (sizeof(struct flash_file_hdr_g3) +
2899 i * sizeof(struct image_hdr)));
2900 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2901 status = be_flash_data(adapter, fw, &flash_cmd,
2902 num_imgs);
2904 } else if ((adapter->generation == BE_GEN2) &&
2905 (get_ufigen_type(fhdr) == BE_GEN2)) {
2906 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2907 } else {
2908 dev_err(&adapter->pdev->dev,
2909 "UFI and Interface are not compatible for flashing\n");
2910 status = -1;
2913 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2914 flash_cmd.dma);
2915 if (status) {
2916 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2917 goto be_fw_exit;
2920 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2922 be_fw_exit:
2923 return status;
2926 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
2928 const struct firmware *fw;
2929 int status;
2931 if (!netif_running(adapter->netdev)) {
2932 dev_err(&adapter->pdev->dev,
2933 "Firmware load not allowed (interface is down)\n");
2934 return -1;
2937 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2938 if (status)
2939 goto fw_exit;
2941 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2943 if (lancer_chip(adapter))
2944 status = lancer_fw_download(adapter, fw);
2945 else
2946 status = be_fw_download(adapter, fw);
2948 fw_exit:
2949 release_firmware(fw);
2950 return status;
2953 static struct net_device_ops be_netdev_ops = {
2954 .ndo_open = be_open,
2955 .ndo_stop = be_close,
2956 .ndo_start_xmit = be_xmit,
2957 .ndo_set_rx_mode = be_set_rx_mode,
2958 .ndo_set_mac_address = be_mac_addr_set,
2959 .ndo_change_mtu = be_change_mtu,
2960 .ndo_get_stats64 = be_get_stats64,
2961 .ndo_validate_addr = eth_validate_addr,
2962 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
2963 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
2964 .ndo_set_vf_mac = be_set_vf_mac,
2965 .ndo_set_vf_vlan = be_set_vf_vlan,
2966 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
2967 .ndo_get_vf_config = be_get_vf_config
2970 static void be_netdev_init(struct net_device *netdev)
2972 struct be_adapter *adapter = netdev_priv(netdev);
2973 struct be_rx_obj *rxo;
2974 int i;
2976 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
2977 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
2978 NETIF_F_HW_VLAN_TX;
2979 if (be_multi_rxq(adapter))
2980 netdev->hw_features |= NETIF_F_RXHASH;
2982 netdev->features |= netdev->hw_features |
2983 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
2985 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
2986 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2988 netdev->flags |= IFF_MULTICAST;
2990 netif_set_gso_max_size(netdev, 65535);
2992 BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2994 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2996 for_all_rx_queues(adapter, rxo, i)
2997 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2998 BE_NAPI_WEIGHT);
3000 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
3001 BE_NAPI_WEIGHT);
3004 static void be_unmap_pci_bars(struct be_adapter *adapter)
3006 if (adapter->csr)
3007 iounmap(adapter->csr);
3008 if (adapter->db)
3009 iounmap(adapter->db);
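/* Lancer exposes everything through BAR 0 (used for doorbells). On
 * BE2/BE3 the PF also maps the CSR BAR (BAR 2); the doorbell BAR is
 * BAR 4, except for BE3 VFs, which use BAR 0.
 */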
3012 static int be_map_pci_bars(struct be_adapter *adapter)
3014 u8 __iomem *addr;
3015 int db_reg;
3017 if (lancer_chip(adapter)) {
3018 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
3019 pci_resource_len(adapter->pdev, 0));
3020 if (addr == NULL)
3021 return -ENOMEM;
3022 adapter->db = addr;
3023 return 0;
3026 if (be_physfn(adapter)) {
3027 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3028 pci_resource_len(adapter->pdev, 2));
3029 if (addr == NULL)
3030 return -ENOMEM;
3031 adapter->csr = addr;
3034 if (adapter->generation == BE_GEN2) {
3035 db_reg = 4;
3036 } else {
3037 if (be_physfn(adapter))
3038 db_reg = 4;
3039 else
3040 db_reg = 0;
3042 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3043 pci_resource_len(adapter->pdev, db_reg));
3044 if (addr == NULL)
3045 goto pci_map_err;
3046 adapter->db = addr;
3048 return 0;
3049 pci_map_err:
3050 be_unmap_pci_bars(adapter);
3051 return -ENOMEM;
3055 static void be_ctrl_cleanup(struct be_adapter *adapter)
3057 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3059 be_unmap_pci_bars(adapter);
3061 if (mem->va)
3062 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3063 mem->dma);
3065 mem = &adapter->rx_filter;
3066 if (mem->va)
3067 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3068 mem->dma);
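/* Map the PCI BARs, allocate the DMA buffers for the bootstrap mailbox
 * (16-byte aligned within an over-allocated region) and the rx_filter
 * command, and initialize the mailbox/MCC locks.
 */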
3071 static int be_ctrl_init(struct be_adapter *adapter)
3073 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3074 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3075 struct be_dma_mem *rx_filter = &adapter->rx_filter;
3076 int status;
3078 status = be_map_pci_bars(adapter);
3079 if (status)
3080 goto done;
3082 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3083 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3084 mbox_mem_alloc->size,
3085 &mbox_mem_alloc->dma,
3086 GFP_KERNEL);
3087 if (!mbox_mem_alloc->va) {
3088 status = -ENOMEM;
3089 goto unmap_pci_bars;
3091 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3092 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3093 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3094 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3096 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3097 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3098 &rx_filter->dma, GFP_KERNEL);
3099 if (rx_filter->va == NULL) {
3100 status = -ENOMEM;
3101 goto free_mbox;
3103 memset(rx_filter->va, 0, rx_filter->size);
3105 mutex_init(&adapter->mbox_lock);
3106 spin_lock_init(&adapter->mcc_lock);
3107 spin_lock_init(&adapter->mcc_cq_lock);
3109 init_completion(&adapter->flash_compl);
3110 pci_save_state(adapter->pdev);
3111 return 0;
3113 free_mbox:
3114 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3115 mbox_mem_alloc->va, mbox_mem_alloc->dma);
3117 unmap_pci_bars:
3118 be_unmap_pci_bars(adapter);
3120 done:
3121 return status;
3124 static void be_stats_cleanup(struct be_adapter *adapter)
3126 struct be_dma_mem *cmd = &adapter->stats_cmd;
3128 if (cmd->va)
3129 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3130 cmd->va, cmd->dma);
3133 static int be_stats_init(struct be_adapter *adapter)
3135 struct be_dma_mem *cmd = &adapter->stats_cmd;
3137 if (adapter->generation == BE_GEN2) {
3138 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3139 } else {
3140 if (lancer_chip(adapter))
3141 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3142 else
3143 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3145 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3146 GFP_KERNEL);
3147 if (cmd->va == NULL)
3148 return -1;
3149 memset(cmd->va, 0, cmd->size);
3150 return 0;
3153 static void __devexit be_remove(struct pci_dev *pdev)
3155 struct be_adapter *adapter = pci_get_drvdata(pdev);
3157 if (!adapter)
3158 return;
3160 cancel_delayed_work_sync(&adapter->work);
3162 unregister_netdev(adapter->netdev);
3164 be_clear(adapter);
3166 be_stats_cleanup(adapter);
3168 be_ctrl_cleanup(adapter);
3170 be_sriov_disable(adapter);
3172 be_msix_disable(adapter);
3174 pci_set_drvdata(pdev, NULL);
3175 pci_release_regions(pdev);
3176 pci_disable_device(pdev);
3178 free_netdev(adapter->netdev);
3181 static int be_get_config(struct be_adapter *adapter)
3183 int status;
3185 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3186 &adapter->function_mode, &adapter->function_caps);
3187 if (status)
3188 return status;
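/* Bit 0x400 in function_mode is assumed to be the multi-channel (FLEX10)
 * mode flag: in that mode the VLAN table is shared, so this function
 * only gets a quarter of the supported VLAN entries.
 */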
3190 if (adapter->function_mode & 0x400)
3191 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3192 else
3193 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3195 status = be_cmd_get_cntl_attributes(adapter);
3196 if (status)
3197 return status;
3199 return 0;
3202 static int be_dev_family_check(struct be_adapter *adapter)
3204 struct pci_dev *pdev = adapter->pdev;
3205 u32 sli_intf = 0, if_type;
3207 switch (pdev->device) {
3208 case BE_DEVICE_ID1:
3209 case OC_DEVICE_ID1:
3210 adapter->generation = BE_GEN2;
3211 break;
3212 case BE_DEVICE_ID2:
3213 case OC_DEVICE_ID2:
3214 adapter->generation = BE_GEN3;
3215 break;
3216 case OC_DEVICE_ID3:
3217 case OC_DEVICE_ID4:
3218 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3219 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3220 SLI_INTF_IF_TYPE_SHIFT;
3222 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3223 if_type != 0x02) {
3224 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3225 return -EINVAL;
3227 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3228 SLI_INTF_FAMILY_SHIFT);
3229 adapter->generation = BE_GEN3;
3230 break;
3231 default:
3232 adapter->generation = 0;
3234 return 0;
3237 static int lancer_wait_ready(struct be_adapter *adapter)
3239 #define SLIPORT_READY_TIMEOUT 500
3240 u32 sliport_status;
3241 int status = 0, i;
3243 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3244 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3245 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3246 break;
3248 msleep(20);
3251 if (i == SLIPORT_READY_TIMEOUT)
3252 status = -1;
3254 return status;
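/* Wait for the SLI port to become ready. If it reports an error along
 * with "reset needed", kick a port reset through SLIPORT_CONTROL and
 * wait again; any remaining error/reset-needed state is treated as a
 * non-recoverable condition.
 */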
3257 static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3259 int status;
3260 u32 sliport_status, err, reset_needed;
3261 status = lancer_wait_ready(adapter);
3262 if (!status) {
3263 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3264 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3265 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3266 if (err && reset_needed) {
3267 iowrite32(SLI_PORT_CONTROL_IP_MASK,
3268 adapter->db + SLIPORT_CONTROL_OFFSET);
3270 /* check whether the adapter has corrected the error */
3271 status = lancer_wait_ready(adapter);
3272 sliport_status = ioread32(adapter->db +
3273 SLIPORT_STATUS_OFFSET);
3274 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3275 SLIPORT_STATUS_RN_MASK);
3276 if (status || sliport_status)
3277 status = -1;
3278 } else if (err || reset_needed) {
3279 status = -1;
3282 return status;
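/* PCI probe: enable the device, set the DMA mask, bring up SR-IOV if
 * requested, map the BARs and control structures, sync up with firmware
 * (POST, fw_init, reset_function), enable MSI-X, run be_setup() and
 * register the netdev before kicking off the worker.
 */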
3285 static int __devinit be_probe(struct pci_dev *pdev,
3286 const struct pci_device_id *pdev_id)
3288 int status = 0;
3289 struct be_adapter *adapter;
3290 struct net_device *netdev;
3292 status = pci_enable_device(pdev);
3293 if (status)
3294 goto do_none;
3296 status = pci_request_regions(pdev, DRV_NAME);
3297 if (status)
3298 goto disable_dev;
3299 pci_set_master(pdev);
3301 netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
3302 if (netdev == NULL) {
3303 status = -ENOMEM;
3304 goto rel_reg;
3306 adapter = netdev_priv(netdev);
3307 adapter->pdev = pdev;
3308 pci_set_drvdata(pdev, adapter);
3310 status = be_dev_family_check(adapter);
3311 if (status)
3312 goto free_netdev;
3314 adapter->netdev = netdev;
3315 SET_NETDEV_DEV(netdev, &pdev->dev);
3317 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
3318 if (!status) {
3319 netdev->features |= NETIF_F_HIGHDMA;
3320 } else {
3321 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3322 if (status) {
3323 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3324 goto free_netdev;
3328 status = be_sriov_enable(adapter);
3329 if (status)
3330 goto free_netdev;
3332 status = be_ctrl_init(adapter);
3333 if (status)
3334 goto disable_sriov;
3336 if (lancer_chip(adapter)) {
3337 status = lancer_test_and_set_rdy_state(adapter);
3338 if (status) {
3339 dev_err(&pdev->dev, "Adapter in non recoverable error\n");
3340 goto ctrl_clean;
3344 /* sync up with fw's ready state */
3345 if (be_physfn(adapter)) {
3346 status = be_cmd_POST(adapter);
3347 if (status)
3348 goto ctrl_clean;
3351 /* tell fw we're ready to fire cmds */
3352 status = be_cmd_fw_init(adapter);
3353 if (status)
3354 goto ctrl_clean;
3356 status = be_cmd_reset_function(adapter);
3357 if (status)
3358 goto ctrl_clean;
3360 status = be_stats_init(adapter);
3361 if (status)
3362 goto ctrl_clean;
3364 status = be_get_config(adapter);
3365 if (status)
3366 goto stats_clean;
3368 /* The INTR bit may be set in the card when probed by a kdump kernel
3369 * after a crash.
3371 if (!lancer_chip(adapter))
3372 be_intr_set(adapter, false);
3374 be_msix_enable(adapter);
3376 INIT_DELAYED_WORK(&adapter->work, be_worker);
3377 adapter->rx_fc = adapter->tx_fc = true;
3379 status = be_setup(adapter);
3380 if (status)
3381 goto msix_disable;
3383 be_netdev_init(netdev);
3384 status = register_netdev(netdev);
3385 if (status != 0)
3386 goto unsetup;
3388 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
3390 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3391 return 0;
3393 unsetup:
3394 be_clear(adapter);
3395 msix_disable:
3396 be_msix_disable(adapter);
3397 stats_clean:
3398 be_stats_cleanup(adapter);
3399 ctrl_clean:
3400 be_ctrl_cleanup(adapter);
3401 disable_sriov:
3402 be_sriov_disable(adapter);
3403 free_netdev:
3404 free_netdev(netdev);
3405 pci_set_drvdata(pdev, NULL);
3406 rel_reg:
3407 pci_release_regions(pdev);
3408 disable_dev:
3409 pci_disable_device(pdev);
3410 do_none:
3411 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3412 return status;
3415 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3417 struct be_adapter *adapter = pci_get_drvdata(pdev);
3418 struct net_device *netdev = adapter->netdev;
3420 cancel_delayed_work_sync(&adapter->work);
3421 if (adapter->wol)
3422 be_setup_wol(adapter, true);
3424 netif_device_detach(netdev);
3425 if (netif_running(netdev)) {
3426 rtnl_lock();
3427 be_close(netdev);
3428 rtnl_unlock();
3430 be_clear(adapter);
3432 be_msix_disable(adapter);
3433 pci_save_state(pdev);
3434 pci_disable_device(pdev);
3435 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3436 return 0;
3439 static int be_resume(struct pci_dev *pdev)
3441 int status = 0;
3442 struct be_adapter *adapter = pci_get_drvdata(pdev);
3443 struct net_device *netdev = adapter->netdev;
3445 netif_device_detach(netdev);
3447 status = pci_enable_device(pdev);
3448 if (status)
3449 return status;
3451 pci_set_power_state(pdev, PCI_D0);
3452 pci_restore_state(pdev);
3454 be_msix_enable(adapter);
3455 /* tell fw we're ready to fire cmds */
3456 status = be_cmd_fw_init(adapter);
3457 if (status)
3458 return status;
3460 be_setup(adapter);
3461 if (netif_running(netdev)) {
3462 rtnl_lock();
3463 be_open(netdev);
3464 rtnl_unlock();
3466 netif_device_attach(netdev);
3468 if (adapter->wol)
3469 be_setup_wol(adapter, false);
3471 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3472 return 0;
3476 * An FLR will stop BE from DMAing any data.
3478 static void be_shutdown(struct pci_dev *pdev)
3480 struct be_adapter *adapter = pci_get_drvdata(pdev);
3482 if (!adapter)
3483 return;
3485 cancel_delayed_work_sync(&adapter->work);
3487 netif_device_detach(adapter->netdev);
3489 if (adapter->wol)
3490 be_setup_wol(adapter, true);
3492 be_cmd_reset_function(adapter);
3494 pci_disable_device(pdev);
3497 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3498 pci_channel_state_t state)
3500 struct be_adapter *adapter = pci_get_drvdata(pdev);
3501 struct net_device *netdev = adapter->netdev;
3503 dev_err(&adapter->pdev->dev, "EEH error detected\n");
3505 adapter->eeh_err = true;
3507 netif_device_detach(netdev);
3509 if (netif_running(netdev)) {
3510 rtnl_lock();
3511 be_close(netdev);
3512 rtnl_unlock();
3514 be_clear(adapter);
3516 if (state == pci_channel_io_perm_failure)
3517 return PCI_ERS_RESULT_DISCONNECT;
3519 pci_disable_device(pdev);
3521 return PCI_ERS_RESULT_NEED_RESET;
3524 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3526 struct be_adapter *adapter = pci_get_drvdata(pdev);
3527 int status;
3529 dev_info(&adapter->pdev->dev, "EEH reset\n");
3530 adapter->eeh_err = false;
3532 status = pci_enable_device(pdev);
3533 if (status)
3534 return PCI_ERS_RESULT_DISCONNECT;
3536 pci_set_master(pdev);
3537 pci_set_power_state(pdev, PCI_D0);
3538 pci_restore_state(pdev);
3540 /* Check if card is ok and fw is ready */
3541 status = be_cmd_POST(adapter);
3542 if (status)
3543 return PCI_ERS_RESULT_DISCONNECT;
3545 return PCI_ERS_RESULT_RECOVERED;
3548 static void be_eeh_resume(struct pci_dev *pdev)
3550 int status = 0;
3551 struct be_adapter *adapter = pci_get_drvdata(pdev);
3552 struct net_device *netdev = adapter->netdev;
3554 dev_info(&adapter->pdev->dev, "EEH resume\n");
3556 pci_save_state(pdev);
3558 /* tell fw we're ready to fire cmds */
3559 status = be_cmd_fw_init(adapter);
3560 if (status)
3561 goto err;
3563 status = be_setup(adapter);
3564 if (status)
3565 goto err;
3567 if (netif_running(netdev)) {
3568 status = be_open(netdev);
3569 if (status)
3570 goto err;
3572 netif_device_attach(netdev);
3573 return;
3574 err:
3575 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
3578 static struct pci_error_handlers be_eeh_handlers = {
3579 .error_detected = be_eeh_err_detected,
3580 .slot_reset = be_eeh_reset,
3581 .resume = be_eeh_resume,
3584 static struct pci_driver be_driver = {
3585 .name = DRV_NAME,
3586 .id_table = be_dev_ids,
3587 .probe = be_probe,
3588 .remove = be_remove,
3589 .suspend = be_suspend,
3590 .resume = be_resume,
3591 .shutdown = be_shutdown,
3592 .err_handler = &be_eeh_handlers
3595 static int __init be_init_module(void)
3597 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3598 rx_frag_size != 2048) {
3599 printk(KERN_WARNING DRV_NAME
3600 " : Module param rx_frag_size must be 2048/4096/8192."
3601 " Using 2048\n");
3602 rx_frag_size = 2048;
3605 return pci_register_driver(&be_driver);
3607 module_init(be_init_module);
3609 static void __exit be_exit_module(void)
3611 pci_unregister_driver(&be_driver);
3613 module_exit(be_exit_module);