be2net: add vlan/rx-mode/flow-control config to be_setup()
drivers/net/ethernet/emulex/benet/be_main.c
1 /*
2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
10 * Contact Information:
11 * linux-drivers@emulex.com
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
18 #include <linux/prefetch.h>
19 #include "be.h"
20 #include "be_cmds.h"
21 #include <asm/div64.h>
23 MODULE_VERSION(DRV_VER);
24 MODULE_DEVICE_TABLE(pci, be_dev_ids);
25 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
26 MODULE_AUTHOR("ServerEngines Corporation");
27 MODULE_LICENSE("GPL");
29 static ushort rx_frag_size = 2048;
30 static unsigned int num_vfs;
31 module_param(rx_frag_size, ushort, S_IRUGO);
32 module_param(num_vfs, uint, S_IRUGO);
33 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
34 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
36 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
37 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
38 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
39 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
40 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
41 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
42 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
43 { 0 }
45 MODULE_DEVICE_TABLE(pci, be_dev_ids);
46 /* UE Status Low CSR */
47 static const char * const ue_status_low_desc[] = {
48 "CEV",
49 "CTX",
50 "DBUF",
51 "ERX",
52 "Host",
53 "MPU",
54 "NDMA",
55 "PTC ",
56 "RDMA ",
57 "RXF ",
58 "RXIPS ",
59 "RXULP0 ",
60 "RXULP1 ",
61 "RXULP2 ",
62 "TIM ",
63 "TPOST ",
64 "TPRE ",
65 "TXIPS ",
66 "TXULP0 ",
67 "TXULP1 ",
68 "UC ",
69 "WDMA ",
70 "TXULP2 ",
71 "HOST1 ",
72 "P0_OB_LINK ",
73 "P1_OB_LINK ",
74 "HOST_GPIO ",
75 "MBOX ",
76 "AXGMAC0",
77 "AXGMAC1",
78 "JTAG",
79 "MPU_INTPEND"
81 /* UE Status High CSR */
82 static const char * const ue_status_hi_desc[] = {
83 "LPCMEMHOST",
84 "MGMT_MAC",
85 "PCS0ONLINE",
86 "MPU_IRAM",
87 "PCS1ONLINE",
88 "PCTL0",
89 "PCTL1",
90 "PMEM",
91 "RR",
92 "TXPB",
93 "RXPP",
94 "XAUI",
95 "TXP",
96 "ARM",
97 "IPC",
98 "HOST2",
99 "HOST3",
100 "HOST4",
101 "HOST5",
102 "HOST6",
103 "HOST7",
104 "HOST8",
105 "HOST9",
106 "NETC",
107 "Unknown",
108 "Unknown",
109 "Unknown",
110 "Unknown",
111 "Unknown",
112 "Unknown",
113 "Unknown",
114 "Unknown"
117 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
119 struct be_dma_mem *mem = &q->dma_mem;
120 if (mem->va)
121 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
122 mem->dma);
125 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
126 u16 len, u16 entry_size)
128 struct be_dma_mem *mem = &q->dma_mem;
130 memset(q, 0, sizeof(*q));
131 q->len = len;
132 q->entry_size = entry_size;
133 mem->size = len * entry_size;
134 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
135 GFP_KERNEL);
136 if (!mem->va)
137 return -1;
138 memset(mem->va, 0, mem->size);
139 return 0;
142 static void be_intr_set(struct be_adapter *adapter, bool enable)
144 u32 reg, enabled;
146 if (adapter->eeh_err)
147 return;
149 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
150 &reg);
151 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
153 if (!enabled && enable)
154 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
155 else if (enabled && !enable)
156 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
157 else
158 return;
160 pci_write_config_dword(adapter->pdev,
161 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
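/* Ring the RQ doorbell: tell the adapter how many rx descriptors were just
 * posted on the given queue id */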
164 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
166 u32 val = 0;
167 val |= qid & DB_RQ_RING_ID_MASK;
168 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
170 wmb();
171 iowrite32(val, adapter->db + DB_RQ_OFFSET);
174 static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
176 u32 val = 0;
177 val |= qid & DB_TXULP_RING_ID_MASK;
178 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
180 wmb();
181 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
184 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
185 bool arm, bool clear_int, u16 num_popped)
187 u32 val = 0;
188 val |= qid & DB_EQ_RING_ID_MASK;
189 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
190 DB_EQ_RING_ID_EXT_MASK_SHIFT);
192 if (adapter->eeh_err)
193 return;
195 if (arm)
196 val |= 1 << DB_EQ_REARM_SHIFT;
197 if (clear_int)
198 val |= 1 << DB_EQ_CLR_SHIFT;
199 val |= 1 << DB_EQ_EVNT_SHIFT;
200 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
201 iowrite32(val, adapter->db + DB_EQ_OFFSET);
204 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
206 u32 val = 0;
207 val |= qid & DB_CQ_RING_ID_MASK;
208 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
209 DB_CQ_RING_ID_EXT_MASK_SHIFT);
211 if (adapter->eeh_err)
212 return;
214 if (arm)
215 val |= 1 << DB_CQ_REARM_SHIFT;
216 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
217 iowrite32(val, adapter->db + DB_CQ_OFFSET);
220 static int be_mac_addr_set(struct net_device *netdev, void *p)
222 struct be_adapter *adapter = netdev_priv(netdev);
223 struct sockaddr *addr = p;
224 int status = 0;
226 if (!is_valid_ether_addr(addr->sa_data))
227 return -EADDRNOTAVAIL;
229 /* MAC addr configuration will be done in hardware for VFs
230  * by their corresponding PFs. Just copy to netdev addr here */
232 if (!be_physfn(adapter))
233 goto netdev_addr;
235 status = be_cmd_pmac_del(adapter, adapter->if_handle,
236 adapter->pmac_id, 0);
237 if (status)
238 return status;
240 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
241 adapter->if_handle, &adapter->pmac_id, 0);
242 netdev_addr:
243 if (!status)
244 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
246 return status;
249 static void populate_be2_stats(struct be_adapter *adapter)
251 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
252 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
253 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
254 struct be_port_rxf_stats_v0 *port_stats =
255 &rxf_stats->port[adapter->port_num];
256 struct be_drv_stats *drvs = &adapter->drv_stats;
258 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
259 drvs->rx_pause_frames = port_stats->rx_pause_frames;
260 drvs->rx_crc_errors = port_stats->rx_crc_errors;
261 drvs->rx_control_frames = port_stats->rx_control_frames;
262 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
263 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
264 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
265 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
266 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
267 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
268 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
269 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
270 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
271 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
272 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
273 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
274 drvs->rx_dropped_header_too_small =
275 port_stats->rx_dropped_header_too_small;
276 drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
277 drvs->rx_alignment_symbol_errors =
278 port_stats->rx_alignment_symbol_errors;
280 drvs->tx_pauseframes = port_stats->tx_pauseframes;
281 drvs->tx_controlframes = port_stats->tx_controlframes;
283 if (adapter->port_num)
284 drvs->jabber_events = rxf_stats->port1_jabber_events;
285 else
286 drvs->jabber_events = rxf_stats->port0_jabber_events;
287 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
288 drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
289 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
290 drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
291 drvs->forwarded_packets = rxf_stats->forwarded_packets;
292 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
293 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
294 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
295 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
298 static void populate_be3_stats(struct be_adapter *adapter)
300 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
301 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
302 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
303 struct be_port_rxf_stats_v1 *port_stats =
304 &rxf_stats->port[adapter->port_num];
305 struct be_drv_stats *drvs = &adapter->drv_stats;
307 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
308 drvs->rx_pause_frames = port_stats->rx_pause_frames;
309 drvs->rx_crc_errors = port_stats->rx_crc_errors;
310 drvs->rx_control_frames = port_stats->rx_control_frames;
311 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
312 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
313 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
314 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
315 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
316 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
317 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
318 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
319 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
320 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
321 drvs->rx_dropped_header_too_small =
322 port_stats->rx_dropped_header_too_small;
323 drvs->rx_input_fifo_overflow_drop =
324 port_stats->rx_input_fifo_overflow_drop;
325 drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
326 drvs->rx_alignment_symbol_errors =
327 port_stats->rx_alignment_symbol_errors;
328 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
329 drvs->tx_pauseframes = port_stats->tx_pauseframes;
330 drvs->tx_controlframes = port_stats->tx_controlframes;
331 drvs->jabber_events = port_stats->jabber_events;
332 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
333 drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
334 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
335 drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
336 drvs->forwarded_packets = rxf_stats->forwarded_packets;
337 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
338 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
339 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
340 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
343 static void populate_lancer_stats(struct be_adapter *adapter)
346 struct be_drv_stats *drvs = &adapter->drv_stats;
347 struct lancer_pport_stats *pport_stats =
348 pport_stats_from_cmd(adapter);
350 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
351 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
352 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
353 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
354 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
355 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
356 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
357 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
358 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
359 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
360 drvs->rx_dropped_tcp_length =
361 pport_stats->rx_dropped_invalid_tcp_length;
362 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
363 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
364 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
365 drvs->rx_dropped_header_too_small =
366 pport_stats->rx_dropped_header_too_small;
367 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
368 drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
369 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
370 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
371 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
372 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
373 drvs->jabber_events = pport_stats->rx_jabbers;
374 drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
375 drvs->forwarded_packets = pport_stats->num_forwards_lo;
376 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
377 drvs->rx_drops_too_many_frags =
378 pport_stats->rx_drops_too_many_frags_lo;
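/* Accumulate a 16-bit HW counter (which wraps at 65535) into a 32-bit driver
 * counter: the low half mirrors the current HW value, the high half is bumped
 * on every wrap-around */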
381 static void accumulate_16bit_val(u32 *acc, u16 val)
383 #define lo(x) (x & 0xFFFF)
384 #define hi(x) (x & 0xFFFF0000)
385 bool wrapped = val < lo(*acc);
386 u32 newacc = hi(*acc) + val;
388 if (wrapped)
389 newacc += 65536;
390 ACCESS_ONCE(*acc) = newacc;
393 void be_parse_stats(struct be_adapter *adapter)
395 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
396 struct be_rx_obj *rxo;
397 int i;
399 if (adapter->generation == BE_GEN3) {
400 if (lancer_chip(adapter))
401 populate_lancer_stats(adapter);
402 else
403 populate_be3_stats(adapter);
404 } else {
405 populate_be2_stats(adapter);
408 /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
409 for_all_rx_queues(adapter, rxo, i) {
410 /* below erx HW counter can actually wrap around after
411  * 65535. Driver accumulates a 32-bit value */
413 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
414 (u16)erx->rx_drops_no_fragments[rxo->q.id]);
418 static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
419 struct rtnl_link_stats64 *stats)
421 struct be_adapter *adapter = netdev_priv(netdev);
422 struct be_drv_stats *drvs = &adapter->drv_stats;
423 struct be_rx_obj *rxo;
424 struct be_tx_obj *txo;
425 u64 pkts, bytes;
426 unsigned int start;
427 int i;
429 for_all_rx_queues(adapter, rxo, i) {
430 const struct be_rx_stats *rx_stats = rx_stats(rxo);
431 do {
432 start = u64_stats_fetch_begin_bh(&rx_stats->sync);
433 pkts = rx_stats(rxo)->rx_pkts;
434 bytes = rx_stats(rxo)->rx_bytes;
435 } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
436 stats->rx_packets += pkts;
437 stats->rx_bytes += bytes;
438 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
439 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
440 rx_stats(rxo)->rx_drops_no_frags;
443 for_all_tx_queues(adapter, txo, i) {
444 const struct be_tx_stats *tx_stats = tx_stats(txo);
445 do {
446 start = u64_stats_fetch_begin_bh(&tx_stats->sync);
447 pkts = tx_stats(txo)->tx_pkts;
448 bytes = tx_stats(txo)->tx_bytes;
449 } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
450 stats->tx_packets += pkts;
451 stats->tx_bytes += bytes;
454 /* bad pkts received */
455 stats->rx_errors = drvs->rx_crc_errors +
456 drvs->rx_alignment_symbol_errors +
457 drvs->rx_in_range_errors +
458 drvs->rx_out_range_errors +
459 drvs->rx_frame_too_long +
460 drvs->rx_dropped_too_small +
461 drvs->rx_dropped_too_short +
462 drvs->rx_dropped_header_too_small +
463 drvs->rx_dropped_tcp_length +
464 drvs->rx_dropped_runt;
466 /* detailed rx errors */
467 stats->rx_length_errors = drvs->rx_in_range_errors +
468 drvs->rx_out_range_errors +
469 drvs->rx_frame_too_long;
471 stats->rx_crc_errors = drvs->rx_crc_errors;
473 /* frame alignment errors */
474 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
476 /* receiver fifo overrun */
477  /* drops_no_pbuf is not per i/f, it's per BE card */
478 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
479 drvs->rx_input_fifo_overflow_drop +
480 drvs->rx_drops_no_pbuf;
481 return stats;
484 void be_link_status_update(struct be_adapter *adapter, u32 link_status)
486 struct net_device *netdev = adapter->netdev;
488 /* when link status changes, link speed must be re-queried from card */
489 adapter->link_speed = -1;
490 if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
491 netif_carrier_on(netdev);
492 dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
493 } else {
494 netif_carrier_off(netdev);
495 dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
499 static void be_tx_stats_update(struct be_tx_obj *txo,
500 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
502 struct be_tx_stats *stats = tx_stats(txo);
504 u64_stats_update_begin(&stats->sync);
505 stats->tx_reqs++;
506 stats->tx_wrbs += wrb_cnt;
507 stats->tx_bytes += copied;
508 stats->tx_pkts += (gso_segs ? gso_segs : 1);
509 if (stopped)
510 stats->tx_stops++;
511 u64_stats_update_end(&stats->sync);
514 /* Determine number of WRB entries needed to xmit data in an skb */
515 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
516 bool *dummy)
518 int cnt = (skb->len > skb->data_len);
520 cnt += skb_shinfo(skb)->nr_frags;
522 /* to account for hdr wrb */
523 cnt++;
524 if (lancer_chip(adapter) || !(cnt & 1)) {
525 *dummy = false;
526 } else {
527 /* add a dummy to make it an even num */
528 cnt++;
529 *dummy = true;
531 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
532 return cnt;
535 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
537 wrb->frag_pa_hi = upper_32_bits(addr);
538 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
539 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
542 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
543 struct sk_buff *skb, u32 wrb_cnt, u32 len)
545 u8 vlan_prio = 0;
546 u16 vlan_tag = 0;
548 memset(hdr, 0, sizeof(*hdr));
550 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
552 if (skb_is_gso(skb)) {
553 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
554 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
555 hdr, skb_shinfo(skb)->gso_size);
556 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
557 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
558 if (lancer_chip(adapter) && adapter->sli_family ==
559 LANCER_A0_SLI_FAMILY) {
560 AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
561 if (is_tcp_pkt(skb))
562 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
563 tcpcs, hdr, 1);
564 else if (is_udp_pkt(skb))
565 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
566 udpcs, hdr, 1);
568 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
569 if (is_tcp_pkt(skb))
570 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
571 else if (is_udp_pkt(skb))
572 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
575 if (vlan_tx_tag_present(skb)) {
576 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
577 vlan_tag = vlan_tx_tag_get(skb);
578 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
579 /* If vlan priority provided by OS is NOT in available bmap */
580 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
581 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
582 adapter->recommended_prio;
583 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
586 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
587 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
588 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
589 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
592 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
593 bool unmap_single)
595 dma_addr_t dma;
597 be_dws_le_to_cpu(wrb, sizeof(*wrb));
599 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
600 if (wrb->frag_len) {
601 if (unmap_single)
602 dma_unmap_single(dev, dma, wrb->frag_len,
603 DMA_TO_DEVICE);
604 else
605 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
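/* DMA-map the skb header and frags and fill one tx wrb per mapping; returns
 * the number of bytes mapped, or 0 after unwinding the wrbs filled so far on
 * a mapping error */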
609 static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
610 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
612 dma_addr_t busaddr;
613 int i, copied = 0;
614 struct device *dev = &adapter->pdev->dev;
615 struct sk_buff *first_skb = skb;
616 struct be_eth_wrb *wrb;
617 struct be_eth_hdr_wrb *hdr;
618 bool map_single = false;
619 u16 map_head;
621 hdr = queue_head_node(txq);
622 queue_head_inc(txq);
623 map_head = txq->head;
625 if (skb->len > skb->data_len) {
626 int len = skb_headlen(skb);
627 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
628 if (dma_mapping_error(dev, busaddr))
629 goto dma_err;
630 map_single = true;
631 wrb = queue_head_node(txq);
632 wrb_fill(wrb, busaddr, len);
633 be_dws_cpu_to_le(wrb, sizeof(*wrb));
634 queue_head_inc(txq);
635 copied += len;
638 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
639 const struct skb_frag_struct *frag =
640 &skb_shinfo(skb)->frags[i];
641 busaddr = skb_frag_dma_map(dev, frag, 0,
642 skb_frag_size(frag), DMA_TO_DEVICE);
643 if (dma_mapping_error(dev, busaddr))
644 goto dma_err;
645 wrb = queue_head_node(txq);
646 wrb_fill(wrb, busaddr, skb_frag_size(frag));
647 be_dws_cpu_to_le(wrb, sizeof(*wrb));
648 queue_head_inc(txq);
649 copied += skb_frag_size(frag);
652 if (dummy_wrb) {
653 wrb = queue_head_node(txq);
654 wrb_fill(wrb, 0, 0);
655 be_dws_cpu_to_le(wrb, sizeof(*wrb));
656 queue_head_inc(txq);
659 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
660 be_dws_cpu_to_le(hdr, sizeof(*hdr));
662 return copied;
663 dma_err:
664 txq->head = map_head;
665 while (copied) {
666 wrb = queue_head_node(txq);
667 unmap_tx_frag(dev, wrb, map_single);
668 map_single = false;
669 copied -= wrb->frag_len;
670 queue_head_inc(txq);
672 return 0;
675 static netdev_tx_t be_xmit(struct sk_buff *skb,
676 struct net_device *netdev)
678 struct be_adapter *adapter = netdev_priv(netdev);
679 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
680 struct be_queue_info *txq = &txo->q;
681 u32 wrb_cnt = 0, copied = 0;
682 u32 start = txq->head;
683 bool dummy_wrb, stopped = false;
685 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
687 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
688 if (copied) {
689 /* record the sent skb in the sent_skb table */
690 BUG_ON(txo->sent_skb_list[start]);
691 txo->sent_skb_list[start] = skb;
693  /* Ensure txq has space for the next skb; else stop the queue
694  * *BEFORE* ringing the tx doorbell, so that we serialize the
695  * tx compls of the current transmit which'll wake up the queue */
697 atomic_add(wrb_cnt, &txq->used);
698 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
699 txq->len) {
700 netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
701 stopped = true;
704 be_txq_notify(adapter, txq->id, wrb_cnt);
706 be_tx_stats_update(txo, wrb_cnt, copied,
707 skb_shinfo(skb)->gso_segs, stopped);
708 } else {
709 txq->head = start;
710 dev_kfree_skb_any(skb);
712 return NETDEV_TX_OK;
715 static int be_change_mtu(struct net_device *netdev, int new_mtu)
717 struct be_adapter *adapter = netdev_priv(netdev);
718 if (new_mtu < BE_MIN_MTU ||
719 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
720 (ETH_HLEN + ETH_FCS_LEN))) {
721 dev_info(&adapter->pdev->dev,
722 "MTU must be between %d and %d bytes\n",
723 BE_MIN_MTU,
724 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
725 return -EINVAL;
727 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
728 netdev->mtu, new_mtu);
729 netdev->mtu = new_mtu;
730 return 0;
734  /* A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
735  * If the user configures more, place BE in vlan promiscuous mode. */
737 static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
739 u16 vtag[BE_NUM_VLANS_SUPPORTED];
740 u16 ntags = 0, i;
741 int status = 0;
742 u32 if_handle;
744 if (vf) {
745 if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
746 vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
747 status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
750 /* No need to further configure vids if in promiscuous mode */
751 if (adapter->promiscuous)
752 return 0;
754 if (adapter->vlans_added <= adapter->max_vlans) {
755 /* Construct VLAN Table to give to HW */
756 for (i = 0; i < VLAN_N_VID; i++) {
757 if (adapter->vlan_tag[i]) {
758 vtag[ntags] = cpu_to_le16(i);
759 ntags++;
762 status = be_cmd_vlan_config(adapter, adapter->if_handle,
763 vtag, ntags, 1, 0);
764 } else {
765 status = be_cmd_vlan_config(adapter, adapter->if_handle,
766 NULL, 0, 1, 1);
769 return status;
772 static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
774 struct be_adapter *adapter = netdev_priv(netdev);
776 adapter->vlans_added++;
777 if (!be_physfn(adapter))
778 return;
780 adapter->vlan_tag[vid] = 1;
781 if (adapter->vlans_added <= (adapter->max_vlans + 1))
782 be_vid_config(adapter, false, 0);
785 static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
787 struct be_adapter *adapter = netdev_priv(netdev);
789 adapter->vlans_added--;
791 if (!be_physfn(adapter))
792 return;
794 adapter->vlan_tag[vid] = 0;
795 if (adapter->vlans_added <= adapter->max_vlans)
796 be_vid_config(adapter, false, 0);
799 static void be_set_rx_mode(struct net_device *netdev)
801 struct be_adapter *adapter = netdev_priv(netdev);
803 if (netdev->flags & IFF_PROMISC) {
804 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
805 adapter->promiscuous = true;
806 goto done;
809 /* BE was previously in promiscuous mode; disable it */
810 if (adapter->promiscuous) {
811 adapter->promiscuous = false;
812 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
814 if (adapter->vlans_added)
815 be_vid_config(adapter, false, 0);
818 /* Enable multicast promisc if num configured exceeds what we support */
819 if (netdev->flags & IFF_ALLMULTI ||
820 netdev_mc_count(netdev) > BE_MAX_MC) {
821 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
822 goto done;
825 be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
826 done:
827 return;
830 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
832 struct be_adapter *adapter = netdev_priv(netdev);
833 int status;
835 if (!adapter->sriov_enabled)
836 return -EPERM;
838 if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
839 return -EINVAL;
841 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
842 status = be_cmd_pmac_del(adapter,
843 adapter->vf_cfg[vf].vf_if_handle,
844 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
846 status = be_cmd_pmac_add(adapter, mac,
847 adapter->vf_cfg[vf].vf_if_handle,
848 &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
850 if (status)
851 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
852 mac, vf);
853 else
854 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
856 return status;
859 static int be_get_vf_config(struct net_device *netdev, int vf,
860 struct ifla_vf_info *vi)
862 struct be_adapter *adapter = netdev_priv(netdev);
864 if (!adapter->sriov_enabled)
865 return -EPERM;
867 if (vf >= num_vfs)
868 return -EINVAL;
870 vi->vf = vf;
871 vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
872 vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
873 vi->qos = 0;
874 memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
876 return 0;
879 static int be_set_vf_vlan(struct net_device *netdev,
880 int vf, u16 vlan, u8 qos)
882 struct be_adapter *adapter = netdev_priv(netdev);
883 int status = 0;
885 if (!adapter->sriov_enabled)
886 return -EPERM;
888 if ((vf >= num_vfs) || (vlan > 4095))
889 return -EINVAL;
891 if (vlan) {
892 adapter->vf_cfg[vf].vf_vlan_tag = vlan;
893 adapter->vlans_added++;
894 } else {
895 adapter->vf_cfg[vf].vf_vlan_tag = 0;
896 adapter->vlans_added--;
899 status = be_vid_config(adapter, true, vf);
901 if (status)
902 dev_info(&adapter->pdev->dev,
903 "VLAN %d config on VF %d failed\n", vlan, vf);
904 return status;
907 static int be_set_vf_tx_rate(struct net_device *netdev,
908 int vf, int rate)
910 struct be_adapter *adapter = netdev_priv(netdev);
911 int status = 0;
913 if (!adapter->sriov_enabled)
914 return -EPERM;
916 if ((vf >= num_vfs) || (rate < 0))
917 return -EINVAL;
919 if (rate > 10000)
920 rate = 10000;
922 adapter->vf_cfg[vf].vf_tx_rate = rate;
923 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
925 if (status)
926 dev_info(&adapter->pdev->dev,
927 "tx rate %d on VF %d failed\n", rate, vf);
928 return status;
931 static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
933 struct be_eq_obj *rx_eq = &rxo->rx_eq;
934 struct be_rx_stats *stats = rx_stats(rxo);
935 ulong now = jiffies;
936 ulong delta = now - stats->rx_jiffies;
937 u64 pkts;
938 unsigned int start, eqd;
940 if (!rx_eq->enable_aic)
941 return;
943 /* Wrapped around */
944 if (time_before(now, stats->rx_jiffies)) {
945 stats->rx_jiffies = now;
946 return;
949 /* Update once a second */
950 if (delta < HZ)
951 return;
953 do {
954 start = u64_stats_fetch_begin_bh(&stats->sync);
955 pkts = stats->rx_pkts;
956 } while (u64_stats_fetch_retry_bh(&stats->sync, start));
958 stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
959 stats->rx_pkts_prev = pkts;
960 stats->rx_jiffies = now;
961 eqd = stats->rx_pps / 110000;
962 eqd = eqd << 3;
963 if (eqd > rx_eq->max_eqd)
964 eqd = rx_eq->max_eqd;
965 if (eqd < rx_eq->min_eqd)
966 eqd = rx_eq->min_eqd;
967 if (eqd < 10)
968 eqd = 0;
969 if (eqd != rx_eq->cur_eqd) {
970 be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
971 rx_eq->cur_eqd = eqd;
975 static void be_rx_stats_update(struct be_rx_obj *rxo,
976 struct be_rx_compl_info *rxcp)
978 struct be_rx_stats *stats = rx_stats(rxo);
980 u64_stats_update_begin(&stats->sync);
981 stats->rx_compl++;
982 stats->rx_bytes += rxcp->pkt_size;
983 stats->rx_pkts++;
984 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
985 stats->rx_mcast_pkts++;
986 if (rxcp->err)
987 stats->rx_compl_err++;
988 u64_stats_update_end(&stats->sync);
991 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
993 /* L4 checksum is not reliable for non TCP/UDP packets.
994 * Also ignore ipcksm for ipv6 pkts */
995 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
996 (rxcp->ip_csum || rxcp->ipv6);
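/* Return the page_info for the rx frag at frag_idx; the backing page is
 * DMA-unmapped once its last frag is being consumed */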
999 static struct be_rx_page_info *
1000 get_rx_page_info(struct be_adapter *adapter,
1001 struct be_rx_obj *rxo,
1002 u16 frag_idx)
1004 struct be_rx_page_info *rx_page_info;
1005 struct be_queue_info *rxq = &rxo->q;
1007 rx_page_info = &rxo->page_info_tbl[frag_idx];
1008 BUG_ON(!rx_page_info->page);
1010 if (rx_page_info->last_page_user) {
1011 dma_unmap_page(&adapter->pdev->dev,
1012 dma_unmap_addr(rx_page_info, bus),
1013 adapter->big_page_size, DMA_FROM_DEVICE);
1014 rx_page_info->last_page_user = false;
1017 atomic_dec(&rxq->used);
1018 return rx_page_info;
1021 /* Throw away the data in the Rx completion */
1022 static void be_rx_compl_discard(struct be_adapter *adapter,
1023 struct be_rx_obj *rxo,
1024 struct be_rx_compl_info *rxcp)
1026 struct be_queue_info *rxq = &rxo->q;
1027 struct be_rx_page_info *page_info;
1028 u16 i, num_rcvd = rxcp->num_rcvd;
1030 for (i = 0; i < num_rcvd; i++) {
1031 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1032 put_page(page_info->page);
1033 memset(page_info, 0, sizeof(*page_info));
1034 index_inc(&rxcp->rxq_idx, rxq->len);
1039 /* skb_fill_rx_data forms a complete skb for an ether frame
1040  * indicated by rxcp. */
1042 static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
1043 struct sk_buff *skb, struct be_rx_compl_info *rxcp)
1045 struct be_queue_info *rxq = &rxo->q;
1046 struct be_rx_page_info *page_info;
1047 u16 i, j;
1048 u16 hdr_len, curr_frag_len, remaining;
1049 u8 *start;
1051 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1052 start = page_address(page_info->page) + page_info->page_offset;
1053 prefetch(start);
1055 /* Copy data in the first descriptor of this completion */
1056 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1058 /* Copy the header portion into skb_data */
1059 hdr_len = min(BE_HDR_LEN, curr_frag_len);
1060 memcpy(skb->data, start, hdr_len);
1061 skb->len = curr_frag_len;
1062 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1063 /* Complete packet has now been moved to data */
1064 put_page(page_info->page);
1065 skb->data_len = 0;
1066 skb->tail += curr_frag_len;
1067 } else {
1068 skb_shinfo(skb)->nr_frags = 1;
1069 skb_frag_set_page(skb, 0, page_info->page);
1070 skb_shinfo(skb)->frags[0].page_offset =
1071 page_info->page_offset + hdr_len;
1072 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1073 skb->data_len = curr_frag_len - hdr_len;
1074 skb->truesize += rx_frag_size;
1075 skb->tail += hdr_len;
1077 page_info->page = NULL;
1079 if (rxcp->pkt_size <= rx_frag_size) {
1080 BUG_ON(rxcp->num_rcvd != 1);
1081 return;
1084 /* More frags present for this completion */
1085 index_inc(&rxcp->rxq_idx, rxq->len);
1086 remaining = rxcp->pkt_size - curr_frag_len;
1087 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1088 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1089 curr_frag_len = min(remaining, rx_frag_size);
1091 /* Coalesce all frags from the same physical page in one slot */
1092 if (page_info->page_offset == 0) {
1093 /* Fresh page */
1094 j++;
1095 skb_frag_set_page(skb, j, page_info->page);
1096 skb_shinfo(skb)->frags[j].page_offset =
1097 page_info->page_offset;
1098 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1099 skb_shinfo(skb)->nr_frags++;
1100 } else {
1101 put_page(page_info->page);
1104 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1105 skb->len += curr_frag_len;
1106 skb->data_len += curr_frag_len;
1107 skb->truesize += rx_frag_size;
1108 remaining -= curr_frag_len;
1109 index_inc(&rxcp->rxq_idx, rxq->len);
1110 page_info->page = NULL;
1112 BUG_ON(j > MAX_SKB_FRAGS);
1115 /* Process the RX completion indicated by rxcp when GRO is disabled */
1116 static void be_rx_compl_process(struct be_adapter *adapter,
1117 struct be_rx_obj *rxo,
1118 struct be_rx_compl_info *rxcp)
1120 struct net_device *netdev = adapter->netdev;
1121 struct sk_buff *skb;
1123 skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
1124 if (unlikely(!skb)) {
1125 rx_stats(rxo)->rx_drops_no_skbs++;
1126 be_rx_compl_discard(adapter, rxo, rxcp);
1127 return;
1130 skb_fill_rx_data(adapter, rxo, skb, rxcp);
1132 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1133 skb->ip_summed = CHECKSUM_UNNECESSARY;
1134 else
1135 skb_checksum_none_assert(skb);
1137 skb->protocol = eth_type_trans(skb, netdev);
1138 if (adapter->netdev->features & NETIF_F_RXHASH)
1139 skb->rxhash = rxcp->rss_hash;
1142 if (rxcp->vlanf)
1143 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1145 netif_receive_skb(skb);
1148 /* Process the RX completion indicated by rxcp when GRO is enabled */
1149 static void be_rx_compl_process_gro(struct be_adapter *adapter,
1150 struct be_rx_obj *rxo,
1151 struct be_rx_compl_info *rxcp)
1153 struct be_rx_page_info *page_info;
1154 struct sk_buff *skb = NULL;
1155 struct be_queue_info *rxq = &rxo->q;
1156 struct be_eq_obj *eq_obj = &rxo->rx_eq;
1157 u16 remaining, curr_frag_len;
1158 u16 i, j;
1160 skb = napi_get_frags(&eq_obj->napi);
1161 if (!skb) {
1162 be_rx_compl_discard(adapter, rxo, rxcp);
1163 return;
1166 remaining = rxcp->pkt_size;
1167 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1168 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1170 curr_frag_len = min(remaining, rx_frag_size);
1172 /* Coalesce all frags from the same physical page in one slot */
1173 if (i == 0 || page_info->page_offset == 0) {
1174 /* First frag or Fresh page */
1175 j++;
1176 skb_frag_set_page(skb, j, page_info->page);
1177 skb_shinfo(skb)->frags[j].page_offset =
1178 page_info->page_offset;
1179 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1180 } else {
1181 put_page(page_info->page);
1183 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1184 skb->truesize += rx_frag_size;
1185 remaining -= curr_frag_len;
1186 index_inc(&rxcp->rxq_idx, rxq->len);
1187 memset(page_info, 0, sizeof(*page_info));
1189 BUG_ON(j > MAX_SKB_FRAGS);
1191 skb_shinfo(skb)->nr_frags = j + 1;
1192 skb->len = rxcp->pkt_size;
1193 skb->data_len = rxcp->pkt_size;
1194 skb->ip_summed = CHECKSUM_UNNECESSARY;
1195 if (adapter->netdev->features & NETIF_F_RXHASH)
1196 skb->rxhash = rxcp->rss_hash;
1198 if (rxcp->vlanf)
1199 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1201 napi_gro_frags(&eq_obj->napi);
1204 static void be_parse_rx_compl_v1(struct be_adapter *adapter,
1205 struct be_eth_rx_compl *compl,
1206 struct be_rx_compl_info *rxcp)
1208 rxcp->pkt_size =
1209 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1210 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1211 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1212 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1213 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1214 rxcp->ip_csum =
1215 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1216 rxcp->l4_csum =
1217 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1218 rxcp->ipv6 =
1219 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1220 rxcp->rxq_idx =
1221 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1222 rxcp->num_rcvd =
1223 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1224 rxcp->pkt_type =
1225 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1226 rxcp->rss_hash =
1227 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, rxcp);
1228 if (rxcp->vlanf) {
1229 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1230 compl);
1231 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1232 compl);
1234 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1237 static void be_parse_rx_compl_v0(struct be_adapter *adapter,
1238 struct be_eth_rx_compl *compl,
1239 struct be_rx_compl_info *rxcp)
1241 rxcp->pkt_size =
1242 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1243 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1244 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1245 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1246 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1247 rxcp->ip_csum =
1248 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1249 rxcp->l4_csum =
1250 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1251 rxcp->ipv6 =
1252 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1253 rxcp->rxq_idx =
1254 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1255 rxcp->num_rcvd =
1256 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1257 rxcp->pkt_type =
1258 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1259 rxcp->rss_hash =
1260 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, rxcp);
1261 if (rxcp->vlanf) {
1262 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1263 compl);
1264 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1265 compl);
1267 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1270 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1272 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1273 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1274 struct be_adapter *adapter = rxo->adapter;
1276 /* For checking the valid bit it is Ok to use either definition as the
1277 * valid bit is at the same position in both v0 and v1 Rx compl */
1278 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1279 return NULL;
1281 rmb();
1282 be_dws_le_to_cpu(compl, sizeof(*compl));
1284 if (adapter->be3_native)
1285 be_parse_rx_compl_v1(adapter, compl, rxcp);
1286 else
1287 be_parse_rx_compl_v0(adapter, compl, rxcp);
1289 if (rxcp->vlanf) {
1290 /* vlanf could be wrongly set in some cards.
1291 * ignore if vtm is not set */
1292 if ((adapter->function_mode & 0x400) && !rxcp->vtm)
1293 rxcp->vlanf = 0;
1295 if (!lancer_chip(adapter))
1296 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1298 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1299 !adapter->vlan_tag[rxcp->vlan_tag])
1300 rxcp->vlanf = 0;
1303 /* As the compl has been parsed, reset it; we won't touch it again */
1304 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1306 queue_tail_inc(&rxo->cq);
1307 return rxcp;
1310 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1312 u32 order = get_order(size);
1314 if (order > 0)
1315 gfp |= __GFP_COMP;
1316 return alloc_pages(gfp, order);
1320 /* Allocate a page, split it to fragments of size rx_frag_size and post as
1321  * receive buffers to BE */
1323 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1325 struct be_adapter *adapter = rxo->adapter;
1326 struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
1327 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1328 struct be_queue_info *rxq = &rxo->q;
1329 struct page *pagep = NULL;
1330 struct be_eth_rx_d *rxd;
1331 u64 page_dmaaddr = 0, frag_dmaaddr;
1332 u32 posted, page_offset = 0;
1334 page_info = &rxo->page_info_tbl[rxq->head];
1335 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1336 if (!pagep) {
1337 pagep = be_alloc_pages(adapter->big_page_size, gfp);
1338 if (unlikely(!pagep)) {
1339 rx_stats(rxo)->rx_post_fail++;
1340 break;
1342 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1343 0, adapter->big_page_size,
1344 DMA_FROM_DEVICE);
1345 page_info->page_offset = 0;
1346 } else {
1347 get_page(pagep);
1348 page_info->page_offset = page_offset + rx_frag_size;
1350 page_offset = page_info->page_offset;
1351 page_info->page = pagep;
1352 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1353 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1355 rxd = queue_head_node(rxq);
1356 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1357 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1359 /* Any space left in the current big page for another frag? */
1360 if ((page_offset + rx_frag_size + rx_frag_size) >
1361 adapter->big_page_size) {
1362 pagep = NULL;
1363 page_info->last_page_user = true;
1366 prev_page_info = page_info;
1367 queue_head_inc(rxq);
1368 page_info = &page_info_tbl[rxq->head];
1370 if (pagep)
1371 prev_page_info->last_page_user = true;
1373 if (posted) {
1374 atomic_add(posted, &rxq->used);
1375 be_rxq_notify(adapter, rxq->id, posted);
1376 } else if (atomic_read(&rxq->used) == 0) {
1377 /* Let be_worker replenish when memory is available */
1378 rxo->rx_post_starved = true;
1382 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1384 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1386 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1387 return NULL;
1389 rmb();
1390 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1392 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1394 queue_tail_inc(tx_cq);
1395 return txcp;
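/* Reclaim the wrbs (header wrb included) of the skb indicated by a tx
 * completion, unmap its buffers and free the skb; returns the number of wrbs
 * reclaimed */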
1398 static u16 be_tx_compl_process(struct be_adapter *adapter,
1399 struct be_tx_obj *txo, u16 last_index)
1401 struct be_queue_info *txq = &txo->q;
1402 struct be_eth_wrb *wrb;
1403 struct sk_buff **sent_skbs = txo->sent_skb_list;
1404 struct sk_buff *sent_skb;
1405 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1406 bool unmap_skb_hdr = true;
1408 sent_skb = sent_skbs[txq->tail];
1409 BUG_ON(!sent_skb);
1410 sent_skbs[txq->tail] = NULL;
1412 /* skip header wrb */
1413 queue_tail_inc(txq);
1415 do {
1416 cur_index = txq->tail;
1417 wrb = queue_tail_node(txq);
1418 unmap_tx_frag(&adapter->pdev->dev, wrb,
1419 (unmap_skb_hdr && skb_headlen(sent_skb)));
1420 unmap_skb_hdr = false;
1422 num_wrbs++;
1423 queue_tail_inc(txq);
1424 } while (cur_index != last_index);
1426 kfree_skb(sent_skb);
1427 return num_wrbs;
1430 static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1432 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1434 if (!eqe->evt)
1435 return NULL;
1437 rmb();
1438 eqe->evt = le32_to_cpu(eqe->evt);
1439 queue_tail_inc(&eq_obj->q);
1440 return eqe;
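/* Pop all pending entries from the event queue, ack them via the EQ doorbell
 * (re-arming on request or on a spurious interrupt) and schedule NAPI if any
 * events were found */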
1443 static int event_handle(struct be_adapter *adapter,
1444 struct be_eq_obj *eq_obj,
1445 bool rearm)
1447 struct be_eq_entry *eqe;
1448 u16 num = 0;
1450 while ((eqe = event_get(eq_obj)) != NULL) {
1451 eqe->evt = 0;
1452 num++;
1455 /* Deal with any spurious interrupts that come
1456  * without events */
1458 if (!num)
1459 rearm = true;
1461 be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
1462 if (num)
1463 napi_schedule(&eq_obj->napi);
1465 return num;
1468 /* Just read and notify events without processing them.
1469 * Used at the time of destroying event queues */
1470 static void be_eq_clean(struct be_adapter *adapter,
1471 struct be_eq_obj *eq_obj)
1473 struct be_eq_entry *eqe;
1474 u16 num = 0;
1476 while ((eqe = event_get(eq_obj)) != NULL) {
1477 eqe->evt = 0;
1478 num++;
1481 if (num)
1482 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1485 static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
1487 struct be_rx_page_info *page_info;
1488 struct be_queue_info *rxq = &rxo->q;
1489 struct be_queue_info *rx_cq = &rxo->cq;
1490 struct be_rx_compl_info *rxcp;
1491 u16 tail;
1493 /* First cleanup pending rx completions */
1494 while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1495 be_rx_compl_discard(adapter, rxo, rxcp);
1496 be_cq_notify(adapter, rx_cq->id, false, 1);
1499 /* Then free posted rx buffers that were not used */
1500 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1501 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1502 page_info = get_rx_page_info(adapter, rxo, tail);
1503 put_page(page_info->page);
1504 memset(page_info, 0, sizeof(*page_info));
1506 BUG_ON(atomic_read(&rxq->used));
1507 rxq->tail = rxq->head = 0;
1510 static void be_tx_compl_clean(struct be_adapter *adapter,
1511 struct be_tx_obj *txo)
1513 struct be_queue_info *tx_cq = &txo->cq;
1514 struct be_queue_info *txq = &txo->q;
1515 struct be_eth_tx_compl *txcp;
1516 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1517 struct sk_buff **sent_skbs = txo->sent_skb_list;
1518 struct sk_buff *sent_skb;
1519 bool dummy_wrb;
1521 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1522 do {
1523 while ((txcp = be_tx_compl_get(tx_cq))) {
1524 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1525 wrb_index, txcp);
1526 num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
1527 cmpl++;
1529 if (cmpl) {
1530 be_cq_notify(adapter, tx_cq->id, false, cmpl);
1531 atomic_sub(num_wrbs, &txq->used);
1532 cmpl = 0;
1533 num_wrbs = 0;
1536 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1537 break;
1539 mdelay(1);
1540 } while (true);
1542 if (atomic_read(&txq->used))
1543 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1544 atomic_read(&txq->used));
1546 /* free posted tx for which compls will never arrive */
1547 while (atomic_read(&txq->used)) {
1548 sent_skb = sent_skbs[txq->tail];
1549 end_idx = txq->tail;
1550 index_adv(&end_idx,
1551 wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1552 txq->len);
1553 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1554 atomic_sub(num_wrbs, &txq->used);
1558 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1560 struct be_queue_info *q;
1562 q = &adapter->mcc_obj.q;
1563 if (q->created)
1564 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1565 be_queue_free(adapter, q);
1567 q = &adapter->mcc_obj.cq;
1568 if (q->created)
1569 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1570 be_queue_free(adapter, q);
1573 /* Must be called only after TX qs are created as MCC shares TX EQ */
1574 static int be_mcc_queues_create(struct be_adapter *adapter)
1576 struct be_queue_info *q, *cq;
1578 /* Alloc MCC compl queue */
1579 cq = &adapter->mcc_obj.cq;
1580 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1581 sizeof(struct be_mcc_compl)))
1582 goto err;
1584 /* Ask BE to create MCC compl queue; share TX's eq */
1585 if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
1586 goto mcc_cq_free;
1588 /* Alloc MCC queue */
1589 q = &adapter->mcc_obj.q;
1590 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1591 goto mcc_cq_destroy;
1593 /* Ask BE to create MCC queue */
1594 if (be_cmd_mccq_create(adapter, q, cq))
1595 goto mcc_q_free;
1597 return 0;
1599 mcc_q_free:
1600 be_queue_free(adapter, q);
1601 mcc_cq_destroy:
1602 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1603 mcc_cq_free:
1604 be_queue_free(adapter, cq);
1605 err:
1606 return -1;
1609 static void be_tx_queues_destroy(struct be_adapter *adapter)
1611 struct be_queue_info *q;
1612 struct be_tx_obj *txo;
1613 u8 i;
1615 for_all_tx_queues(adapter, txo, i) {
1616 q = &txo->q;
1617 if (q->created)
1618 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1619 be_queue_free(adapter, q);
1621 q = &txo->cq;
1622 if (q->created)
1623 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1624 be_queue_free(adapter, q);
1627 /* Clear any residual events */
1628 be_eq_clean(adapter, &adapter->tx_eq);
1630 q = &adapter->tx_eq.q;
1631 if (q->created)
1632 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1633 be_queue_free(adapter, q);
1636 /* One TX event queue is shared by all TX compl qs */
1637 static int be_tx_queues_create(struct be_adapter *adapter)
1639 struct be_queue_info *eq, *q, *cq;
1640 struct be_tx_obj *txo;
1641 u8 i;
1643 adapter->tx_eq.max_eqd = 0;
1644 adapter->tx_eq.min_eqd = 0;
1645 adapter->tx_eq.cur_eqd = 96;
1646 adapter->tx_eq.enable_aic = false;
1648 eq = &adapter->tx_eq.q;
1649 if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1650 sizeof(struct be_eq_entry)))
1651 return -1;
1653 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1654 goto err;
1655 adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
1657 for_all_tx_queues(adapter, txo, i) {
1658 cq = &txo->cq;
1659 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1660 sizeof(struct be_eth_tx_compl)))
1661 goto err;
1663 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1664 goto err;
1666 q = &txo->q;
1667 if (be_queue_alloc(adapter, q, TX_Q_LEN,
1668 sizeof(struct be_eth_wrb)))
1669 goto err;
1671 if (be_cmd_txq_create(adapter, q, cq))
1672 goto err;
1674 return 0;
1676 err:
1677 be_tx_queues_destroy(adapter);
1678 return -1;
1681 static void be_rx_queues_destroy(struct be_adapter *adapter)
1683 struct be_queue_info *q;
1684 struct be_rx_obj *rxo;
1685 int i;
1687 for_all_rx_queues(adapter, rxo, i) {
1688 be_queue_free(adapter, &rxo->q);
1690 q = &rxo->cq;
1691 if (q->created)
1692 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1693 be_queue_free(adapter, q);
1695 q = &rxo->rx_eq.q;
1696 if (q->created)
1697 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1698 be_queue_free(adapter, q);
1702 static u32 be_num_rxqs_want(struct be_adapter *adapter)
1704 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1705 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1706 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1707 } else {
1708 dev_warn(&adapter->pdev->dev,
1709 "No support for multiple RX queues\n");
1710 return 1;
1714 static int be_rx_queues_create(struct be_adapter *adapter)
1716 struct be_queue_info *eq, *q, *cq;
1717 struct be_rx_obj *rxo;
1718 int rc, i;
1720 adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1721 msix_enabled(adapter) ?
1722 adapter->num_msix_vec - 1 : 1);
1723 if (adapter->num_rx_qs != MAX_RX_QS)
1724 dev_warn(&adapter->pdev->dev,
1725 "Can create only %d RX queues", adapter->num_rx_qs);
1727 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1728 for_all_rx_queues(adapter, rxo, i) {
1729 rxo->adapter = adapter;
1730 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1731 rxo->rx_eq.enable_aic = true;
1733 /* EQ */
1734 eq = &rxo->rx_eq.q;
1735 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1736 sizeof(struct be_eq_entry));
1737 if (rc)
1738 goto err;
1740 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1741 if (rc)
1742 goto err;
1744 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
1746 /* CQ */
1747 cq = &rxo->cq;
1748 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1749 sizeof(struct be_eth_rx_compl));
1750 if (rc)
1751 goto err;
1753 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1754 if (rc)
1755 goto err;
1757 /* Rx Q - will be created in be_open() */
1758 q = &rxo->q;
1759 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1760 sizeof(struct be_eth_rx_d));
1761 if (rc)
1762 goto err;
1766 return 0;
1767 err:
1768 be_rx_queues_destroy(adapter);
1769 return -1;
1772 static bool event_peek(struct be_eq_obj *eq_obj)
1774 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1775 if (!eqe->evt)
1776 return false;
1777 else
1778 return true;
1781 static irqreturn_t be_intx(int irq, void *dev)
1783 struct be_adapter *adapter = dev;
1784 struct be_rx_obj *rxo;
1785 int isr, i, tx = 0 , rx = 0;
1787 if (lancer_chip(adapter)) {
1788 if (event_peek(&adapter->tx_eq))
1789 tx = event_handle(adapter, &adapter->tx_eq, false);
1790 for_all_rx_queues(adapter, rxo, i) {
1791 if (event_peek(&rxo->rx_eq))
1792 rx |= event_handle(adapter, &rxo->rx_eq, true);
1795 if (!(tx || rx))
1796 return IRQ_NONE;
1798 } else {
1799 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1800 (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1801 if (!isr)
1802 return IRQ_NONE;
1804 if ((1 << adapter->tx_eq.eq_idx & isr))
1805 event_handle(adapter, &adapter->tx_eq, false);
1807 for_all_rx_queues(adapter, rxo, i) {
1808 if ((1 << rxo->rx_eq.eq_idx & isr))
1809 event_handle(adapter, &rxo->rx_eq, true);
1813 return IRQ_HANDLED;
1816 static irqreturn_t be_msix_rx(int irq, void *dev)
1818 struct be_rx_obj *rxo = dev;
1819 struct be_adapter *adapter = rxo->adapter;
1821 event_handle(adapter, &rxo->rx_eq, true);
1823 return IRQ_HANDLED;
1826 static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1828 struct be_adapter *adapter = dev;
1830 event_handle(adapter, &adapter->tx_eq, false);
1832 return IRQ_HANDLED;
1835 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1837 return (rxcp->tcpf && !rxcp->err) ? true : false;
1840 static int be_poll_rx(struct napi_struct *napi, int budget)
1842 struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1843 struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1844 struct be_adapter *adapter = rxo->adapter;
1845 struct be_queue_info *rx_cq = &rxo->cq;
1846 struct be_rx_compl_info *rxcp;
1847 u32 work_done;
1849 rx_stats(rxo)->rx_polls++;
1850 for (work_done = 0; work_done < budget; work_done++) {
1851 rxcp = be_rx_compl_get(rxo);
1852 if (!rxcp)
1853 break;
1855 /* Is it a flush compl that has no data */
1856 if (unlikely(rxcp->num_rcvd == 0))
1857 goto loop_continue;
1859 /* Discard compl with partial DMA Lancer B0 */
1860 if (unlikely(!rxcp->pkt_size)) {
1861 be_rx_compl_discard(adapter, rxo, rxcp);
1862 goto loop_continue;
1865 /* On BE drop pkts that arrive due to imperfect filtering in
1866  * promiscuous mode on some SKUs */
1868 if (unlikely(rxcp->port != adapter->port_num &&
1869 !lancer_chip(adapter))) {
1870 be_rx_compl_discard(adapter, rxo, rxcp);
1871 goto loop_continue;
1874 if (do_gro(rxcp))
1875 be_rx_compl_process_gro(adapter, rxo, rxcp);
1876 else
1877 be_rx_compl_process(adapter, rxo, rxcp);
1878 loop_continue:
1879 be_rx_stats_update(rxo, rxcp);
1882 /* Refill the queue */
1883 if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1884 be_post_rx_frags(rxo, GFP_ATOMIC);
1886 /* All consumed */
1887 if (work_done < budget) {
1888 napi_complete(napi);
1889 be_cq_notify(adapter, rx_cq->id, true, work_done);
1890 } else {
1891 /* More to be consumed; continue with interrupts disabled */
1892 be_cq_notify(adapter, rx_cq->id, false, work_done);
1894 return work_done;
1897 /* As TX and MCC share the same EQ check for both TX and MCC completions.
1898  * For TX/MCC we don't honour budget; consume everything */
1900 static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1902 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1903 struct be_adapter *adapter =
1904 container_of(tx_eq, struct be_adapter, tx_eq);
1905 struct be_tx_obj *txo;
1906 struct be_eth_tx_compl *txcp;
1907 int tx_compl, mcc_compl, status = 0;
1908 u8 i;
1909 u16 num_wrbs;
1911 for_all_tx_queues(adapter, txo, i) {
1912 tx_compl = 0;
1913 num_wrbs = 0;
1914 while ((txcp = be_tx_compl_get(&txo->cq))) {
1915 num_wrbs += be_tx_compl_process(adapter, txo,
1916 AMAP_GET_BITS(struct amap_eth_tx_compl,
1917 wrb_index, txcp));
1918 tx_compl++;
1920 if (tx_compl) {
1921 be_cq_notify(adapter, txo->cq.id, true, tx_compl);
1923 atomic_sub(num_wrbs, &txo->q.used);
1925 /* As Tx wrbs have been freed up, wake up netdev queue
1926 * if it was stopped due to lack of tx wrbs. */
1927 if (__netif_subqueue_stopped(adapter->netdev, i) &&
1928 atomic_read(&txo->q.used) < txo->q.len / 2) {
1929 netif_wake_subqueue(adapter->netdev, i);
1932 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
1933 tx_stats(txo)->tx_compl += tx_compl;
1934 u64_stats_update_end(&tx_stats(txo)->sync_compl);
1938 mcc_compl = be_process_mcc(adapter, &status);
1940 if (mcc_compl) {
1941 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1942 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1945 napi_complete(napi);
1947 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
1948 adapter->drv_stats.tx_events++;
1949 return 1;
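/* Read the UE (unrecoverable error) status and mask registers from PCI
 * config space; if any unmasked error bit is set, mark the adapter as
 * failed and log the name of each offending hardware block.
 */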
1952 void be_detect_dump_ue(struct be_adapter *adapter)
1954 u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1955 u32 i;
1957 pci_read_config_dword(adapter->pdev,
1958 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1959 pci_read_config_dword(adapter->pdev,
1960 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1961 pci_read_config_dword(adapter->pdev,
1962 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1963 pci_read_config_dword(adapter->pdev,
1964 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1966 ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
1967 ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
1969 if (ue_status_lo || ue_status_hi) {
1970 adapter->ue_detected = true;
1971 adapter->eeh_err = true;
1972 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1975 if (ue_status_lo) {
1976 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1977 if (ue_status_lo & 1)
1978 dev_err(&adapter->pdev->dev,
1979 "UE: %s bit set\n", ue_status_low_desc[i]);
1982 if (ue_status_hi) {
1983 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
1984 if (ue_status_hi & 1)
1985 dev_err(&adapter->pdev->dev,
1986 "UE: %s bit set\n", ue_status_hi_desc[i]);
1992 static void be_worker(struct work_struct *work)
1994 struct be_adapter *adapter =
1995 container_of(work, struct be_adapter, work.work);
1996 struct be_rx_obj *rxo;
1997 int i;
1999 if (!adapter->ue_detected && !lancer_chip(adapter))
2000 be_detect_dump_ue(adapter);
2002 /* when interrupts are not yet enabled, just reap any pending
2003 * mcc completions */
2004 if (!netif_running(adapter->netdev)) {
2005 int mcc_compl, status = 0;
2007 mcc_compl = be_process_mcc(adapter, &status);
2009 if (mcc_compl) {
2010 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
2011 be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
2014 goto reschedule;
2017 if (!adapter->stats_cmd_sent) {
2018 if (lancer_chip(adapter))
2019 lancer_cmd_get_pport_stats(adapter,
2020 &adapter->stats_cmd);
2021 else
2022 be_cmd_get_stats(adapter, &adapter->stats_cmd);
2025 for_all_rx_queues(adapter, rxo, i) {
2026 be_rx_eqd_update(adapter, rxo);
2028 if (rxo->rx_post_starved) {
2029 rxo->rx_post_starved = false;
2030 be_post_rx_frags(rxo, GFP_KERNEL);
2034 reschedule:
2035 adapter->work_counter++;
2036 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2039 static void be_msix_disable(struct be_adapter *adapter)
2041 if (msix_enabled(adapter)) {
2042 pci_disable_msix(adapter->pdev);
2043 adapter->num_msix_vec = 0;
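/* Try to get one MSI-x vector per desired RX queue plus one for TX/MCC.
 * If fewer vectors are available, retry with the count reported by
 * pci_enable_msix(), provided it meets the minimum of two.
 */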
2047 static void be_msix_enable(struct be_adapter *adapter)
2049 #define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
2050 int i, status, num_vec;
2052 num_vec = be_num_rxqs_want(adapter) + 1;
2054 for (i = 0; i < num_vec; i++)
2055 adapter->msix_entries[i].entry = i;
2057 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2058 if (status == 0) {
2059 goto done;
2060 } else if (status >= BE_MIN_MSIX_VECTORS) {
2061 num_vec = status;
2062 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2063 num_vec) == 0)
2064 goto done;
2066 return;
2067 done:
2068 adapter->num_msix_vec = num_vec;
2069 return;
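/* Enable SR-IOV on the PF when the num_vfs module parameter is set,
 * capping the request at the TotalVFs value read from the device's
 * SR-IOV capability.
 */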
2072 static void be_sriov_enable(struct be_adapter *adapter)
2074 be_check_sriov_fn_type(adapter);
2075 #ifdef CONFIG_PCI_IOV
2076 if (be_physfn(adapter) && num_vfs) {
2077 int status, pos;
2078 u16 nvfs;
2080 pos = pci_find_ext_capability(adapter->pdev,
2081 PCI_EXT_CAP_ID_SRIOV);
2082 pci_read_config_word(adapter->pdev,
2083 pos + PCI_SRIOV_TOTAL_VF, &nvfs);
2085 if (num_vfs > nvfs) {
2086 dev_info(&adapter->pdev->dev,
2087 "Device supports %d VFs and not %d\n",
2088 nvfs, num_vfs);
2089 num_vfs = nvfs;
2092 status = pci_enable_sriov(adapter->pdev, num_vfs);
2093 adapter->sriov_enabled = status ? false : true;
2095 #endif
2098 static void be_sriov_disable(struct be_adapter *adapter)
2100 #ifdef CONFIG_PCI_IOV
2101 if (adapter->sriov_enabled) {
2102 pci_disable_sriov(adapter->pdev);
2103 adapter->sriov_enabled = false;
2105 #endif
2108 static inline int be_msix_vec_get(struct be_adapter *adapter,
2109 struct be_eq_obj *eq_obj)
2111 return adapter->msix_entries[eq_obj->eq_idx].vector;
2114 static int be_request_irq(struct be_adapter *adapter,
2115 struct be_eq_obj *eq_obj,
2116 void *handler, char *desc, void *context)
2118 struct net_device *netdev = adapter->netdev;
2119 int vec;
2121 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
2122 vec = be_msix_vec_get(adapter, eq_obj);
2123 return request_irq(vec, handler, 0, eq_obj->desc, context);
2126 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2127 void *context)
2129 int vec = be_msix_vec_get(adapter, eq_obj);
2130 free_irq(vec, context);
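/* Request one MSI-x IRQ for the TX/MCC event queue and one per RX event
 * queue. On failure, free whatever was already requested and fall back
 * by disabling MSI-x altogether.
 */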
2133 static int be_msix_register(struct be_adapter *adapter)
2135 struct be_rx_obj *rxo;
2136 int status, i;
2137 char qname[10];
2139 status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2140 adapter);
2141 if (status)
2142 goto err;
2144 for_all_rx_queues(adapter, rxo, i) {
2145 sprintf(qname, "rxq%d", i);
2146 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2147 qname, rxo);
2148 if (status)
2149 goto err_msix;
2152 return 0;
2154 err_msix:
2155 be_free_irq(adapter, &adapter->tx_eq, adapter);
2157 for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2158 be_free_irq(adapter, &rxo->rx_eq, rxo);
2160 err:
2161 dev_warn(&adapter->pdev->dev,
2162 "MSIX Request IRQ failed - err %d\n", status);
2163 be_msix_disable(adapter);
2164 return status;
2167 static int be_irq_register(struct be_adapter *adapter)
2169 struct net_device *netdev = adapter->netdev;
2170 int status;
2172 if (msix_enabled(adapter)) {
2173 status = be_msix_register(adapter);
2174 if (status == 0)
2175 goto done;
2176 /* INTx is not supported for VF */
2177 if (!be_physfn(adapter))
2178 return status;
2181 /* INTx */
2182 netdev->irq = adapter->pdev->irq;
2183 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2184 adapter);
2185 if (status) {
2186 dev_err(&adapter->pdev->dev,
2187 "INTx request IRQ failed - err %d\n", status);
2188 return status;
2190 done:
2191 adapter->isr_registered = true;
2192 return 0;
2195 static void be_irq_unregister(struct be_adapter *adapter)
2197 struct net_device *netdev = adapter->netdev;
2198 struct be_rx_obj *rxo;
2199 int i;
2201 if (!adapter->isr_registered)
2202 return;
2204 /* INTx */
2205 if (!msix_enabled(adapter)) {
2206 free_irq(netdev->irq, adapter);
2207 goto done;
2210 /* MSIx */
2211 be_free_irq(adapter, &adapter->tx_eq, adapter);
2213 for_all_rx_queues(adapter, rxo, i)
2214 be_free_irq(adapter, &rxo->rx_eq, rxo);
2216 done:
2217 adapter->isr_registered = false;
2220 static void be_rx_queues_clear(struct be_adapter *adapter)
2222 struct be_queue_info *q;
2223 struct be_rx_obj *rxo;
2224 int i;
2226 for_all_rx_queues(adapter, rxo, i) {
2227 q = &rxo->q;
2228 if (q->created) {
2229 be_cmd_rxq_destroy(adapter, q);
2230 /* After the rxq is invalidated, wait for a grace time
2231 * of 1ms for all dma to end and the flush compl to
2232 * arrive
2234 mdelay(1);
2235 be_rx_q_clean(adapter, rxo);
2238 /* Clear any residual events */
2239 q = &rxo->rx_eq.q;
2240 if (q->created)
2241 be_eq_clean(adapter, &rxo->rx_eq);
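/* ndo_stop: stop async MCC processing and interrupts, quiesce NAPI,
 * synchronize and free the IRQs, then drain pending TX completions and
 * clean out the RX queues so no buffers remain posted.
 */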
2245 static int be_close(struct net_device *netdev)
2247 struct be_adapter *adapter = netdev_priv(netdev);
2248 struct be_rx_obj *rxo;
2249 struct be_tx_obj *txo;
2250 struct be_eq_obj *tx_eq = &adapter->tx_eq;
2251 int vec, i;
2253 be_async_mcc_disable(adapter);
2255 if (!lancer_chip(adapter))
2256 be_intr_set(adapter, false);
2258 for_all_rx_queues(adapter, rxo, i)
2259 napi_disable(&rxo->rx_eq.napi);
2261 napi_disable(&tx_eq->napi);
2263 if (lancer_chip(adapter)) {
2264 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2265 for_all_rx_queues(adapter, rxo, i)
2266 be_cq_notify(adapter, rxo->cq.id, false, 0);
2267 for_all_tx_queues(adapter, txo, i)
2268 be_cq_notify(adapter, txo->cq.id, false, 0);
2271 if (msix_enabled(adapter)) {
2272 vec = be_msix_vec_get(adapter, tx_eq);
2273 synchronize_irq(vec);
2275 for_all_rx_queues(adapter, rxo, i) {
2276 vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2277 synchronize_irq(vec);
2279 } else {
2280 synchronize_irq(netdev->irq);
2282 be_irq_unregister(adapter);
2284 /* Wait for all pending tx completions to arrive so that
2285 * all tx skbs are freed.
2287 for_all_tx_queues(adapter, txo, i)
2288 be_tx_compl_clean(adapter, txo);
2290 be_rx_queues_clear(adapter);
2291 return 0;
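/* Create the RX rings in hardware (RSS enabled on all but the default
 * queue), program the RSS indirection table when multiple RX queues are
 * used, then post the initial receive buffers and enable NAPI.
 */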
2294 static int be_rx_queues_setup(struct be_adapter *adapter)
2296 struct be_rx_obj *rxo;
2297 int rc, i;
2298 u8 rsstable[MAX_RSS_QS];
2300 for_all_rx_queues(adapter, rxo, i) {
2301 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2302 rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
2303 adapter->if_handle,
2304 (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
2305 if (rc)
2306 return rc;
2309 if (be_multi_rxq(adapter)) {
2310 for_all_rss_queues(adapter, rxo, i)
2311 rsstable[i] = rxo->rss_id;
2313 rc = be_cmd_rss_config(adapter, rsstable,
2314 adapter->num_rx_qs - 1);
2315 if (rc)
2316 return rc;
2319 /* First time posting */
2320 for_all_rx_queues(adapter, rxo, i) {
2321 be_post_rx_frags(rxo, GFP_KERNEL);
2322 napi_enable(&rxo->rx_eq.napi);
2324 return 0;
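/* ndo_open: set up the RX queues, enable NAPI and interrupts, arm the
 * event and completion queues, and finally allow async MCC processing.
 */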
2327 static int be_open(struct net_device *netdev)
2329 struct be_adapter *adapter = netdev_priv(netdev);
2330 struct be_eq_obj *tx_eq = &adapter->tx_eq;
2331 struct be_rx_obj *rxo;
2332 int status, i;
2334 status = be_rx_queues_setup(adapter);
2335 if (status)
2336 goto err;
2338 napi_enable(&tx_eq->napi);
2340 be_irq_register(adapter);
2342 if (!lancer_chip(adapter))
2343 be_intr_set(adapter, true);
2345 /* The evt queues are created in unarmed state; arm them */
2346 for_all_rx_queues(adapter, rxo, i) {
2347 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2348 be_cq_notify(adapter, rxo->cq.id, true, 0);
2350 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2352 	/* Now that interrupts are on, we can process async mcc */
2353 be_async_mcc_enable(adapter);
2355 return 0;
2356 err:
2357 be_close(adapter->netdev);
2358 return -EIO;
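/* Program the magic-packet Wake-on-LAN filter in firmware. Enabling also
 * writes the PM control register and arms PCI wake for D3hot/D3cold;
 * disabling clears the filter with a zero MAC and disarms PCI wake.
 */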
2361 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2363 struct be_dma_mem cmd;
2364 int status = 0;
2365 u8 mac[ETH_ALEN];
2367 memset(mac, 0, ETH_ALEN);
2369 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2370 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2371 GFP_KERNEL);
2372 if (cmd.va == NULL)
2373 return -1;
2374 memset(cmd.va, 0, cmd.size);
2376 if (enable) {
2377 status = pci_write_config_dword(adapter->pdev,
2378 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2379 if (status) {
2380 dev_err(&adapter->pdev->dev,
2381 "Could not enable Wake-on-lan\n");
2382 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2383 cmd.dma);
2384 return status;
2386 status = be_cmd_enable_magic_wol(adapter,
2387 adapter->netdev->dev_addr, &cmd);
2388 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2389 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2390 } else {
2391 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2392 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2393 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2396 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2397 return status;
2401  * Generate a seed MAC address from the PF MAC address using jhash.
2402  * MAC addresses for VFs are assigned incrementally starting from the seed.
2403 * These addresses are programmed in the ASIC by the PF and the VF driver
2404 * queries for the MAC address during its probe.
2406 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2408 u32 vf = 0;
2409 int status = 0;
2410 u8 mac[ETH_ALEN];
2412 be_vf_eth_addr_generate(adapter, mac);
2414 for (vf = 0; vf < num_vfs; vf++) {
2415 status = be_cmd_pmac_add(adapter, mac,
2416 adapter->vf_cfg[vf].vf_if_handle,
2417 &adapter->vf_cfg[vf].vf_pmac_id,
2418 vf + 1);
2419 if (status)
2420 dev_err(&adapter->pdev->dev,
2421 "Mac address add failed for VF %d\n", vf);
2422 else
2423 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2425 mac[5] += 1;
2427 return status;
2430 static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2432 u32 vf;
2434 for (vf = 0; vf < num_vfs; vf++) {
2435 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2436 be_cmd_pmac_del(adapter,
2437 adapter->vf_cfg[vf].vf_if_handle,
2438 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
2442 static int be_clear(struct be_adapter *adapter)
2444 int vf;
2446 if (be_physfn(adapter) && adapter->sriov_enabled)
2447 be_vf_eth_addr_rem(adapter);
2449 be_mcc_queues_destroy(adapter);
2450 be_rx_queues_destroy(adapter);
2451 be_tx_queues_destroy(adapter);
2452 adapter->eq_next_idx = 0;
2454 if (be_physfn(adapter) && adapter->sriov_enabled)
2455 for (vf = 0; vf < num_vfs; vf++)
2456 if (adapter->vf_cfg[vf].vf_if_handle)
2457 be_cmd_if_destroy(adapter,
2458 adapter->vf_cfg[vf].vf_if_handle,
2459 vf + 1);
2461 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2463 adapter->be3_native = false;
2464 adapter->promiscuous = false;
2466 /* tell fw we're done with firing cmds */
2467 be_cmd_fw_clean(adapter);
2468 return 0;
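/* One-time setup done at probe and resume: create the PF interface (and
 * one per VF when SR-IOV is enabled), create the TX/RX/MCC queues, and
 * apply the vlan, rx-mode and flow-control configuration before open.
 */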
2471 static int be_setup(struct be_adapter *adapter)
2473 struct net_device *netdev = adapter->netdev;
2474 u32 cap_flags, en_flags, vf = 0;
2475 u32 tx_fc, rx_fc;
2476 int status;
2477 u8 mac[ETH_ALEN];
2479 be_cmd_req_native_mode(adapter);
2481 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2482 BE_IF_FLAGS_BROADCAST |
2483 BE_IF_FLAGS_MULTICAST;
2485 if (be_physfn(adapter)) {
2486 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2487 BE_IF_FLAGS_PROMISCUOUS |
2488 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2489 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
2491 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2492 cap_flags |= BE_IF_FLAGS_RSS;
2493 en_flags |= BE_IF_FLAGS_RSS;
2497 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2498 netdev->dev_addr, false/* pmac_invalid */,
2499 &adapter->if_handle, &adapter->pmac_id, 0);
2500 if (status != 0)
2501 goto err;
2503 if (be_physfn(adapter)) {
2504 if (adapter->sriov_enabled) {
2505 while (vf < num_vfs) {
2506 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2507 BE_IF_FLAGS_BROADCAST;
2508 status = be_cmd_if_create(adapter, cap_flags,
2509 en_flags, mac, true,
2510 &adapter->vf_cfg[vf].vf_if_handle,
2511 NULL, vf+1);
2512 if (status) {
2513 dev_err(&adapter->pdev->dev,
2514 "Interface Create failed for VF %d\n",
2515 vf);
2516 goto err;
2518 adapter->vf_cfg[vf].vf_pmac_id =
2519 BE_INVALID_PMAC_ID;
2520 vf++;
2523 } else {
2524 status = be_cmd_mac_addr_query(adapter, mac,
2525 MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2526 if (!status) {
2527 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2528 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2532 status = be_tx_queues_create(adapter);
2533 if (status != 0)
2534 goto err;
2536 status = be_rx_queues_create(adapter);
2537 if (status != 0)
2538 goto err;
2540 /* Allow all priorities by default. A GRP5 evt may modify this */
2541 adapter->vlan_prio_bmap = 0xff;
2543 status = be_mcc_queues_create(adapter);
2544 if (status != 0)
2545 goto err;
2547 adapter->link_speed = -1;
2549 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
2551 status = be_vid_config(adapter, false, 0);
2552 if (status)
2553 goto err;
2555 be_set_rx_mode(adapter->netdev);
2557 status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
2558 if (status)
2559 goto err;
2560 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
2561 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
2562 adapter->rx_fc);
2563 if (status)
2564 goto err;
2567 pcie_set_readrq(adapter->pdev, 4096);
2568 return 0;
2570 err:
2571 be_clear(adapter);
2572 return status;
2575 #define FW_FILE_HDR_SIGN "ServerEngines Corp. "
2576 static bool be_flash_redboot(struct be_adapter *adapter,
2577 const u8 *p, u32 img_start, int image_size,
2578 int hdr_size)
2580 u32 crc_offset;
2581 u8 flashed_crc[4];
2582 int status;
2584 crc_offset = hdr_size + img_start + image_size - 4;
2586 p += crc_offset;
2588 status = be_cmd_get_flash_crc(adapter, flashed_crc,
2589 (image_size - 4));
2590 if (status) {
2591 dev_err(&adapter->pdev->dev,
2592 "could not get crc from flash, not flashing redboot\n");
2593 return false;
2596 	/* update redboot only if crc does not match */
2597 if (!memcmp(flashed_crc, p, 4))
2598 return false;
2599 else
2600 return true;
2603 static bool phy_flashing_required(struct be_adapter *adapter)
2605 int status = 0;
2606 struct be_phy_info phy_info;
2608 status = be_cmd_get_phy_info(adapter, &phy_info);
2609 if (status)
2610 return false;
2611 if ((phy_info.phy_type == TN_8022) &&
2612 (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
2613 return true;
2615 return false;
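/* Walk the flash-component table for this ASIC generation and write each
 * applicable image from the UFI file in 32KB chunks, using a SAVE op for
 * intermediate chunks and a FLASH op for the final one.
 */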
2618 static int be_flash_data(struct be_adapter *adapter,
2619 const struct firmware *fw,
2620 struct be_dma_mem *flash_cmd, int num_of_images)
2623 int status = 0, i, filehdr_size = 0;
2624 u32 total_bytes = 0, flash_op;
2625 int num_bytes;
2626 const u8 *p = fw->data;
2627 struct be_cmd_write_flashrom *req = flash_cmd->va;
2628 const struct flash_comp *pflashcomp;
2629 int num_comp;
2631 static const struct flash_comp gen3_flash_types[10] = {
2632 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2633 FLASH_IMAGE_MAX_SIZE_g3},
2634 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2635 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2636 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2637 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2638 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2639 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2640 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2641 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2642 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2643 FLASH_IMAGE_MAX_SIZE_g3},
2644 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2645 FLASH_IMAGE_MAX_SIZE_g3},
2646 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2647 FLASH_IMAGE_MAX_SIZE_g3},
2648 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2649 FLASH_NCSI_IMAGE_MAX_SIZE_g3},
2650 { FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
2651 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
2653 static const struct flash_comp gen2_flash_types[8] = {
2654 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2655 FLASH_IMAGE_MAX_SIZE_g2},
2656 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2657 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2658 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2659 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2660 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2661 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2662 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2663 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2664 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2665 FLASH_IMAGE_MAX_SIZE_g2},
2666 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2667 FLASH_IMAGE_MAX_SIZE_g2},
2668 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2669 FLASH_IMAGE_MAX_SIZE_g2}
2672 if (adapter->generation == BE_GEN3) {
2673 pflashcomp = gen3_flash_types;
2674 filehdr_size = sizeof(struct flash_file_hdr_g3);
2675 num_comp = ARRAY_SIZE(gen3_flash_types);
2676 } else {
2677 pflashcomp = gen2_flash_types;
2678 filehdr_size = sizeof(struct flash_file_hdr_g2);
2679 num_comp = ARRAY_SIZE(gen2_flash_types);
2681 for (i = 0; i < num_comp; i++) {
2682 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2683 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2684 continue;
2685 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
2686 if (!phy_flashing_required(adapter))
2687 continue;
2689 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2690 (!be_flash_redboot(adapter, fw->data,
2691 pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2692 (num_of_images * sizeof(struct image_hdr)))))
2693 continue;
2694 p = fw->data;
2695 p += filehdr_size + pflashcomp[i].offset
2696 + (num_of_images * sizeof(struct image_hdr));
2697 if (p + pflashcomp[i].size > fw->data + fw->size)
2698 return -1;
2699 total_bytes = pflashcomp[i].size;
2700 while (total_bytes) {
2701 if (total_bytes > 32*1024)
2702 num_bytes = 32*1024;
2703 else
2704 num_bytes = total_bytes;
2705 total_bytes -= num_bytes;
2706 if (!total_bytes) {
2707 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2708 flash_op = FLASHROM_OPER_PHY_FLASH;
2709 else
2710 flash_op = FLASHROM_OPER_FLASH;
2711 } else {
2712 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2713 flash_op = FLASHROM_OPER_PHY_SAVE;
2714 else
2715 flash_op = FLASHROM_OPER_SAVE;
2717 memcpy(req->params.data_buf, p, num_bytes);
2718 p += num_bytes;
2719 status = be_cmd_write_flashrom(adapter, flash_cmd,
2720 pflashcomp[i].optype, flash_op, num_bytes);
2721 if (status) {
2722 if ((status == ILLEGAL_IOCTL_REQ) &&
2723 (pflashcomp[i].optype ==
2724 IMG_TYPE_PHY_FW))
2725 break;
2726 dev_err(&adapter->pdev->dev,
2727 "cmd to write to flash rom failed.\n");
2728 return -1;
2732 return 0;
2735 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2737 if (fhdr == NULL)
2738 return 0;
2739 if (fhdr->build[0] == '3')
2740 return BE_GEN3;
2741 else if (fhdr->build[0] == '2')
2742 return BE_GEN2;
2743 else
2744 return 0;
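/* Lancer firmware download: stream the 4-byte-aligned image to the chip
 * in 32KB chunks via write-object commands, then issue a zero-length
 * write to commit the flashed image.
 */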
2747 static int lancer_fw_download(struct be_adapter *adapter,
2748 const struct firmware *fw)
2750 #define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
2751 #define LANCER_FW_DOWNLOAD_LOCATION "/prg"
2752 struct be_dma_mem flash_cmd;
2753 const u8 *data_ptr = NULL;
2754 u8 *dest_image_ptr = NULL;
2755 size_t image_size = 0;
2756 u32 chunk_size = 0;
2757 u32 data_written = 0;
2758 u32 offset = 0;
2759 int status = 0;
2760 u8 add_status = 0;
2762 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
2763 dev_err(&adapter->pdev->dev,
2764 "FW Image not properly aligned. "
2765 "Length must be 4 byte aligned.\n");
2766 status = -EINVAL;
2767 goto lancer_fw_exit;
2770 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2771 + LANCER_FW_DOWNLOAD_CHUNK;
2772 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2773 &flash_cmd.dma, GFP_KERNEL);
2774 if (!flash_cmd.va) {
2775 status = -ENOMEM;
2776 dev_err(&adapter->pdev->dev,
2777 "Memory allocation failure while flashing\n");
2778 goto lancer_fw_exit;
2781 dest_image_ptr = flash_cmd.va +
2782 sizeof(struct lancer_cmd_req_write_object);
2783 image_size = fw->size;
2784 data_ptr = fw->data;
2786 while (image_size) {
2787 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
2789 /* Copy the image chunk content. */
2790 memcpy(dest_image_ptr, data_ptr, chunk_size);
2792 status = lancer_cmd_write_object(adapter, &flash_cmd,
2793 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
2794 &data_written, &add_status);
2796 if (status)
2797 break;
2799 offset += data_written;
2800 data_ptr += data_written;
2801 image_size -= data_written;
2804 if (!status) {
2805 /* Commit the FW written */
2806 status = lancer_cmd_write_object(adapter, &flash_cmd,
2807 0, offset, LANCER_FW_DOWNLOAD_LOCATION,
2808 &data_written, &add_status);
2811 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2812 flash_cmd.dma);
2813 if (status) {
2814 dev_err(&adapter->pdev->dev,
2815 "Firmware load error. "
2816 "Status code: 0x%x Additional Status: 0x%x\n",
2817 status, add_status);
2818 goto lancer_fw_exit;
2821 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2822 lancer_fw_exit:
2823 return status;
2826 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
2828 struct flash_file_hdr_g2 *fhdr;
2829 struct flash_file_hdr_g3 *fhdr3;
2830 struct image_hdr *img_hdr_ptr = NULL;
2831 struct be_dma_mem flash_cmd;
2832 const u8 *p;
2833 int status = 0, i = 0, num_imgs = 0;
2835 p = fw->data;
2836 fhdr = (struct flash_file_hdr_g2 *) p;
2838 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2839 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2840 &flash_cmd.dma, GFP_KERNEL);
2841 if (!flash_cmd.va) {
2842 status = -ENOMEM;
2843 dev_err(&adapter->pdev->dev,
2844 "Memory allocation failure while flashing\n");
2845 goto be_fw_exit;
2848 if ((adapter->generation == BE_GEN3) &&
2849 (get_ufigen_type(fhdr) == BE_GEN3)) {
2850 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2851 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2852 for (i = 0; i < num_imgs; i++) {
2853 img_hdr_ptr = (struct image_hdr *) (fw->data +
2854 (sizeof(struct flash_file_hdr_g3) +
2855 i * sizeof(struct image_hdr)));
2856 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2857 status = be_flash_data(adapter, fw, &flash_cmd,
2858 num_imgs);
2860 } else if ((adapter->generation == BE_GEN2) &&
2861 (get_ufigen_type(fhdr) == BE_GEN2)) {
2862 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2863 } else {
2864 dev_err(&adapter->pdev->dev,
2865 "UFI and Interface are not compatible for flashing\n");
2866 status = -1;
2869 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2870 flash_cmd.dma);
2871 if (status) {
2872 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2873 goto be_fw_exit;
2876 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2878 be_fw_exit:
2879 return status;
2882 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
2884 const struct firmware *fw;
2885 int status;
2887 if (!netif_running(adapter->netdev)) {
2888 dev_err(&adapter->pdev->dev,
2889 "Firmware load not allowed (interface is down)\n");
2890 return -1;
2893 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2894 if (status)
2895 goto fw_exit;
2897 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2899 if (lancer_chip(adapter))
2900 status = lancer_fw_download(adapter, fw);
2901 else
2902 status = be_fw_download(adapter, fw);
2904 fw_exit:
2905 release_firmware(fw);
2906 return status;
2909 static struct net_device_ops be_netdev_ops = {
2910 .ndo_open = be_open,
2911 .ndo_stop = be_close,
2912 .ndo_start_xmit = be_xmit,
2913 .ndo_set_rx_mode = be_set_rx_mode,
2914 .ndo_set_mac_address = be_mac_addr_set,
2915 .ndo_change_mtu = be_change_mtu,
2916 .ndo_get_stats64 = be_get_stats64,
2917 .ndo_validate_addr = eth_validate_addr,
2918 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
2919 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
2920 .ndo_set_vf_mac = be_set_vf_mac,
2921 .ndo_set_vf_vlan = be_set_vf_vlan,
2922 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
2923 .ndo_get_vf_config = be_get_vf_config
2926 static void be_netdev_init(struct net_device *netdev)
2928 struct be_adapter *adapter = netdev_priv(netdev);
2929 struct be_rx_obj *rxo;
2930 int i;
2932 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
2933 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
2934 NETIF_F_HW_VLAN_TX;
2935 if (be_multi_rxq(adapter))
2936 netdev->hw_features |= NETIF_F_RXHASH;
2938 netdev->features |= netdev->hw_features |
2939 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
2941 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
2942 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2944 netdev->flags |= IFF_MULTICAST;
2946 netif_set_gso_max_size(netdev, 65535);
2948 BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2950 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2952 for_all_rx_queues(adapter, rxo, i)
2953 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2954 BE_NAPI_WEIGHT);
2956 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
2957 BE_NAPI_WEIGHT);
2960 static void be_unmap_pci_bars(struct be_adapter *adapter)
2962 if (adapter->csr)
2963 iounmap(adapter->csr);
2964 if (adapter->db)
2965 iounmap(adapter->db);
2968 static int be_map_pci_bars(struct be_adapter *adapter)
2970 u8 __iomem *addr;
2971 int db_reg;
2973 if (lancer_chip(adapter)) {
2974 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
2975 pci_resource_len(adapter->pdev, 0));
2976 if (addr == NULL)
2977 return -ENOMEM;
2978 adapter->db = addr;
2979 return 0;
2982 if (be_physfn(adapter)) {
2983 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2984 pci_resource_len(adapter->pdev, 2));
2985 if (addr == NULL)
2986 return -ENOMEM;
2987 adapter->csr = addr;
2990 if (adapter->generation == BE_GEN2) {
2991 db_reg = 4;
2992 } else {
2993 if (be_physfn(adapter))
2994 db_reg = 4;
2995 else
2996 db_reg = 0;
2998 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
2999 pci_resource_len(adapter->pdev, db_reg));
3000 if (addr == NULL)
3001 goto pci_map_err;
3002 adapter->db = addr;
3004 return 0;
3005 pci_map_err:
3006 be_unmap_pci_bars(adapter);
3007 return -ENOMEM;
3011 static void be_ctrl_cleanup(struct be_adapter *adapter)
3013 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3015 be_unmap_pci_bars(adapter);
3017 if (mem->va)
3018 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3019 mem->dma);
3021 mem = &adapter->rx_filter;
3022 if (mem->va)
3023 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3024 mem->dma);
3027 static int be_ctrl_init(struct be_adapter *adapter)
3029 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3030 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3031 struct be_dma_mem *rx_filter = &adapter->rx_filter;
3032 int status;
3034 status = be_map_pci_bars(adapter);
3035 if (status)
3036 goto done;
3038 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3039 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3040 mbox_mem_alloc->size,
3041 &mbox_mem_alloc->dma,
3042 GFP_KERNEL);
3043 if (!mbox_mem_alloc->va) {
3044 status = -ENOMEM;
3045 goto unmap_pci_bars;
3047 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3048 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3049 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3050 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3052 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3053 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3054 &rx_filter->dma, GFP_KERNEL);
3055 if (rx_filter->va == NULL) {
3056 status = -ENOMEM;
3057 goto free_mbox;
3059 memset(rx_filter->va, 0, rx_filter->size);
3061 mutex_init(&adapter->mbox_lock);
3062 spin_lock_init(&adapter->mcc_lock);
3063 spin_lock_init(&adapter->mcc_cq_lock);
3065 init_completion(&adapter->flash_compl);
3066 pci_save_state(adapter->pdev);
3067 return 0;
3069 free_mbox:
3070 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3071 mbox_mem_alloc->va, mbox_mem_alloc->dma);
3073 unmap_pci_bars:
3074 be_unmap_pci_bars(adapter);
3076 done:
3077 return status;
3080 static void be_stats_cleanup(struct be_adapter *adapter)
3082 struct be_dma_mem *cmd = &adapter->stats_cmd;
3084 if (cmd->va)
3085 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3086 cmd->va, cmd->dma);
3089 static int be_stats_init(struct be_adapter *adapter)
3091 struct be_dma_mem *cmd = &adapter->stats_cmd;
3093 if (adapter->generation == BE_GEN2) {
3094 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3095 } else {
3096 if (lancer_chip(adapter))
3097 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3098 else
3099 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3101 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3102 GFP_KERNEL);
3103 if (cmd->va == NULL)
3104 return -1;
3105 memset(cmd->va, 0, cmd->size);
3106 return 0;
3109 static void __devexit be_remove(struct pci_dev *pdev)
3111 struct be_adapter *adapter = pci_get_drvdata(pdev);
3113 if (!adapter)
3114 return;
3116 cancel_delayed_work_sync(&adapter->work);
3118 unregister_netdev(adapter->netdev);
3120 be_clear(adapter);
3122 be_stats_cleanup(adapter);
3124 be_ctrl_cleanup(adapter);
3126 kfree(adapter->vf_cfg);
3127 be_sriov_disable(adapter);
3129 be_msix_disable(adapter);
3131 pci_set_drvdata(pdev, NULL);
3132 pci_release_regions(pdev);
3133 pci_disable_device(pdev);
3135 free_netdev(adapter->netdev);
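/* Query firmware configuration: port number, function mode and caps, the
 * permanent MAC address (on the PF, or any function on Lancer), the
 * supported vlan count and controller attributes; also pick the number
 * of TX queues to use.
 */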
3138 static int be_get_config(struct be_adapter *adapter)
3140 int status;
3141 u8 mac[ETH_ALEN];
3143 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3144 &adapter->function_mode, &adapter->function_caps);
3145 if (status)
3146 return status;
3148 memset(mac, 0, ETH_ALEN);
3150 	/* A default permanent address is given to each VF for Lancer */
3151 if (be_physfn(adapter) || lancer_chip(adapter)) {
3152 status = be_cmd_mac_addr_query(adapter, mac,
3153 MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
3155 if (status)
3156 return status;
3158 if (!is_valid_ether_addr(mac))
3159 return -EADDRNOTAVAIL;
3161 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3162 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3165 if (adapter->function_mode & 0x400)
3166 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3167 else
3168 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3170 status = be_cmd_get_cntl_attributes(adapter);
3171 if (status)
3172 return status;
3174 if ((num_vfs && adapter->sriov_enabled) ||
3175 (adapter->function_mode & 0x400) ||
3176 lancer_chip(adapter) || !be_physfn(adapter)) {
3177 adapter->num_tx_qs = 1;
3178 netif_set_real_num_tx_queues(adapter->netdev,
3179 adapter->num_tx_qs);
3180 } else {
3181 adapter->num_tx_qs = MAX_TX_QS;
3184 return 0;
3187 static int be_dev_family_check(struct be_adapter *adapter)
3189 struct pci_dev *pdev = adapter->pdev;
3190 u32 sli_intf = 0, if_type;
3192 switch (pdev->device) {
3193 case BE_DEVICE_ID1:
3194 case OC_DEVICE_ID1:
3195 adapter->generation = BE_GEN2;
3196 break;
3197 case BE_DEVICE_ID2:
3198 case OC_DEVICE_ID2:
3199 adapter->generation = BE_GEN3;
3200 break;
3201 case OC_DEVICE_ID3:
3202 case OC_DEVICE_ID4:
3203 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3204 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3205 SLI_INTF_IF_TYPE_SHIFT;
3207 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3208 if_type != 0x02) {
3209 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3210 return -EINVAL;
3212 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3213 SLI_INTF_FAMILY_SHIFT);
3214 adapter->generation = BE_GEN3;
3215 break;
3216 default:
3217 adapter->generation = 0;
3219 return 0;
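/* Poll the SLIPORT status register until the Lancer function reports
 * ready, giving up after 500 iterations of 20ms (about 10 seconds).
 */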
3222 static int lancer_wait_ready(struct be_adapter *adapter)
3224 #define SLIPORT_READY_TIMEOUT 500
3225 u32 sliport_status;
3226 int status = 0, i;
3228 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3229 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3230 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3231 break;
3233 msleep(20);
3236 if (i == SLIPORT_READY_TIMEOUT)
3237 status = -1;
3239 return status;
3242 static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3244 int status;
3245 u32 sliport_status, err, reset_needed;
3246 status = lancer_wait_ready(adapter);
3247 if (!status) {
3248 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3249 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3250 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3251 if (err && reset_needed) {
3252 iowrite32(SLI_PORT_CONTROL_IP_MASK,
3253 adapter->db + SLIPORT_CONTROL_OFFSET);
3255 /* check adapter has corrected the error */
3256 status = lancer_wait_ready(adapter);
3257 sliport_status = ioread32(adapter->db +
3258 SLIPORT_STATUS_OFFSET);
3259 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3260 SLIPORT_STATUS_RN_MASK);
3261 if (status || sliport_status)
3262 status = -1;
3263 } else if (err || reset_needed) {
3264 status = -1;
3267 return status;
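/* PCI probe: enable the device, allocate the net_device, enable SR-IOV if
 * requested, map BARs and the mailbox, bring the function to a known
 * state (POST, fw init, function reset), set up stats memory and MSI-x,
 * run be_setup() and register the netdev. With SR-IOV enabled, per-VF MAC
 * addresses are assigned and link speeds queried as well.
 */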
3270 static int __devinit be_probe(struct pci_dev *pdev,
3271 const struct pci_device_id *pdev_id)
3273 int status = 0;
3274 struct be_adapter *adapter;
3275 struct net_device *netdev;
3277 status = pci_enable_device(pdev);
3278 if (status)
3279 goto do_none;
3281 status = pci_request_regions(pdev, DRV_NAME);
3282 if (status)
3283 goto disable_dev;
3284 pci_set_master(pdev);
3286 netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
3287 if (netdev == NULL) {
3288 status = -ENOMEM;
3289 goto rel_reg;
3291 adapter = netdev_priv(netdev);
3292 adapter->pdev = pdev;
3293 pci_set_drvdata(pdev, adapter);
3295 status = be_dev_family_check(adapter);
3296 if (status)
3297 goto free_netdev;
3299 adapter->netdev = netdev;
3300 SET_NETDEV_DEV(netdev, &pdev->dev);
3302 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
3303 if (!status) {
3304 netdev->features |= NETIF_F_HIGHDMA;
3305 } else {
3306 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3307 if (status) {
3308 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3309 goto free_netdev;
3313 be_sriov_enable(adapter);
3314 if (adapter->sriov_enabled) {
3315 adapter->vf_cfg = kcalloc(num_vfs,
3316 sizeof(struct be_vf_cfg), GFP_KERNEL);
3318 if (!adapter->vf_cfg)
3319 goto free_netdev;
3322 status = be_ctrl_init(adapter);
3323 if (status)
3324 goto free_vf_cfg;
3326 if (lancer_chip(adapter)) {
3327 status = lancer_test_and_set_rdy_state(adapter);
3328 if (status) {
3329 			dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
3330 goto ctrl_clean;
3334 /* sync up with fw's ready state */
3335 if (be_physfn(adapter)) {
3336 status = be_cmd_POST(adapter);
3337 if (status)
3338 goto ctrl_clean;
3341 /* tell fw we're ready to fire cmds */
3342 status = be_cmd_fw_init(adapter);
3343 if (status)
3344 goto ctrl_clean;
3346 status = be_cmd_reset_function(adapter);
3347 if (status)
3348 goto ctrl_clean;
3350 status = be_stats_init(adapter);
3351 if (status)
3352 goto ctrl_clean;
3354 status = be_get_config(adapter);
3355 if (status)
3356 goto stats_clean;
3358 /* The INTR bit may be set in the card when probed by a kdump kernel
3359 * after a crash.
3361 if (!lancer_chip(adapter))
3362 be_intr_set(adapter, false);
3364 be_msix_enable(adapter);
3366 INIT_DELAYED_WORK(&adapter->work, be_worker);
3367 adapter->rx_fc = adapter->tx_fc = true;
3369 status = be_setup(adapter);
3370 if (status)
3371 goto msix_disable;
3373 be_netdev_init(netdev);
3374 status = register_netdev(netdev);
3375 if (status != 0)
3376 goto unsetup;
3378 if (be_physfn(adapter) && adapter->sriov_enabled) {
3379 u8 mac_speed;
3380 u16 vf, lnk_speed;
3382 if (!lancer_chip(adapter)) {
3383 status = be_vf_eth_addr_config(adapter);
3384 if (status)
3385 goto unreg_netdev;
3388 for (vf = 0; vf < num_vfs; vf++) {
3389 status = be_cmd_link_status_query(adapter, &mac_speed,
3390 &lnk_speed, vf + 1);
3391 if (!status)
3392 adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
3393 else
3394 goto unreg_netdev;
3398 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
3400 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3401 return 0;
3403 unreg_netdev:
3404 unregister_netdev(netdev);
3405 unsetup:
3406 be_clear(adapter);
3407 msix_disable:
3408 be_msix_disable(adapter);
3409 stats_clean:
3410 be_stats_cleanup(adapter);
3411 ctrl_clean:
3412 be_ctrl_cleanup(adapter);
3413 free_vf_cfg:
3414 kfree(adapter->vf_cfg);
3415 free_netdev:
3416 be_sriov_disable(adapter);
3417 free_netdev(netdev);
3418 pci_set_drvdata(pdev, NULL);
3419 rel_reg:
3420 pci_release_regions(pdev);
3421 disable_dev:
3422 pci_disable_device(pdev);
3423 do_none:
3424 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3425 return status;
3428 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3430 struct be_adapter *adapter = pci_get_drvdata(pdev);
3431 struct net_device *netdev = adapter->netdev;
3433 cancel_delayed_work_sync(&adapter->work);
3434 if (adapter->wol)
3435 be_setup_wol(adapter, true);
3437 netif_device_detach(netdev);
3438 if (netif_running(netdev)) {
3439 rtnl_lock();
3440 be_close(netdev);
3441 rtnl_unlock();
3443 be_clear(adapter);
3445 be_msix_disable(adapter);
3446 pci_save_state(pdev);
3447 pci_disable_device(pdev);
3448 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3449 return 0;
3452 static int be_resume(struct pci_dev *pdev)
3454 int status = 0;
3455 struct be_adapter *adapter = pci_get_drvdata(pdev);
3456 struct net_device *netdev = adapter->netdev;
3458 netif_device_detach(netdev);
3460 status = pci_enable_device(pdev);
3461 if (status)
3462 return status;
3464 	pci_set_power_state(pdev, PCI_D0);
3465 pci_restore_state(pdev);
3467 be_msix_enable(adapter);
3468 /* tell fw we're ready to fire cmds */
3469 status = be_cmd_fw_init(adapter);
3470 if (status)
3471 return status;
3473 be_setup(adapter);
3474 if (netif_running(netdev)) {
3475 rtnl_lock();
3476 be_open(netdev);
3477 rtnl_unlock();
3479 netif_device_attach(netdev);
3481 if (adapter->wol)
3482 be_setup_wol(adapter, false);
3484 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3485 return 0;
3489 * An FLR will stop BE from DMAing any data.
3491 static void be_shutdown(struct pci_dev *pdev)
3493 struct be_adapter *adapter = pci_get_drvdata(pdev);
3495 if (!adapter)
3496 return;
3498 cancel_delayed_work_sync(&adapter->work);
3500 netif_device_detach(adapter->netdev);
3502 if (adapter->wol)
3503 be_setup_wol(adapter, true);
3505 be_cmd_reset_function(adapter);
3507 pci_disable_device(pdev);
3510 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3511 pci_channel_state_t state)
3513 struct be_adapter *adapter = pci_get_drvdata(pdev);
3514 struct net_device *netdev = adapter->netdev;
3516 dev_err(&adapter->pdev->dev, "EEH error detected\n");
3518 adapter->eeh_err = true;
3520 netif_device_detach(netdev);
3522 if (netif_running(netdev)) {
3523 rtnl_lock();
3524 be_close(netdev);
3525 rtnl_unlock();
3527 be_clear(adapter);
3529 if (state == pci_channel_io_perm_failure)
3530 return PCI_ERS_RESULT_DISCONNECT;
3532 pci_disable_device(pdev);
3534 return PCI_ERS_RESULT_NEED_RESET;
3537 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3539 struct be_adapter *adapter = pci_get_drvdata(pdev);
3540 int status;
3542 dev_info(&adapter->pdev->dev, "EEH reset\n");
3543 adapter->eeh_err = false;
3545 status = pci_enable_device(pdev);
3546 if (status)
3547 return PCI_ERS_RESULT_DISCONNECT;
3549 pci_set_master(pdev);
3550 	pci_set_power_state(pdev, PCI_D0);
3551 pci_restore_state(pdev);
3553 /* Check if card is ok and fw is ready */
3554 status = be_cmd_POST(adapter);
3555 if (status)
3556 return PCI_ERS_RESULT_DISCONNECT;
3558 return PCI_ERS_RESULT_RECOVERED;
3561 static void be_eeh_resume(struct pci_dev *pdev)
3563 int status = 0;
3564 struct be_adapter *adapter = pci_get_drvdata(pdev);
3565 struct net_device *netdev = adapter->netdev;
3567 dev_info(&adapter->pdev->dev, "EEH resume\n");
3569 pci_save_state(pdev);
3571 /* tell fw we're ready to fire cmds */
3572 status = be_cmd_fw_init(adapter);
3573 if (status)
3574 goto err;
3576 status = be_setup(adapter);
3577 if (status)
3578 goto err;
3580 if (netif_running(netdev)) {
3581 status = be_open(netdev);
3582 if (status)
3583 goto err;
3585 netif_device_attach(netdev);
3586 return;
3587 err:
3588 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
3591 static struct pci_error_handlers be_eeh_handlers = {
3592 .error_detected = be_eeh_err_detected,
3593 .slot_reset = be_eeh_reset,
3594 .resume = be_eeh_resume,
3597 static struct pci_driver be_driver = {
3598 .name = DRV_NAME,
3599 .id_table = be_dev_ids,
3600 .probe = be_probe,
3601 .remove = be_remove,
3602 .suspend = be_suspend,
3603 .resume = be_resume,
3604 .shutdown = be_shutdown,
3605 .err_handler = &be_eeh_handlers
3608 static int __init be_init_module(void)
3610 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3611 rx_frag_size != 2048) {
3612 printk(KERN_WARNING DRV_NAME
3613 " : Module param rx_frag_size must be 2048/4096/8192."
3614 " Using 2048\n");
3615 rx_frag_size = 2048;
3618 return pci_register_driver(&be_driver);
3620 module_init(be_init_module);
3622 static void __exit be_exit_module(void)
3624 pci_unregister_driver(&be_driver);
3626 module_exit(be_exit_module);