be2net: Call netif_carrier_off() after register_netdev()
[linux-2.6/libata-dev.git] drivers/net/benet/be_main.c
/*
 * Copyright (C) 2005 - 2010 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");
static unsigned int rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, uint, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static bool multi_rxq = true;
module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static char *ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC ",
        "RDMA ",
        "RXF ",
        "RXIPS ",
        "RXULP0 ",
        "RXULP1 ",
        "RXULP2 ",
        "TIM ",
        "TPOST ",
        "TPRE ",
        "TXIPS ",
        "TXULP0 ",
        "TXULP1 ",
        "UC ",
        "WDMA ",
        "TXULP2 ",
        "HOST1 ",
        "P0_OB_LINK ",
        "P1_OB_LINK ",
        "HOST_GPIO ",
        "MBOX ",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};
/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};
static inline bool be_multi_rxq(struct be_adapter *adapter)
{
        return (adapter->num_rx_qs > 1);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va)
                pci_free_consistent(adapter->pdev, mem->size,
                        mem->va, mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma);
        if (!mem->va)
                return -1;
        memset(mem->va, 0, mem->size);
        return 0;
}
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
        u32 reg = ioread32(addr);
        u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (adapter->eeh_err)
                return;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        iowrite32(reg, addr);
}
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
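/*
 * Doorbell layout note: each notify routine above packs the ring id into
 * the low bits of one 32-bit doorbell word and ORs in the rearm/clear/
 * num_popped fields before a single iowrite32(). For example, re-arming
 * CQ 5 after popping 2 completions writes
 * (5 | 1 << DB_CQ_REARM_SHIFT | 2 << DB_CQ_NUM_POPPED_SHIFT)
 * to adapter->db + DB_CQ_OFFSET.
 */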
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* MAC addr configuration will be done in hardware for VFs
         * by their corresponding PFs. Just copy to netdev addr here
         */
        if (!be_physfn(adapter))
                goto netdev_addr;

        status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
        if (status)
                return status;

        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                        adapter->if_handle, &adapter->pmac_id);
netdev_addr:
        if (!status)
                memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

        return status;
}
void netdev_stats_update(struct be_adapter *adapter)
{
        struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
        struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats *port_stats =
                        &rxf_stats->port[adapter->port_num];
        struct net_device_stats *dev_stats = &adapter->netdev->stats;
        struct be_erx_stats *erx_stats = &hw_stats->erx;
        struct be_rx_obj *rxo;
        int i;

        memset(dev_stats, 0, sizeof(*dev_stats));
        for_all_rx_queues(adapter, rxo, i) {
                dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
                dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
                dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                /* no space in linux buffers: best possible approximation */
                dev_stats->rx_dropped +=
                        erx_stats->rx_drops_no_fragments[rxo->q.id];
        }

        dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
        dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;

        /* bad pkts received */
        dev_stats->rx_errors = port_stats->rx_crc_errors +
                port_stats->rx_alignment_symbol_errors +
                port_stats->rx_in_range_errors +
                port_stats->rx_out_range_errors +
                port_stats->rx_frame_too_long +
                port_stats->rx_dropped_too_small +
                port_stats->rx_dropped_too_short +
                port_stats->rx_dropped_header_too_small +
                port_stats->rx_dropped_tcp_length +
                port_stats->rx_dropped_runt +
                port_stats->rx_tcp_checksum_errs +
                port_stats->rx_ip_checksum_errs +
                port_stats->rx_udp_checksum_errs;

        /* detailed rx errors */
        dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
                port_stats->rx_out_range_errors +
                port_stats->rx_frame_too_long;

        dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

        /* frame alignment errors */
        dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f, it's per BE card */
        dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
                port_stats->rx_input_fifo_overflow +
                rxf_stats->rx_drops_no_pbuf;
}
void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
        struct net_device *netdev = adapter->netdev;

        /* If link came up or went down */
        if (adapter->link_up != link_up) {
                adapter->link_speed = -1;
                if (link_up) {
                        netif_start_queue(netdev);
                        netif_carrier_on(netdev);
                        printk(KERN_INFO "%s: Link up\n", netdev->name);
                } else {
                        netif_stop_queue(netdev);
                        netif_carrier_off(netdev);
                        printk(KERN_INFO "%s: Link down\n", netdev->name);
                }
                adapter->link_up = link_up;
        }
}
/* Update the EQ delay in BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
        struct be_eq_obj *rx_eq = &rxo->rx_eq;
        struct be_rx_stats *stats = &rxo->stats;
        ulong now = jiffies;
        u32 eqd;

        if (!rx_eq->enable_aic)
                return;

        /* Wrapped around */
        if (time_before(now, stats->rx_fps_jiffies)) {
                stats->rx_fps_jiffies = now;
                return;
        }

        /* Update once a second */
        if ((now - stats->rx_fps_jiffies) < HZ)
                return;

        stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
                        ((now - stats->rx_fps_jiffies) / HZ);

        stats->rx_fps_jiffies = now;
        stats->prev_rx_frags = stats->rx_frags;
        eqd = stats->rx_fps / 110000;
        eqd = eqd << 3;
        if (eqd > rx_eq->max_eqd)
                eqd = rx_eq->max_eqd;
        if (eqd < rx_eq->min_eqd)
                eqd = rx_eq->min_eqd;
        if (eqd < 10)
                eqd = 0;
        if (eqd != rx_eq->cur_eqd)
                be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

        rx_eq->cur_eqd = eqd;
}
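/*
 * Worked example of the adaptive delay above: at 440,000 rx frags/sec,
 * eqd = (440000 / 110000) << 3 = 32, which is then clamped to the
 * [min_eqd, max_eqd] window. Anything below 220,000 frags/sec computes
 * to less than 10 and is forced to 0, i.e. coalescing is turned off at
 * low rates to keep latency down.
 */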
static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
        u64 rate = bytes;

        do_div(rate, ticks / HZ);
        rate <<= 3;                     /* bytes/sec -> bits/sec */
        do_div(rate, 1000000ul);        /* bits/sec -> Mbits/sec */

        return rate;
}
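/*
 * Example: 250,000,000 bytes moved over ticks = 2 * HZ gives
 * 125,000,000 bytes/sec; << 3 yields 1,000,000,000 bits/sec; divided
 * by 1,000,000 that is 1000 Mbits/sec.
 */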
static void be_tx_rate_update(struct be_adapter *adapter)
{
        struct be_tx_stats *stats = tx_stats(adapter);
        ulong now = jiffies;

        /* Wrapped around? */
        if (time_before(now, stats->be_tx_jiffies)) {
                stats->be_tx_jiffies = now;
                return;
        }

        /* Update tx rate once in two seconds */
        if ((now - stats->be_tx_jiffies) > 2 * HZ) {
                stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
                                                  - stats->be_tx_bytes_prev,
                                                 now - stats->be_tx_jiffies);
                stats->be_tx_jiffies = now;
                stats->be_tx_bytes_prev = stats->be_tx_bytes;
        }
}

static void be_tx_stats_update(struct be_adapter *adapter,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(adapter);
        stats->be_tx_reqs++;
        stats->be_tx_wrbs += wrb_cnt;
        stats->be_tx_bytes += copied;
        stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->be_tx_stops++;
}
/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (cnt & 1) {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        } else
                *dummy = false;
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}
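/*
 * Example: an skb with linear data plus two page frags needs
 * 1 + 2 + 1 (hdr wrb) = 4 WRBs -- already even, so no dummy. With one
 * frag it is 1 + 1 + 1 = 3, and a dummy WRB pads the count to 4, since
 * the count posted to the ring must be even.
 */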
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
        u8 vlan_prio = 0;
        u16 vlan_tag = 0;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = vlan_tx_tag_get(skb);
                vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
                /* If vlan priority provided by OS is NOT in available bmap */
                if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                        vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                        adapter->recommended_prio;
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        pci_unmap_single(pdev, dma, wrb->frag_len,
                                PCI_DMA_TODEVICE);
                else
                        pci_unmap_page(pdev, dma, wrb->frag_len,
                                PCI_DMA_TODEVICE);
        }
}

static int make_tx_wrbs(struct be_adapter *adapter,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct pci_dev *pdev = adapter->pdev;
        struct sk_buff *first_skb = skb;
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = pci_map_single(pdev, skb->data, len,
                                         PCI_DMA_TODEVICE);
                if (pci_dma_mapping_error(pdev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = pci_map_page(pdev, frag->page,
                                       frag->page_offset,
                                       frag->size, PCI_DMA_TODEVICE);
                if (pci_dma_mapping_error(pdev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, frag->size);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += frag->size;
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(pdev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}
static netdev_tx_t be_xmit(struct sk_buff *skb,
                        struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *tx_obj = &adapter->tx_obj;
        struct be_queue_info *txq = &tx_obj->q;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head;
        bool dummy_wrb, stopped = false;

        wrb_cnt = wrb_cnt_for_skb(skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
        if (copied) {
                /* record the sent skb in the sent_skb table */
                BUG_ON(tx_obj->sent_skb_list[start]);
                tx_obj->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; Else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_queue(netdev);
                        stopped = true;
                }

                be_txq_notify(adapter, txq->id, wrb_cnt);

                be_tx_stats_update(adapter, wrb_cnt, copied,
                                skb_shinfo(skb)->gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}
static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
        u16 vtag[BE_NUM_VLANS_SUPPORTED];
        u16 ntags = 0, i;
        int status = 0;
        u32 if_handle;

        if (vf) {
                if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
                vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
                status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
        }

        if (adapter->vlans_added <= adapter->max_vlans) {
                /* Construct VLAN Table to give to HW */
                for (i = 0; i < VLAN_N_VID; i++) {
                        if (adapter->vlan_tag[i]) {
                                vtag[ntags] = cpu_to_le16(i);
                                ntags++;
                        }
                }
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        vtag, ntags, 1, 0);
        } else {
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        NULL, 0, 1, 1);
        }

        return status;
}
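/*
 * Example: with max_vlans = 64 as noted above, adding a 65th vlan makes
 * vlans_added exceed max_vlans, so the else-branch issues
 * be_cmd_vlan_config() with a NULL table and the promiscuous flag set;
 * dropping back below the limit rebuilds the exact-match table.
 */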
static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlan_grp = grp;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added++;
        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added--;
        vlan_group_set_device(adapter->vlan_grp, vid, NULL);

        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                be_vid_config(adapter, false, 0);
}
static void be_set_multicast_list(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > BE_MAX_MC) {
                be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
                                &adapter->mc_cmd_mem);
                goto done;
        }

        be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
                &adapter->mc_cmd_mem);
done:
        return;
}
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
                return -EINVAL;

        if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
                status = be_cmd_pmac_del(adapter,
                                        adapter->vf_cfg[vf].vf_if_handle,
                                        adapter->vf_cfg[vf].vf_pmac_id);

        status = be_cmd_pmac_add(adapter, mac,
                                adapter->vf_cfg[vf].vf_if_handle,
                                &adapter->vf_cfg[vf].vf_pmac_id);

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
                                mac, vf);
        else
                memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (vf >= num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
        vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (vlan > 4095))
                return -EINVAL;

        if (vlan) {
                adapter->vf_cfg[vf].vf_vlan_tag = vlan;
                adapter->vlans_added++;
        } else {
                adapter->vf_cfg[vf].vf_vlan_tag = 0;
                adapter->vlans_added--;
        }

        status = be_vid_config(adapter, true, vf);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (rate < 0))
                return -EINVAL;

        if (rate > 10000)
                rate = 10000;

        adapter->vf_cfg[vf].vf_tx_rate = rate;
        status = be_cmd_set_qos(adapter, rate / 10, vf);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        return status;
}
static void be_rx_rate_update(struct be_rx_obj *rxo)
{
        struct be_rx_stats *stats = &rxo->stats;
        ulong now = jiffies;

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update the rate once in two seconds */
        if ((now - stats->rx_jiffies) < 2 * HZ)
                return;

        stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
                                now - stats->rx_jiffies);
        stats->rx_jiffies = now;
        stats->rx_bytes_prev = stats->rx_bytes;
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
                u32 pktsize, u16 numfrags, u8 pkt_type)
{
        struct be_rx_stats *stats = &rxo->stats;

        stats->rx_compl++;
        stats->rx_frags += numfrags;
        stats->rx_bytes += pktsize;
        stats->rx_pkts++;
        if (pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
}
static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
{
        u8 l4_cksm, ip_version, ipcksm, tcpf = 0, udpf = 0, ipv6_chk;

        l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
        ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
        ip_version = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
        if (ip_version) {
                tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
                udpf = AMAP_GET_BITS(struct amap_eth_rx_compl, udpf, rxcp);
        }
        ipv6_chk = (ip_version && (tcpf || udpf));

        return ((l4_cksm && ipv6_chk && ipcksm) && cso) ? false : true;
}
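/*
 * do_pkt_csum() returns true when the stack must still verify the
 * checksum in software: unless rx checksum offload is enabled (cso)
 * AND the completion reports a valid L4 checksum, a valid IP checksum,
 * and a TCP/UDP frame, the caller may not mark the skb
 * CHECKSUM_UNNECESSARY.
 */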
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                u16 frag_idx)
{
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                pci_unmap_page(adapter->pdev, dma_unmap_addr(rx_page_info, bus),
                        adapter->big_page_size, PCI_DMA_FROMDEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}
/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_eth_rx_compl *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 rxq_idx, i, num_rcvd;

        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxq_idx);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
                index_inc(&rxq_idx, rxq->len);
        }
}
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
                        struct sk_buff *skb, struct be_eth_rx_compl *rxcp,
                        u16 num_rcvd)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 rxq_idx, i, j;
        u32 pktsize, hdr_len, curr_frag_len, size;
        u8 *start;
        u8 pkt_type;

        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
        pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
        pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);

        page_info = get_rx_page_info(adapter, rxo, rxq_idx);

        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(pktsize, rx_frag_size);

        /* Copy the header portion into skb_data */
        hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
        memcpy(skb->data, start, hdr_len);
        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                skb_shinfo(skb)->nr_frags = 1;
                skb_shinfo(skb)->frags[0].page = page_info->page;
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
                skb->data_len = curr_frag_len - hdr_len;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        if (pktsize <= rx_frag_size) {
                BUG_ON(num_rcvd != 1);
                goto done;
        }

        /* More frags present for this completion */
        size = pktsize;
        for (i = 1, j = 0; i < num_rcvd; i++) {
                size -= curr_frag_len;
                index_inc(&rxq_idx, rxq->len);
                page_info = get_rx_page_info(adapter, rxo, rxq_idx);

                curr_frag_len = min(size, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_shinfo(skb)->frags[j].size += curr_frag_len;
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;

                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);

done:
        be_rx_stats_update(rxo, pktsize, num_rcvd, pkt_type);
}
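/*
 * Frag coalescing example: with rx_frag_size = 2048, a 6000-byte frame
 * spans three 2048-byte RX fragments. Fragments that share one physical
 * page land in a single skb frag slot (the extra page references taken
 * at post time are dropped here via put_page()), so the skb carries one
 * frag per page rather than one per RX descriptor.
 */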
/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
                        struct be_rx_obj *rxo,
                        struct be_eth_rx_compl *rxcp)
{
        struct sk_buff *skb;
        u32 vlanf, vid;
        u16 num_rcvd;
        u8 vtm;

        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
        /* Is it a flush compl that has no data */
        if (unlikely(num_rcvd == 0))
                return;

        skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
        if (unlikely(!skb)) {
                if (net_ratelimit())
                        dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        skb_fill_rx_data(adapter, rxo, skb, rxcp, num_rcvd);

        if (do_pkt_csum(rxcp, adapter->rx_csum))
                skb_checksum_none_assert(skb);
        else
                skb->ip_summed = CHECKSUM_UNNECESSARY;

        skb->truesize = skb->len + sizeof(struct sk_buff);
        skb->protocol = eth_type_trans(skb, adapter->netdev);

        vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
        vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);

        /* vlanf could be wrongly set in some cards.
         * ignore if vtm is not set */
        if ((adapter->function_mode & 0x400) && !vtm)
                vlanf = 0;

        if (unlikely(vlanf)) {
                if (!adapter->vlan_grp || adapter->vlans_added == 0) {
                        kfree_skb(skb);
                        return;
                }
                vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
                vid = swab16(vid);
                vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
        } else {
                netif_receive_skb(skb);
        }
}
/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_eth_rx_compl *rxcp)
{
        struct be_rx_page_info *page_info;
        struct sk_buff *skb = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct be_eq_obj *eq_obj = &rxo->rx_eq;
        u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
        u16 i, rxq_idx = 0, vid, j;
        u8 vtm;
        u8 pkt_type;

        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
        /* Is it a flush compl that has no data */
        if (unlikely(num_rcvd == 0))
                return;

        pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
        vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
        vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
        pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);

        /* vlanf could be wrongly set in some cards.
         * ignore if vtm is not set */
        if ((adapter->function_mode & 0x400) && !vtm)
                vlanf = 0;

        skb = napi_get_frags(&eq_obj->napi);
        if (!skb) {
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        remaining = pkt_size;
        for (i = 0, j = -1; i < num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxq_idx);

                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (i == 0 || page_info->page_offset == 0) {
                        /* First frag or Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                } else {
                        put_page(page_info->page);
                }
                skb_shinfo(skb)->frags[j].size += curr_frag_len;

                remaining -= curr_frag_len;
                index_inc(&rxq_idx, rxq->len);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(j > MAX_SKB_FRAGS);

        skb_shinfo(skb)->nr_frags = j + 1;
        skb->len = pkt_size;
        skb->data_len = pkt_size;
        skb->truesize += pkt_size;
        skb->ip_summed = CHECKSUM_UNNECESSARY;

        if (likely(!vlanf)) {
                napi_gro_frags(&eq_obj->napi);
        } else {
                vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
                vid = swab16(vid);

                if (!adapter->vlan_grp || adapter->vlans_added == 0)
                        return;

                vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
        }

        be_rx_stats_update(rxo, pkt_size, num_rcvd, pkt_type);
}
static struct be_eth_rx_compl *be_rx_compl_get(struct be_rx_obj *rxo)
{
        struct be_eth_rx_compl *rxcp = queue_tail_node(&rxo->cq);

        if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
                return NULL;

        rmb();
        be_dws_le_to_cpu(rxcp, sizeof(*rxcp));

        queue_tail_inc(&rxo->cq);
        return rxcp;
}

/* To reset the valid bit, we need to reset the whole word as
 * when walking the queue the valid entries are little-endian
 * and invalid entries are host endian
 */
static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
{
        rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
}

static inline struct page *be_alloc_pages(u32 size)
{
        gfp_t alloc_flags = GFP_ATOMIC;
        u32 order = get_order(size);
        if (order > 0)
                alloc_flags |= __GFP_COMP;
        return alloc_pages(alloc_flags, order);
}
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
        struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct page *pagep = NULL;
        struct be_eth_rx_d *rxd;
        u64 page_dmaaddr = 0, frag_dmaaddr;
        u32 posted, page_offset = 0;

        page_info = &rxo->page_info_tbl[rxq->head];
        for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
                if (!pagep) {
                        pagep = be_alloc_pages(adapter->big_page_size);
                        if (unlikely(!pagep)) {
                                rxo->stats.rx_post_fail++;
                                break;
                        }
                        page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
                                                adapter->big_page_size,
                                                PCI_DMA_FROMDEVICE);
                        page_info->page_offset = 0;
                } else {
                        get_page(pagep);
                        page_info->page_offset = page_offset + rx_frag_size;
                }
                page_offset = page_info->page_offset;
                page_info->page = pagep;
                dma_unmap_addr_set(page_info, bus, page_dmaaddr);
                frag_dmaaddr = page_dmaaddr + page_info->page_offset;

                rxd = queue_head_node(rxq);
                rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
                rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

                /* Any space left in the current big page for another frag? */
                if ((page_offset + rx_frag_size + rx_frag_size) >
                                        adapter->big_page_size) {
                        pagep = NULL;
                        page_info->last_page_user = true;
                }

                prev_page_info = page_info;
                queue_head_inc(rxq);
                page_info = &page_info_tbl[rxq->head];
        }
        if (pagep)
                prev_page_info->last_page_user = true;

        if (posted) {
                atomic_add(posted, &rxq->used);
                be_rxq_notify(adapter, rxq->id, posted);
        } else if (atomic_read(&rxq->used) == 0) {
                /* Let be_worker replenish when memory is available */
                rxo->rx_post_starved = true;
        }
}
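/*
 * Page-splitting example (assuming rx_frag_size = 2048 and
 * PAGE_SIZE = 4096): big_page_size works out to 4096, so each page is
 * carved into two descriptors. The first fragment of a page carries the
 * DMA mapping; subsequent fragments take a get_page() reference, and
 * last_page_user marks the fragment whose completion triggers the
 * pci_unmap_page() in get_rx_page_info().
 */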
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
        struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

        if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
                return NULL;

        rmb();
        be_dws_le_to_cpu(txcp, sizeof(*txcp));

        txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

        queue_tail_inc(tx_cq);
        return txcp;
}

static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_wrb *wrb;
        struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
        struct sk_buff *sent_skb;
        u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
        bool unmap_skb_hdr = true;

        sent_skb = sent_skbs[txq->tail];
        BUG_ON(!sent_skb);
        sent_skbs[txq->tail] = NULL;

        /* skip header wrb */
        queue_tail_inc(txq);

        do {
                cur_index = txq->tail;
                wrb = queue_tail_node(txq);
                unmap_tx_frag(adapter->pdev, wrb, (unmap_skb_hdr &&
                                        skb_headlen(sent_skb)));
                unmap_skb_hdr = false;

                num_wrbs++;
                queue_tail_inc(txq);
        } while (cur_index != last_index);

        atomic_sub(num_wrbs, &txq->used);

        kfree_skb(sent_skb);
}
static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

        if (!eqe->evt)
                return NULL;

        rmb();
        eqe->evt = le32_to_cpu(eqe->evt);
        queue_tail_inc(&eq_obj->q);
        return eqe;
}

static int event_handle(struct be_adapter *adapter,
                        struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe;
        u16 num = 0;

        while ((eqe = event_get(eq_obj)) != NULL) {
                eqe->evt = 0;
                num++;
        }

        /* Deal with any spurious interrupts that come
         * without events
         */
        be_eq_notify(adapter, eq_obj->q.id, true, true, num);
        if (num)
                napi_schedule(&eq_obj->napi);

        return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
                        struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe;
        u16 num = 0;

        while ((eqe = event_get(eq_obj)) != NULL) {
                eqe->evt = 0;
                num++;
        }

        if (num)
                be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}
static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
        struct be_rx_page_info *page_info;
        struct be_queue_info *rxq = &rxo->q;
        struct be_queue_info *rx_cq = &rxo->cq;
        struct be_eth_rx_compl *rxcp;
        u16 tail;

        /* First cleanup pending rx completions */
        while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
                be_rx_compl_discard(adapter, rxo, rxcp);
                be_rx_compl_reset(rxcp);
                be_cq_notify(adapter, rx_cq->id, true, 1);
        }

        /* Then free posted rx buffers that were not used */
        tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
        for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
                page_info = get_rx_page_info(adapter, rxo, tail);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(atomic_read(&rxq->used));
}
static void be_tx_compl_clean(struct be_adapter *adapter)
{
        struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_tx_compl *txcp;
        u16 end_idx, cmpl = 0, timeo = 0;
        struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
        struct sk_buff *sent_skb;
        bool dummy_wrb;

        /* Wait for a max of 200ms for all the tx-completions to arrive. */
        do {
                while ((txcp = be_tx_compl_get(tx_cq))) {
                        end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
                                        wrb_index, txcp);
                        be_tx_compl_process(adapter, end_idx);
                        cmpl++;
                }
                if (cmpl) {
                        be_cq_notify(adapter, tx_cq->id, false, cmpl);
                        cmpl = 0;
                }

                if (atomic_read(&txq->used) == 0 || ++timeo > 200)
                        break;

                mdelay(1);
        } while (true);

        if (atomic_read(&txq->used))
                dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
                        atomic_read(&txq->used));

        /* free posted tx for which compls will never arrive */
        while (atomic_read(&txq->used)) {
                sent_skb = sent_skbs[txq->tail];
                end_idx = txq->tail;
                index_adv(&end_idx,
                        wrb_cnt_for_skb(sent_skb, &dummy_wrb) - 1, txq->len);
                be_tx_compl_process(adapter, end_idx);
        }
}
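/*
 * The forced cleanup above walks the TX ring skb by skb: for each
 * pending entry it recomputes the WRB count from the skb itself (via
 * wrb_cnt_for_skb()) to locate the last WRB index, then reuses the
 * normal completion path to unmap and free it, exactly as if a
 * completion had arrived.
 */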
static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;

        q = &adapter->mcc_obj.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
        be_queue_free(adapter, q);

        q = &adapter->mcc_obj.cq;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_CQ);
        be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
        struct be_queue_info *q, *cq;

        /* Alloc MCC compl queue */
        cq = &adapter->mcc_obj.cq;
        if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
                        sizeof(struct be_mcc_compl)))
                goto err;

        /* Ask BE to create MCC compl queue; share TX's eq */
        if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
                goto mcc_cq_free;

        /* Alloc MCC queue */
        q = &adapter->mcc_obj.q;
        if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
                goto mcc_cq_destroy;

        /* Ask BE to create MCC queue */
        if (be_cmd_mccq_create(adapter, q, cq))
                goto mcc_q_free;

        return 0;

mcc_q_free:
        be_queue_free(adapter, q);
mcc_cq_destroy:
        be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
        be_queue_free(adapter, cq);
err:
        return -1;
}
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;

        q = &adapter->tx_obj.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
        be_queue_free(adapter, q);

        q = &adapter->tx_obj.cq;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_CQ);
        be_queue_free(adapter, q);

        /* Clear any residual events */
        be_eq_clean(adapter, &adapter->tx_eq);

        q = &adapter->tx_eq.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_EQ);
        be_queue_free(adapter, q);
}

static int be_tx_queues_create(struct be_adapter *adapter)
{
        struct be_queue_info *eq, *q, *cq;

        adapter->tx_eq.max_eqd = 0;
        adapter->tx_eq.min_eqd = 0;
        adapter->tx_eq.cur_eqd = 96;
        adapter->tx_eq.enable_aic = false;
        /* Alloc Tx Event queue */
        eq = &adapter->tx_eq.q;
        if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
                return -1;

        /* Ask BE to create Tx Event queue */
        if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
                goto tx_eq_free;
        adapter->base_eq_id = adapter->tx_eq.q.id;

        /* Alloc TX eth compl queue */
        cq = &adapter->tx_obj.cq;
        if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
                        sizeof(struct be_eth_tx_compl)))
                goto tx_eq_destroy;

        /* Ask BE to create Tx eth compl queue */
        if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
                goto tx_cq_free;

        /* Alloc TX eth queue */
        q = &adapter->tx_obj.q;
        if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
                goto tx_cq_destroy;

        /* Ask BE to create Tx eth queue */
        if (be_cmd_txq_create(adapter, q, cq))
                goto tx_q_free;
        return 0;

tx_q_free:
        be_queue_free(adapter, q);
tx_cq_destroy:
        be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
tx_cq_free:
        be_queue_free(adapter, cq);
tx_eq_destroy:
        be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
tx_eq_free:
        be_queue_free(adapter, eq);
        return -1;
}
static void be_rx_queues_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;
        struct be_rx_obj *rxo;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                q = &rxo->q;
                if (q->created) {
                        be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
                        /* After the rxq is invalidated, wait for a grace time
                         * of 1ms for all dma to end and the flush compl to
                         * arrive
                         */
                        mdelay(1);
                        be_rx_q_clean(adapter, rxo);
                }
                be_queue_free(adapter, q);

                q = &rxo->cq;
                if (q->created)
                        be_cmd_q_destroy(adapter, q, QTYPE_CQ);
                be_queue_free(adapter, q);

                /* Clear any residual events */
                q = &rxo->rx_eq.q;
                if (q->created) {
                        be_eq_clean(adapter, &rxo->rx_eq);
                        be_cmd_q_destroy(adapter, q, QTYPE_EQ);
                }
                be_queue_free(adapter, q);
        }
}

static int be_rx_queues_create(struct be_adapter *adapter)
{
        struct be_queue_info *eq, *q, *cq;
        struct be_rx_obj *rxo;
        int rc, i;

        adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
        for_all_rx_queues(adapter, rxo, i) {
                rxo->adapter = adapter;
                rxo->rx_eq.max_eqd = BE_MAX_EQD;
                rxo->rx_eq.enable_aic = true;

                /* EQ */
                eq = &rxo->rx_eq.q;
                rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
                                        sizeof(struct be_eq_entry));
                if (rc)
                        goto err;

                rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
                if (rc)
                        goto err;

                /* CQ */
                cq = &rxo->cq;
                rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
                                sizeof(struct be_eth_rx_compl));
                if (rc)
                        goto err;

                rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
                if (rc)
                        goto err;

                /* Rx Q */
                q = &rxo->q;
                rc = be_queue_alloc(adapter, q, RX_Q_LEN,
                                sizeof(struct be_eth_rx_d));
                if (rc)
                        goto err;

                rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
                        BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
                        (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
                if (rc)
                        goto err;
        }

        if (be_multi_rxq(adapter)) {
                u8 rsstable[MAX_RSS_QS];

                for_all_rss_queues(adapter, rxo, i)
                        rsstable[i] = rxo->rss_id;

                rc = be_cmd_rss_config(adapter, rsstable,
                        adapter->num_rx_qs - 1);
                if (rc)
                        goto err;
        }

        return 0;
err:
        be_rx_queues_destroy(adapter);
        return -1;
}
/* There are 8 evt ids per func. Returns the evt id's bit number */
static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id)
{
        return eq_id - adapter->base_eq_id;
}
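/*
 * Illustrative example: base_eq_id is set to the TX EQ's id in
 * be_tx_queues_create(), so if the TX EQ got id 7, an RX EQ created
 * afterwards with id 9 maps to ISR bit 2 (9 - 7). This is how be_intx()
 * below decides which event queue fired.
 */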
static irqreturn_t be_intx(int irq, void *dev)
{
        struct be_adapter *adapter = dev;
        struct be_rx_obj *rxo;
        int isr, i;

        isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
                (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
        if (!isr)
                return IRQ_NONE;

        if ((1 << be_evt_bit_get(adapter, adapter->tx_eq.q.id) & isr))
                event_handle(adapter, &adapter->tx_eq);

        for_all_rx_queues(adapter, rxo, i) {
                if ((1 << be_evt_bit_get(adapter, rxo->rx_eq.q.id) & isr))
                        event_handle(adapter, &rxo->rx_eq);
        }

        return IRQ_HANDLED;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
        struct be_rx_obj *rxo = dev;
        struct be_adapter *adapter = rxo->adapter;

        event_handle(adapter, &rxo->rx_eq);

        return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
        struct be_adapter *adapter = dev;

        event_handle(adapter, &adapter->tx_eq);

        return IRQ_HANDLED;
}

static inline bool do_gro(struct be_adapter *adapter, struct be_rx_obj *rxo,
                        struct be_eth_rx_compl *rxcp)
{
        int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
        int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);

        if (err)
                rxo->stats.rxcp_err++;

        return (tcp_frame && !err) ? true : false;
}
static int be_poll_rx(struct napi_struct *napi, int budget)
{
        struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
        struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
        struct be_adapter *adapter = rxo->adapter;
        struct be_queue_info *rx_cq = &rxo->cq;
        struct be_eth_rx_compl *rxcp;
        u32 work_done;

        rxo->stats.rx_polls++;
        for (work_done = 0; work_done < budget; work_done++) {
                rxcp = be_rx_compl_get(rxo);
                if (!rxcp)
                        break;

                if (do_gro(adapter, rxo, rxcp))
                        be_rx_compl_process_gro(adapter, rxo, rxcp);
                else
                        be_rx_compl_process(adapter, rxo, rxcp);

                be_rx_compl_reset(rxcp);
        }

        /* Refill the queue */
        if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
                be_post_rx_frags(rxo);

        /* All consumed */
        if (work_done < budget) {
                napi_complete(napi);
                be_cq_notify(adapter, rx_cq->id, true, work_done);
        } else {
                /* More to be consumed; continue with interrupts disabled */
                be_cq_notify(adapter, rx_cq->id, false, work_done);
        }
        return work_done;
}

/* As TX and MCC share the same EQ check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
        struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
        struct be_adapter *adapter =
                container_of(tx_eq, struct be_adapter, tx_eq);
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
        struct be_eth_tx_compl *txcp;
        int tx_compl = 0, mcc_compl, status = 0;
        u16 end_idx;

        while ((txcp = be_tx_compl_get(tx_cq))) {
                end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
                                wrb_index, txcp);
                be_tx_compl_process(adapter, end_idx);
                tx_compl++;
        }

        mcc_compl = be_process_mcc(adapter, &status);

        napi_complete(napi);

        if (mcc_compl) {
                struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
                be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
        }

        if (tx_compl) {
                be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);

                /* As Tx wrbs have been freed up, wake up netdev queue if
                 * it was stopped due to lack of tx wrbs.
                 */
                if (netif_queue_stopped(adapter->netdev) &&
                        atomic_read(&txq->used) < txq->len / 2) {
                        netif_wake_queue(adapter->netdev);
                }

                tx_stats(adapter)->be_tx_events++;
                tx_stats(adapter)->be_tx_compl += tx_compl;
        }

        return 1;
}
void be_detect_dump_ue(struct be_adapter *adapter)
{
        u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
        u32 i;

        pci_read_config_dword(adapter->pdev,
                                PCICFG_UE_STATUS_LOW, &ue_status_lo);
        pci_read_config_dword(adapter->pdev,
                                PCICFG_UE_STATUS_HIGH, &ue_status_hi);
        pci_read_config_dword(adapter->pdev,
                                PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
        pci_read_config_dword(adapter->pdev,
                                PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);

        ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
        ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));

        if (ue_status_lo || ue_status_hi) {
                adapter->ue_detected = true;
                dev_err(&adapter->pdev->dev, "UE Detected!!\n");
        }

        if (ue_status_lo) {
                for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
                        if (ue_status_lo & 1)
                                dev_err(&adapter->pdev->dev,
                                "UE: %s bit set\n", ue_status_low_desc[i]);
                }
        }
        if (ue_status_hi) {
                for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
                        if (ue_status_hi & 1)
                                dev_err(&adapter->pdev->dev,
                                "UE: %s bit set\n", ue_status_hi_desc[i]);
                }
        }
}
static void be_worker(struct work_struct *work)
{
        struct be_adapter *adapter =
                container_of(work, struct be_adapter, work.work);
        struct be_rx_obj *rxo;
        int i;

        if (!adapter->stats_ioctl_sent)
                be_cmd_get_stats(adapter, &adapter->stats_cmd);

        be_tx_rate_update(adapter);

        for_all_rx_queues(adapter, rxo, i) {
                be_rx_rate_update(rxo);
                be_rx_eqd_update(adapter, rxo);

                if (rxo->rx_post_starved) {
                        rxo->rx_post_starved = false;
                        be_post_rx_frags(rxo);
                }
        }

        if (!adapter->ue_detected)
                be_detect_dump_ue(adapter);

        schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
static void be_msix_disable(struct be_adapter *adapter)
{
        if (adapter->msix_enabled) {
                pci_disable_msix(adapter->pdev);
                adapter->msix_enabled = false;
        }
}

static int be_num_rxqs_get(struct be_adapter *adapter)
{
        if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
                !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
                return 1 + MAX_RSS_QS; /* one default non-RSS queue */
        } else {
                dev_warn(&adapter->pdev->dev,
                        "No support for multiple RX queues\n");
                return 1;
        }
}
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS	(1 + 1) /* Rx + Tx */
        int i, status;

        adapter->num_rx_qs = be_num_rxqs_get(adapter);

        for (i = 0; i < (adapter->num_rx_qs + 1); i++)
                adapter->msix_entries[i].entry = i;

        status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
                        adapter->num_rx_qs + 1);
        if (status == 0) {
                goto done;
        } else if (status >= BE_MIN_MSIX_VECTORS) {
                if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
                                status) == 0) {
                        adapter->num_rx_qs = status - 1;
                        dev_warn(&adapter->pdev->dev,
                                "Could alloc only %d MSIx vectors. "
                                "Using %d RX Qs\n", status, adapter->num_rx_qs);
                        goto done;
                }
        }
        return;
done:
        adapter->msix_enabled = true;
}
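/*
 * MSI-X negotiation example (assuming MAX_RSS_QS = 4): with 4 RSS
 * queues plus the default queue, 6 vectors are requested (5 RX +
 * 1 TX/MCC). A pci_enable_msix() return of 4 means only 4 vectors are
 * available, so the retry asks for exactly 4 and num_rx_qs drops to 3.
 */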
static void be_sriov_enable(struct be_adapter *adapter)
{
        be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
        if (be_physfn(adapter) && num_vfs) {
                int status;

                status = pci_enable_sriov(adapter->pdev, num_vfs);
                adapter->sriov_enabled = status ? false : true;
        }
#endif
}

static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
        if (adapter->sriov_enabled) {
                pci_disable_sriov(adapter->pdev);
                adapter->sriov_enabled = false;
        }
#endif
}

static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
{
        return adapter->msix_entries[
                        be_evt_bit_get(adapter, eq_id)].vector;
}

static int be_request_irq(struct be_adapter *adapter,
                struct be_eq_obj *eq_obj,
                void *handler, char *desc, void *context)
{
        struct net_device *netdev = adapter->netdev;
        int vec;

        sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
        vec = be_msix_vec_get(adapter, eq_obj->q.id);
        return request_irq(vec, handler, 0, eq_obj->desc, context);
}

static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
                        void *context)
{
        int vec = be_msix_vec_get(adapter, eq_obj->q.id);
        free_irq(vec, context);
}
static int be_msix_register(struct be_adapter *adapter)
{
        struct be_rx_obj *rxo;
        int status, i;
        char qname[10];

        status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
                                adapter);
        if (status)
                goto err;

        for_all_rx_queues(adapter, rxo, i) {
                sprintf(qname, "rxq%d", i);
                status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
                                qname, rxo);
                if (status)
                        goto err_msix;
        }

        return 0;

err_msix:
        be_free_irq(adapter, &adapter->tx_eq, adapter);

        for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
                be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
        dev_warn(&adapter->pdev->dev,
                "MSIX Request IRQ failed - err %d\n", status);
        pci_disable_msix(adapter->pdev);
        adapter->msix_enabled = false;
        return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int status;

        if (adapter->msix_enabled) {
                status = be_msix_register(adapter);
                if (status == 0)
                        goto done;
                /* INTx is not supported for VF */
                if (!be_physfn(adapter))
                        return status;
        }

        /* INTx */
        netdev->irq = adapter->pdev->irq;
        status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
                        adapter);
        if (status) {
                dev_err(&adapter->pdev->dev,
                        "INTx request IRQ failed - err %d\n", status);
                return status;
        }
done:
        adapter->isr_registered = true;
        return 0;
}
static void be_irq_unregister(struct be_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct be_rx_obj *rxo;
        int i;

        if (!adapter->isr_registered)
                return;

        /* INTx */
        if (!adapter->msix_enabled) {
                free_irq(netdev->irq, adapter);
                goto done;
        }

        /* MSIx */
        be_free_irq(adapter, &adapter->tx_eq, adapter);

        for_all_rx_queues(adapter, rxo, i)
                be_free_irq(adapter, &rxo->rx_eq, rxo);

done:
        adapter->isr_registered = false;
}
static int be_close(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_rx_obj *rxo;
        struct be_eq_obj *tx_eq = &adapter->tx_eq;
        int vec, i;

        cancel_delayed_work_sync(&adapter->work);

        be_async_mcc_disable(adapter);

        netif_stop_queue(netdev);
        netif_carrier_off(netdev);
        adapter->link_up = false;

        be_intr_set(adapter, false);

        if (adapter->msix_enabled) {
                vec = be_msix_vec_get(adapter, tx_eq->q.id);
                synchronize_irq(vec);

                for_all_rx_queues(adapter, rxo, i) {
                        vec = be_msix_vec_get(adapter, rxo->rx_eq.q.id);
                        synchronize_irq(vec);
                }
        } else {
                synchronize_irq(netdev->irq);
        }
        be_irq_unregister(adapter);

        for_all_rx_queues(adapter, rxo, i)
                napi_disable(&rxo->rx_eq.napi);

        napi_disable(&tx_eq->napi);

        /* Wait for all pending tx completions to arrive so that
         * all tx skbs are freed.
         */
        be_tx_compl_clean(adapter);

        return 0;
}
static int be_open(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_eq_obj *tx_eq = &adapter->tx_eq;
        struct be_rx_obj *rxo;
        bool link_up;
        int status, i;
        u8 mac_speed;
        u16 link_speed;

        for_all_rx_queues(adapter, rxo, i) {
                be_post_rx_frags(rxo);
                napi_enable(&rxo->rx_eq.napi);
        }
        napi_enable(&tx_eq->napi);

        be_irq_register(adapter);

        be_intr_set(adapter, true);

        /* The evt queues are created in unarmed state; arm them */
        for_all_rx_queues(adapter, rxo, i) {
                be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
                be_cq_notify(adapter, rxo->cq.id, true, 0);
        }
        be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

        /* Now that interrupts are on we can process async mcc */
        be_async_mcc_enable(adapter);

        schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));

        status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
                        &link_speed);
        if (status)
                goto err;
        be_link_status_update(adapter, link_up);

        if (be_physfn(adapter)) {
                status = be_vid_config(adapter, false, 0);
                if (status)
                        goto err;

                status = be_cmd_set_flow_control(adapter,
                                adapter->tx_fc, adapter->rx_fc);
                if (status)
                        goto err;
        }

        return 0;
err:
        be_close(adapter->netdev);
        return -EIO;
}
2113 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2115 struct be_dma_mem cmd;
2116 int status = 0;
2117 u8 mac[ETH_ALEN];
2119 memset(mac, 0, ETH_ALEN);
2121 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2122 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
2123 if (cmd.va == NULL)
2124 return -ENOMEM;
2125 memset(cmd.va, 0, cmd.size);
2127 if (enable) {
2128 status = pci_write_config_dword(adapter->pdev,
2129 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2130 if (status) {
2131 dev_err(&adapter->pdev->dev,
2132 "Could not enable Wake-on-LAN\n");
2133 pci_free_consistent(adapter->pdev, cmd.size, cmd.va,
2134 cmd.dma);
2135 return status;
2137 status = be_cmd_enable_magic_wol(adapter,
2138 adapter->netdev->dev_addr, &cmd);
2139 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2140 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2141 } else {
2142 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2143 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2144 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2147 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
2148 return status;
2151 /*
2152 * Generate a seed MAC address from the PF MAC address using jhash.
2153 * MAC addresses for VFs are assigned incrementally, starting from the seed.
2154 * These addresses are programmed into the ASIC by the PF; each VF driver
2155 * queries for its MAC address during probe.
2156 */
2157 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2159 u32 vf = 0;
2160 int status = 0;
2161 u8 mac[ETH_ALEN];
2163 be_vf_eth_addr_generate(adapter, mac);
2165 for (vf = 0; vf < num_vfs; vf++) {
2166 status = be_cmd_pmac_add(adapter, mac,
2167 adapter->vf_cfg[vf].vf_if_handle,
2168 &adapter->vf_cfg[vf].vf_pmac_id);
2169 if (status)
2170 dev_err(&adapter->pdev->dev,
2171 "MAC address add failed for VF %d\n", vf);
2172 else
2173 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2175 mac[5] += 1;
2177 return status;
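/*
 * be_vf_eth_addr_generate() is defined elsewhere in the driver;
 * conceptually it hashes the PF MAC into a stable seed address. A
 * minimal sketch of the idea (illustrative only -- the fixed upper
 * bytes here are an assumption, not the actual implementation):
 *
 *	u32 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
 *
 *	mac[5] = (u8)(addr & 0xFF);
 *	mac[4] = (u8)((addr >> 8) & 0xFF);
 *	mac[3] = (u8)((addr >> 16) & 0xFF);
 *	mac[0] = 0x02;		// locally administered bit (assumed)
 *
 * Each VF then receives seed + vf via the mac[5] increment in the loop
 * above; with num_vfs capped at 32 (see be_init_module) the addresses
 * stay distinct.
 */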
2180 static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2182 u32 vf;
2184 for (vf = 0; vf < num_vfs; vf++) {
2185 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2186 be_cmd_pmac_del(adapter,
2187 adapter->vf_cfg[vf].vf_if_handle,
2188 adapter->vf_cfg[vf].vf_pmac_id);
2192 static int be_setup(struct be_adapter *adapter)
2194 struct net_device *netdev = adapter->netdev;
2195 u32 cap_flags, en_flags, vf = 0;
2196 int status;
2197 u8 mac[ETH_ALEN];
2199 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
2201 if (be_physfn(adapter)) {
2202 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2203 BE_IF_FLAGS_PROMISCUOUS |
2204 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2205 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
2207 if (be_multi_rxq(adapter)) {
2208 cap_flags |= BE_IF_FLAGS_RSS;
2209 en_flags |= BE_IF_FLAGS_RSS;
2213 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2214 netdev->dev_addr, false/* pmac_invalid */,
2215 &adapter->if_handle, &adapter->pmac_id, 0);
2216 if (status != 0)
2217 goto do_none;
2219 if (be_physfn(adapter)) {
2220 while (vf < num_vfs) {
2221 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED
2222 | BE_IF_FLAGS_BROADCAST;
2223 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2224 mac, true,
2225 &adapter->vf_cfg[vf].vf_if_handle,
2226 NULL, vf+1);
2227 if (status) {
2228 dev_err(&adapter->pdev->dev,
2229 "Interface Create failed for VF %d\n", vf);
2230 goto if_destroy;
2232 adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
2233 vf++;
2235 } else {
2236 status = be_cmd_mac_addr_query(adapter, mac,
2237 MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2238 if (!status) {
2239 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2240 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2244 status = be_tx_queues_create(adapter);
2245 if (status != 0)
2246 goto if_destroy;
2248 status = be_rx_queues_create(adapter);
2249 if (status != 0)
2250 goto tx_qs_destroy;
2252 status = be_mcc_queues_create(adapter);
2253 if (status != 0)
2254 goto rx_qs_destroy;
2256 if (be_physfn(adapter)) {
2257 status = be_vf_eth_addr_config(adapter);
2258 if (status)
2259 goto mcc_q_destroy;
2262 adapter->link_speed = -1;
2264 return 0;
2266 mcc_q_destroy:
2267 if (be_physfn(adapter))
2268 be_vf_eth_addr_rem(adapter);
2269 be_mcc_queues_destroy(adapter);
2270 rx_qs_destroy:
2271 be_rx_queues_destroy(adapter);
2272 tx_qs_destroy:
2273 be_tx_queues_destroy(adapter);
2274 if_destroy:
2275 for (vf = 0; vf < num_vfs; vf++)
2276 if (adapter->vf_cfg[vf].vf_if_handle)
2277 be_cmd_if_destroy(adapter,
2278 adapter->vf_cfg[vf].vf_if_handle);
2279 be_cmd_if_destroy(adapter, adapter->if_handle);
2280 do_none:
2281 return status;
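/*
 * The error labels above unwind in strict reverse order of creation
 * (MCC -> RX -> TX queues -> interfaces), and the VF interfaces are
 * destroyed before the PF interface that owns them.
 */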
2284 static int be_clear(struct be_adapter *adapter)
2286 if (be_physfn(adapter))
2287 be_vf_eth_addr_rem(adapter);
2289 be_mcc_queues_destroy(adapter);
2290 be_rx_queues_destroy(adapter);
2291 be_tx_queues_destroy(adapter);
2293 be_cmd_if_destroy(adapter, adapter->if_handle);
2295 /* tell fw we're done with firing cmds */
2296 be_cmd_fw_clean(adapter);
2297 return 0;
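/*
 * Firmware flashing. be_flash_redboot() below reads the CRC stored in
 * the last four bytes of the redboot component in the UFI image and
 * compares it with the CRC the firmware reports for the flash; the
 * boot loader is rewritten only when the two differ.
 */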
2301 #define FW_FILE_HDR_SIGN "ServerEngines Corp. "
2302 static bool be_flash_redboot(struct be_adapter *adapter,
2303 const u8 *p, u32 img_start, int image_size,
2304 int hdr_size)
2306 u32 crc_offset;
2307 u8 flashed_crc[4];
2308 int status;
2310 crc_offset = hdr_size + img_start + image_size - 4;
2312 p += crc_offset;
2314 status = be_cmd_get_flash_crc(adapter, flashed_crc,
2315 (image_size - 4));
2316 if (status) {
2317 dev_err(&adapter->pdev->dev,
2318 "could not get crc from flash, not flashing redboot\n");
2319 return false;
2322 /* update redboot only if CRC does not match */
2323 if (!memcmp(flashed_crc, p, 4))
2324 return false;
2325 else
2326 return true;
2329 static int be_flash_data(struct be_adapter *adapter,
2330 const struct firmware *fw,
2331 struct be_dma_mem *flash_cmd, int num_of_images)
2334 int status = 0, i, filehdr_size = 0;
2335 u32 total_bytes = 0, flash_op;
2336 int num_bytes;
2337 const u8 *p = fw->data;
2338 struct be_cmd_write_flashrom *req = flash_cmd->va;
2339 struct flash_comp *pflashcomp;
2340 int num_comp;
2342 struct flash_comp gen3_flash_types[9] = {
2343 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2344 FLASH_IMAGE_MAX_SIZE_g3},
2345 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2346 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2347 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2348 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2349 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2350 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2351 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2352 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2353 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2354 FLASH_IMAGE_MAX_SIZE_g3},
2355 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2356 FLASH_IMAGE_MAX_SIZE_g3},
2357 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2358 FLASH_IMAGE_MAX_SIZE_g3},
2359 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2360 FLASH_NCSI_IMAGE_MAX_SIZE_g3}
2362 struct flash_comp gen2_flash_types[8] = {
2363 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2364 FLASH_IMAGE_MAX_SIZE_g2},
2365 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2366 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2367 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2368 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2369 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2370 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2371 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2372 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2373 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2374 FLASH_IMAGE_MAX_SIZE_g2},
2375 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2376 FLASH_IMAGE_MAX_SIZE_g2},
2377 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2378 FLASH_IMAGE_MAX_SIZE_g2}
2381 if (adapter->generation == BE_GEN3) {
2382 pflashcomp = gen3_flash_types;
2383 filehdr_size = sizeof(struct flash_file_hdr_g3);
2384 num_comp = 9;
2385 } else {
2386 pflashcomp = gen2_flash_types;
2387 filehdr_size = sizeof(struct flash_file_hdr_g2);
2388 num_comp = 8;
2390 for (i = 0; i < num_comp; i++) {
2391 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2392 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2393 continue;
2394 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2395 (!be_flash_redboot(adapter, fw->data,
2396 pflashcomp[i].offset, pflashcomp[i].size,
2397 filehdr_size)))
2398 continue;
2399 p = fw->data;
2400 p += filehdr_size + pflashcomp[i].offset
2401 + (num_of_images * sizeof(struct image_hdr));
2402 if (p + pflashcomp[i].size > fw->data + fw->size)
2403 return -1;
2404 total_bytes = pflashcomp[i].size;
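/*
 * Each component is pushed through the 32KB DMA buffer in chunks:
 * intermediate chunks use FLASHROM_OPER_SAVE to stage data in the
 * firmware, and the final chunk uses FLASHROM_OPER_FLASH to commit
 * the whole image, yielding the CPU between chunks.
 */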
2405 while (total_bytes) {
2406 if (total_bytes > 32*1024)
2407 num_bytes = 32*1024;
2408 else
2409 num_bytes = total_bytes;
2410 total_bytes -= num_bytes;
2412 if (!total_bytes)
2413 flash_op = FLASHROM_OPER_FLASH;
2414 else
2415 flash_op = FLASHROM_OPER_SAVE;
2416 memcpy(req->params.data_buf, p, num_bytes);
2417 p += num_bytes;
2418 status = be_cmd_write_flashrom(adapter, flash_cmd,
2419 pflashcomp[i].optype, flash_op, num_bytes);
2420 if (status) {
2421 dev_err(&adapter->pdev->dev,
2422 "cmd to write to flash rom failed.\n");
2423 return -1;
2425 yield();
2428 return 0;
2431 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2433 if (fhdr == NULL)
2434 return 0;
2435 if (fhdr->build[0] == '3')
2436 return BE_GEN3;
2437 else if (fhdr->build[0] == '2')
2438 return BE_GEN2;
2439 else
2440 return 0;
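/*
 * be_load_fw() flow: fetch the UFI via request_firmware(), allocate a
 * DMA buffer sized for one write-flashrom command plus a 32KB payload,
 * check that the UFI generation (decoded from the header build string
 * by get_ufigen_type()) matches the adapter generation, and then flash
 * each embedded image.
 */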
2443 int be_load_fw(struct be_adapter *adapter, u8 *func)
2445 char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
2446 const struct firmware *fw;
2447 struct flash_file_hdr_g2 *fhdr;
2448 struct flash_file_hdr_g3 *fhdr3;
2449 struct image_hdr *img_hdr_ptr = NULL;
2450 struct be_dma_mem flash_cmd;
2451 int status, i = 0, num_imgs = 0;
2452 const u8 *p;
2454 strcpy(fw_file, func);
2456 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2457 if (status)
2458 goto fw_exit;
2460 p = fw->data;
2461 fhdr = (struct flash_file_hdr_g2 *) p;
2462 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2464 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2465 flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size,
2466 &flash_cmd.dma);
2467 if (!flash_cmd.va) {
2468 status = -ENOMEM;
2469 dev_err(&adapter->pdev->dev,
2470 "Memory allocation failure while flashing\n");
2471 goto fw_exit;
2474 if ((adapter->generation == BE_GEN3) &&
2475 (get_ufigen_type(fhdr) == BE_GEN3)) {
2476 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2477 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2478 for (i = 0; i < num_imgs; i++) {
2479 img_hdr_ptr = (struct image_hdr *) (fw->data +
2480 (sizeof(struct flash_file_hdr_g3) +
2481 i * sizeof(struct image_hdr)));
2482 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2483 status = be_flash_data(adapter, fw, &flash_cmd,
2484 num_imgs);
2486 } else if ((adapter->generation == BE_GEN2) &&
2487 (get_ufigen_type(fhdr) == BE_GEN2)) {
2488 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2489 } else {
2490 dev_err(&adapter->pdev->dev,
2491 "UFI and Interface are not compatible for flashing\n");
2492 status = -EINVAL;
2495 pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va,
2496 flash_cmd.dma);
2497 if (status) {
2498 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2499 goto fw_exit;
2502 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2504 fw_exit:
2505 release_firmware(fw);
2506 return status;
2509 static struct net_device_ops be_netdev_ops = {
2510 .ndo_open = be_open,
2511 .ndo_stop = be_close,
2512 .ndo_start_xmit = be_xmit,
2513 .ndo_set_rx_mode = be_set_multicast_list,
2514 .ndo_set_mac_address = be_mac_addr_set,
2515 .ndo_change_mtu = be_change_mtu,
2516 .ndo_validate_addr = eth_validate_addr,
2517 .ndo_vlan_rx_register = be_vlan_register,
2518 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
2519 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
2520 .ndo_set_vf_mac = be_set_vf_mac,
2521 .ndo_set_vf_vlan = be_set_vf_vlan,
2522 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
2523 .ndo_get_vf_config = be_get_vf_config
2526 static void be_netdev_init(struct net_device *netdev)
2528 struct be_adapter *adapter = netdev_priv(netdev);
2529 struct be_rx_obj *rxo;
2530 int i;
2532 netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
2533 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM |
2534 NETIF_F_GRO | NETIF_F_TSO6;
2536 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM;
2538 netdev->flags |= IFF_MULTICAST;
2540 adapter->rx_csum = true;
2542 /* Default settings for Rx and Tx flow control */
2543 adapter->rx_fc = true;
2544 adapter->tx_fc = true;
2546 netif_set_gso_max_size(netdev, 65535);
2548 BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2550 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2552 for_all_rx_queues(adapter, rxo, i)
2553 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2554 BE_NAPI_WEIGHT);
2556 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
2557 BE_NAPI_WEIGHT);
2559 netif_stop_queue(netdev);
2562 static void be_unmap_pci_bars(struct be_adapter *adapter)
2564 if (adapter->csr)
2565 iounmap(adapter->csr);
2566 if (adapter->db)
2567 iounmap(adapter->db);
2568 if (adapter->pcicfg && be_physfn(adapter))
2569 iounmap(adapter->pcicfg);
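/*
 * BAR layout as decoded below: the CSR window (BAR 2) exists only on
 * the PF. GEN2 uses BAR 1 for pcicfg and BAR 4 for doorbells; GEN3
 * uses BAR 0 for pcicfg, with doorbells on BAR 4 for the PF and BAR 0
 * for VFs, where the VF's pcicfg lives at a fixed offset inside the
 * doorbell BAR.
 */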
2572 static int be_map_pci_bars(struct be_adapter *adapter)
2574 u8 __iomem *addr;
2575 int pcicfg_reg, db_reg;
2577 if (be_physfn(adapter)) {
2578 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2579 pci_resource_len(adapter->pdev, 2));
2580 if (addr == NULL)
2581 return -ENOMEM;
2582 adapter->csr = addr;
2585 if (adapter->generation == BE_GEN2) {
2586 pcicfg_reg = 1;
2587 db_reg = 4;
2588 } else {
2589 pcicfg_reg = 0;
2590 if (be_physfn(adapter))
2591 db_reg = 4;
2592 else
2593 db_reg = 0;
2595 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
2596 pci_resource_len(adapter->pdev, db_reg));
2597 if (addr == NULL)
2598 goto pci_map_err;
2599 adapter->db = addr;
2601 if (be_physfn(adapter)) {
2602 addr = ioremap_nocache(
2603 pci_resource_start(adapter->pdev, pcicfg_reg),
2604 pci_resource_len(adapter->pdev, pcicfg_reg));
2605 if (addr == NULL)
2606 goto pci_map_err;
2607 adapter->pcicfg = addr;
2608 } else
2609 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
2611 return 0;
2612 pci_map_err:
2613 be_unmap_pci_bars(adapter);
2614 return -ENOMEM;
2618 static void be_ctrl_cleanup(struct be_adapter *adapter)
2620 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
2622 be_unmap_pci_bars(adapter);
2624 if (mem->va)
2625 pci_free_consistent(adapter->pdev, mem->size,
2626 mem->va, mem->dma);
2628 mem = &adapter->mc_cmd_mem;
2629 if (mem->va)
2630 pci_free_consistent(adapter->pdev, mem->size,
2631 mem->va, mem->dma);
2634 static int be_ctrl_init(struct be_adapter *adapter)
2636 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
2637 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
2638 struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
2639 int status;
2641 status = be_map_pci_bars(adapter);
2642 if (status)
2643 goto done;
2645 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2646 mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
2647 mbox_mem_alloc->size, &mbox_mem_alloc->dma);
2648 if (!mbox_mem_alloc->va) {
2649 status = -ENOMEM;
2650 goto unmap_pci_bars;
2653 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
2654 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
2655 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
2656 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
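/*
 * The mailbox must be 16-byte aligned, so 16 extra bytes are allocated
 * and both the virtual and DMA addresses are rounded up with PTR_ALIGN
 * (the alignment requirement is inferred from the over-allocation
 * above).
 */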
2658 mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
2659 mc_cmd_mem->va = pci_alloc_consistent(adapter->pdev, mc_cmd_mem->size,
2660 &mc_cmd_mem->dma);
2661 if (mc_cmd_mem->va == NULL) {
2662 status = -ENOMEM;
2663 goto free_mbox;
2665 memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
2667 spin_lock_init(&adapter->mbox_lock);
2668 spin_lock_init(&adapter->mcc_lock);
2669 spin_lock_init(&adapter->mcc_cq_lock);
2671 init_completion(&adapter->flash_compl);
2672 pci_save_state(adapter->pdev);
2673 return 0;
2675 free_mbox:
2676 pci_free_consistent(adapter->pdev, mbox_mem_alloc->size,
2677 mbox_mem_alloc->va, mbox_mem_alloc->dma);
2679 unmap_pci_bars:
2680 be_unmap_pci_bars(adapter);
2682 done:
2683 return status;
2686 static void be_stats_cleanup(struct be_adapter *adapter)
2688 struct be_dma_mem *cmd = &adapter->stats_cmd;
2690 if (cmd->va)
2691 pci_free_consistent(adapter->pdev, cmd->size,
2692 cmd->va, cmd->dma);
2695 static int be_stats_init(struct be_adapter *adapter)
2697 struct be_dma_mem *cmd = &adapter->stats_cmd;
2699 cmd->size = sizeof(struct be_cmd_req_get_stats);
2700 cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
2701 if (cmd->va == NULL)
2702 return -ENOMEM;
2703 memset(cmd->va, 0, cmd->size);
2704 return 0;
2707 static void __devexit be_remove(struct pci_dev *pdev)
2709 struct be_adapter *adapter = pci_get_drvdata(pdev);
2711 if (!adapter)
2712 return;
2714 unregister_netdev(adapter->netdev);
2716 be_clear(adapter);
2718 be_stats_cleanup(adapter);
2720 be_ctrl_cleanup(adapter);
2722 be_sriov_disable(adapter);
2724 be_msix_disable(adapter);
2726 pci_set_drvdata(pdev, NULL);
2727 pci_release_regions(pdev);
2728 pci_disable_device(pdev);
2730 free_netdev(adapter->netdev);
2733 static int be_get_config(struct be_adapter *adapter)
2735 int status;
2736 u8 mac[ETH_ALEN];
2738 status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
2739 if (status)
2740 return status;
2742 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2743 &adapter->function_mode, &adapter->function_caps);
2744 if (status)
2745 return status;
2747 memset(mac, 0, ETH_ALEN);
2749 if (be_physfn(adapter)) {
2750 status = be_cmd_mac_addr_query(adapter, mac,
2751 MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
2753 if (status)
2754 return status;
2756 if (!is_valid_ether_addr(mac))
2757 return -EADDRNOTAVAIL;
2759 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2760 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
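/*
 * Bit 0x400 of function_mode appears to select a multi-channel mode in
 * which the VLAN table is shared, leaving this function a quarter of
 * the supported VLANs (assumption based on the divide-by-4 below).
 */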
2763 if (adapter->function_mode & 0x400)
2764 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
2765 else
2766 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2768 return 0;
2771 static int __devinit be_probe(struct pci_dev *pdev,
2772 const struct pci_device_id *pdev_id)
2774 int status = 0;
2775 struct be_adapter *adapter;
2776 struct net_device *netdev;
2778 status = pci_enable_device(pdev);
2779 if (status)
2780 goto do_none;
2782 status = pci_request_regions(pdev, DRV_NAME);
2783 if (status)
2784 goto disable_dev;
2785 pci_set_master(pdev);
2787 netdev = alloc_etherdev(sizeof(struct be_adapter));
2788 if (netdev == NULL) {
2789 status = -ENOMEM;
2790 goto rel_reg;
2792 adapter = netdev_priv(netdev);
2794 switch (pdev->device) {
2795 case BE_DEVICE_ID1:
2796 case OC_DEVICE_ID1:
2797 adapter->generation = BE_GEN2;
2798 break;
2799 case BE_DEVICE_ID2:
2800 case OC_DEVICE_ID2:
2801 adapter->generation = BE_GEN3;
2802 break;
2803 default:
2804 adapter->generation = 0;
2807 adapter->pdev = pdev;
2808 pci_set_drvdata(pdev, adapter);
2809 adapter->netdev = netdev;
2810 SET_NETDEV_DEV(netdev, &pdev->dev);
2812 status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2813 if (!status) {
2814 netdev->features |= NETIF_F_HIGHDMA;
2815 } else {
2816 status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2817 if (status) {
2818 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
2819 goto free_netdev;
2823 be_sriov_enable(adapter);
2825 status = be_ctrl_init(adapter);
2826 if (status)
2827 goto free_netdev;
2829 /* sync up with fw's ready state */
2830 if (be_physfn(adapter)) {
2831 status = be_cmd_POST(adapter);
2832 if (status)
2833 goto ctrl_clean;
2836 /* tell fw we're ready to fire cmds */
2837 status = be_cmd_fw_init(adapter);
2838 if (status)
2839 goto ctrl_clean;
2841 if (be_physfn(adapter)) {
2842 status = be_cmd_reset_function(adapter);
2843 if (status)
2844 goto ctrl_clean;
2847 status = be_stats_init(adapter);
2848 if (status)
2849 goto ctrl_clean;
2851 status = be_get_config(adapter);
2852 if (status)
2853 goto stats_clean;
2855 be_msix_enable(adapter);
2857 INIT_DELAYED_WORK(&adapter->work, be_worker);
2859 status = be_setup(adapter);
2860 if (status)
2861 goto msix_disable;
2863 be_netdev_init(netdev);
2864 status = register_netdev(netdev);
2865 if (status != 0)
2866 goto unsetup;
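/*
 * Mark the carrier off only after register_netdev(); done earlier, the
 * operstate of the still-unregistered device would not be recorded
 * correctly (assumption; this ordering is what the code relies on).
 */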
2867 netif_carrier_off(netdev);
2869 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
2870 return 0;
2872 unsetup:
2873 be_clear(adapter);
2874 msix_disable:
2875 be_msix_disable(adapter);
2876 stats_clean:
2877 be_stats_cleanup(adapter);
2878 ctrl_clean:
2879 be_ctrl_cleanup(adapter);
2880 free_netdev:
2881 be_sriov_disable(adapter);
2882 free_netdev(adapter->netdev);
2883 pci_set_drvdata(pdev, NULL);
2884 rel_reg:
2885 pci_release_regions(pdev);
2886 disable_dev:
2887 pci_disable_device(pdev);
2888 do_none:
2889 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
2890 return status;
2893 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
2895 struct be_adapter *adapter = pci_get_drvdata(pdev);
2896 struct net_device *netdev = adapter->netdev;
2898 if (adapter->wol)
2899 be_setup_wol(adapter, true);
2901 netif_device_detach(netdev);
2902 if (netif_running(netdev)) {
2903 rtnl_lock();
2904 be_close(netdev);
2905 rtnl_unlock();
2907 be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
2908 be_clear(adapter);
2910 pci_save_state(pdev);
2911 pci_disable_device(pdev);
2912 pci_set_power_state(pdev, pci_choose_state(pdev, state));
2913 return 0;
2916 static int be_resume(struct pci_dev *pdev)
2918 int status = 0;
2919 struct be_adapter *adapter = pci_get_drvdata(pdev);
2920 struct net_device *netdev = adapter->netdev;
2922 netif_device_detach(netdev);
2924 status = pci_enable_device(pdev);
2925 if (status)
2926 return status;
2928 pci_set_power_state(pdev, PCI_D0);
2929 pci_restore_state(pdev);
2931 /* tell fw we're ready to fire cmds */
2932 status = be_cmd_fw_init(adapter);
2933 if (status)
2934 return status;
2936 be_setup(adapter);
2937 if (netif_running(netdev)) {
2938 rtnl_lock();
2939 be_open(netdev);
2940 rtnl_unlock();
2942 netif_device_attach(netdev);
2944 if (adapter->wol)
2945 be_setup_wol(adapter, false);
2946 return 0;
2949 /*
2950 * An FLR will stop BE from DMAing any data.
2951 */
2952 static void be_shutdown(struct pci_dev *pdev)
2954 struct be_adapter *adapter = pci_get_drvdata(pdev);
2955 struct net_device *netdev = adapter->netdev;
2957 netif_device_detach(netdev);
2959 be_cmd_reset_function(adapter);
2961 if (adapter->wol)
2962 be_setup_wol(adapter, true);
2964 pci_disable_device(pdev);
2967 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
2968 pci_channel_state_t state)
2970 struct be_adapter *adapter = pci_get_drvdata(pdev);
2971 struct net_device *netdev = adapter->netdev;
2973 dev_err(&adapter->pdev->dev, "EEH error detected\n");
2975 adapter->eeh_err = true;
2977 netif_device_detach(netdev);
2979 if (netif_running(netdev)) {
2980 rtnl_lock();
2981 be_close(netdev);
2982 rtnl_unlock();
2984 be_clear(adapter);
2986 if (state == pci_channel_io_perm_failure)
2987 return PCI_ERS_RESULT_DISCONNECT;
2989 pci_disable_device(pdev);
2991 return PCI_ERS_RESULT_NEED_RESET;
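/*
 * EEH recovery is split across three callbacks: error_detected (above)
 * quiesces the device and frees its resources, slot_reset below
 * re-enables the device and verifies POST, and resume re-initializes
 * firmware state and reopens the interface.
 */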
2994 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
2996 struct be_adapter *adapter = pci_get_drvdata(pdev);
2997 int status;
2999 dev_info(&adapter->pdev->dev, "EEH reset\n");
3000 adapter->eeh_err = false;
3002 status = pci_enable_device(pdev);
3003 if (status)
3004 return PCI_ERS_RESULT_DISCONNECT;
3006 pci_set_master(pdev);
3007 pci_set_power_state(pdev, PCI_D0);
3008 pci_restore_state(pdev);
3010 /* Check if card is ok and fw is ready */
3011 status = be_cmd_POST(adapter);
3012 if (status)
3013 return PCI_ERS_RESULT_DISCONNECT;
3015 return PCI_ERS_RESULT_RECOVERED;
3018 static void be_eeh_resume(struct pci_dev *pdev)
3020 int status = 0;
3021 struct be_adapter *adapter = pci_get_drvdata(pdev);
3022 struct net_device *netdev = adapter->netdev;
3024 dev_info(&adapter->pdev->dev, "EEH resume\n");
3026 pci_save_state(pdev);
3028 /* tell fw we're ready to fire cmds */
3029 status = be_cmd_fw_init(adapter);
3030 if (status)
3031 goto err;
3033 status = be_setup(adapter);
3034 if (status)
3035 goto err;
3037 if (netif_running(netdev)) {
3038 status = be_open(netdev);
3039 if (status)
3040 goto err;
3042 netif_device_attach(netdev);
3043 return;
3044 err:
3045 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
3048 static struct pci_error_handlers be_eeh_handlers = {
3049 .error_detected = be_eeh_err_detected,
3050 .slot_reset = be_eeh_reset,
3051 .resume = be_eeh_resume,
3054 static struct pci_driver be_driver = {
3055 .name = DRV_NAME,
3056 .id_table = be_dev_ids,
3057 .probe = be_probe,
3058 .remove = be_remove,
3059 .suspend = be_suspend,
3060 .resume = be_resume,
3061 .shutdown = be_shutdown,
3062 .err_handler = &be_eeh_handlers
3065 static int __init be_init_module(void)
3067 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3068 rx_frag_size != 2048) {
3069 printk(KERN_WARNING DRV_NAME
3070 " : Module param rx_frag_size must be 2048/4096/8192."
3071 " Using 2048\n");
3072 rx_frag_size = 2048;
3075 if (num_vfs > 32) {
3076 printk(KERN_WARNING DRV_NAME
3077 " : Module param num_vfs must not be greater than 32."
3078 " Using 32\n");
3079 num_vfs = 32;
3082 return pci_register_driver(&be_driver);
3084 module_init(be_init_module);
3086 static void __exit be_exit_module(void)
3088 pci_unregister_driver(&be_driver);
3090 module_exit(be_exit_module);