benet: use GFP_KERNEL allocations when possible
drivers/net/benet/be_main.c
1 /*
2 * Copyright (C) 2005 - 2010 ServerEngines
3 * All rights reserved.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
10 * Contact Information:
11 * linux-drivers@serverengines.com
13 * ServerEngines
14 * 209 N. Fair Oaks Ave
15 * Sunnyvale, CA 94085
18 #include "be.h"
19 #include "be_cmds.h"
20 #include <asm/div64.h>
22 MODULE_VERSION(DRV_VER);
23 MODULE_DEVICE_TABLE(pci, be_dev_ids);
24 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
25 MODULE_AUTHOR("ServerEngines Corporation");
26 MODULE_LICENSE("GPL");
28 static unsigned int rx_frag_size = 2048;
29 static unsigned int num_vfs;
30 module_param(rx_frag_size, uint, S_IRUGO);
31 module_param(num_vfs, uint, S_IRUGO);
32 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
33 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
35 static bool multi_rxq = true;
36 module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
37 MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");
39 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
40 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
41 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
42 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
43 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
44 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
45 { 0 }
47 MODULE_DEVICE_TABLE(pci, be_dev_ids);
48 /* UE Status Low CSR */
49 static char *ue_status_low_desc[] = {
50 "CEV",
51 "CTX",
52 "DBUF",
53 "ERX",
54 "Host",
55 "MPU",
56 "NDMA",
57 "PTC ",
58 "RDMA ",
59 "RXF ",
60 "RXIPS ",
61 "RXULP0 ",
62 "RXULP1 ",
63 "RXULP2 ",
64 "TIM ",
65 "TPOST ",
66 "TPRE ",
67 "TXIPS ",
68 "TXULP0 ",
69 "TXULP1 ",
70 "UC ",
71 "WDMA ",
72 "TXULP2 ",
73 "HOST1 ",
74 "P0_OB_LINK ",
75 "P1_OB_LINK ",
76 "HOST_GPIO ",
77 "MBOX ",
78 "AXGMAC0",
79 "AXGMAC1",
80 "JTAG",
81 "MPU_INTPEND"
83 /* UE Status High CSR */
84 static char *ue_status_hi_desc[] = {
85 "LPCMEMHOST",
86 "MGMT_MAC",
87 "PCS0ONLINE",
88 "MPU_IRAM",
89 "PCS1ONLINE",
90 "PCTL0",
91 "PCTL1",
92 "PMEM",
93 "RR",
94 "TXPB",
95 "RXPP",
96 "XAUI",
97 "TXP",
98 "ARM",
99 "IPC",
100 "HOST2",
101 "HOST3",
102 "HOST4",
103 "HOST5",
104 "HOST6",
105 "HOST7",
106 "HOST8",
107 "HOST9",
108 "NETC"
109 "Unknown",
110 "Unknown",
111 "Unknown",
112 "Unknown",
113 "Unknown",
114 "Unknown",
115 "Unknown",
116 "Unknown"
119 static inline bool be_multi_rxq(struct be_adapter *adapter)
121 return (adapter->num_rx_qs > 1);
124 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
126 struct be_dma_mem *mem = &q->dma_mem;
127 if (mem->va)
128 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
129 mem->dma);
132 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
133 u16 len, u16 entry_size)
135 struct be_dma_mem *mem = &q->dma_mem;
137 memset(q, 0, sizeof(*q));
138 q->len = len;
139 q->entry_size = entry_size;
140 mem->size = len * entry_size;
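/* Queue setup runs in process context, so a sleeping GFP_KERNEL allocation is fine here. */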
141 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
142 GFP_KERNEL);
143 if (!mem->va)
144 return -1;
145 memset(mem->va, 0, mem->size);
146 return 0;
149 static void be_intr_set(struct be_adapter *adapter, bool enable)
151 u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
152 u32 reg = ioread32(addr);
153 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
155 if (adapter->eeh_err)
156 return;
158 if (!enabled && enable)
159 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
160 else if (enabled && !enable)
161 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
162 else
163 return;
165 iowrite32(reg, addr);
168 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
170 u32 val = 0;
171 val |= qid & DB_RQ_RING_ID_MASK;
172 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
174 wmb();
175 iowrite32(val, adapter->db + DB_RQ_OFFSET);
178 static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
180 u32 val = 0;
181 val |= qid & DB_TXULP_RING_ID_MASK;
182 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
184 wmb();
185 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
188 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
189 bool arm, bool clear_int, u16 num_popped)
191 u32 val = 0;
192 val |= qid & DB_EQ_RING_ID_MASK;
193 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
194 DB_EQ_RING_ID_EXT_MASK_SHIFT);
196 if (adapter->eeh_err)
197 return;
199 if (arm)
200 val |= 1 << DB_EQ_REARM_SHIFT;
201 if (clear_int)
202 val |= 1 << DB_EQ_CLR_SHIFT;
203 val |= 1 << DB_EQ_EVNT_SHIFT;
204 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
205 iowrite32(val, adapter->db + DB_EQ_OFFSET);
208 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
210 u32 val = 0;
211 val |= qid & DB_CQ_RING_ID_MASK;
212 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
213 DB_CQ_RING_ID_EXT_MASK_SHIFT);
215 if (adapter->eeh_err)
216 return;
218 if (arm)
219 val |= 1 << DB_CQ_REARM_SHIFT;
220 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
221 iowrite32(val, adapter->db + DB_CQ_OFFSET);
224 static int be_mac_addr_set(struct net_device *netdev, void *p)
226 struct be_adapter *adapter = netdev_priv(netdev);
227 struct sockaddr *addr = p;
228 int status = 0;
230 if (!is_valid_ether_addr(addr->sa_data))
231 return -EADDRNOTAVAIL;
233 /* MAC addr configuration will be done in hardware for VFs
234 * by their corresponding PFs. Just copy to netdev addr here
236 if (!be_physfn(adapter))
237 goto netdev_addr;
239 status = be_cmd_pmac_del(adapter, adapter->if_handle,
240 adapter->pmac_id, 0);
241 if (status)
242 return status;
244 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
245 adapter->if_handle, &adapter->pmac_id, 0);
246 netdev_addr:
247 if (!status)
248 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
250 return status;
253 void netdev_stats_update(struct be_adapter *adapter)
255 struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
256 struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
257 struct be_port_rxf_stats *port_stats =
258 &rxf_stats->port[adapter->port_num];
259 struct net_device_stats *dev_stats = &adapter->netdev->stats;
260 struct be_erx_stats *erx_stats = &hw_stats->erx;
261 struct be_rx_obj *rxo;
262 int i;
264 memset(dev_stats, 0, sizeof(*dev_stats));
265 for_all_rx_queues(adapter, rxo, i) {
266 dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
267 dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
268 dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
269 /* no space in linux buffers: best possible approximation */
270 dev_stats->rx_dropped +=
271 erx_stats->rx_drops_no_fragments[rxo->q.id];
274 dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
275 dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;
277 /* bad pkts received */
278 dev_stats->rx_errors = port_stats->rx_crc_errors +
279 port_stats->rx_alignment_symbol_errors +
280 port_stats->rx_in_range_errors +
281 port_stats->rx_out_range_errors +
282 port_stats->rx_frame_too_long +
283 port_stats->rx_dropped_too_small +
284 port_stats->rx_dropped_too_short +
285 port_stats->rx_dropped_header_too_small +
286 port_stats->rx_dropped_tcp_length +
287 port_stats->rx_dropped_runt +
288 port_stats->rx_tcp_checksum_errs +
289 port_stats->rx_ip_checksum_errs +
290 port_stats->rx_udp_checksum_errs;
292 /* detailed rx errors */
293 dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
294 port_stats->rx_out_range_errors +
295 port_stats->rx_frame_too_long;
297 dev_stats->rx_crc_errors = port_stats->rx_crc_errors;
299 /* frame alignment errors */
300 dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;
302 /* receiver fifo overrun */
303 /* drops_no_pbuf is not per i/f, it's per BE card */
304 dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
305 port_stats->rx_input_fifo_overflow +
306 rxf_stats->rx_drops_no_pbuf;
309 void be_link_status_update(struct be_adapter *adapter, bool link_up)
311 struct net_device *netdev = adapter->netdev;
313 /* If link came up or went down */
314 if (adapter->link_up != link_up) {
315 adapter->link_speed = -1;
316 if (link_up) {
317 netif_carrier_on(netdev);
318 printk(KERN_INFO "%s: Link up\n", netdev->name);
319 } else {
320 netif_carrier_off(netdev);
321 printk(KERN_INFO "%s: Link down\n", netdev->name);
323 adapter->link_up = link_up;
327 /* Update the EQ delay in BE based on the RX frags consumed / sec */
328 static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
330 struct be_eq_obj *rx_eq = &rxo->rx_eq;
331 struct be_rx_stats *stats = &rxo->stats;
332 ulong now = jiffies;
333 u32 eqd;
335 if (!rx_eq->enable_aic)
336 return;
338 /* Wrapped around */
339 if (time_before(now, stats->rx_fps_jiffies)) {
340 stats->rx_fps_jiffies = now;
341 return;
344 /* Update once a second */
345 if ((now - stats->rx_fps_jiffies) < HZ)
346 return;
348 stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
349 ((now - stats->rx_fps_jiffies) / HZ);
351 stats->rx_fps_jiffies = now;
352 stats->prev_rx_frags = stats->rx_frags;
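/* Map the measured frag rate to an EQ delay: roughly 8 units per 110K frags/sec, clamped to [min_eqd, max_eqd]; rates too low to matter get no delay. */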
353 eqd = stats->rx_fps / 110000;
354 eqd = eqd << 3;
355 if (eqd > rx_eq->max_eqd)
356 eqd = rx_eq->max_eqd;
357 if (eqd < rx_eq->min_eqd)
358 eqd = rx_eq->min_eqd;
359 if (eqd < 10)
360 eqd = 0;
361 if (eqd != rx_eq->cur_eqd)
362 be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
364 rx_eq->cur_eqd = eqd;
367 static u32 be_calc_rate(u64 bytes, unsigned long ticks)
369 u64 rate = bytes;
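/* ticks/HZ gives the elapsed seconds, so rate first becomes bytes/sec. */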
371 do_div(rate, ticks / HZ);
372 rate <<= 3; /* bytes/sec -> bits/sec */
373 do_div(rate, 1000000ul); /* MB/Sec */
375 return rate;
378 static void be_tx_rate_update(struct be_adapter *adapter)
380 struct be_tx_stats *stats = tx_stats(adapter);
381 ulong now = jiffies;
383 /* Wrapped around? */
384 if (time_before(now, stats->be_tx_jiffies)) {
385 stats->be_tx_jiffies = now;
386 return;
389 /* Update tx rate once in two seconds */
390 if ((now - stats->be_tx_jiffies) > 2 * HZ) {
391 stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
392 - stats->be_tx_bytes_prev,
393 now - stats->be_tx_jiffies);
394 stats->be_tx_jiffies = now;
395 stats->be_tx_bytes_prev = stats->be_tx_bytes;
399 static void be_tx_stats_update(struct be_adapter *adapter,
400 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
402 struct be_tx_stats *stats = tx_stats(adapter);
403 stats->be_tx_reqs++;
404 stats->be_tx_wrbs += wrb_cnt;
405 stats->be_tx_bytes += copied;
406 stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
407 if (stopped)
408 stats->be_tx_stops++;
411 /* Determine number of WRB entries needed to xmit data in an skb */
412 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
413 bool *dummy)
415 int cnt = (skb->len > skb->data_len);
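/* One WRB for the linear data (if any), one per page fragment, plus the header WRB; non-Lancer chips need an even total, hence the optional dummy below. */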
417 cnt += skb_shinfo(skb)->nr_frags;
419 /* to account for hdr wrb */
420 cnt++;
421 if (lancer_chip(adapter) || !(cnt & 1)) {
422 *dummy = false;
423 } else {
424 /* add a dummy to make it an even num */
425 cnt++;
426 *dummy = true;
428 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
429 return cnt;
432 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
434 wrb->frag_pa_hi = upper_32_bits(addr);
435 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
436 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
439 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
440 struct sk_buff *skb, u32 wrb_cnt, u32 len)
442 u8 vlan_prio = 0;
443 u16 vlan_tag = 0;
445 memset(hdr, 0, sizeof(*hdr));
447 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
449 if (skb_is_gso(skb)) {
450 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
451 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
452 hdr, skb_shinfo(skb)->gso_size);
453 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
454 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
455 if (lancer_chip(adapter) && adapter->sli_family ==
456 LANCER_A0_SLI_FAMILY) {
457 AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
458 if (is_tcp_pkt(skb))
459 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
460 tcpcs, hdr, 1);
461 else if (is_udp_pkt(skb))
462 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
463 udpcs, hdr, 1);
465 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
466 if (is_tcp_pkt(skb))
467 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
468 else if (is_udp_pkt(skb))
469 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
472 if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
473 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
474 vlan_tag = vlan_tx_tag_get(skb);
475 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
476 /* If vlan priority provided by OS is NOT in available bmap */
477 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
478 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
479 adapter->recommended_prio;
480 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
483 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
484 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
485 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
486 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
489 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
490 bool unmap_single)
492 dma_addr_t dma;
494 be_dws_le_to_cpu(wrb, sizeof(*wrb));
496 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
497 if (wrb->frag_len) {
498 if (unmap_single)
499 dma_unmap_single(dev, dma, wrb->frag_len,
500 DMA_TO_DEVICE);
501 else
502 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
506 static int make_tx_wrbs(struct be_adapter *adapter,
507 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
509 dma_addr_t busaddr;
510 int i, copied = 0;
511 struct device *dev = &adapter->pdev->dev;
512 struct sk_buff *first_skb = skb;
513 struct be_queue_info *txq = &adapter->tx_obj.q;
514 struct be_eth_wrb *wrb;
515 struct be_eth_hdr_wrb *hdr;
516 bool map_single = false;
517 u16 map_head;
519 hdr = queue_head_node(txq);
520 queue_head_inc(txq);
521 map_head = txq->head;
523 if (skb->len > skb->data_len) {
524 int len = skb_headlen(skb);
525 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
526 if (dma_mapping_error(dev, busaddr))
527 goto dma_err;
528 map_single = true;
529 wrb = queue_head_node(txq);
530 wrb_fill(wrb, busaddr, len);
531 be_dws_cpu_to_le(wrb, sizeof(*wrb));
532 queue_head_inc(txq);
533 copied += len;
536 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
537 struct skb_frag_struct *frag =
538 &skb_shinfo(skb)->frags[i];
539 busaddr = dma_map_page(dev, frag->page, frag->page_offset,
540 frag->size, DMA_TO_DEVICE);
541 if (dma_mapping_error(dev, busaddr))
542 goto dma_err;
543 wrb = queue_head_node(txq);
544 wrb_fill(wrb, busaddr, frag->size);
545 be_dws_cpu_to_le(wrb, sizeof(*wrb));
546 queue_head_inc(txq);
547 copied += frag->size;
550 if (dummy_wrb) {
551 wrb = queue_head_node(txq);
552 wrb_fill(wrb, 0, 0);
553 be_dws_cpu_to_le(wrb, sizeof(*wrb));
554 queue_head_inc(txq);
557 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
558 be_dws_cpu_to_le(hdr, sizeof(*hdr));
560 return copied;
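/* DMA mapping failed: rewind the queue head and unmap every fragment mapped so far. */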
561 dma_err:
562 txq->head = map_head;
563 while (copied) {
564 wrb = queue_head_node(txq);
565 unmap_tx_frag(dev, wrb, map_single);
566 map_single = false;
567 copied -= wrb->frag_len;
568 queue_head_inc(txq);
570 return 0;
573 static netdev_tx_t be_xmit(struct sk_buff *skb,
574 struct net_device *netdev)
576 struct be_adapter *adapter = netdev_priv(netdev);
577 struct be_tx_obj *tx_obj = &adapter->tx_obj;
578 struct be_queue_info *txq = &tx_obj->q;
579 u32 wrb_cnt = 0, copied = 0;
580 u32 start = txq->head;
581 bool dummy_wrb, stopped = false;
583 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
585 copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
586 if (copied) {
587 /* record the sent skb in the sent_skb table */
588 BUG_ON(tx_obj->sent_skb_list[start]);
589 tx_obj->sent_skb_list[start] = skb;
591 /* Ensure txq has space for the next skb; Else stop the queue
592 * *BEFORE* ringing the tx doorbell, so that we serialize the
593 * tx compls of the current transmit which'll wake up the queue
595 atomic_add(wrb_cnt, &txq->used);
596 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
597 txq->len) {
598 netif_stop_queue(netdev);
599 stopped = true;
602 be_txq_notify(adapter, txq->id, wrb_cnt);
604 be_tx_stats_update(adapter, wrb_cnt, copied,
605 skb_shinfo(skb)->gso_segs, stopped);
606 } else {
607 txq->head = start;
608 dev_kfree_skb_any(skb);
610 return NETDEV_TX_OK;
613 static int be_change_mtu(struct net_device *netdev, int new_mtu)
615 struct be_adapter *adapter = netdev_priv(netdev);
616 if (new_mtu < BE_MIN_MTU ||
617 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
618 (ETH_HLEN + ETH_FCS_LEN))) {
619 dev_info(&adapter->pdev->dev,
620 "MTU must be between %d and %d bytes\n",
621 BE_MIN_MTU,
622 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
623 return -EINVAL;
625 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
626 netdev->mtu, new_mtu);
627 netdev->mtu = new_mtu;
628 return 0;
632 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
633 * If the user configures more, place BE in vlan promiscuous mode.
635 static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
637 u16 vtag[BE_NUM_VLANS_SUPPORTED];
638 u16 ntags = 0, i;
639 int status = 0;
640 u32 if_handle;
642 if (vf) {
643 if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
644 vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
645 status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
648 if (adapter->vlans_added <= adapter->max_vlans) {
649 /* Construct VLAN Table to give to HW */
650 for (i = 0; i < VLAN_N_VID; i++) {
651 if (adapter->vlan_tag[i]) {
652 vtag[ntags] = cpu_to_le16(i);
653 ntags++;
656 status = be_cmd_vlan_config(adapter, adapter->if_handle,
657 vtag, ntags, 1, 0);
658 } else {
659 status = be_cmd_vlan_config(adapter, adapter->if_handle,
660 NULL, 0, 1, 1);
663 return status;
666 static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
668 struct be_adapter *adapter = netdev_priv(netdev);
670 adapter->vlan_grp = grp;
673 static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
675 struct be_adapter *adapter = netdev_priv(netdev);
677 adapter->vlans_added++;
678 if (!be_physfn(adapter))
679 return;
681 adapter->vlan_tag[vid] = 1;
682 if (adapter->vlans_added <= (adapter->max_vlans + 1))
683 be_vid_config(adapter, false, 0);
686 static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
688 struct be_adapter *adapter = netdev_priv(netdev);
690 adapter->vlans_added--;
691 vlan_group_set_device(adapter->vlan_grp, vid, NULL);
693 if (!be_physfn(adapter))
694 return;
696 adapter->vlan_tag[vid] = 0;
697 if (adapter->vlans_added <= adapter->max_vlans)
698 be_vid_config(adapter, false, 0);
701 static void be_set_multicast_list(struct net_device *netdev)
703 struct be_adapter *adapter = netdev_priv(netdev);
705 if (netdev->flags & IFF_PROMISC) {
706 be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
707 adapter->promiscuous = true;
708 goto done;
711 /* BE was previously in promiscuous mode; disable it */
712 if (adapter->promiscuous) {
713 adapter->promiscuous = false;
714 be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
717 /* Enable multicast promisc if num configured exceeds what we support */
718 if (netdev->flags & IFF_ALLMULTI ||
719 netdev_mc_count(netdev) > BE_MAX_MC) {
720 be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
721 &adapter->mc_cmd_mem);
722 goto done;
725 be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
726 &adapter->mc_cmd_mem);
727 done:
728 return;
731 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
733 struct be_adapter *adapter = netdev_priv(netdev);
734 int status;
736 if (!adapter->sriov_enabled)
737 return -EPERM;
739 if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
740 return -EINVAL;
742 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
743 status = be_cmd_pmac_del(adapter,
744 adapter->vf_cfg[vf].vf_if_handle,
745 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
747 status = be_cmd_pmac_add(adapter, mac,
748 adapter->vf_cfg[vf].vf_if_handle,
749 &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
751 if (status)
752 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
753 mac, vf);
754 else
755 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
757 return status;
760 static int be_get_vf_config(struct net_device *netdev, int vf,
761 struct ifla_vf_info *vi)
763 struct be_adapter *adapter = netdev_priv(netdev);
765 if (!adapter->sriov_enabled)
766 return -EPERM;
768 if (vf >= num_vfs)
769 return -EINVAL;
771 vi->vf = vf;
772 vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
773 vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
774 vi->qos = 0;
775 memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
777 return 0;
780 static int be_set_vf_vlan(struct net_device *netdev,
781 int vf, u16 vlan, u8 qos)
783 struct be_adapter *adapter = netdev_priv(netdev);
784 int status = 0;
786 if (!adapter->sriov_enabled)
787 return -EPERM;
789 if ((vf >= num_vfs) || (vlan > 4095))
790 return -EINVAL;
792 if (vlan) {
793 adapter->vf_cfg[vf].vf_vlan_tag = vlan;
794 adapter->vlans_added++;
795 } else {
796 adapter->vf_cfg[vf].vf_vlan_tag = 0;
797 adapter->vlans_added--;
800 status = be_vid_config(adapter, true, vf);
802 if (status)
803 dev_info(&adapter->pdev->dev,
804 "VLAN %d config on VF %d failed\n", vlan, vf);
805 return status;
808 static int be_set_vf_tx_rate(struct net_device *netdev,
809 int vf, int rate)
811 struct be_adapter *adapter = netdev_priv(netdev);
812 int status = 0;
814 if (!adapter->sriov_enabled)
815 return -EPERM;
817 if ((vf >= num_vfs) || (rate < 0))
818 return -EINVAL;
820 if (rate > 10000)
821 rate = 10000;
823 adapter->vf_cfg[vf].vf_tx_rate = rate;
824 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
826 if (status)
827 dev_info(&adapter->pdev->dev,
828 "tx rate %d on VF %d failed\n", rate, vf);
829 return status;
832 static void be_rx_rate_update(struct be_rx_obj *rxo)
834 struct be_rx_stats *stats = &rxo->stats;
835 ulong now = jiffies;
837 /* Wrapped around */
838 if (time_before(now, stats->rx_jiffies)) {
839 stats->rx_jiffies = now;
840 return;
843 /* Update the rate once in two seconds */
844 if ((now - stats->rx_jiffies) < 2 * HZ)
845 return;
847 stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
848 now - stats->rx_jiffies);
849 stats->rx_jiffies = now;
850 stats->rx_bytes_prev = stats->rx_bytes;
853 static void be_rx_stats_update(struct be_rx_obj *rxo,
854 u32 pktsize, u16 numfrags, u8 pkt_type)
856 struct be_rx_stats *stats = &rxo->stats;
858 stats->rx_compl++;
859 stats->rx_frags += numfrags;
860 stats->rx_bytes += pktsize;
861 stats->rx_pkts++;
862 if (pkt_type == BE_MULTICAST_PACKET)
863 stats->rx_mcast_pkts++;
866 static inline bool csum_passed(struct be_eth_rx_compl *rxcp)
868 u8 l4_cksm, ipv6, ipcksm;
870 l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
871 ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
872 ipv6 = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
874 /* Ignore ipcksm for ipv6 pkts */
875 return l4_cksm && (ipcksm || ipv6);
878 static struct be_rx_page_info *
879 get_rx_page_info(struct be_adapter *adapter,
880 struct be_rx_obj *rxo,
881 u16 frag_idx)
883 struct be_rx_page_info *rx_page_info;
884 struct be_queue_info *rxq = &rxo->q;
886 rx_page_info = &rxo->page_info_tbl[frag_idx];
887 BUG_ON(!rx_page_info->page);
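/* The big page is DMA-unmapped only when its last fragment (last_page_user) is consumed. */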
889 if (rx_page_info->last_page_user) {
890 dma_unmap_page(&adapter->pdev->dev,
891 dma_unmap_addr(rx_page_info, bus),
892 adapter->big_page_size, DMA_FROM_DEVICE);
893 rx_page_info->last_page_user = false;
896 atomic_dec(&rxq->used);
897 return rx_page_info;
900 /* Throw away the data in the Rx completion */
901 static void be_rx_compl_discard(struct be_adapter *adapter,
902 struct be_rx_obj *rxo,
903 struct be_eth_rx_compl *rxcp)
905 struct be_queue_info *rxq = &rxo->q;
906 struct be_rx_page_info *page_info;
907 u16 rxq_idx, i, num_rcvd;
909 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
910 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
912 /* Skip out-of-buffer compl(lancer) or flush compl(BE) */
913 if (likely(rxq_idx != rxo->last_frag_index && num_rcvd != 0)) {
915 rxo->last_frag_index = rxq_idx;
917 for (i = 0; i < num_rcvd; i++) {
918 page_info = get_rx_page_info(adapter, rxo, rxq_idx);
919 put_page(page_info->page);
920 memset(page_info, 0, sizeof(*page_info));
921 index_inc(&rxq_idx, rxq->len);
927 * skb_fill_rx_data forms a complete skb for an ether frame
928 * indicated by rxcp.
930 static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
931 struct sk_buff *skb, struct be_eth_rx_compl *rxcp,
932 u16 num_rcvd)
934 struct be_queue_info *rxq = &rxo->q;
935 struct be_rx_page_info *page_info;
936 u16 rxq_idx, i, j;
937 u32 pktsize, hdr_len, curr_frag_len, size;
938 u8 *start;
939 u8 pkt_type;
941 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
942 pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
943 pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);
945 page_info = get_rx_page_info(adapter, rxo, rxq_idx);
947 start = page_address(page_info->page) + page_info->page_offset;
948 prefetch(start);
950 /* Copy data in the first descriptor of this completion */
951 curr_frag_len = min(pktsize, rx_frag_size);
953 /* Copy the header portion into skb_data */
954 hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
955 memcpy(skb->data, start, hdr_len);
956 skb->len = curr_frag_len;
957 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
958 /* Complete packet has now been moved to data */
959 put_page(page_info->page);
960 skb->data_len = 0;
961 skb->tail += curr_frag_len;
962 } else {
963 skb_shinfo(skb)->nr_frags = 1;
964 skb_shinfo(skb)->frags[0].page = page_info->page;
965 skb_shinfo(skb)->frags[0].page_offset =
966 page_info->page_offset + hdr_len;
967 skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
968 skb->data_len = curr_frag_len - hdr_len;
969 skb->tail += hdr_len;
971 page_info->page = NULL;
973 if (pktsize <= rx_frag_size) {
974 BUG_ON(num_rcvd != 1);
975 goto done;
978 /* More frags present for this completion */
979 size = pktsize;
980 for (i = 1, j = 0; i < num_rcvd; i++) {
981 size -= curr_frag_len;
982 index_inc(&rxq_idx, rxq->len);
983 page_info = get_rx_page_info(adapter, rxo, rxq_idx);
985 curr_frag_len = min(size, rx_frag_size);
987 /* Coalesce all frags from the same physical page in one slot */
988 if (page_info->page_offset == 0) {
989 /* Fresh page */
990 j++;
991 skb_shinfo(skb)->frags[j].page = page_info->page;
992 skb_shinfo(skb)->frags[j].page_offset =
993 page_info->page_offset;
994 skb_shinfo(skb)->frags[j].size = 0;
995 skb_shinfo(skb)->nr_frags++;
996 } else {
997 put_page(page_info->page);
1000 skb_shinfo(skb)->frags[j].size += curr_frag_len;
1001 skb->len += curr_frag_len;
1002 skb->data_len += curr_frag_len;
1004 page_info->page = NULL;
1006 BUG_ON(j > MAX_SKB_FRAGS);
1008 done:
1009 be_rx_stats_update(rxo, pktsize, num_rcvd, pkt_type);
1012 /* Process the RX completion indicated by rxcp when GRO is disabled */
1013 static void be_rx_compl_process(struct be_adapter *adapter,
1014 struct be_rx_obj *rxo,
1015 struct be_eth_rx_compl *rxcp)
1017 struct sk_buff *skb;
1018 u32 vlanf, vid;
1019 u16 num_rcvd;
1020 u8 vtm;
1022 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
1024 skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
1025 if (unlikely(!skb)) {
1026 if (net_ratelimit())
1027 dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
1028 be_rx_compl_discard(adapter, rxo, rxcp);
1029 return;
1032 skb_fill_rx_data(adapter, rxo, skb, rxcp, num_rcvd);
1034 if (likely(adapter->rx_csum && csum_passed(rxcp)))
1035 skb->ip_summed = CHECKSUM_UNNECESSARY;
1036 else
1037 skb_checksum_none_assert(skb);
1039 skb->truesize = skb->len + sizeof(struct sk_buff);
1040 skb->protocol = eth_type_trans(skb, adapter->netdev);
1042 vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
1043 vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
1045 /* vlanf could be wrongly set in some cards.
1046 * ignore if vtm is not set */
1047 if ((adapter->function_mode & 0x400) && !vtm)
1048 vlanf = 0;
1050 if ((adapter->pvid == vlanf) && !adapter->vlan_tag[vlanf])
1051 vlanf = 0;
1053 if (unlikely(vlanf)) {
1054 if (!adapter->vlan_grp || adapter->vlans_added == 0) {
1055 kfree_skb(skb);
1056 return;
1058 vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
1059 if (!lancer_chip(adapter))
1060 vid = swab16(vid);
1061 vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
1062 } else {
1063 netif_receive_skb(skb);
1067 /* Process the RX completion indicated by rxcp when GRO is enabled */
1068 static void be_rx_compl_process_gro(struct be_adapter *adapter,
1069 struct be_rx_obj *rxo,
1070 struct be_eth_rx_compl *rxcp)
1072 struct be_rx_page_info *page_info;
1073 struct sk_buff *skb = NULL;
1074 struct be_queue_info *rxq = &rxo->q;
1075 struct be_eq_obj *eq_obj = &rxo->rx_eq;
1076 u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
1077 u16 i, rxq_idx = 0, vid, j;
1078 u8 vtm;
1079 u8 pkt_type;
1081 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
1082 pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
1083 vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
1084 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
1085 vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
1086 pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);
1088 /* vlanf could be wrongly set in some cards.
1089 * ignore if vtm is not set */
1090 if ((adapter->function_mode & 0x400) && !vtm)
1091 vlanf = 0;
1093 if ((adapter->pvid == vlanf) && !adapter->vlan_tag[vlanf])
1094 vlanf = 0;
1096 skb = napi_get_frags(&eq_obj->napi);
1097 if (!skb) {
1098 be_rx_compl_discard(adapter, rxo, rxcp);
1099 return;
1102 remaining = pkt_size;
1103 for (i = 0, j = -1; i < num_rcvd; i++) {
1104 page_info = get_rx_page_info(adapter, rxo, rxq_idx);
1106 curr_frag_len = min(remaining, rx_frag_size);
1108 /* Coalesce all frags from the same physical page in one slot */
1109 if (i == 0 || page_info->page_offset == 0) {
1110 /* First frag or Fresh page */
1111 j++;
1112 skb_shinfo(skb)->frags[j].page = page_info->page;
1113 skb_shinfo(skb)->frags[j].page_offset =
1114 page_info->page_offset;
1115 skb_shinfo(skb)->frags[j].size = 0;
1116 } else {
1117 put_page(page_info->page);
1119 skb_shinfo(skb)->frags[j].size += curr_frag_len;
1121 remaining -= curr_frag_len;
1122 index_inc(&rxq_idx, rxq->len);
1123 memset(page_info, 0, sizeof(*page_info));
1125 BUG_ON(j > MAX_SKB_FRAGS);
1127 skb_shinfo(skb)->nr_frags = j + 1;
1128 skb->len = pkt_size;
1129 skb->data_len = pkt_size;
1130 skb->truesize += pkt_size;
1131 skb->ip_summed = CHECKSUM_UNNECESSARY;
1133 if (likely(!vlanf)) {
1134 napi_gro_frags(&eq_obj->napi);
1135 } else {
1136 vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
1137 if (!lancer_chip(adapter))
1138 vid = swab16(vid);
1140 if (!adapter->vlan_grp || adapter->vlans_added == 0)
1141 return;
1143 vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
1146 be_rx_stats_update(rxo, pkt_size, num_rcvd, pkt_type);
1149 static struct be_eth_rx_compl *be_rx_compl_get(struct be_rx_obj *rxo)
1151 struct be_eth_rx_compl *rxcp = queue_tail_node(&rxo->cq);
1153 if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
1154 return NULL;
1156 rmb();
1157 be_dws_le_to_cpu(rxcp, sizeof(*rxcp));
1159 queue_tail_inc(&rxo->cq);
1160 return rxcp;
1163 /* To reset the valid bit, we need to reset the whole word as
1164 * when walking the queue the valid entries are little-endian
1165 * and invalid entries are host endian
1167 static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
1169 rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
1172 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1174 u32 order = get_order(size);
1176 if (order > 0)
1177 gfp |= __GFP_COMP;
1178 return alloc_pages(gfp, order);
1182 * Allocate a page, split it to fragments of size rx_frag_size and post as
1183 * receive buffers to BE
1185 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1187 struct be_adapter *adapter = rxo->adapter;
1188 struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
1189 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1190 struct be_queue_info *rxq = &rxo->q;
1191 struct page *pagep = NULL;
1192 struct be_eth_rx_d *rxd;
1193 u64 page_dmaaddr = 0, frag_dmaaddr;
1194 u32 posted, page_offset = 0;
1196 page_info = &rxo->page_info_tbl[rxq->head];
1197 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1198 if (!pagep) {
1199 pagep = be_alloc_pages(adapter->big_page_size, gfp);
1200 if (unlikely(!pagep)) {
1201 rxo->stats.rx_post_fail++;
1202 break;
1204 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1205 0, adapter->big_page_size,
1206 DMA_FROM_DEVICE);
1207 page_info->page_offset = 0;
1208 } else {
1209 get_page(pagep);
1210 page_info->page_offset = page_offset + rx_frag_size;
1212 page_offset = page_info->page_offset;
1213 page_info->page = pagep;
1214 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1215 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1217 rxd = queue_head_node(rxq);
1218 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1219 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1221 /* Any space left in the current big page for another frag? */
1222 if ((page_offset + rx_frag_size + rx_frag_size) >
1223 adapter->big_page_size) {
1224 pagep = NULL;
1225 page_info->last_page_user = true;
1228 prev_page_info = page_info;
1229 queue_head_inc(rxq);
1230 page_info = &page_info_tbl[rxq->head];
1232 if (pagep)
1233 prev_page_info->last_page_user = true;
1235 if (posted) {
1236 atomic_add(posted, &rxq->used);
1237 be_rxq_notify(adapter, rxq->id, posted);
1238 } else if (atomic_read(&rxq->used) == 0) {
1239 /* Let be_worker replenish when memory is available */
1240 rxo->rx_post_starved = true;
1244 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1246 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1248 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1249 return NULL;
1251 rmb();
1252 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1254 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1256 queue_tail_inc(tx_cq);
1257 return txcp;
1260 static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
1262 struct be_queue_info *txq = &adapter->tx_obj.q;
1263 struct be_eth_wrb *wrb;
1264 struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
1265 struct sk_buff *sent_skb;
1266 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1267 bool unmap_skb_hdr = true;
1269 sent_skb = sent_skbs[txq->tail];
1270 BUG_ON(!sent_skb);
1271 sent_skbs[txq->tail] = NULL;
1273 /* skip header wrb */
1274 queue_tail_inc(txq);
1276 do {
1277 cur_index = txq->tail;
1278 wrb = queue_tail_node(txq);
1279 unmap_tx_frag(&adapter->pdev->dev, wrb,
1280 (unmap_skb_hdr && skb_headlen(sent_skb)));
1281 unmap_skb_hdr = false;
1283 num_wrbs++;
1284 queue_tail_inc(txq);
1285 } while (cur_index != last_index);
1287 atomic_sub(num_wrbs, &txq->used);
1289 kfree_skb(sent_skb);
1292 static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1294 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1296 if (!eqe->evt)
1297 return NULL;
1299 rmb();
1300 eqe->evt = le32_to_cpu(eqe->evt);
1301 queue_tail_inc(&eq_obj->q);
1302 return eqe;
1305 static int event_handle(struct be_adapter *adapter,
1306 struct be_eq_obj *eq_obj)
1308 struct be_eq_entry *eqe;
1309 u16 num = 0;
1311 while ((eqe = event_get(eq_obj)) != NULL) {
1312 eqe->evt = 0;
1313 num++;
1316 /* Deal with any spurious interrupts that come
1317 * without events
1319 be_eq_notify(adapter, eq_obj->q.id, true, true, num);
1320 if (num)
1321 napi_schedule(&eq_obj->napi);
1323 return num;
1326 /* Just read and notify events without processing them.
1327 * Used at the time of destroying event queues */
1328 static void be_eq_clean(struct be_adapter *adapter,
1329 struct be_eq_obj *eq_obj)
1331 struct be_eq_entry *eqe;
1332 u16 num = 0;
1334 while ((eqe = event_get(eq_obj)) != NULL) {
1335 eqe->evt = 0;
1336 num++;
1339 if (num)
1340 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1343 static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
1345 struct be_rx_page_info *page_info;
1346 struct be_queue_info *rxq = &rxo->q;
1347 struct be_queue_info *rx_cq = &rxo->cq;
1348 struct be_eth_rx_compl *rxcp;
1349 u16 tail;
1351 /* First cleanup pending rx completions */
1352 while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1353 be_rx_compl_discard(adapter, rxo, rxcp);
1354 be_rx_compl_reset(rxcp);
1355 be_cq_notify(adapter, rx_cq->id, false, 1);
1358 /* Then free posted rx buffers that were not used */
1359 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1360 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1361 page_info = get_rx_page_info(adapter, rxo, tail);
1362 put_page(page_info->page);
1363 memset(page_info, 0, sizeof(*page_info));
1365 BUG_ON(atomic_read(&rxq->used));
1368 static void be_tx_compl_clean(struct be_adapter *adapter)
1370 struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1371 struct be_queue_info *txq = &adapter->tx_obj.q;
1372 struct be_eth_tx_compl *txcp;
1373 u16 end_idx, cmpl = 0, timeo = 0;
1374 struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
1375 struct sk_buff *sent_skb;
1376 bool dummy_wrb;
1378 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1379 do {
1380 while ((txcp = be_tx_compl_get(tx_cq))) {
1381 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1382 wrb_index, txcp);
1383 be_tx_compl_process(adapter, end_idx);
1384 cmpl++;
1386 if (cmpl) {
1387 be_cq_notify(adapter, tx_cq->id, false, cmpl);
1388 cmpl = 0;
1391 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1392 break;
1394 mdelay(1);
1395 } while (true);
1397 if (atomic_read(&txq->used))
1398 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1399 atomic_read(&txq->used));
1401 /* free posted tx for which compls will never arrive */
1402 while (atomic_read(&txq->used)) {
1403 sent_skb = sent_skbs[txq->tail];
1404 end_idx = txq->tail;
1405 index_adv(&end_idx,
1406 wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1407 txq->len);
1408 be_tx_compl_process(adapter, end_idx);
1412 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1414 struct be_queue_info *q;
1416 q = &adapter->mcc_obj.q;
1417 if (q->created)
1418 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1419 be_queue_free(adapter, q);
1421 q = &adapter->mcc_obj.cq;
1422 if (q->created)
1423 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1424 be_queue_free(adapter, q);
1427 /* Must be called only after TX qs are created as MCC shares TX EQ */
1428 static int be_mcc_queues_create(struct be_adapter *adapter)
1430 struct be_queue_info *q, *cq;
1432 /* Alloc MCC compl queue */
1433 cq = &adapter->mcc_obj.cq;
1434 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1435 sizeof(struct be_mcc_compl)))
1436 goto err;
1438 /* Ask BE to create MCC compl queue; share TX's eq */
1439 if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
1440 goto mcc_cq_free;
1442 /* Alloc MCC queue */
1443 q = &adapter->mcc_obj.q;
1444 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1445 goto mcc_cq_destroy;
1447 /* Ask BE to create MCC queue */
1448 if (be_cmd_mccq_create(adapter, q, cq))
1449 goto mcc_q_free;
1451 return 0;
1453 mcc_q_free:
1454 be_queue_free(adapter, q);
1455 mcc_cq_destroy:
1456 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1457 mcc_cq_free:
1458 be_queue_free(adapter, cq);
1459 err:
1460 return -1;
1463 static void be_tx_queues_destroy(struct be_adapter *adapter)
1465 struct be_queue_info *q;
1467 q = &adapter->tx_obj.q;
1468 if (q->created)
1469 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1470 be_queue_free(adapter, q);
1472 q = &adapter->tx_obj.cq;
1473 if (q->created)
1474 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1475 be_queue_free(adapter, q);
1477 /* Clear any residual events */
1478 be_eq_clean(adapter, &adapter->tx_eq);
1480 q = &adapter->tx_eq.q;
1481 if (q->created)
1482 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1483 be_queue_free(adapter, q);
1486 static int be_tx_queues_create(struct be_adapter *adapter)
1488 struct be_queue_info *eq, *q, *cq;
1490 adapter->tx_eq.max_eqd = 0;
1491 adapter->tx_eq.min_eqd = 0;
1492 adapter->tx_eq.cur_eqd = 96;
1493 adapter->tx_eq.enable_aic = false;
1494 /* Alloc Tx Event queue */
1495 eq = &adapter->tx_eq.q;
1496 if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
1497 return -1;
1499 /* Ask BE to create Tx Event queue */
1500 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1501 goto tx_eq_free;
1503 adapter->tx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
1506 /* Alloc TX eth compl queue */
1507 cq = &adapter->tx_obj.cq;
1508 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1509 sizeof(struct be_eth_tx_compl)))
1510 goto tx_eq_destroy;
1512 /* Ask BE to create Tx eth compl queue */
1513 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1514 goto tx_cq_free;
1516 /* Alloc TX eth queue */
1517 q = &adapter->tx_obj.q;
1518 if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
1519 goto tx_cq_destroy;
1521 /* Ask BE to create Tx eth queue */
1522 if (be_cmd_txq_create(adapter, q, cq))
1523 goto tx_q_free;
1524 return 0;
1526 tx_q_free:
1527 be_queue_free(adapter, q);
1528 tx_cq_destroy:
1529 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1530 tx_cq_free:
1531 be_queue_free(adapter, cq);
1532 tx_eq_destroy:
1533 be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
1534 tx_eq_free:
1535 be_queue_free(adapter, eq);
1536 return -1;
1539 static void be_rx_queues_destroy(struct be_adapter *adapter)
1541 struct be_queue_info *q;
1542 struct be_rx_obj *rxo;
1543 int i;
1545 for_all_rx_queues(adapter, rxo, i) {
1546 q = &rxo->q;
1547 if (q->created) {
1548 be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
1549 /* After the rxq is invalidated, wait for a grace time
1550 * of 1ms for all dma to end and the flush compl to
1551 * arrive
1553 mdelay(1);
1554 be_rx_q_clean(adapter, rxo);
1556 be_queue_free(adapter, q);
1558 q = &rxo->cq;
1559 if (q->created)
1560 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1561 be_queue_free(adapter, q);
1563 /* Clear any residual events */
1564 q = &rxo->rx_eq.q;
1565 if (q->created) {
1566 be_eq_clean(adapter, &rxo->rx_eq);
1567 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1569 be_queue_free(adapter, q);
1573 static int be_rx_queues_create(struct be_adapter *adapter)
1575 struct be_queue_info *eq, *q, *cq;
1576 struct be_rx_obj *rxo;
1577 int rc, i;
1579 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1580 for_all_rx_queues(adapter, rxo, i) {
1581 rxo->adapter = adapter;
1582 /* Init last_frag_index so that the frag index in the first
1583 * completion will never match */
1584 rxo->last_frag_index = 0xffff;
1585 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1586 rxo->rx_eq.enable_aic = true;
1588 /* EQ */
1589 eq = &rxo->rx_eq.q;
1590 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1591 sizeof(struct be_eq_entry));
1592 if (rc)
1593 goto err;
1595 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1596 if (rc)
1597 goto err;
1599 rxo->rx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
1601 /* CQ */
1602 cq = &rxo->cq;
1603 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1604 sizeof(struct be_eth_rx_compl));
1605 if (rc)
1606 goto err;
1608 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1609 if (rc)
1610 goto err;
1611 /* Rx Q */
1612 q = &rxo->q;
1613 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1614 sizeof(struct be_eth_rx_d));
1615 if (rc)
1616 goto err;
1618 rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
1619 BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
1620 (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
1621 if (rc)
1622 goto err;
1625 if (be_multi_rxq(adapter)) {
1626 u8 rsstable[MAX_RSS_QS];
1628 for_all_rss_queues(adapter, rxo, i)
1629 rsstable[i] = rxo->rss_id;
1631 rc = be_cmd_rss_config(adapter, rsstable,
1632 adapter->num_rx_qs - 1);
1633 if (rc)
1634 goto err;
1637 return 0;
1638 err:
1639 be_rx_queues_destroy(adapter);
1640 return -1;
1643 static bool event_peek(struct be_eq_obj *eq_obj)
1645 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1646 if (!eqe->evt)
1647 return false;
1648 else
1649 return true;
1652 static irqreturn_t be_intx(int irq, void *dev)
1654 struct be_adapter *adapter = dev;
1655 struct be_rx_obj *rxo;
1656 int isr, i, tx = 0 , rx = 0;
1658 if (lancer_chip(adapter)) {
1659 if (event_peek(&adapter->tx_eq))
1660 tx = event_handle(adapter, &adapter->tx_eq);
1661 for_all_rx_queues(adapter, rxo, i) {
1662 if (event_peek(&rxo->rx_eq))
1663 rx |= event_handle(adapter, &rxo->rx_eq);
1666 if (!(tx || rx))
1667 return IRQ_NONE;
1669 } else {
1670 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1671 (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1672 if (!isr)
1673 return IRQ_NONE;
1675 if ((1 << adapter->tx_eq.msix_vec_idx & isr))
1676 event_handle(adapter, &adapter->tx_eq);
1678 for_all_rx_queues(adapter, rxo, i) {
1679 if ((1 << rxo->rx_eq.msix_vec_idx & isr))
1680 event_handle(adapter, &rxo->rx_eq);
1684 return IRQ_HANDLED;
1687 static irqreturn_t be_msix_rx(int irq, void *dev)
1689 struct be_rx_obj *rxo = dev;
1690 struct be_adapter *adapter = rxo->adapter;
1692 event_handle(adapter, &rxo->rx_eq);
1694 return IRQ_HANDLED;
1697 static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1699 struct be_adapter *adapter = dev;
1701 event_handle(adapter, &adapter->tx_eq);
1703 return IRQ_HANDLED;
1706 static inline bool do_gro(struct be_rx_obj *rxo,
1707 struct be_eth_rx_compl *rxcp, u8 err)
1709 int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
1711 if (err)
1712 rxo->stats.rxcp_err++;
1714 return (tcp_frame && !err) ? true : false;
1717 static int be_poll_rx(struct napi_struct *napi, int budget)
1719 struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1720 struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1721 struct be_adapter *adapter = rxo->adapter;
1722 struct be_queue_info *rx_cq = &rxo->cq;
1723 struct be_eth_rx_compl *rxcp;
1724 u32 work_done;
1725 u16 frag_index, num_rcvd;
1726 u8 err;
1728 rxo->stats.rx_polls++;
1729 for (work_done = 0; work_done < budget; work_done++) {
1730 rxcp = be_rx_compl_get(rxo);
1731 if (!rxcp)
1732 break;
1734 err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
1735 frag_index = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx,
1736 rxcp);
1737 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags,
1738 rxcp);
1740 /* Skip out-of-buffer compl(lancer) or flush compl(BE) */
1741 if (likely(frag_index != rxo->last_frag_index &&
1742 num_rcvd != 0)) {
1743 rxo->last_frag_index = frag_index;
1745 if (do_gro(rxo, rxcp, err))
1746 be_rx_compl_process_gro(adapter, rxo, rxcp);
1747 else
1748 be_rx_compl_process(adapter, rxo, rxcp);
1751 be_rx_compl_reset(rxcp);
1754 /* Refill the queue */
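/* NAPI poll runs in softirq context, so the refill must not sleep (GFP_ATOMIC); process-context callers such as be_open() and be_worker() post with GFP_KERNEL instead. */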
1755 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1756 be_post_rx_frags(rxo, GFP_ATOMIC);
1758 /* All consumed */
1759 if (work_done < budget) {
1760 napi_complete(napi);
1761 be_cq_notify(adapter, rx_cq->id, true, work_done);
1762 } else {
1763 /* More to be consumed; continue with interrupts disabled */
1764 be_cq_notify(adapter, rx_cq->id, false, work_done);
1766 return work_done;
1769 /* As TX and MCC share the same EQ check for both TX and MCC completions.
1770 * For TX/MCC we don't honour budget; consume everything
1772 static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1774 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1775 struct be_adapter *adapter =
1776 container_of(tx_eq, struct be_adapter, tx_eq);
1777 struct be_queue_info *txq = &adapter->tx_obj.q;
1778 struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1779 struct be_eth_tx_compl *txcp;
1780 int tx_compl = 0, mcc_compl, status = 0;
1781 u16 end_idx;
1783 while ((txcp = be_tx_compl_get(tx_cq))) {
1784 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1785 wrb_index, txcp);
1786 be_tx_compl_process(adapter, end_idx);
1787 tx_compl++;
1790 mcc_compl = be_process_mcc(adapter, &status);
1792 napi_complete(napi);
1794 if (mcc_compl) {
1795 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1796 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1799 if (tx_compl) {
1800 be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);
1802 /* As Tx wrbs have been freed up, wake up netdev queue if
1803 * it was stopped due to lack of tx wrbs.
1805 if (netif_queue_stopped(adapter->netdev) &&
1806 atomic_read(&txq->used) < txq->len / 2) {
1807 netif_wake_queue(adapter->netdev);
1810 tx_stats(adapter)->be_tx_events++;
1811 tx_stats(adapter)->be_tx_compl += tx_compl;
1814 return 1;
1817 void be_detect_dump_ue(struct be_adapter *adapter)
1819 u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1820 u32 i;
1822 pci_read_config_dword(adapter->pdev,
1823 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1824 pci_read_config_dword(adapter->pdev,
1825 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1826 pci_read_config_dword(adapter->pdev,
1827 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1828 pci_read_config_dword(adapter->pdev,
1829 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1831 ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
1832 ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
1834 if (ue_status_lo || ue_status_hi) {
1835 adapter->ue_detected = true;
1836 adapter->eeh_err = true;
1837 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1840 if (ue_status_lo) {
1841 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1842 if (ue_status_lo & 1)
1843 dev_err(&adapter->pdev->dev,
1844 "UE: %s bit set\n", ue_status_low_desc[i]);
1847 if (ue_status_hi) {
1848 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
1849 if (ue_status_hi & 1)
1850 dev_err(&adapter->pdev->dev,
1851 "UE: %s bit set\n", ue_status_hi_desc[i]);
1857 static void be_worker(struct work_struct *work)
1859 struct be_adapter *adapter =
1860 container_of(work, struct be_adapter, work.work);
1861 struct be_rx_obj *rxo;
1862 int i;
1864 /* when interrupts are not yet enabled, just reap any pending
1865 * mcc completions */
1866 if (!netif_running(adapter->netdev)) {
1867 int mcc_compl, status = 0;
1869 mcc_compl = be_process_mcc(adapter, &status);
1871 if (mcc_compl) {
1872 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1873 be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
1876 if (!adapter->ue_detected && !lancer_chip(adapter))
1877 be_detect_dump_ue(adapter);
1879 goto reschedule;
1882 if (!adapter->stats_cmd_sent)
1883 be_cmd_get_stats(adapter, &adapter->stats_cmd);
1885 be_tx_rate_update(adapter);
1887 for_all_rx_queues(adapter, rxo, i) {
1888 be_rx_rate_update(rxo);
1889 be_rx_eqd_update(adapter, rxo);
1891 if (rxo->rx_post_starved) {
1892 rxo->rx_post_starved = false;
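/* The worker runs in process context, so a blocking GFP_KERNEL allocation is safe for the replenish. */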
1893 be_post_rx_frags(rxo, GFP_KERNEL);
1896 if (!adapter->ue_detected && !lancer_chip(adapter))
1897 be_detect_dump_ue(adapter);
1899 reschedule:
1900 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
1903 static void be_msix_disable(struct be_adapter *adapter)
1905 if (adapter->msix_enabled) {
1906 pci_disable_msix(adapter->pdev);
1907 adapter->msix_enabled = false;
1911 static int be_num_rxqs_get(struct be_adapter *adapter)
1913 if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1914 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1915 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1916 } else {
1917 dev_warn(&adapter->pdev->dev,
1918 "No support for multiple RX queues\n");
1919 return 1;
1923 static void be_msix_enable(struct be_adapter *adapter)
1925 #define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
1926 int i, status;
1928 adapter->num_rx_qs = be_num_rxqs_get(adapter);
1930 for (i = 0; i < (adapter->num_rx_qs + 1); i++)
1931 adapter->msix_entries[i].entry = i;
1933 status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
1934 adapter->num_rx_qs + 1);
1935 if (status == 0) {
1936 goto done;
1937 } else if (status >= BE_MIN_MSIX_VECTORS) {
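/* Fewer vectors than requested: retry with the count the system can grant and shrink the number of RX queues to fit. */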
1938 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
1939 status) == 0) {
1940 adapter->num_rx_qs = status - 1;
1941 dev_warn(&adapter->pdev->dev,
1942 "Could alloc only %d MSIx vectors. "
1943 "Using %d RX Qs\n", status, adapter->num_rx_qs);
1944 goto done;
1947 return;
1948 done:
1949 adapter->msix_enabled = true;
1952 static void be_sriov_enable(struct be_adapter *adapter)
1954 be_check_sriov_fn_type(adapter);
1955 #ifdef CONFIG_PCI_IOV
1956 if (be_physfn(adapter) && num_vfs) {
1957 int status;
1959 status = pci_enable_sriov(adapter->pdev, num_vfs);
1960 adapter->sriov_enabled = status ? false : true;
1962 #endif
1965 static void be_sriov_disable(struct be_adapter *adapter)
1967 #ifdef CONFIG_PCI_IOV
1968 if (adapter->sriov_enabled) {
1969 pci_disable_sriov(adapter->pdev);
1970 adapter->sriov_enabled = false;
1972 #endif
1975 static inline int be_msix_vec_get(struct be_adapter *adapter,
1976 struct be_eq_obj *eq_obj)
1978 return adapter->msix_entries[eq_obj->msix_vec_idx].vector;
1981 static int be_request_irq(struct be_adapter *adapter,
1982 struct be_eq_obj *eq_obj,
1983 void *handler, char *desc, void *context)
1985 struct net_device *netdev = adapter->netdev;
1986 int vec;
1988 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
1989 vec = be_msix_vec_get(adapter, eq_obj);
1990 return request_irq(vec, handler, 0, eq_obj->desc, context);
1993 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
1994 void *context)
1996 int vec = be_msix_vec_get(adapter, eq_obj);
1997 free_irq(vec, context);
2000 static int be_msix_register(struct be_adapter *adapter)
2002 struct be_rx_obj *rxo;
2003 int status, i;
2004 char qname[10];
2006 status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2007 adapter);
2008 if (status)
2009 goto err;
2011 for_all_rx_queues(adapter, rxo, i) {
2012 sprintf(qname, "rxq%d", i);
2013 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2014 qname, rxo);
2015 if (status)
2016 goto err_msix;
2019 return 0;
2021 err_msix:
2022 be_free_irq(adapter, &adapter->tx_eq, adapter);
2024 for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2025 be_free_irq(adapter, &rxo->rx_eq, rxo);
2027 err:
2028 dev_warn(&adapter->pdev->dev,
2029 "MSIX Request IRQ failed - err %d\n", status);
2030 pci_disable_msix(adapter->pdev);
2031 adapter->msix_enabled = false;
2032 return status;
2035 static int be_irq_register(struct be_adapter *adapter)
2037 struct net_device *netdev = adapter->netdev;
2038 int status;
2040 if (adapter->msix_enabled) {
2041 status = be_msix_register(adapter);
2042 if (status == 0)
2043 goto done;
2044 /* INTx is not supported for VF */
2045 if (!be_physfn(adapter))
2046 return status;
2049 /* INTx */
2050 netdev->irq = adapter->pdev->irq;
2051 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2052 adapter);
2053 if (status) {
2054 dev_err(&adapter->pdev->dev,
2055 "INTx request IRQ failed - err %d\n", status);
2056 return status;
2058 done:
2059 adapter->isr_registered = true;
2060 return 0;
2063 static void be_irq_unregister(struct be_adapter *adapter)
2065 struct net_device *netdev = adapter->netdev;
2066 struct be_rx_obj *rxo;
2067 int i;
2069 if (!adapter->isr_registered)
2070 return;
2072 /* INTx */
2073 if (!adapter->msix_enabled) {
2074 free_irq(netdev->irq, adapter);
2075 goto done;
2078 /* MSIx */
2079 be_free_irq(adapter, &adapter->tx_eq, adapter);
2081 for_all_rx_queues(adapter, rxo, i)
2082 be_free_irq(adapter, &rxo->rx_eq, rxo);
2084 done:
2085 adapter->isr_registered = false;
2088 static int be_close(struct net_device *netdev)
2090 struct be_adapter *adapter = netdev_priv(netdev);
2091 struct be_rx_obj *rxo;
2092 struct be_eq_obj *tx_eq = &adapter->tx_eq;
2093 int vec, i;
2095 be_async_mcc_disable(adapter);
2097 netif_stop_queue(netdev);
2098 netif_carrier_off(netdev);
2099 adapter->link_up = false;
2101 if (!lancer_chip(adapter))
2102 be_intr_set(adapter, false);
2104 if (adapter->msix_enabled) {
2105 vec = be_msix_vec_get(adapter, tx_eq);
2106 synchronize_irq(vec);
2108 for_all_rx_queues(adapter, rxo, i) {
2109 vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2110 synchronize_irq(vec);
2112 } else {
2113 synchronize_irq(netdev->irq);
2115 be_irq_unregister(adapter);
2117 for_all_rx_queues(adapter, rxo, i)
2118 napi_disable(&rxo->rx_eq.napi);
2120 napi_disable(&tx_eq->napi);
2122 /* Wait for all pending tx completions to arrive so that
2123 * all tx skbs are freed. */
2125 be_tx_compl_clean(adapter);
2127 return 0;
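/* ndo_open: post rx buffers (GFP_KERNEL is fine here since this runs
 * in process context), enable NAPI, register and unmask interrupts,
 * arm the event/completion queues, then query link state and, on the
 * PF, program VLAN filtering and flow control.
 */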
2130 static int be_open(struct net_device *netdev)
2132 struct be_adapter *adapter = netdev_priv(netdev);
2133 struct be_eq_obj *tx_eq = &adapter->tx_eq;
2134 struct be_rx_obj *rxo;
2135 bool link_up;
2136 int status, i;
2137 u8 mac_speed;
2138 u16 link_speed;
2140 for_all_rx_queues(adapter, rxo, i) {
2141 be_post_rx_frags(rxo, GFP_KERNEL);
2142 napi_enable(&rxo->rx_eq.napi);
2144 napi_enable(&tx_eq->napi);
2146 be_irq_register(adapter);
2148 if (!lancer_chip(adapter))
2149 be_intr_set(adapter, true);
2151 /* The evt queues are created in unarmed state; arm them */
2152 for_all_rx_queues(adapter, rxo, i) {
2153 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2154 be_cq_notify(adapter, rxo->cq.id, true, 0);
2156 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2158 /* Now that interrupts are on we can process async mcc */
2159 be_async_mcc_enable(adapter);
2161 status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
2162 &link_speed);
2163 if (status)
2164 goto err;
2165 be_link_status_update(adapter, link_up);
2167 if (be_physfn(adapter)) {
2168 status = be_vid_config(adapter, false, 0);
2169 if (status)
2170 goto err;
2172 status = be_cmd_set_flow_control(adapter,
2173 adapter->tx_fc, adapter->rx_fc);
2174 if (status)
2175 goto err;
2178 return 0;
2179 err:
2180 be_close(adapter->netdev);
2181 return -EIO;
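/* Enable or disable Wake-on-LAN via a magic-packet firmware command.
 * A coherent DMA buffer carries the command; when enabling, the PCI PM
 * control register is programmed and wake is armed for D3hot/D3cold.
 */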
2184 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2186 struct be_dma_mem cmd;
2187 int status = 0;
2188 u8 mac[ETH_ALEN];
2190 memset(mac, 0, ETH_ALEN);
2192 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2193 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2194 GFP_KERNEL);
2195 if (cmd.va == NULL)
2196 return -1;
2197 memset(cmd.va, 0, cmd.size);
2199 if (enable) {
2200 status = pci_write_config_dword(adapter->pdev,
2201 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2202 if (status) {
2203 dev_err(&adapter->pdev->dev,
2204 "Could not enable Wake-on-lan\n");
2205 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2206 cmd.dma);
2207 return status;
2209 status = be_cmd_enable_magic_wol(adapter,
2210 adapter->netdev->dev_addr, &cmd);
2211 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2212 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2213 } else {
2214 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2215 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2216 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2219 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2220 return status;
2224 /* Generate a seed MAC address from the PF MAC address using jhash.
2225 * MAC addresses for VFs are assigned incrementally, starting from the seed.
2226 * These addresses are programmed into the ASIC by the PF; the VF driver
2227 * queries for its MAC address during probe. */
2229 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2231 u32 vf = 0;
2232 int status = 0;
2233 u8 mac[ETH_ALEN];
2235 be_vf_eth_addr_generate(adapter, mac);
2237 for (vf = 0; vf < num_vfs; vf++) {
2238 status = be_cmd_pmac_add(adapter, mac,
2239 adapter->vf_cfg[vf].vf_if_handle,
2240 &adapter->vf_cfg[vf].vf_pmac_id,
2241 vf + 1);
2242 if (status)
2243 dev_err(&adapter->pdev->dev,
2244 "Mac address add failed for VF %d\n", vf);
2245 else
2246 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2248 mac[5] += 1;
2250 return status;
2253 static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2255 u32 vf;
2257 for (vf = 0; vf < num_vfs; vf++) {
2258 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2259 be_cmd_pmac_del(adapter,
2260 adapter->vf_cfg[vf].vf_if_handle,
2261 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
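/* Create the host interface (plus one interface per VF when SR-IOV is
 * enabled) and then the tx, rx and mcc queues. Errors unwind in
 * reverse order through the labels at the end of the function.
 */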
2265 static int be_setup(struct be_adapter *adapter)
2267 struct net_device *netdev = adapter->netdev;
2268 u32 cap_flags, en_flags, vf = 0;
2269 int status;
2270 u8 mac[ETH_ALEN];
2272 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
2274 if (be_physfn(adapter)) {
2275 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2276 BE_IF_FLAGS_PROMISCUOUS |
2277 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2278 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
2280 if (be_multi_rxq(adapter)) {
2281 cap_flags |= BE_IF_FLAGS_RSS;
2282 en_flags |= BE_IF_FLAGS_RSS;
2286 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2287 netdev->dev_addr, false/* pmac_invalid */,
2288 &adapter->if_handle, &adapter->pmac_id, 0);
2289 if (status != 0)
2290 goto do_none;
2292 if (be_physfn(adapter)) {
2293 if (adapter->sriov_enabled) {
2294 while (vf < num_vfs) {
2295 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2296 BE_IF_FLAGS_BROADCAST;
2297 status = be_cmd_if_create(adapter, cap_flags,
2298 en_flags, mac, true,
2299 &adapter->vf_cfg[vf].vf_if_handle,
2300 NULL, vf+1);
2301 if (status) {
2302 dev_err(&adapter->pdev->dev,
2303 "Interface Create failed for VF %d\n",
2304 vf);
2305 goto if_destroy;
2307 adapter->vf_cfg[vf].vf_pmac_id =
2308 BE_INVALID_PMAC_ID;
2309 vf++;
2312 } else {
2313 status = be_cmd_mac_addr_query(adapter, mac,
2314 MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2315 if (!status) {
2316 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2317 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2321 status = be_tx_queues_create(adapter);
2322 if (status != 0)
2323 goto if_destroy;
2325 status = be_rx_queues_create(adapter);
2326 if (status != 0)
2327 goto tx_qs_destroy;
2329 status = be_mcc_queues_create(adapter);
2330 if (status != 0)
2331 goto rx_qs_destroy;
2333 adapter->link_speed = -1;
2335 return 0;
2337 be_mcc_queues_destroy(adapter);
2338 rx_qs_destroy:
2339 be_rx_queues_destroy(adapter);
2340 tx_qs_destroy:
2341 be_tx_queues_destroy(adapter);
2342 if_destroy:
2343 if (be_physfn(adapter) && adapter->sriov_enabled)
2344 for (vf = 0; vf < num_vfs; vf++)
2345 if (adapter->vf_cfg[vf].vf_if_handle)
2346 be_cmd_if_destroy(adapter,
2347 adapter->vf_cfg[vf].vf_if_handle,
2348 vf + 1);
2349 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2350 do_none:
2351 return status;
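/* Undo be_setup(): remove VF MAC entries, destroy the mcc/rx/tx queues
 * and all interfaces, and tell the firmware we are done issuing
 * commands.
 */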
2354 static int be_clear(struct be_adapter *adapter)
2356 int vf;
2358 if (be_physfn(adapter) && adapter->sriov_enabled)
2359 be_vf_eth_addr_rem(adapter);
2361 be_mcc_queues_destroy(adapter);
2362 be_rx_queues_destroy(adapter);
2363 be_tx_queues_destroy(adapter);
2365 if (be_physfn(adapter) && adapter->sriov_enabled)
2366 for (vf = 0; vf < num_vfs; vf++)
2367 if (adapter->vf_cfg[vf].vf_if_handle)
2368 be_cmd_if_destroy(adapter,
2369 adapter->vf_cfg[vf].vf_if_handle,
2370 vf + 1);
2372 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2374 /* tell fw we're done with firing cmds */
2375 be_cmd_fw_clean(adapter);
2376 return 0;
2380 #define FW_FILE_HDR_SIGN "ServerEngines Corp. "
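/* Decide whether the redboot image needs flashing by comparing the CRC
 * stored in flash with the CRC at the end of the image in the firmware
 * file; redboot is reflashed only when the two differ.
 */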
2381 static bool be_flash_redboot(struct be_adapter *adapter,
2382 const u8 *p, u32 img_start, int image_size,
2383 int hdr_size)
2385 u32 crc_offset;
2386 u8 flashed_crc[4];
2387 int status;
2389 crc_offset = hdr_size + img_start + image_size - 4;
2391 p += crc_offset;
2393 status = be_cmd_get_flash_crc(adapter, flashed_crc,
2394 (image_size - 4));
2395 if (status) {
2396 dev_err(&adapter->pdev->dev,
2397 "could not get crc from flash, not flashing redboot\n");
2398 return false;
2401 /* update redboot only if crc does not match */
2402 if (!memcmp(flashed_crc, p, 4))
2403 return false;
2404 else
2405 return true;
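/* Walk the generation-specific flash component table and write each
 * image from the firmware file in 32KB chunks; intermediate chunks use
 * FLASHROM_OPER_SAVE and the final chunk uses FLASHROM_OPER_FLASH to
 * commit the component.
 */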
2408 static int be_flash_data(struct be_adapter *adapter,
2409 const struct firmware *fw,
2410 struct be_dma_mem *flash_cmd, int num_of_images)
2413 int status = 0, i, filehdr_size = 0;
2414 u32 total_bytes = 0, flash_op;
2415 int num_bytes;
2416 const u8 *p = fw->data;
2417 struct be_cmd_write_flashrom *req = flash_cmd->va;
2418 const struct flash_comp *pflashcomp;
2419 int num_comp;
2421 static const struct flash_comp gen3_flash_types[9] = {
2422 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2423 FLASH_IMAGE_MAX_SIZE_g3},
2424 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2425 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2426 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2427 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2428 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2429 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2430 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2431 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2432 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2433 FLASH_IMAGE_MAX_SIZE_g3},
2434 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2435 FLASH_IMAGE_MAX_SIZE_g3},
2436 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2437 FLASH_IMAGE_MAX_SIZE_g3},
2438 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2439 FLASH_NCSI_IMAGE_MAX_SIZE_g3}
2441 static const struct flash_comp gen2_flash_types[8] = {
2442 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2443 FLASH_IMAGE_MAX_SIZE_g2},
2444 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2445 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2446 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2447 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2448 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2449 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2450 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2451 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2452 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2453 FLASH_IMAGE_MAX_SIZE_g2},
2454 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2455 FLASH_IMAGE_MAX_SIZE_g2},
2456 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2457 FLASH_IMAGE_MAX_SIZE_g2}
2460 if (adapter->generation == BE_GEN3) {
2461 pflashcomp = gen3_flash_types;
2462 filehdr_size = sizeof(struct flash_file_hdr_g3);
2463 num_comp = ARRAY_SIZE(gen3_flash_types);
2464 } else {
2465 pflashcomp = gen2_flash_types;
2466 filehdr_size = sizeof(struct flash_file_hdr_g2);
2467 num_comp = ARRAY_SIZE(gen2_flash_types);
2469 for (i = 0; i < num_comp; i++) {
2470 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2471 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2472 continue;
2473 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2474 (!be_flash_redboot(adapter, fw->data,
2475 pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2476 (num_of_images * sizeof(struct image_hdr)))))
2477 continue;
2478 p = fw->data;
2479 p += filehdr_size + pflashcomp[i].offset
2480 + (num_of_images * sizeof(struct image_hdr));
2481 if (p + pflashcomp[i].size > fw->data + fw->size)
2482 return -1;
2483 total_bytes = pflashcomp[i].size;
2484 while (total_bytes) {
2485 if (total_bytes > 32*1024)
2486 num_bytes = 32*1024;
2487 else
2488 num_bytes = total_bytes;
2489 total_bytes -= num_bytes;
2491 if (!total_bytes)
2492 flash_op = FLASHROM_OPER_FLASH;
2493 else
2494 flash_op = FLASHROM_OPER_SAVE;
2495 memcpy(req->params.data_buf, p, num_bytes);
2496 p += num_bytes;
2497 status = be_cmd_write_flashrom(adapter, flash_cmd,
2498 pflashcomp[i].optype, flash_op, num_bytes);
2499 if (status) {
2500 dev_err(&adapter->pdev->dev,
2501 "cmd to write to flash rom failed.\n");
2502 return -1;
2504 yield();
2507 return 0;
2510 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2512 if (fhdr == NULL)
2513 return 0;
2514 if (fhdr->build[0] == '3')
2515 return BE_GEN3;
2516 else if (fhdr->build[0] == '2')
2517 return BE_GEN2;
2518 else
2519 return 0;
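/* Firmware flash entry point (ethtool -f): request the firmware file,
 * allocate a DMA-able command buffer, verify that the file generation
 * matches the adapter generation and flash the matching images.
 */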
2522 int be_load_fw(struct be_adapter *adapter, u8 *func)
2524 char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
2525 const struct firmware *fw;
2526 struct flash_file_hdr_g2 *fhdr;
2527 struct flash_file_hdr_g3 *fhdr3;
2528 struct image_hdr *img_hdr_ptr = NULL;
2529 struct be_dma_mem flash_cmd;
2530 int status, i = 0, num_imgs = 0;
2531 const u8 *p;
2533 if (!netif_running(adapter->netdev)) {
2534 dev_err(&adapter->pdev->dev,
2535 "Firmware load not allowed (interface is down)\n");
2536 return -EPERM;
2539 strcpy(fw_file, func);
2541 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2542 if (status)
2543 goto fw_exit;
2545 p = fw->data;
2546 fhdr = (struct flash_file_hdr_g2 *) p;
2547 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2549 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2550 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2551 &flash_cmd.dma, GFP_KERNEL);
2552 if (!flash_cmd.va) {
2553 status = -ENOMEM;
2554 dev_err(&adapter->pdev->dev,
2555 "Memory allocation failure while flashing\n");
2556 goto fw_exit;
2559 if ((adapter->generation == BE_GEN3) &&
2560 (get_ufigen_type(fhdr) == BE_GEN3)) {
2561 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2562 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2563 for (i = 0; i < num_imgs; i++) {
2564 img_hdr_ptr = (struct image_hdr *) (fw->data +
2565 (sizeof(struct flash_file_hdr_g3) +
2566 i * sizeof(struct image_hdr)));
2567 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2568 status = be_flash_data(adapter, fw, &flash_cmd,
2569 num_imgs);
2571 } else if ((adapter->generation == BE_GEN2) &&
2572 (get_ufigen_type(fhdr) == BE_GEN2)) {
2573 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2574 } else {
2575 dev_err(&adapter->pdev->dev,
2576 "UFI and Interface are not compatible for flashing\n");
2577 status = -1;
2580 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2581 flash_cmd.dma);
2582 if (status) {
2583 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2584 goto fw_exit;
2587 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2589 fw_exit:
2590 release_firmware(fw);
2591 return status;
2594 static struct net_device_ops be_netdev_ops = {
2595 .ndo_open = be_open,
2596 .ndo_stop = be_close,
2597 .ndo_start_xmit = be_xmit,
2598 .ndo_set_rx_mode = be_set_multicast_list,
2599 .ndo_set_mac_address = be_mac_addr_set,
2600 .ndo_change_mtu = be_change_mtu,
2601 .ndo_validate_addr = eth_validate_addr,
2602 .ndo_vlan_rx_register = be_vlan_register,
2603 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
2604 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
2605 .ndo_set_vf_mac = be_set_vf_mac,
2606 .ndo_set_vf_vlan = be_set_vf_vlan,
2607 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
2608 .ndo_get_vf_config = be_get_vf_config
2611 static void be_netdev_init(struct net_device *netdev)
2613 struct be_adapter *adapter = netdev_priv(netdev);
2614 struct be_rx_obj *rxo;
2615 int i;
2617 netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
2618 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
2619 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2620 NETIF_F_GRO | NETIF_F_TSO6;
2622 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
2623 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2625 if (lancer_chip(adapter))
2626 netdev->vlan_features |= NETIF_F_TSO6;
2628 netdev->flags |= IFF_MULTICAST;
2630 adapter->rx_csum = true;
2632 /* Default settings for Rx and Tx flow control */
2633 adapter->rx_fc = true;
2634 adapter->tx_fc = true;
2636 netif_set_gso_max_size(netdev, 65535);
2638 BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2640 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2642 for_all_rx_queues(adapter, rxo, i)
2643 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2644 BE_NAPI_WEIGHT);
2646 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
2647 BE_NAPI_WEIGHT);
2650 static void be_unmap_pci_bars(struct be_adapter *adapter)
2652 if (adapter->csr)
2653 iounmap(adapter->csr);
2654 if (adapter->db)
2655 iounmap(adapter->db);
2656 if (adapter->pcicfg && be_physfn(adapter))
2657 iounmap(adapter->pcicfg);
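/* ioremap the BARs used by the driver. Lancer needs only the doorbell
 * BAR; other chips map CSR (PF only), doorbell and pcicfg BARs, with
 * BAR numbers depending on generation and on PF vs VF. VFs reach
 * pcicfg at a fixed offset within the doorbell BAR.
 */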
2660 static int be_map_pci_bars(struct be_adapter *adapter)
2662 u8 __iomem *addr;
2663 int pcicfg_reg, db_reg;
2665 if (lancer_chip(adapter)) {
2666 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
2667 pci_resource_len(adapter->pdev, 0));
2668 if (addr == NULL)
2669 return -ENOMEM;
2670 adapter->db = addr;
2671 return 0;
2674 if (be_physfn(adapter)) {
2675 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2676 pci_resource_len(adapter->pdev, 2));
2677 if (addr == NULL)
2678 return -ENOMEM;
2679 adapter->csr = addr;
2682 if (adapter->generation == BE_GEN2) {
2683 pcicfg_reg = 1;
2684 db_reg = 4;
2685 } else {
2686 pcicfg_reg = 0;
2687 if (be_physfn(adapter))
2688 db_reg = 4;
2689 else
2690 db_reg = 0;
2692 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
2693 pci_resource_len(adapter->pdev, db_reg));
2694 if (addr == NULL)
2695 goto pci_map_err;
2696 adapter->db = addr;
2698 if (be_physfn(adapter)) {
2699 addr = ioremap_nocache(
2700 pci_resource_start(adapter->pdev, pcicfg_reg),
2701 pci_resource_len(adapter->pdev, pcicfg_reg));
2702 if (addr == NULL)
2703 goto pci_map_err;
2704 adapter->pcicfg = addr;
2705 } else
2706 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
2708 return 0;
2709 pci_map_err:
2710 be_unmap_pci_bars(adapter);
2711 return -ENOMEM;
2715 static void be_ctrl_cleanup(struct be_adapter *adapter)
2717 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
2719 be_unmap_pci_bars(adapter);
2721 if (mem->va)
2722 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2723 mem->dma);
2725 mem = &adapter->mc_cmd_mem;
2726 if (mem->va)
2727 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2728 mem->dma);
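/* Map PCI BARs and allocate the mailbox and multicast-command DMA
 * buffers with GFP_KERNEL (probe runs in process context); the mailbox
 * virtual and bus addresses are aligned to a 16-byte boundary.
 */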
2731 static int be_ctrl_init(struct be_adapter *adapter)
2733 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
2734 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
2735 struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
2736 int status;
2738 status = be_map_pci_bars(adapter);
2739 if (status)
2740 goto done;
2742 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2743 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
2744 mbox_mem_alloc->size,
2745 &mbox_mem_alloc->dma,
2746 GFP_KERNEL);
2747 if (!mbox_mem_alloc->va) {
2748 status = -ENOMEM;
2749 goto unmap_pci_bars;
2752 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
2753 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
2754 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
2755 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
2757 mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
2758 mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
2759 mc_cmd_mem->size, &mc_cmd_mem->dma,
2760 GFP_KERNEL);
2761 if (mc_cmd_mem->va == NULL) {
2762 status = -ENOMEM;
2763 goto free_mbox;
2765 memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
2767 mutex_init(&adapter->mbox_lock);
2768 spin_lock_init(&adapter->mcc_lock);
2769 spin_lock_init(&adapter->mcc_cq_lock);
2771 init_completion(&adapter->flash_compl);
2772 pci_save_state(adapter->pdev);
2773 return 0;
2775 free_mbox:
2776 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
2777 mbox_mem_alloc->va, mbox_mem_alloc->dma);
2779 unmap_pci_bars:
2780 be_unmap_pci_bars(adapter);
2782 done:
2783 return status;
2786 static void be_stats_cleanup(struct be_adapter *adapter)
2788 struct be_dma_mem *cmd = &adapter->stats_cmd;
2790 if (cmd->va)
2791 dma_free_coherent(&adapter->pdev->dev, cmd->size,
2792 cmd->va, cmd->dma);
2795 static int be_stats_init(struct be_adapter *adapter)
2797 struct be_dma_mem *cmd = &adapter->stats_cmd;
2799 cmd->size = sizeof(struct be_cmd_req_get_stats);
2800 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
2801 GFP_KERNEL);
2802 if (cmd->va == NULL)
2803 return -1;
2804 memset(cmd->va, 0, cmd->size);
2805 return 0;
2808 static void __devexit be_remove(struct pci_dev *pdev)
2810 struct be_adapter *adapter = pci_get_drvdata(pdev);
2812 if (!adapter)
2813 return;
2815 cancel_delayed_work_sync(&adapter->work);
2817 unregister_netdev(adapter->netdev);
2819 be_clear(adapter);
2821 be_stats_cleanup(adapter);
2823 be_ctrl_cleanup(adapter);
2825 be_sriov_disable(adapter);
2827 be_msix_disable(adapter);
2829 pci_set_drvdata(pdev, NULL);
2830 pci_release_regions(pdev);
2831 pci_disable_device(pdev);
2833 free_netdev(adapter->netdev);
2836 static int be_get_config(struct be_adapter *adapter)
2838 int status;
2839 u8 mac[ETH_ALEN];
2841 status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
2842 if (status)
2843 return status;
2845 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2846 &adapter->function_mode, &adapter->function_caps);
2847 if (status)
2848 return status;
2850 memset(mac, 0, ETH_ALEN);
2852 if (be_physfn(adapter)) {
2853 status = be_cmd_mac_addr_query(adapter, mac,
2854 MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
2856 if (status)
2857 return status;
2859 if (!is_valid_ether_addr(mac))
2860 return -EADDRNOTAVAIL;
2862 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2863 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2866 if (adapter->function_mode & 0x400)
2867 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
2868 else
2869 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2871 status = be_cmd_get_cntl_attributes(adapter);
2872 if (status)
2873 return status;
2875 return 0;
2878 static int be_dev_family_check(struct be_adapter *adapter)
2880 struct pci_dev *pdev = adapter->pdev;
2881 u32 sli_intf = 0, if_type;
2883 switch (pdev->device) {
2884 case BE_DEVICE_ID1:
2885 case OC_DEVICE_ID1:
2886 adapter->generation = BE_GEN2;
2887 break;
2888 case BE_DEVICE_ID2:
2889 case OC_DEVICE_ID2:
2890 adapter->generation = BE_GEN3;
2891 break;
2892 case OC_DEVICE_ID3:
2893 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
2894 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
2895 SLI_INTF_IF_TYPE_SHIFT;
2897 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
2898 if_type != 0x02) {
2899 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
2900 return -EINVAL;
2902 if (num_vfs > 0) {
2903 dev_err(&pdev->dev, "VFs not supported\n");
2904 return -EINVAL;
2906 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
2907 SLI_INTF_FAMILY_SHIFT);
2908 adapter->generation = BE_GEN3;
2909 break;
2910 default:
2911 adapter->generation = 0;
2913 return 0;
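/* PCI probe: enable the device, pick a 64-bit DMA mask with a 32-bit
 * fallback, enable SR-IOV, bring the firmware to a ready state (POST
 * on the PF), reset the function, then set up stats, queues and the
 * netdev. Errors unwind through the labels at the end.
 */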
2916 static int __devinit be_probe(struct pci_dev *pdev,
2917 const struct pci_device_id *pdev_id)
2919 int status = 0;
2920 struct be_adapter *adapter;
2921 struct net_device *netdev;
2923 status = pci_enable_device(pdev);
2924 if (status)
2925 goto do_none;
2927 status = pci_request_regions(pdev, DRV_NAME);
2928 if (status)
2929 goto disable_dev;
2930 pci_set_master(pdev);
2932 netdev = alloc_etherdev(sizeof(struct be_adapter));
2933 if (netdev == NULL) {
2934 status = -ENOMEM;
2935 goto rel_reg;
2937 adapter = netdev_priv(netdev);
2938 adapter->pdev = pdev;
2939 pci_set_drvdata(pdev, adapter);
2941 status = be_dev_family_check(adapter);
2942 if (status)
2943 goto free_netdev;
2945 adapter->netdev = netdev;
2946 SET_NETDEV_DEV(netdev, &pdev->dev);
2948 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
2949 if (!status) {
2950 netdev->features |= NETIF_F_HIGHDMA;
2951 } else {
2952 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
2953 if (status) {
2954 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
2955 goto free_netdev;
2959 be_sriov_enable(adapter);
2961 status = be_ctrl_init(adapter);
2962 if (status)
2963 goto free_netdev;
2965 /* sync up with fw's ready state */
2966 if (be_physfn(adapter)) {
2967 status = be_cmd_POST(adapter);
2968 if (status)
2969 goto ctrl_clean;
2972 /* tell fw we're ready to fire cmds */
2973 status = be_cmd_fw_init(adapter);
2974 if (status)
2975 goto ctrl_clean;
2977 status = be_cmd_reset_function(adapter);
2978 if (status)
2979 goto ctrl_clean;
2981 status = be_stats_init(adapter);
2982 if (status)
2983 goto ctrl_clean;
2985 status = be_get_config(adapter);
2986 if (status)
2987 goto stats_clean;
2989 be_msix_enable(adapter);
2991 INIT_DELAYED_WORK(&adapter->work, be_worker);
2993 status = be_setup(adapter);
2994 if (status)
2995 goto msix_disable;
2997 be_netdev_init(netdev);
2998 status = register_netdev(netdev);
2999 if (status != 0)
3000 goto unsetup;
3001 netif_carrier_off(netdev);
3003 if (be_physfn(adapter) && adapter->sriov_enabled) {
3004 status = be_vf_eth_addr_config(adapter);
3005 if (status)
3006 goto unreg_netdev;
3009 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
3010 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3011 return 0;
3013 unreg_netdev:
3014 unregister_netdev(netdev);
3015 unsetup:
3016 be_clear(adapter);
3017 msix_disable:
3018 be_msix_disable(adapter);
3019 stats_clean:
3020 be_stats_cleanup(adapter);
3021 ctrl_clean:
3022 be_ctrl_cleanup(adapter);
3023 free_netdev:
3024 be_sriov_disable(adapter);
3025 free_netdev(netdev);
3026 pci_set_drvdata(pdev, NULL);
3027 rel_reg:
3028 pci_release_regions(pdev);
3029 disable_dev:
3030 pci_disable_device(pdev);
3031 do_none:
3032 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3033 return status;
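/* Power management: suspend arms WoL if configured, closes the netdev,
 * releases queues and MSI-X and powers the device down; resume
 * re-enables the device, re-runs firmware init and be_setup(), and
 * reopens the interface.
 */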
3036 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3038 struct be_adapter *adapter = pci_get_drvdata(pdev);
3039 struct net_device *netdev = adapter->netdev;
3041 cancel_delayed_work_sync(&adapter->work);
3042 if (adapter->wol)
3043 be_setup_wol(adapter, true);
3045 netif_device_detach(netdev);
3046 if (netif_running(netdev)) {
3047 rtnl_lock();
3048 be_close(netdev);
3049 rtnl_unlock();
3051 be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
3052 be_clear(adapter);
3054 be_msix_disable(adapter);
3055 pci_save_state(pdev);
3056 pci_disable_device(pdev);
3057 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3058 return 0;
3061 static int be_resume(struct pci_dev *pdev)
3063 int status = 0;
3064 struct be_adapter *adapter = pci_get_drvdata(pdev);
3065 struct net_device *netdev = adapter->netdev;
3067 netif_device_detach(netdev);
3069 status = pci_enable_device(pdev);
3070 if (status)
3071 return status;
3073 pci_set_power_state(pdev, 0);
3074 pci_restore_state(pdev);
3076 be_msix_enable(adapter);
3077 /* tell fw we're ready to fire cmds */
3078 status = be_cmd_fw_init(adapter);
3079 if (status)
3080 return status;
3082 be_setup(adapter);
3083 if (netif_running(netdev)) {
3084 rtnl_lock();
3085 be_open(netdev);
3086 rtnl_unlock();
3088 netif_device_attach(netdev);
3090 if (adapter->wol)
3091 be_setup_wol(adapter, false);
3093 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3094 return 0;
3098 /* An FLR will stop BE from DMAing any data. */
3100 static void be_shutdown(struct pci_dev *pdev)
3102 struct be_adapter *adapter = pci_get_drvdata(pdev);
3103 struct net_device *netdev = adapter->netdev;
3105 if (netif_running(netdev))
3106 cancel_delayed_work_sync(&adapter->work);
3108 netif_device_detach(netdev);
3110 be_cmd_reset_function(adapter);
3112 if (adapter->wol)
3113 be_setup_wol(adapter, true);
3115 pci_disable_device(pdev);
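/* EEH (PCI error recovery) handlers: on an error, detach and close the
 * netdev and release resources; slot_reset re-enables the device and
 * waits for firmware POST; resume re-initializes and reopens the
 * interface.
 */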
3118 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3119 pci_channel_state_t state)
3121 struct be_adapter *adapter = pci_get_drvdata(pdev);
3122 struct net_device *netdev = adapter->netdev;
3124 dev_err(&adapter->pdev->dev, "EEH error detected\n");
3126 adapter->eeh_err = true;
3128 netif_device_detach(netdev);
3130 if (netif_running(netdev)) {
3131 rtnl_lock();
3132 be_close(netdev);
3133 rtnl_unlock();
3135 be_clear(adapter);
3137 if (state == pci_channel_io_perm_failure)
3138 return PCI_ERS_RESULT_DISCONNECT;
3140 pci_disable_device(pdev);
3142 return PCI_ERS_RESULT_NEED_RESET;
3145 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3147 struct be_adapter *adapter = pci_get_drvdata(pdev);
3148 int status;
3150 dev_info(&adapter->pdev->dev, "EEH reset\n");
3151 adapter->eeh_err = false;
3153 status = pci_enable_device(pdev);
3154 if (status)
3155 return PCI_ERS_RESULT_DISCONNECT;
3157 pci_set_master(pdev);
3158 pci_set_power_state(pdev, 0);
3159 pci_restore_state(pdev);
3161 /* Check if card is ok and fw is ready */
3162 status = be_cmd_POST(adapter);
3163 if (status)
3164 return PCI_ERS_RESULT_DISCONNECT;
3166 return PCI_ERS_RESULT_RECOVERED;
3169 static void be_eeh_resume(struct pci_dev *pdev)
3171 int status = 0;
3172 struct be_adapter *adapter = pci_get_drvdata(pdev);
3173 struct net_device *netdev = adapter->netdev;
3175 dev_info(&adapter->pdev->dev, "EEH resume\n");
3177 pci_save_state(pdev);
3179 /* tell fw we're ready to fire cmds */
3180 status = be_cmd_fw_init(adapter);
3181 if (status)
3182 goto err;
3184 status = be_setup(adapter);
3185 if (status)
3186 goto err;
3188 if (netif_running(netdev)) {
3189 status = be_open(netdev);
3190 if (status)
3191 goto err;
3193 netif_device_attach(netdev);
3194 return;
3195 err:
3196 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
3199 static struct pci_error_handlers be_eeh_handlers = {
3200 .error_detected = be_eeh_err_detected,
3201 .slot_reset = be_eeh_reset,
3202 .resume = be_eeh_resume,
3205 static struct pci_driver be_driver = {
3206 .name = DRV_NAME,
3207 .id_table = be_dev_ids,
3208 .probe = be_probe,
3209 .remove = be_remove,
3210 .suspend = be_suspend,
3211 .resume = be_resume,
3212 .shutdown = be_shutdown,
3213 .err_handler = &be_eeh_handlers
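/* Validate module parameters before registering the PCI driver.
 * Example load (module name assumed to be be2net):
 *   modprobe be2net rx_frag_size=4096 num_vfs=4
 */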
3216 static int __init be_init_module(void)
3218 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3219 rx_frag_size != 2048) {
3220 printk(KERN_WARNING DRV_NAME
3221 " : Module param rx_frag_size must be 2048/4096/8192."
3222 " Using 2048\n");
3223 rx_frag_size = 2048;
3226 if (num_vfs > 32) {
3227 printk(KERN_WARNING DRV_NAME
3228 " : Module param num_vfs must not be greater than 32."
3229 "Using 32\n");
3230 num_vfs = 32;
3233 return pci_register_driver(&be_driver);
3235 module_init(be_init_module);
3237 static void __exit be_exit_module(void)
3239 pci_unregister_driver(&be_driver);
3241 module_exit(be_exit_module);