be2net: fix initialization of vlan_prio_bmap
linux-2.6/kvm.git: drivers/net/benet/be_main.c
1 /*
2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
10 * Contact Information:
11 * linux-drivers@emulex.com
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
18 #include <linux/prefetch.h>
19 #include "be.h"
20 #include "be_cmds.h"
21 #include <asm/div64.h>
23 MODULE_VERSION(DRV_VER);
25 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
26 MODULE_AUTHOR("ServerEngines Corporation");
27 MODULE_LICENSE("GPL");
29 static ushort rx_frag_size = 2048;
30 static unsigned int num_vfs;
31 module_param(rx_frag_size, ushort, S_IRUGO);
32 module_param(num_vfs, uint, S_IRUGO);
33 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
34 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
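/* Example usage, assuming the driver is built as the be2net module:
 *   modprobe be2net rx_frag_size=4096 num_vfs=2
 * Both parameters are S_IRUGO, i.e. read-only once the module loads.
 */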
36 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
37 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
38 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
39 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
40 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
41 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
42 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
43 { 0 }
45 MODULE_DEVICE_TABLE(pci, be_dev_ids);
46 /* UE Status Low CSR */
47 static char *ue_status_low_desc[] = {
48 "CEV",
49 "CTX",
50 "DBUF",
51 "ERX",
52 "Host",
53 "MPU",
54 "NDMA",
55 "PTC ",
56 "RDMA ",
57 "RXF ",
58 "RXIPS ",
59 "RXULP0 ",
60 "RXULP1 ",
61 "RXULP2 ",
62 "TIM ",
63 "TPOST ",
64 "TPRE ",
65 "TXIPS ",
66 "TXULP0 ",
67 "TXULP1 ",
68 "UC ",
69 "WDMA ",
70 "TXULP2 ",
71 "HOST1 ",
72 "P0_OB_LINK ",
73 "P1_OB_LINK ",
74 "HOST_GPIO ",
75 "MBOX ",
76 "AXGMAC0",
77 "AXGMAC1",
78 "JTAG",
79 "MPU_INTPEND"
81 /* UE Status High CSR */
82 static char *ue_status_hi_desc[] = {
83 "LPCMEMHOST",
84 "MGMT_MAC",
85 "PCS0ONLINE",
86 "MPU_IRAM",
87 "PCS1ONLINE",
88 "PCTL0",
89 "PCTL1",
90 "PMEM",
91 "RR",
92 "TXPB",
93 "RXPP",
94 "XAUI",
95 "TXP",
96 "ARM",
97 "IPC",
98 "HOST2",
99 "HOST3",
100 "HOST4",
101 "HOST5",
102 "HOST6",
103 "HOST7",
104 "HOST8",
105 "HOST9",
106 "NETC"
107 "Unknown",
108 "Unknown",
109 "Unknown",
110 "Unknown",
111 "Unknown",
112 "Unknown",
113 "Unknown",
114 "Unknown"
117 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
119 struct be_dma_mem *mem = &q->dma_mem;
120 if (mem->va)
121 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
122 mem->dma);
125 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
126 u16 len, u16 entry_size)
128 struct be_dma_mem *mem = &q->dma_mem;
130 memset(q, 0, sizeof(*q));
131 q->len = len;
132 q->entry_size = entry_size;
133 mem->size = len * entry_size;
134 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
135 GFP_KERNEL);
136 if (!mem->va)
137 return -1;
138 memset(mem->va, 0, mem->size);
139 return 0;
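/* be_intr_set() below is a read-modify-write of the HOSTINTR bit in the
 * PCICFG membar: it bails out if an EEH error was seen or if the bit
 * already matches the requested state, so the write only happens on a
 * real transition.
 */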
142 static void be_intr_set(struct be_adapter *adapter, bool enable)
144 u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
145 u32 reg = ioread32(addr);
146 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
148 if (adapter->eeh_err)
149 return;
151 if (!enabled && enable)
152 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
153 else if (enabled && !enable)
154 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
155 else
156 return;
158 iowrite32(reg, addr);
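/* The rxq/txq doorbell helpers below share one pattern: wmb() makes the
 * posted ring entries visible in memory before the doorbell write tells
 * the adapter how many entries were added.
 */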
161 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
163 u32 val = 0;
164 val |= qid & DB_RQ_RING_ID_MASK;
165 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
167 wmb();
168 iowrite32(val, adapter->db + DB_RQ_OFFSET);
171 static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
173 u32 val = 0;
174 val |= qid & DB_TXULP_RING_ID_MASK;
175 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
177 wmb();
178 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
181 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
182 bool arm, bool clear_int, u16 num_popped)
184 u32 val = 0;
185 val |= qid & DB_EQ_RING_ID_MASK;
186 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
187 DB_EQ_RING_ID_EXT_MASK_SHIFT);
189 if (adapter->eeh_err)
190 return;
192 if (arm)
193 val |= 1 << DB_EQ_REARM_SHIFT;
194 if (clear_int)
195 val |= 1 << DB_EQ_CLR_SHIFT;
196 val |= 1 << DB_EQ_EVNT_SHIFT;
197 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
198 iowrite32(val, adapter->db + DB_EQ_OFFSET);
201 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
203 u32 val = 0;
204 val |= qid & DB_CQ_RING_ID_MASK;
205 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
206 DB_CQ_RING_ID_EXT_MASK_SHIFT);
208 if (adapter->eeh_err)
209 return;
211 if (arm)
212 val |= 1 << DB_CQ_REARM_SHIFT;
213 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
214 iowrite32(val, adapter->db + DB_CQ_OFFSET);
217 static int be_mac_addr_set(struct net_device *netdev, void *p)
219 struct be_adapter *adapter = netdev_priv(netdev);
220 struct sockaddr *addr = p;
221 int status = 0;
223 if (!is_valid_ether_addr(addr->sa_data))
224 return -EADDRNOTAVAIL;
226 /* MAC addr configuration will be done in hardware for VFs
227 * by their corresponding PFs. Just copy to netdev addr here
229 if (!be_physfn(adapter))
230 goto netdev_addr;
232 status = be_cmd_pmac_del(adapter, adapter->if_handle,
233 adapter->pmac_id, 0);
234 if (status)
235 return status;
237 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
238 adapter->if_handle, &adapter->pmac_id, 0);
239 netdev_addr:
240 if (!status)
241 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
243 return status;
246 static void populate_be2_stats(struct be_adapter *adapter)
249 struct be_drv_stats *drvs = &adapter->drv_stats;
250 struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
251 struct be_port_rxf_stats_v0 *port_stats =
252 be_port_rxf_stats_from_cmd(adapter);
253 struct be_rxf_stats_v0 *rxf_stats =
254 be_rxf_stats_from_cmd(adapter);
256 drvs->rx_pause_frames = port_stats->rx_pause_frames;
257 drvs->rx_crc_errors = port_stats->rx_crc_errors;
258 drvs->rx_control_frames = port_stats->rx_control_frames;
259 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
260 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
261 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
262 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
263 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
264 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
265 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
266 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
267 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
268 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
269 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
270 drvs->rx_input_fifo_overflow_drop =
271 port_stats->rx_input_fifo_overflow;
272 drvs->rx_dropped_header_too_small =
273 port_stats->rx_dropped_header_too_small;
274 drvs->rx_address_match_errors =
275 port_stats->rx_address_match_errors;
276 drvs->rx_alignment_symbol_errors =
277 port_stats->rx_alignment_symbol_errors;
279 drvs->tx_pauseframes = port_stats->tx_pauseframes;
280 drvs->tx_controlframes = port_stats->tx_controlframes;
282 if (adapter->port_num)
283 drvs->jabber_events =
284 rxf_stats->port1_jabber_events;
285 else
286 drvs->jabber_events =
287 rxf_stats->port0_jabber_events;
288 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
289 drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
290 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
291 drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
292 drvs->forwarded_packets = rxf_stats->forwarded_packets;
293 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
294 drvs->rx_drops_no_tpre_descr =
295 rxf_stats->rx_drops_no_tpre_descr;
296 drvs->rx_drops_too_many_frags =
297 rxf_stats->rx_drops_too_many_frags;
298 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
301 static void populate_be3_stats(struct be_adapter *adapter)
303 struct be_drv_stats *drvs = &adapter->drv_stats;
304 struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
306 struct be_rxf_stats_v1 *rxf_stats =
307 be_rxf_stats_from_cmd(adapter);
308 struct be_port_rxf_stats_v1 *port_stats =
309 be_port_rxf_stats_from_cmd(adapter);
311 drvs->rx_priority_pause_frames = 0;
312 drvs->pmem_fifo_overflow_drop = 0;
313 drvs->rx_pause_frames = port_stats->rx_pause_frames;
314 drvs->rx_crc_errors = port_stats->rx_crc_errors;
315 drvs->rx_control_frames = port_stats->rx_control_frames;
316 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
317 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
318 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
319 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
320 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
321 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
322 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
323 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
324 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
325 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
326 drvs->rx_dropped_header_too_small =
327 port_stats->rx_dropped_header_too_small;
328 drvs->rx_input_fifo_overflow_drop =
329 port_stats->rx_input_fifo_overflow_drop;
330 drvs->rx_address_match_errors =
331 port_stats->rx_address_match_errors;
332 drvs->rx_alignment_symbol_errors =
333 port_stats->rx_alignment_symbol_errors;
334 drvs->rxpp_fifo_overflow_drop =
335 port_stats->rxpp_fifo_overflow_drop;
336 drvs->tx_pauseframes = port_stats->tx_pauseframes;
337 drvs->tx_controlframes = port_stats->tx_controlframes;
338 drvs->jabber_events = port_stats->jabber_events;
339 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
340 drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
341 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
342 drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
343 drvs->forwarded_packets = rxf_stats->forwarded_packets;
344 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
345 drvs->rx_drops_no_tpre_descr =
346 rxf_stats->rx_drops_no_tpre_descr;
347 drvs->rx_drops_too_many_frags =
348 rxf_stats->rx_drops_too_many_frags;
349 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
352 static void populate_lancer_stats(struct be_adapter *adapter)
355 struct be_drv_stats *drvs = &adapter->drv_stats;
356 struct lancer_cmd_pport_stats *pport_stats =
357 pport_stats_from_cmd(adapter);
358 drvs->rx_priority_pause_frames = 0;
359 drvs->pmem_fifo_overflow_drop = 0;
360 drvs->rx_pause_frames =
361 make_64bit_val(pport_stats->rx_pause_frames_hi,
362 pport_stats->rx_pause_frames_lo);
363 drvs->rx_crc_errors = make_64bit_val(pport_stats->rx_crc_errors_hi,
364 pport_stats->rx_crc_errors_lo);
365 drvs->rx_control_frames =
366 make_64bit_val(pport_stats->rx_control_frames_hi,
367 pport_stats->rx_control_frames_lo);
368 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
369 drvs->rx_frame_too_long =
370 make_64bit_val(pport_stats->rx_frames_too_long_hi,
371 pport_stats->rx_frames_too_long_lo);
372 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
373 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
374 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
375 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
376 drvs->rx_dropped_tcp_length =
377 pport_stats->rx_dropped_invalid_tcp_length;
378 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
379 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
380 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
381 drvs->rx_dropped_header_too_small =
382 pport_stats->rx_dropped_header_too_small;
383 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
384 drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
385 drvs->rx_alignment_symbol_errors =
386 make_64bit_val(pport_stats->rx_symbol_errors_hi,
387 pport_stats->rx_symbol_errors_lo);
388 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
389 drvs->tx_pauseframes = make_64bit_val(pport_stats->tx_pause_frames_hi,
390 pport_stats->tx_pause_frames_lo);
391 drvs->tx_controlframes =
392 make_64bit_val(pport_stats->tx_control_frames_hi,
393 pport_stats->tx_control_frames_lo);
394 drvs->jabber_events = pport_stats->rx_jabbers;
395 drvs->rx_drops_no_pbuf = 0;
396 drvs->rx_drops_no_txpb = 0;
397 drvs->rx_drops_no_erx_descr = 0;
398 drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
399 drvs->forwarded_packets = make_64bit_val(pport_stats->num_forwards_hi,
400 pport_stats->num_forwards_lo);
401 drvs->rx_drops_mtu = make_64bit_val(pport_stats->rx_drops_mtu_hi,
402 pport_stats->rx_drops_mtu_lo);
403 drvs->rx_drops_no_tpre_descr = 0;
404 drvs->rx_drops_too_many_frags =
405 make_64bit_val(pport_stats->rx_drops_too_many_frags_hi,
406 pport_stats->rx_drops_too_many_frags_lo);
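/* Stats layout differs per ASIC: Lancer exposes pport stats, other GEN3
 * (BE3) chips use the v1 rxf/port format, and GEN2 (BE2) uses v0;
 * be_parse_stats() picks the parser accordingly.
 */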
409 void be_parse_stats(struct be_adapter *adapter)
411 if (adapter->generation == BE_GEN3) {
412 if (lancer_chip(adapter))
413 populate_lancer_stats(adapter);
414 else
415 populate_be3_stats(adapter);
416 } else {
417 populate_be2_stats(adapter);
421 void netdev_stats_update(struct be_adapter *adapter)
423 struct be_drv_stats *drvs = &adapter->drv_stats;
424 struct net_device_stats *dev_stats = &adapter->netdev->stats;
425 struct be_rx_obj *rxo;
426 struct be_tx_obj *txo;
427 unsigned long pkts = 0, bytes = 0, mcast = 0, drops = 0;
428 int i;
430 for_all_rx_queues(adapter, rxo, i) {
431 pkts += rx_stats(rxo)->rx_pkts;
432 bytes += rx_stats(rxo)->rx_bytes;
433 mcast += rx_stats(rxo)->rx_mcast_pkts;
434 /* no space in linux buffers: best possible approximation */
435 if (adapter->generation == BE_GEN3) {
436 if (!(lancer_chip(adapter))) {
437 struct be_erx_stats_v1 *erx =
438 be_erx_stats_from_cmd(adapter);
439 drops += erx->rx_drops_no_fragments[rxo->q.id];
441 } else {
442 struct be_erx_stats_v0 *erx =
443 be_erx_stats_from_cmd(adapter);
444 drops += erx->rx_drops_no_fragments[rxo->q.id];
447 dev_stats->rx_packets = pkts;
448 dev_stats->rx_bytes = bytes;
449 dev_stats->multicast = mcast;
450 dev_stats->rx_dropped = drops;
452 pkts = bytes = 0;
453 for_all_tx_queues(adapter, txo, i) {
454 pkts += tx_stats(txo)->be_tx_pkts;
455 bytes += tx_stats(txo)->be_tx_bytes;
457 dev_stats->tx_packets = pkts;
458 dev_stats->tx_bytes = bytes;
460 /* bad pkts received */
461 dev_stats->rx_errors = drvs->rx_crc_errors +
462 drvs->rx_alignment_symbol_errors +
463 drvs->rx_in_range_errors +
464 drvs->rx_out_range_errors +
465 drvs->rx_frame_too_long +
466 drvs->rx_dropped_too_small +
467 drvs->rx_dropped_too_short +
468 drvs->rx_dropped_header_too_small +
469 drvs->rx_dropped_tcp_length +
470 drvs->rx_dropped_runt +
471 drvs->rx_tcp_checksum_errs +
472 drvs->rx_ip_checksum_errs +
473 drvs->rx_udp_checksum_errs;
475 /* detailed rx errors */
476 dev_stats->rx_length_errors = drvs->rx_in_range_errors +
477 drvs->rx_out_range_errors +
478 drvs->rx_frame_too_long;
480 dev_stats->rx_crc_errors = drvs->rx_crc_errors;
482 /* frame alignment errors */
483 dev_stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
485 /* receiver fifo overrun */
486 /* drops_no_pbuf is not per i/f, it's per BE card */
487 dev_stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
488 drvs->rx_input_fifo_overflow_drop +
489 drvs->rx_drops_no_pbuf;
492 void be_link_status_update(struct be_adapter *adapter, bool link_up)
494 struct net_device *netdev = adapter->netdev;
496 /* If link came up or went down */
497 if (adapter->link_up != link_up) {
498 adapter->link_speed = -1;
499 if (link_up) {
500 netif_carrier_on(netdev);
501 printk(KERN_INFO "%s: Link up\n", netdev->name);
502 } else {
503 netif_carrier_off(netdev);
504 printk(KERN_INFO "%s: Link down\n", netdev->name);
506 adapter->link_up = link_up;
510 /* Update the EQ delay in BE based on the RX frags consumed / sec */
511 static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
513 struct be_eq_obj *rx_eq = &rxo->rx_eq;
514 struct be_rx_stats *stats = &rxo->stats;
515 ulong now = jiffies;
516 u32 eqd;
518 if (!rx_eq->enable_aic)
519 return;
521 /* Wrapped around */
522 if (time_before(now, stats->rx_fps_jiffies)) {
523 stats->rx_fps_jiffies = now;
524 return;
527 /* Update once a second */
528 if ((now - stats->rx_fps_jiffies) < HZ)
529 return;
531 stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
532 ((now - stats->rx_fps_jiffies) / HZ);
534 stats->rx_fps_jiffies = now;
535 stats->prev_rx_frags = stats->rx_frags;
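/* Map frags/sec to an EQ delay; e.g. ~440K frags/s gives
 * (440000 / 110000) << 3 = 32, which is then clamped to
 * [min_eqd, max_eqd], and anything under 10 turns coalescing off.
 */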
536 eqd = stats->rx_fps / 110000;
537 eqd = eqd << 3;
538 if (eqd > rx_eq->max_eqd)
539 eqd = rx_eq->max_eqd;
540 if (eqd < rx_eq->min_eqd)
541 eqd = rx_eq->min_eqd;
542 if (eqd < 10)
543 eqd = 0;
544 if (eqd != rx_eq->cur_eqd)
545 be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
547 rx_eq->cur_eqd = eqd;
550 static u32 be_calc_rate(u64 bytes, unsigned long ticks)
552 u64 rate = bytes;
554 do_div(rate, ticks / HZ);
555 rate <<= 3; /* bytes/sec -> bits/sec */
556 do_div(rate, 1000000ul); /* MB/Sec */
558 return rate;
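/* Worked example: 250000000 bytes over 2*HZ ticks -> 125000000 B/s,
 * << 3 -> 1000000000 b/s, / 1000000 -> 1000 Mbps. Callers sample over
 * windows of at least HZ ticks, so ticks/HZ cannot be zero here.
 */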
561 static void be_tx_rate_update(struct be_tx_obj *txo)
563 struct be_tx_stats *stats = tx_stats(txo);
564 ulong now = jiffies;
566 /* Wrapped around? */
567 if (time_before(now, stats->be_tx_jiffies)) {
568 stats->be_tx_jiffies = now;
569 return;
572 /* Update tx rate once in two seconds */
573 if ((now - stats->be_tx_jiffies) > 2 * HZ) {
574 stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
575 - stats->be_tx_bytes_prev,
576 now - stats->be_tx_jiffies);
577 stats->be_tx_jiffies = now;
578 stats->be_tx_bytes_prev = stats->be_tx_bytes;
582 static void be_tx_stats_update(struct be_tx_obj *txo,
583 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
585 struct be_tx_stats *stats = tx_stats(txo);
587 stats->be_tx_reqs++;
588 stats->be_tx_wrbs += wrb_cnt;
589 stats->be_tx_bytes += copied;
590 stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
591 if (stopped)
592 stats->be_tx_stops++;
595 /* Determine number of WRB entries needed to xmit data in an skb */
596 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
597 bool *dummy)
599 int cnt = (skb->len > skb->data_len);
601 cnt += skb_shinfo(skb)->nr_frags;
603 /* to account for hdr wrb */
604 cnt++;
605 if (lancer_chip(adapter) || !(cnt & 1)) {
606 *dummy = false;
607 } else {
608 /* add a dummy to make it an even num */
609 cnt++;
610 *dummy = true;
612 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
613 return cnt;
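/* Example: a linear skb with two page frags needs 1 + 2 + 1 (hdr) = 4
 * WRBs -- already even, so no dummy. With one frag the count is 3 and a
 * dummy WRB is appended on non-Lancer chips to keep the per-packet WRB
 * count even.
 */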
616 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
618 wrb->frag_pa_hi = upper_32_bits(addr);
619 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
620 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
623 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
624 struct sk_buff *skb, u32 wrb_cnt, u32 len)
626 u8 vlan_prio = 0;
627 u16 vlan_tag = 0;
629 memset(hdr, 0, sizeof(*hdr));
631 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
633 if (skb_is_gso(skb)) {
634 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
635 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
636 hdr, skb_shinfo(skb)->gso_size);
637 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
638 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
639 if (lancer_chip(adapter) && adapter->sli_family ==
640 LANCER_A0_SLI_FAMILY) {
641 AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
642 if (is_tcp_pkt(skb))
643 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
644 tcpcs, hdr, 1);
645 else if (is_udp_pkt(skb))
646 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
647 udpcs, hdr, 1);
649 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
650 if (is_tcp_pkt(skb))
651 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
652 else if (is_udp_pkt(skb))
653 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
656 if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
657 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
658 vlan_tag = vlan_tx_tag_get(skb);
659 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
660 /* If vlan priority provided by OS is NOT in available bmap */
661 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
662 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
663 adapter->recommended_prio;
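/* The priority sits in bits 15:13 of the tag (VLAN_PRIO_MASK). Example:
 * with vlan_prio_bmap 0x3 only prios 0-1 are usable, so a prio-5 tag
 * from the stack is rewritten to carry adapter->recommended_prio.
 */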
664 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
667 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
668 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
669 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
670 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
673 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
674 bool unmap_single)
676 dma_addr_t dma;
678 be_dws_le_to_cpu(wrb, sizeof(*wrb));
680 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
681 if (wrb->frag_len) {
682 if (unmap_single)
683 dma_unmap_single(dev, dma, wrb->frag_len,
684 DMA_TO_DEVICE);
685 else
686 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
690 static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
691 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
693 dma_addr_t busaddr;
694 int i, copied = 0;
695 struct device *dev = &adapter->pdev->dev;
696 struct sk_buff *first_skb = skb;
697 struct be_eth_wrb *wrb;
698 struct be_eth_hdr_wrb *hdr;
699 bool map_single = false;
700 u16 map_head;
702 hdr = queue_head_node(txq);
703 queue_head_inc(txq);
704 map_head = txq->head;
706 if (skb->len > skb->data_len) {
707 int len = skb_headlen(skb);
708 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
709 if (dma_mapping_error(dev, busaddr))
710 goto dma_err;
711 map_single = true;
712 wrb = queue_head_node(txq);
713 wrb_fill(wrb, busaddr, len);
714 be_dws_cpu_to_le(wrb, sizeof(*wrb));
715 queue_head_inc(txq);
716 copied += len;
719 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
720 struct skb_frag_struct *frag =
721 &skb_shinfo(skb)->frags[i];
722 busaddr = dma_map_page(dev, frag->page, frag->page_offset,
723 frag->size, DMA_TO_DEVICE);
724 if (dma_mapping_error(dev, busaddr))
725 goto dma_err;
726 wrb = queue_head_node(txq);
727 wrb_fill(wrb, busaddr, frag->size);
728 be_dws_cpu_to_le(wrb, sizeof(*wrb));
729 queue_head_inc(txq);
730 copied += frag->size;
733 if (dummy_wrb) {
734 wrb = queue_head_node(txq);
735 wrb_fill(wrb, 0, 0);
736 be_dws_cpu_to_le(wrb, sizeof(*wrb));
737 queue_head_inc(txq);
740 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
741 be_dws_cpu_to_le(hdr, sizeof(*hdr));
743 return copied;
744 dma_err:
745 txq->head = map_head;
746 while (copied) {
747 wrb = queue_head_node(txq);
748 unmap_tx_frag(dev, wrb, map_single);
749 map_single = false;
750 copied -= wrb->frag_len;
751 queue_head_inc(txq);
753 return 0;
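/* The dma_err path above rewinds the queue head to map_head and walks
 * forward unmapping every WRB it had filled; only the first one was
 * dma_map_single()'d (the linear part), the rest are page frags.
 */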
756 static netdev_tx_t be_xmit(struct sk_buff *skb,
757 struct net_device *netdev)
759 struct be_adapter *adapter = netdev_priv(netdev);
760 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
761 struct be_queue_info *txq = &txo->q;
762 u32 wrb_cnt = 0, copied = 0;
763 u32 start = txq->head;
764 bool dummy_wrb, stopped = false;
766 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
768 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
769 if (copied) {
770 /* record the sent skb in the sent_skb table */
771 BUG_ON(txo->sent_skb_list[start]);
772 txo->sent_skb_list[start] = skb;
774 /* Ensure txq has space for the next skb; else stop the queue
775 * *BEFORE* ringing the tx doorbell, so that we serialize the
776 * tx compls of the current transmit which'll wake up the queue
778 atomic_add(wrb_cnt, &txq->used);
779 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
780 txq->len) {
781 netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
782 stopped = true;
785 be_txq_notify(adapter, txq->id, wrb_cnt);
787 be_tx_stats_update(txo, wrb_cnt, copied,
788 skb_shinfo(skb)->gso_segs, stopped);
789 } else {
790 txq->head = start;
791 dev_kfree_skb_any(skb);
793 return NETDEV_TX_OK;
796 static int be_change_mtu(struct net_device *netdev, int new_mtu)
798 struct be_adapter *adapter = netdev_priv(netdev);
799 if (new_mtu < BE_MIN_MTU ||
800 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
801 (ETH_HLEN + ETH_FCS_LEN))) {
802 dev_info(&adapter->pdev->dev,
803 "MTU must be between %d and %d bytes\n",
804 BE_MIN_MTU,
805 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
806 return -EINVAL;
808 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
809 netdev->mtu, new_mtu);
810 netdev->mtu = new_mtu;
811 return 0;
815 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
816 * If the user configures more, place BE in vlan promiscuous mode.
818 static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
820 u16 vtag[BE_NUM_VLANS_SUPPORTED];
821 u16 ntags = 0, i;
822 int status = 0;
823 u32 if_handle;
825 if (vf) {
826 if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
827 vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
828 status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
831 if (adapter->vlans_added <= adapter->max_vlans) {
832 /* Construct VLAN Table to give to HW */
833 for (i = 0; i < VLAN_N_VID; i++) {
834 if (adapter->vlan_tag[i]) {
835 vtag[ntags] = cpu_to_le16(i);
836 ntags++;
839 status = be_cmd_vlan_config(adapter, adapter->if_handle,
840 vtag, ntags, 1, 0);
841 } else {
842 status = be_cmd_vlan_config(adapter, adapter->if_handle,
843 NULL, 0, 1, 1);
846 return status;
849 static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
851 struct be_adapter *adapter = netdev_priv(netdev);
853 adapter->vlan_grp = grp;
856 static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
858 struct be_adapter *adapter = netdev_priv(netdev);
860 adapter->vlans_added++;
861 if (!be_physfn(adapter))
862 return;
864 adapter->vlan_tag[vid] = 1;
865 if (adapter->vlans_added <= (adapter->max_vlans + 1))
866 be_vid_config(adapter, false, 0);
869 static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
871 struct be_adapter *adapter = netdev_priv(netdev);
873 adapter->vlans_added--;
874 vlan_group_set_device(adapter->vlan_grp, vid, NULL);
876 if (!be_physfn(adapter))
877 return;
879 adapter->vlan_tag[vid] = 0;
880 if (adapter->vlans_added <= adapter->max_vlans)
881 be_vid_config(adapter, false, 0);
884 static void be_set_multicast_list(struct net_device *netdev)
886 struct be_adapter *adapter = netdev_priv(netdev);
888 if (netdev->flags & IFF_PROMISC) {
889 be_cmd_promiscuous_config(adapter, true);
890 adapter->promiscuous = true;
891 goto done;
894 /* BE was previously in promiscuous mode; disable it */
895 if (adapter->promiscuous) {
896 adapter->promiscuous = false;
897 be_cmd_promiscuous_config(adapter, false);
900 /* Enable multicast promisc if num configured exceeds what we support */
901 if (netdev->flags & IFF_ALLMULTI ||
902 netdev_mc_count(netdev) > BE_MAX_MC) {
903 be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
904 &adapter->mc_cmd_mem);
905 goto done;
908 be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
909 &adapter->mc_cmd_mem);
910 done:
911 return;
914 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
916 struct be_adapter *adapter = netdev_priv(netdev);
917 int status;
919 if (!adapter->sriov_enabled)
920 return -EPERM;
922 if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
923 return -EINVAL;
925 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
926 status = be_cmd_pmac_del(adapter,
927 adapter->vf_cfg[vf].vf_if_handle,
928 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
930 status = be_cmd_pmac_add(adapter, mac,
931 adapter->vf_cfg[vf].vf_if_handle,
932 &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
934 if (status)
935 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
936 mac, vf);
937 else
938 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
940 return status;
943 static int be_get_vf_config(struct net_device *netdev, int vf,
944 struct ifla_vf_info *vi)
946 struct be_adapter *adapter = netdev_priv(netdev);
948 if (!adapter->sriov_enabled)
949 return -EPERM;
951 if (vf >= num_vfs)
952 return -EINVAL;
954 vi->vf = vf;
955 vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
956 vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
957 vi->qos = 0;
958 memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
960 return 0;
963 static int be_set_vf_vlan(struct net_device *netdev,
964 int vf, u16 vlan, u8 qos)
966 struct be_adapter *adapter = netdev_priv(netdev);
967 int status = 0;
969 if (!adapter->sriov_enabled)
970 return -EPERM;
972 if ((vf >= num_vfs) || (vlan > 4095))
973 return -EINVAL;
975 if (vlan) {
976 adapter->vf_cfg[vf].vf_vlan_tag = vlan;
977 adapter->vlans_added++;
978 } else {
979 adapter->vf_cfg[vf].vf_vlan_tag = 0;
980 adapter->vlans_added--;
983 status = be_vid_config(adapter, true, vf);
985 if (status)
986 dev_info(&adapter->pdev->dev,
987 "VLAN %d config on VF %d failed\n", vlan, vf);
988 return status;
991 static int be_set_vf_tx_rate(struct net_device *netdev,
992 int vf, int rate)
994 struct be_adapter *adapter = netdev_priv(netdev);
995 int status = 0;
997 if (!adapter->sriov_enabled)
998 return -EPERM;
1000 if ((vf >= num_vfs) || (rate < 0))
1001 return -EINVAL;
1003 if (rate > 10000)
1004 rate = 10000;
1006 adapter->vf_cfg[vf].vf_tx_rate = rate;
1007 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
1009 if (status)
1010 dev_info(&adapter->pdev->dev,
1011 "tx rate %d on VF %d failed\n", rate, vf);
1012 return status;
1015 static void be_rx_rate_update(struct be_rx_obj *rxo)
1017 struct be_rx_stats *stats = &rxo->stats;
1018 ulong now = jiffies;
1020 /* Wrapped around */
1021 if (time_before(now, stats->rx_jiffies)) {
1022 stats->rx_jiffies = now;
1023 return;
1026 /* Update the rate once in two seconds */
1027 if ((now - stats->rx_jiffies) < 2 * HZ)
1028 return;
1030 stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
1031 now - stats->rx_jiffies);
1032 stats->rx_jiffies = now;
1033 stats->rx_bytes_prev = stats->rx_bytes;
1036 static void be_rx_stats_update(struct be_rx_obj *rxo,
1037 struct be_rx_compl_info *rxcp)
1039 struct be_rx_stats *stats = &rxo->stats;
1041 stats->rx_compl++;
1042 stats->rx_frags += rxcp->num_rcvd;
1043 stats->rx_bytes += rxcp->pkt_size;
1044 stats->rx_pkts++;
1045 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
1046 stats->rx_mcast_pkts++;
1047 if (rxcp->err)
1048 stats->rxcp_err++;
1051 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
1053 /* L4 checksum is not reliable for non TCP/UDP packets.
1054 * Also ignore ipcksm for ipv6 pkts */
1055 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1056 (rxcp->ip_csum || rxcp->ipv6);
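/* So a TCP/IPv4 frame must pass both l4_csum and ip_csum, while a
 * TCP/IPv6 frame needs only l4_csum -- there is no IP header checksum
 * to validate in v6.
 */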
1059 static struct be_rx_page_info *
1060 get_rx_page_info(struct be_adapter *adapter,
1061 struct be_rx_obj *rxo,
1062 u16 frag_idx)
1064 struct be_rx_page_info *rx_page_info;
1065 struct be_queue_info *rxq = &rxo->q;
1067 rx_page_info = &rxo->page_info_tbl[frag_idx];
1068 BUG_ON(!rx_page_info->page);
1070 if (rx_page_info->last_page_user) {
1071 dma_unmap_page(&adapter->pdev->dev,
1072 dma_unmap_addr(rx_page_info, bus),
1073 adapter->big_page_size, DMA_FROM_DEVICE);
1074 rx_page_info->last_page_user = false;
1077 atomic_dec(&rxq->used);
1078 return rx_page_info;
1081 /* Throw away the data in the Rx completion */
1082 static void be_rx_compl_discard(struct be_adapter *adapter,
1083 struct be_rx_obj *rxo,
1084 struct be_rx_compl_info *rxcp)
1086 struct be_queue_info *rxq = &rxo->q;
1087 struct be_rx_page_info *page_info;
1088 u16 i, num_rcvd = rxcp->num_rcvd;
1090 for (i = 0; i < num_rcvd; i++) {
1091 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1092 put_page(page_info->page);
1093 memset(page_info, 0, sizeof(*page_info));
1094 index_inc(&rxcp->rxq_idx, rxq->len);
1099 * skb_fill_rx_data forms a complete skb for an ether frame
1100 * indicated by rxcp.
1102 static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
1103 struct sk_buff *skb, struct be_rx_compl_info *rxcp)
1105 struct be_queue_info *rxq = &rxo->q;
1106 struct be_rx_page_info *page_info;
1107 u16 i, j;
1108 u16 hdr_len, curr_frag_len, remaining;
1109 u8 *start;
1111 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1112 start = page_address(page_info->page) + page_info->page_offset;
1113 prefetch(start);
1115 /* Copy data in the first descriptor of this completion */
1116 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1118 /* Copy the header portion into skb_data */
1119 hdr_len = min(BE_HDR_LEN, curr_frag_len);
1120 memcpy(skb->data, start, hdr_len);
1121 skb->len = curr_frag_len;
1122 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1123 /* Complete packet has now been moved to data */
1124 put_page(page_info->page);
1125 skb->data_len = 0;
1126 skb->tail += curr_frag_len;
1127 } else {
1128 skb_shinfo(skb)->nr_frags = 1;
1129 skb_shinfo(skb)->frags[0].page = page_info->page;
1130 skb_shinfo(skb)->frags[0].page_offset =
1131 page_info->page_offset + hdr_len;
1132 skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
1133 skb->data_len = curr_frag_len - hdr_len;
1134 skb->tail += hdr_len;
1136 page_info->page = NULL;
1138 if (rxcp->pkt_size <= rx_frag_size) {
1139 BUG_ON(rxcp->num_rcvd != 1);
1140 return;
1143 /* More frags present for this completion */
1144 index_inc(&rxcp->rxq_idx, rxq->len);
1145 remaining = rxcp->pkt_size - curr_frag_len;
1146 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1147 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1148 curr_frag_len = min(remaining, rx_frag_size);
1150 /* Coalesce all frags from the same physical page in one slot */
1151 if (page_info->page_offset == 0) {
1152 /* Fresh page */
1153 j++;
1154 skb_shinfo(skb)->frags[j].page = page_info->page;
1155 skb_shinfo(skb)->frags[j].page_offset =
1156 page_info->page_offset;
1157 skb_shinfo(skb)->frags[j].size = 0;
1158 skb_shinfo(skb)->nr_frags++;
1159 } else {
1160 put_page(page_info->page);
1163 skb_shinfo(skb)->frags[j].size += curr_frag_len;
1164 skb->len += curr_frag_len;
1165 skb->data_len += curr_frag_len;
1167 remaining -= curr_frag_len;
1168 index_inc(&rxcp->rxq_idx, rxq->len);
1169 page_info->page = NULL;
1171 BUG_ON(j > MAX_SKB_FRAGS);
1174 /* Process the RX completion indicated by rxcp when GRO is disabled */
1175 static void be_rx_compl_process(struct be_adapter *adapter,
1176 struct be_rx_obj *rxo,
1177 struct be_rx_compl_info *rxcp)
1179 struct net_device *netdev = adapter->netdev;
1180 struct sk_buff *skb;
1182 skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
1183 if (unlikely(!skb)) {
1184 if (net_ratelimit())
1185 dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
1186 be_rx_compl_discard(adapter, rxo, rxcp);
1187 return;
1190 skb_fill_rx_data(adapter, rxo, skb, rxcp);
1192 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1193 skb->ip_summed = CHECKSUM_UNNECESSARY;
1194 else
1195 skb_checksum_none_assert(skb);
1197 skb->truesize = skb->len + sizeof(struct sk_buff);
1198 skb->protocol = eth_type_trans(skb, netdev);
1199 if (adapter->netdev->features & NETIF_F_RXHASH)
1200 skb->rxhash = rxcp->rss_hash;
1203 if (unlikely(rxcp->vlanf)) {
1204 if (!adapter->vlan_grp || adapter->vlans_added == 0) {
1205 kfree_skb(skb);
1206 return;
1208 vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
1209 rxcp->vlan_tag);
1210 } else {
1211 netif_receive_skb(skb);
1215 /* Process the RX completion indicated by rxcp when GRO is enabled */
1216 static void be_rx_compl_process_gro(struct be_adapter *adapter,
1217 struct be_rx_obj *rxo,
1218 struct be_rx_compl_info *rxcp)
1220 struct be_rx_page_info *page_info;
1221 struct sk_buff *skb = NULL;
1222 struct be_queue_info *rxq = &rxo->q;
1223 struct be_eq_obj *eq_obj = &rxo->rx_eq;
1224 u16 remaining, curr_frag_len;
1225 u16 i, j;
1227 skb = napi_get_frags(&eq_obj->napi);
1228 if (!skb) {
1229 be_rx_compl_discard(adapter, rxo, rxcp);
1230 return;
1233 remaining = rxcp->pkt_size;
1234 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1235 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1237 curr_frag_len = min(remaining, rx_frag_size);
1239 /* Coalesce all frags from the same physical page in one slot */
1240 if (i == 0 || page_info->page_offset == 0) {
1241 /* First frag or Fresh page */
1242 j++;
1243 skb_shinfo(skb)->frags[j].page = page_info->page;
1244 skb_shinfo(skb)->frags[j].page_offset =
1245 page_info->page_offset;
1246 skb_shinfo(skb)->frags[j].size = 0;
1247 } else {
1248 put_page(page_info->page);
1250 skb_shinfo(skb)->frags[j].size += curr_frag_len;
1252 remaining -= curr_frag_len;
1253 index_inc(&rxcp->rxq_idx, rxq->len);
1254 memset(page_info, 0, sizeof(*page_info));
1256 BUG_ON(j > MAX_SKB_FRAGS);
1258 skb_shinfo(skb)->nr_frags = j + 1;
1259 skb->len = rxcp->pkt_size;
1260 skb->data_len = rxcp->pkt_size;
1261 skb->truesize += rxcp->pkt_size;
1262 skb->ip_summed = CHECKSUM_UNNECESSARY;
1263 if (adapter->netdev->features & NETIF_F_RXHASH)
1264 skb->rxhash = rxcp->rss_hash;
1266 if (likely(!rxcp->vlanf))
1267 napi_gro_frags(&eq_obj->napi);
1268 else
1269 vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp,
1270 rxcp->vlan_tag);
1273 static void be_parse_rx_compl_v1(struct be_adapter *adapter,
1274 struct be_eth_rx_compl *compl,
1275 struct be_rx_compl_info *rxcp)
1277 rxcp->pkt_size =
1278 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1279 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1280 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1281 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1282 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1283 rxcp->ip_csum =
1284 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1285 rxcp->l4_csum =
1286 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1287 rxcp->ipv6 =
1288 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1289 rxcp->rxq_idx =
1290 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1291 rxcp->num_rcvd =
1292 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1293 rxcp->pkt_type =
1294 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1295 rxcp->rss_hash =
1296 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1297 if (rxcp->vlanf) {
1298 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1299 compl);
1300 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1301 compl);
1305 static void be_parse_rx_compl_v0(struct be_adapter *adapter,
1306 struct be_eth_rx_compl *compl,
1307 struct be_rx_compl_info *rxcp)
1309 rxcp->pkt_size =
1310 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1311 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1312 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1313 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1314 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1315 rxcp->ip_csum =
1316 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1317 rxcp->l4_csum =
1318 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1319 rxcp->ipv6 =
1320 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1321 rxcp->rxq_idx =
1322 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1323 rxcp->num_rcvd =
1324 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1325 rxcp->pkt_type =
1326 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1327 rxcp->rss_hash =
1328 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1329 if (rxcp->vlanf) {
1330 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1331 compl);
1332 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1333 compl);
1337 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1339 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1340 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1341 struct be_adapter *adapter = rxo->adapter;
1343 /* For checking the valid bit it is Ok to use either definition as the
1344 * valid bit is at the same position in both v0 and v1 Rx compl */
1345 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1346 return NULL;
1348 rmb();
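/* The rmb() matters because hardware sets the valid bit last: it keeps
 * the reads of the completion fields below from being reordered before
 * the valid-bit check above.
 */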
1349 be_dws_le_to_cpu(compl, sizeof(*compl));
1351 if (adapter->be3_native)
1352 be_parse_rx_compl_v1(adapter, compl, rxcp);
1353 else
1354 be_parse_rx_compl_v0(adapter, compl, rxcp);
1356 if (rxcp->vlanf) {
1357 /* vlanf could be wrongly set in some cards.
1358 * ignore if vtm is not set */
1359 if ((adapter->function_mode & 0x400) && !rxcp->vtm)
1360 rxcp->vlanf = 0;
1362 if (!lancer_chip(adapter))
1363 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1365 if (((adapter->pvid & VLAN_VID_MASK) ==
1366 (rxcp->vlan_tag & VLAN_VID_MASK)) &&
1367 !adapter->vlan_tag[rxcp->vlan_tag])
1368 rxcp->vlanf = 0;
1371 /* As the compl has been parsed, reset it; we won't touch it again */
1372 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1374 queue_tail_inc(&rxo->cq);
1375 return rxcp;
1378 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1380 u32 order = get_order(size);
1382 if (order > 0)
1383 gfp |= __GFP_COMP;
1384 return alloc_pages(gfp, order);
1388 * Allocate a page, split it to fragments of size rx_frag_size and post as
1389 * receive buffers to BE
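* With the default rx_frag_size of 2048 and 4K pages, each page yields
* two frags; get_page() takes an extra reference for every frag after
* the first, and last_page_user marks the frag whose teardown unmaps
* the page's DMA mapping.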
1391 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1393 struct be_adapter *adapter = rxo->adapter;
1394 struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
1395 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1396 struct be_queue_info *rxq = &rxo->q;
1397 struct page *pagep = NULL;
1398 struct be_eth_rx_d *rxd;
1399 u64 page_dmaaddr = 0, frag_dmaaddr;
1400 u32 posted, page_offset = 0;
1402 page_info = &rxo->page_info_tbl[rxq->head];
1403 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1404 if (!pagep) {
1405 pagep = be_alloc_pages(adapter->big_page_size, gfp);
1406 if (unlikely(!pagep)) {
1407 rxo->stats.rx_post_fail++;
1408 break;
1410 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1411 0, adapter->big_page_size,
1412 DMA_FROM_DEVICE);
1413 page_info->page_offset = 0;
1414 } else {
1415 get_page(pagep);
1416 page_info->page_offset = page_offset + rx_frag_size;
1418 page_offset = page_info->page_offset;
1419 page_info->page = pagep;
1420 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1421 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1423 rxd = queue_head_node(rxq);
1424 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1425 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1427 /* Any space left in the current big page for another frag? */
1428 if ((page_offset + rx_frag_size + rx_frag_size) >
1429 adapter->big_page_size) {
1430 pagep = NULL;
1431 page_info->last_page_user = true;
1434 prev_page_info = page_info;
1435 queue_head_inc(rxq);
1436 page_info = &page_info_tbl[rxq->head];
1438 if (pagep)
1439 prev_page_info->last_page_user = true;
1441 if (posted) {
1442 atomic_add(posted, &rxq->used);
1443 be_rxq_notify(adapter, rxq->id, posted);
1444 } else if (atomic_read(&rxq->used) == 0) {
1445 /* Let be_worker replenish when memory is available */
1446 rxo->rx_post_starved = true;
1450 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1452 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1454 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1455 return NULL;
1457 rmb();
1458 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1460 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1462 queue_tail_inc(tx_cq);
1463 return txcp;
1466 static u16 be_tx_compl_process(struct be_adapter *adapter,
1467 struct be_tx_obj *txo, u16 last_index)
1469 struct be_queue_info *txq = &txo->q;
1470 struct be_eth_wrb *wrb;
1471 struct sk_buff **sent_skbs = txo->sent_skb_list;
1472 struct sk_buff *sent_skb;
1473 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1474 bool unmap_skb_hdr = true;
1476 sent_skb = sent_skbs[txq->tail];
1477 BUG_ON(!sent_skb);
1478 sent_skbs[txq->tail] = NULL;
1480 /* skip header wrb */
1481 queue_tail_inc(txq);
1483 do {
1484 cur_index = txq->tail;
1485 wrb = queue_tail_node(txq);
1486 unmap_tx_frag(&adapter->pdev->dev, wrb,
1487 (unmap_skb_hdr && skb_headlen(sent_skb)));
1488 unmap_skb_hdr = false;
1490 num_wrbs++;
1491 queue_tail_inc(txq);
1492 } while (cur_index != last_index);
1494 kfree_skb(sent_skb);
1495 return num_wrbs;
1498 static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1500 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1502 if (!eqe->evt)
1503 return NULL;
1505 rmb();
1506 eqe->evt = le32_to_cpu(eqe->evt);
1507 queue_tail_inc(&eq_obj->q);
1508 return eqe;
1511 static int event_handle(struct be_adapter *adapter,
1512 struct be_eq_obj *eq_obj,
1513 bool rearm)
1515 struct be_eq_entry *eqe;
1516 u16 num = 0;
1518 while ((eqe = event_get(eq_obj)) != NULL) {
1519 eqe->evt = 0;
1520 num++;
1523 /* Deal with any spurious interrupts that come
1524 * without events
1526 if (!num)
1527 rearm = true;
1529 be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
1530 if (num)
1531 napi_schedule(&eq_obj->napi);
1533 return num;
1536 /* Just read and notify events without processing them.
1537 * Used at the time of destroying event queues */
1538 static void be_eq_clean(struct be_adapter *adapter,
1539 struct be_eq_obj *eq_obj)
1541 struct be_eq_entry *eqe;
1542 u16 num = 0;
1544 while ((eqe = event_get(eq_obj)) != NULL) {
1545 eqe->evt = 0;
1546 num++;
1549 if (num)
1550 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1553 static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
1555 struct be_rx_page_info *page_info;
1556 struct be_queue_info *rxq = &rxo->q;
1557 struct be_queue_info *rx_cq = &rxo->cq;
1558 struct be_rx_compl_info *rxcp;
1559 u16 tail;
1561 /* First cleanup pending rx completions */
1562 while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1563 be_rx_compl_discard(adapter, rxo, rxcp);
1564 be_cq_notify(adapter, rx_cq->id, false, 1);
1567 /* Then free posted rx buffers that were not used */
1568 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1569 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1570 page_info = get_rx_page_info(adapter, rxo, tail);
1571 put_page(page_info->page);
1572 memset(page_info, 0, sizeof(*page_info));
1574 BUG_ON(atomic_read(&rxq->used));
1577 static void be_tx_compl_clean(struct be_adapter *adapter,
1578 struct be_tx_obj *txo)
1580 struct be_queue_info *tx_cq = &txo->cq;
1581 struct be_queue_info *txq = &txo->q;
1582 struct be_eth_tx_compl *txcp;
1583 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1584 struct sk_buff **sent_skbs = txo->sent_skb_list;
1585 struct sk_buff *sent_skb;
1586 bool dummy_wrb;
1588 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1589 do {
1590 while ((txcp = be_tx_compl_get(tx_cq))) {
1591 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1592 wrb_index, txcp);
1593 num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
1594 cmpl++;
1596 if (cmpl) {
1597 be_cq_notify(adapter, tx_cq->id, false, cmpl);
1598 atomic_sub(num_wrbs, &txq->used);
1599 cmpl = 0;
1600 num_wrbs = 0;
1603 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1604 break;
1606 mdelay(1);
1607 } while (true);
1609 if (atomic_read(&txq->used))
1610 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1611 atomic_read(&txq->used));
1613 /* free posted tx for which compls will never arrive */
1614 while (atomic_read(&txq->used)) {
1615 sent_skb = sent_skbs[txq->tail];
1616 end_idx = txq->tail;
1617 index_adv(&end_idx,
1618 wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1619 txq->len);
1620 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1621 atomic_sub(num_wrbs, &txq->used);
1625 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1627 struct be_queue_info *q;
1629 q = &adapter->mcc_obj.q;
1630 if (q->created)
1631 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1632 be_queue_free(adapter, q);
1634 q = &adapter->mcc_obj.cq;
1635 if (q->created)
1636 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1637 be_queue_free(adapter, q);
1640 /* Must be called only after TX qs are created as MCC shares TX EQ */
1641 static int be_mcc_queues_create(struct be_adapter *adapter)
1643 struct be_queue_info *q, *cq;
1645 /* Alloc MCC compl queue */
1646 cq = &adapter->mcc_obj.cq;
1647 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1648 sizeof(struct be_mcc_compl)))
1649 goto err;
1651 /* Ask BE to create MCC compl queue; share TX's eq */
1652 if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
1653 goto mcc_cq_free;
1655 /* Alloc MCC queue */
1656 q = &adapter->mcc_obj.q;
1657 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1658 goto mcc_cq_destroy;
1660 /* Ask BE to create MCC queue */
1661 if (be_cmd_mccq_create(adapter, q, cq))
1662 goto mcc_q_free;
1664 return 0;
1666 mcc_q_free:
1667 be_queue_free(adapter, q);
1668 mcc_cq_destroy:
1669 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1670 mcc_cq_free:
1671 be_queue_free(adapter, cq);
1672 err:
1673 return -1;
1676 static void be_tx_queues_destroy(struct be_adapter *adapter)
1678 struct be_queue_info *q;
1679 struct be_tx_obj *txo;
1680 u8 i;
1682 for_all_tx_queues(adapter, txo, i) {
1683 q = &txo->q;
1684 if (q->created)
1685 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1686 be_queue_free(adapter, q);
1688 q = &txo->cq;
1689 if (q->created)
1690 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1691 be_queue_free(adapter, q);
1694 /* Clear any residual events */
1695 be_eq_clean(adapter, &adapter->tx_eq);
1697 q = &adapter->tx_eq.q;
1698 if (q->created)
1699 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1700 be_queue_free(adapter, q);
1703 /* One TX event queue is shared by all TX compl qs */
1704 static int be_tx_queues_create(struct be_adapter *adapter)
1706 struct be_queue_info *eq, *q, *cq;
1707 struct be_tx_obj *txo;
1708 u8 i;
1710 adapter->tx_eq.max_eqd = 0;
1711 adapter->tx_eq.min_eqd = 0;
1712 adapter->tx_eq.cur_eqd = 96;
1713 adapter->tx_eq.enable_aic = false;
1715 eq = &adapter->tx_eq.q;
1716 if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1717 sizeof(struct be_eq_entry)))
1718 return -1;
1720 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1721 goto err;
1722 adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
1724 for_all_tx_queues(adapter, txo, i) {
1725 cq = &txo->cq;
1726 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1727 sizeof(struct be_eth_tx_compl)))
1728 goto err;
1730 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1731 goto err;
1733 q = &txo->q;
1734 if (be_queue_alloc(adapter, q, TX_Q_LEN,
1735 sizeof(struct be_eth_wrb)))
1736 goto err;
1738 if (be_cmd_txq_create(adapter, q, cq))
1739 goto err;
1741 return 0;
1743 err:
1744 be_tx_queues_destroy(adapter);
1745 return -1;
1748 static void be_rx_queues_destroy(struct be_adapter *adapter)
1750 struct be_queue_info *q;
1751 struct be_rx_obj *rxo;
1752 int i;
1754 for_all_rx_queues(adapter, rxo, i) {
1755 q = &rxo->q;
1756 if (q->created) {
1757 be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
1758 /* After the rxq is invalidated, wait for a grace time
1759 * of 1ms for all dma to end and the flush compl to
1760 * arrive
1762 mdelay(1);
1763 be_rx_q_clean(adapter, rxo);
1765 be_queue_free(adapter, q);
1767 q = &rxo->cq;
1768 if (q->created)
1769 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1770 be_queue_free(adapter, q);
1772 /* Clear any residual events */
1773 q = &rxo->rx_eq.q;
1774 if (q->created) {
1775 be_eq_clean(adapter, &rxo->rx_eq);
1776 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1778 be_queue_free(adapter, q);
1782 static u32 be_num_rxqs_want(struct be_adapter *adapter)
1784 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1785 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1786 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1787 } else {
1788 dev_warn(&adapter->pdev->dev,
1789 "No support for multiple RX queues\n");
1790 return 1;
1794 static int be_rx_queues_create(struct be_adapter *adapter)
1796 struct be_queue_info *eq, *q, *cq;
1797 struct be_rx_obj *rxo;
1798 int rc, i;
1800 adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1801 msix_enabled(adapter) ?
1802 adapter->num_msix_vec - 1 : 1);
1803 if (adapter->num_rx_qs != MAX_RX_QS)
1804 dev_warn(&adapter->pdev->dev,
1805 "Can create only %d RX queues", adapter->num_rx_qs);
1807 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1808 for_all_rx_queues(adapter, rxo, i) {
1809 rxo->adapter = adapter;
1810 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1811 rxo->rx_eq.enable_aic = true;
1813 /* EQ */
1814 eq = &rxo->rx_eq.q;
1815 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1816 sizeof(struct be_eq_entry));
1817 if (rc)
1818 goto err;
1820 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1821 if (rc)
1822 goto err;
1824 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
1826 /* CQ */
1827 cq = &rxo->cq;
1828 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1829 sizeof(struct be_eth_rx_compl));
1830 if (rc)
1831 goto err;
1833 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1834 if (rc)
1835 goto err;
1836 /* Rx Q */
1837 q = &rxo->q;
1838 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1839 sizeof(struct be_eth_rx_d));
1840 if (rc)
1841 goto err;
1843 rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
1844 BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
1845 (i > 0) ? 1 : 0 /* rss enable */, &rxo->rss_id);
1846 if (rc)
1847 goto err;
1850 if (be_multi_rxq(adapter)) {
1851 u8 rsstable[MAX_RSS_QS];
1853 for_all_rss_queues(adapter, rxo, i)
1854 rsstable[i] = rxo->rss_id;
1856 rc = be_cmd_rss_config(adapter, rsstable,
1857 adapter->num_rx_qs - 1);
1858 if (rc)
1859 goto err;
1862 return 0;
1863 err:
1864 be_rx_queues_destroy(adapter);
1865 return -1;
1868 static bool event_peek(struct be_eq_obj *eq_obj)
1870 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1871 if (!eqe->evt)
1872 return false;
1873 else
1874 return true;
1877 static irqreturn_t be_intx(int irq, void *dev)
1879 struct be_adapter *adapter = dev;
1880 struct be_rx_obj *rxo;
1881 int isr, i, tx = 0, rx = 0;
1883 if (lancer_chip(adapter)) {
1884 if (event_peek(&adapter->tx_eq))
1885 tx = event_handle(adapter, &adapter->tx_eq, false);
1886 for_all_rx_queues(adapter, rxo, i) {
1887 if (event_peek(&rxo->rx_eq))
1888 rx |= event_handle(adapter, &rxo->rx_eq, true);
1891 if (!(tx || rx))
1892 return IRQ_NONE;
1894 } else {
1895 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1896 (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1897 if (!isr)
1898 return IRQ_NONE;
1900 if ((1 << adapter->tx_eq.eq_idx & isr))
1901 event_handle(adapter, &adapter->tx_eq, false);
1903 for_all_rx_queues(adapter, rxo, i) {
1904 if ((1 << rxo->rx_eq.eq_idx & isr))
1905 event_handle(adapter, &rxo->rx_eq, true);
1909 return IRQ_HANDLED;
1912 static irqreturn_t be_msix_rx(int irq, void *dev)
1914 struct be_rx_obj *rxo = dev;
1915 struct be_adapter *adapter = rxo->adapter;
1917 event_handle(adapter, &rxo->rx_eq, true);
1919 return IRQ_HANDLED;
1922 static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1924 struct be_adapter *adapter = dev;
1926 event_handle(adapter, &adapter->tx_eq, false);
1928 return IRQ_HANDLED;
1931 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1933 return rxcp->tcpf && !rxcp->err;
1936 static int be_poll_rx(struct napi_struct *napi, int budget)
1938 struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1939 struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1940 struct be_adapter *adapter = rxo->adapter;
1941 struct be_queue_info *rx_cq = &rxo->cq;
1942 struct be_rx_compl_info *rxcp;
1943 u32 work_done;
1945 rxo->stats.rx_polls++;
1946 for (work_done = 0; work_done < budget; work_done++) {
1947 rxcp = be_rx_compl_get(rxo);
1948 if (!rxcp)
1949 break;
1951 /* Ignore flush completions */
1952 if (rxcp->num_rcvd && rxcp->pkt_size) {
1953 if (do_gro(rxcp))
1954 be_rx_compl_process_gro(adapter, rxo, rxcp);
1955 else
1956 be_rx_compl_process(adapter, rxo, rxcp);
1957 } else if (rxcp->pkt_size == 0) {
1958 be_rx_compl_discard(adapter, rxo, rxcp);
1961 be_rx_stats_update(rxo, rxcp);
1964 /* Refill the queue */
1965 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1966 be_post_rx_frags(rxo, GFP_ATOMIC);
1968 /* All consumed */
1969 if (work_done < budget) {
1970 napi_complete(napi);
1971 be_cq_notify(adapter, rx_cq->id, true, work_done);
1972 } else {
1973 /* More to be consumed; continue with interrupts disabled */
1974 be_cq_notify(adapter, rx_cq->id, false, work_done);
1976 return work_done;
1979 /* As TX and MCC share the same EQ, check for both TX and MCC completions.
1980 * For TX/MCC we don't honour budget; consume everything.
1981 */
1982 static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1984 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1985 struct be_adapter *adapter =
1986 container_of(tx_eq, struct be_adapter, tx_eq);
1987 struct be_tx_obj *txo;
1988 struct be_eth_tx_compl *txcp;
1989 int tx_compl, mcc_compl, status = 0;
1990 u8 i;
1991 u16 num_wrbs;
1993 for_all_tx_queues(adapter, txo, i) {
1994 tx_compl = 0;
1995 num_wrbs = 0;
1996 while ((txcp = be_tx_compl_get(&txo->cq))) {
1997 num_wrbs += be_tx_compl_process(adapter, txo,
1998 AMAP_GET_BITS(struct amap_eth_tx_compl,
1999 wrb_index, txcp));
2000 tx_compl++;
2002 if (tx_compl) {
2003 be_cq_notify(adapter, txo->cq.id, true, tx_compl);
2005 atomic_sub(num_wrbs, &txo->q.used);
2007 /* As Tx wrbs have been freed up, wake up netdev queue
2008 * if it was stopped due to lack of tx wrbs. */
2009 if (__netif_subqueue_stopped(adapter->netdev, i) &&
2010 atomic_read(&txo->q.used) < txo->q.len / 2) {
2011 netif_wake_subqueue(adapter->netdev, i);
2014 adapter->drv_stats.be_tx_events++;
2015 txo->stats.be_tx_compl += tx_compl;
2019 mcc_compl = be_process_mcc(adapter, &status);
2021 if (mcc_compl) {
2022 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
2023 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
2026 napi_complete(napi);
2028 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2029 return 1;
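/* Read the UE (unrecoverable error) status words from PCI config space
 * and log every unmasked error bit by name. Once a UE is seen, eeh_err
 * is set, which short-circuits further hardware access elsewhere in the
 * driver.
 */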
2032 void be_detect_dump_ue(struct be_adapter *adapter)
2034 u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
2035 u32 i;
2037 pci_read_config_dword(adapter->pdev,
2038 PCICFG_UE_STATUS_LOW, &ue_status_lo);
2039 pci_read_config_dword(adapter->pdev,
2040 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
2041 pci_read_config_dword(adapter->pdev,
2042 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
2043 pci_read_config_dword(adapter->pdev,
2044 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
2046 ue_status_lo &= ~ue_status_lo_mask;
2047 ue_status_hi &= ~ue_status_hi_mask;
2049 if (ue_status_lo || ue_status_hi) {
2050 adapter->ue_detected = true;
2051 adapter->eeh_err = true;
2052 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
2055 if (ue_status_lo) {
2056 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
2057 if (ue_status_lo & 1)
2058 dev_err(&adapter->pdev->dev,
2059 "UE: %s bit set\n", ue_status_low_desc[i]);
2062 if (ue_status_hi) {
2063 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
2064 if (ue_status_hi & 1)
2065 dev_err(&adapter->pdev->dev,
2066 "UE: %s bit set\n", ue_status_hi_desc[i]);
2072 static void be_worker(struct work_struct *work)
2074 struct be_adapter *adapter =
2075 container_of(work, struct be_adapter, work.work);
2076 struct be_rx_obj *rxo;
2077 struct be_tx_obj *txo;
2078 int i;
2080 if (!adapter->ue_detected && !lancer_chip(adapter))
2081 be_detect_dump_ue(adapter);
2083 /* when interrupts are not yet enabled, just reap any pending
2084 * mcc completions */
2085 if (!netif_running(adapter->netdev)) {
2086 int mcc_compl, status = 0;
2088 mcc_compl = be_process_mcc(adapter, &status);
2090 if (mcc_compl) {
2091 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
2092 be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
2095 goto reschedule;
2098 if (!adapter->stats_cmd_sent) {
2099 if (lancer_chip(adapter))
2100 lancer_cmd_get_pport_stats(adapter,
2101 &adapter->stats_cmd);
2102 else
2103 be_cmd_get_stats(adapter, &adapter->stats_cmd);
2106 for_all_tx_queues(adapter, txo, i)
2107 be_tx_rate_update(txo);
2109 for_all_rx_queues(adapter, rxo, i) {
2110 be_rx_rate_update(rxo);
2111 be_rx_eqd_update(adapter, rxo);
2113 if (rxo->rx_post_starved) {
2114 rxo->rx_post_starved = false;
2115 be_post_rx_frags(rxo, GFP_KERNEL);
2119 reschedule:
2120 adapter->work_counter++;
2121 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2124 static void be_msix_disable(struct be_adapter *adapter)
2126 if (msix_enabled(adapter)) {
2127 pci_disable_msix(adapter->pdev);
2128 adapter->num_msix_vec = 0;
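/* Request one MSI-X vector per desired RX queue plus one for TX/MCC.
 * A positive return from pci_enable_msix() is the number of vectors
 * actually available, so retry once with that count; if even that
 * fails, the driver stays on INTx.
 */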
2132 static void be_msix_enable(struct be_adapter *adapter)
2134 #define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
2135 int i, status, num_vec;
2137 num_vec = be_num_rxqs_want(adapter) + 1;
2139 for (i = 0; i < num_vec; i++)
2140 adapter->msix_entries[i].entry = i;
2142 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2143 if (status == 0) {
2144 goto done;
2145 } else if (status >= BE_MIN_MSIX_VECTORS) {
2146 num_vec = status;
2147 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2148 num_vec) == 0)
2149 goto done;
2151 return;
2152 done:
2153 adapter->num_msix_vec = num_vec;
2154 return;
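/* Enable SR-IOV when the num_vfs module parameter is set, capping the
 * request at the TotalVFs value advertised in the PF's SR-IOV
 * capability.
 */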
2157 static void be_sriov_enable(struct be_adapter *adapter)
2159 be_check_sriov_fn_type(adapter);
2160 #ifdef CONFIG_PCI_IOV
2161 if (be_physfn(adapter) && num_vfs) {
2162 int status, pos;
2163 u16 nvfs;
2165 pos = pci_find_ext_capability(adapter->pdev,
2166 PCI_EXT_CAP_ID_SRIOV);
2167 pci_read_config_word(adapter->pdev,
2168 pos + PCI_SRIOV_TOTAL_VF, &nvfs);
2170 if (num_vfs > nvfs) {
2171 dev_info(&adapter->pdev->dev,
2172 "Device supports %d VFs and not %d\n",
2173 nvfs, num_vfs);
2174 num_vfs = nvfs;
2177 status = pci_enable_sriov(adapter->pdev, num_vfs);
2178 adapter->sriov_enabled = !status;
2180 #endif
2183 static void be_sriov_disable(struct be_adapter *adapter)
2185 #ifdef CONFIG_PCI_IOV
2186 if (adapter->sriov_enabled) {
2187 pci_disable_sriov(adapter->pdev);
2188 adapter->sriov_enabled = false;
2190 #endif
2193 static inline int be_msix_vec_get(struct be_adapter *adapter,
2194 struct be_eq_obj *eq_obj)
2196 return adapter->msix_entries[eq_obj->eq_idx].vector;
2199 static int be_request_irq(struct be_adapter *adapter,
2200 struct be_eq_obj *eq_obj,
2201 void *handler, char *desc, void *context)
2203 struct net_device *netdev = adapter->netdev;
2204 int vec;
2206 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
2207 vec = be_msix_vec_get(adapter, eq_obj);
2208 return request_irq(vec, handler, 0, eq_obj->desc, context);
2211 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2212 void *context)
2214 int vec = be_msix_vec_get(adapter, eq_obj);
2215 free_irq(vec, context);
2218 static int be_msix_register(struct be_adapter *adapter)
2220 struct be_rx_obj *rxo;
2221 int status, i;
2222 char qname[10];
2224 status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2225 adapter);
2226 if (status)
2227 goto err;
2229 for_all_rx_queues(adapter, rxo, i) {
2230 sprintf(qname, "rxq%d", i);
2231 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2232 qname, rxo);
2233 if (status)
2234 goto err_msix;
2237 return 0;
2239 err_msix:
2240 be_free_irq(adapter, &adapter->tx_eq, adapter);
2242 for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2243 be_free_irq(adapter, &rxo->rx_eq, rxo);
2245 err:
2246 dev_warn(&adapter->pdev->dev,
2247 "MSIX Request IRQ failed - err %d\n", status);
2248 be_msix_disable(adapter);
2249 return status;
2252 static int be_irq_register(struct be_adapter *adapter)
2254 struct net_device *netdev = adapter->netdev;
2255 int status;
2257 if (msix_enabled(adapter)) {
2258 status = be_msix_register(adapter);
2259 if (status == 0)
2260 goto done;
2261 /* INTx is not supported for VF */
2262 if (!be_physfn(adapter))
2263 return status;
2266 /* INTx */
2267 netdev->irq = adapter->pdev->irq;
2268 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2269 adapter);
2270 if (status) {
2271 dev_err(&adapter->pdev->dev,
2272 "INTx request IRQ failed - err %d\n", status);
2273 return status;
2275 done:
2276 adapter->isr_registered = true;
2277 return 0;
2280 static void be_irq_unregister(struct be_adapter *adapter)
2282 struct net_device *netdev = adapter->netdev;
2283 struct be_rx_obj *rxo;
2284 int i;
2286 if (!adapter->isr_registered)
2287 return;
2289 /* INTx */
2290 if (!msix_enabled(adapter)) {
2291 free_irq(netdev->irq, adapter);
2292 goto done;
2295 /* MSIx */
2296 be_free_irq(adapter, &adapter->tx_eq, adapter);
2298 for_all_rx_queues(adapter, rxo, i)
2299 be_free_irq(adapter, &rxo->rx_eq, rxo);
2301 done:
2302 adapter->isr_registered = false;
2305 static int be_close(struct net_device *netdev)
2307 struct be_adapter *adapter = netdev_priv(netdev);
2308 struct be_rx_obj *rxo;
2309 struct be_tx_obj *txo;
2310 struct be_eq_obj *tx_eq = &adapter->tx_eq;
2311 int vec, i;
2313 be_async_mcc_disable(adapter);
2315 netif_carrier_off(netdev);
2316 adapter->link_up = false;
2318 if (!lancer_chip(adapter))
2319 be_intr_set(adapter, false);
2321 for_all_rx_queues(adapter, rxo, i)
2322 napi_disable(&rxo->rx_eq.napi);
2324 napi_disable(&tx_eq->napi);
2326 if (lancer_chip(adapter)) {
2327 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2328 for_all_rx_queues(adapter, rxo, i)
2329 be_cq_notify(adapter, rxo->cq.id, false, 0);
2330 for_all_tx_queues(adapter, txo, i)
2331 be_cq_notify(adapter, txo->cq.id, false, 0);
2334 if (msix_enabled(adapter)) {
2335 vec = be_msix_vec_get(adapter, tx_eq);
2336 synchronize_irq(vec);
2338 for_all_rx_queues(adapter, rxo, i) {
2339 vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2340 synchronize_irq(vec);
2342 } else {
2343 synchronize_irq(netdev->irq);
2345 be_irq_unregister(adapter);
2347 /* Wait for all pending tx completions to arrive so that
2348 * all tx skbs are freed.
2349 */
2350 for_all_tx_queues(adapter, txo, i)
2351 be_tx_compl_clean(adapter, txo);
2353 return 0;
2356 static int be_open(struct net_device *netdev)
2358 struct be_adapter *adapter = netdev_priv(netdev);
2359 struct be_eq_obj *tx_eq = &adapter->tx_eq;
2360 struct be_rx_obj *rxo;
2361 bool link_up;
2362 int status, i;
2363 u8 mac_speed;
2364 u16 link_speed;
2366 for_all_rx_queues(adapter, rxo, i) {
2367 be_post_rx_frags(rxo, GFP_KERNEL);
2368 napi_enable(&rxo->rx_eq.napi);
2370 napi_enable(&tx_eq->napi);
2372 be_irq_register(adapter);
2374 if (!lancer_chip(adapter))
2375 be_intr_set(adapter, true);
2377 /* The evt queues are created in unarmed state; arm them */
2378 for_all_rx_queues(adapter, rxo, i) {
2379 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2380 be_cq_notify(adapter, rxo->cq.id, true, 0);
2382 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2384 /* Now that interrupts are on we can process async mcc */
2385 be_async_mcc_enable(adapter);
2387 status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
2388 &link_speed, 0);
2389 if (status)
2390 goto err;
2391 be_link_status_update(adapter, link_up);
2393 if (be_physfn(adapter)) {
2394 status = be_vid_config(adapter, false, 0);
2395 if (status)
2396 goto err;
2398 status = be_cmd_set_flow_control(adapter,
2399 adapter->tx_fc, adapter->rx_fc);
2400 if (status)
2401 goto err;
2404 return 0;
2405 err:
2406 be_close(adapter->netdev);
2407 return -EIO;
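/* Arm or disarm Wake-on-LAN: programs the firmware magic-packet filter
 * (an all-zero MAC on disable, which presumably clears it) and sets the
 * matching PCI wake state for D3hot/D3cold.
 */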
2410 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2412 struct be_dma_mem cmd;
2413 int status = 0;
2414 u8 mac[ETH_ALEN];
2416 memset(mac, 0, ETH_ALEN);
2418 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2419 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2420 GFP_KERNEL);
2421 if (cmd.va == NULL)
2422 return -1;
2423 memset(cmd.va, 0, cmd.size);
2425 if (enable) {
2426 status = pci_write_config_dword(adapter->pdev,
2427 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2428 if (status) {
2429 dev_err(&adapter->pdev->dev,
2430 "Could not enable Wake-on-lan\n");
2431 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2432 cmd.dma);
2433 return status;
2435 status = be_cmd_enable_magic_wol(adapter,
2436 adapter->netdev->dev_addr, &cmd);
2437 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2438 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2439 } else {
2440 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2441 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2442 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2445 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2446 return status;
2449 /*
2450 * Generate a seed MAC address from the PF MAC address using jhash.
2451 * MAC addresses for VFs are assigned incrementally starting from the seed.
2452 * These addresses are programmed in the ASIC by the PF and the VF driver
2453 * queries for the MAC address during its probe.
2454 */
2455 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2457 u32 vf = 0;
2458 int status = 0;
2459 u8 mac[ETH_ALEN];
2461 be_vf_eth_addr_generate(adapter, mac);
2463 for (vf = 0; vf < num_vfs; vf++) {
2464 status = be_cmd_pmac_add(adapter, mac,
2465 adapter->vf_cfg[vf].vf_if_handle,
2466 &adapter->vf_cfg[vf].vf_pmac_id,
2467 vf + 1);
2468 if (status)
2469 dev_err(&adapter->pdev->dev,
2470 "Mac address add failed for VF %d\n", vf);
2471 else
2472 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2474 mac[5] += 1;
2476 return status;
2479 static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2481 u32 vf;
2483 for (vf = 0; vf < num_vfs; vf++) {
2484 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2485 be_cmd_pmac_del(adapter,
2486 adapter->vf_cfg[vf].vf_if_handle,
2487 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
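/* One-time data-path setup: create the host interface (plus one
 * interface per VF when SR-IOV is enabled), then the TX, RX and MCC
 * queue sets; errors unwind in reverse order of creation.
 */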
2491 static int be_setup(struct be_adapter *adapter)
2493 struct net_device *netdev = adapter->netdev;
2494 u32 cap_flags, en_flags, vf = 0;
2495 int status;
2496 u8 mac[ETH_ALEN];
2498 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2499 BE_IF_FLAGS_BROADCAST |
2500 BE_IF_FLAGS_MULTICAST;
2502 if (be_physfn(adapter)) {
2503 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2504 BE_IF_FLAGS_PROMISCUOUS |
2505 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2506 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
2508 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2509 cap_flags |= BE_IF_FLAGS_RSS;
2510 en_flags |= BE_IF_FLAGS_RSS;
2514 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2515 netdev->dev_addr, false/* pmac_invalid */,
2516 &adapter->if_handle, &adapter->pmac_id, 0);
2517 if (status != 0)
2518 goto do_none;
2520 if (be_physfn(adapter)) {
2521 if (adapter->sriov_enabled) {
2522 while (vf < num_vfs) {
2523 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2524 BE_IF_FLAGS_BROADCAST;
2525 status = be_cmd_if_create(adapter, cap_flags,
2526 en_flags, mac, true,
2527 &adapter->vf_cfg[vf].vf_if_handle,
2528 NULL, vf+1);
2529 if (status) {
2530 dev_err(&adapter->pdev->dev,
2531 "Interface Create failed for VF %d\n",
2532 vf);
2533 goto if_destroy;
2535 adapter->vf_cfg[vf].vf_pmac_id =
2536 BE_INVALID_PMAC_ID;
2537 vf++;
2540 } else {
2541 status = be_cmd_mac_addr_query(adapter, mac,
2542 MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2543 if (!status) {
2544 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2545 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2549 status = be_tx_queues_create(adapter);
2550 if (status != 0)
2551 goto if_destroy;
2553 status = be_rx_queues_create(adapter);
2554 if (status != 0)
2555 goto tx_qs_destroy;
2557 /* Allow all priorities by default. A GRP5 evt may modify this */
2558 adapter->vlan_prio_bmap = 0xff;
2560 status = be_mcc_queues_create(adapter);
2561 if (status != 0)
2562 goto rx_qs_destroy;
2564 adapter->link_speed = -1;
2566 return 0;
2568 rx_qs_destroy:
2569 be_rx_queues_destroy(adapter);
2570 tx_qs_destroy:
2571 be_tx_queues_destroy(adapter);
2572 if_destroy:
2573 if (be_physfn(adapter) && adapter->sriov_enabled)
2574 for (vf = 0; vf < num_vfs; vf++)
2575 if (adapter->vf_cfg[vf].vf_if_handle)
2576 be_cmd_if_destroy(adapter,
2577 adapter->vf_cfg[vf].vf_if_handle,
2578 vf + 1);
2579 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2580 do_none:
2581 return status;
2584 static int be_clear(struct be_adapter *adapter)
2586 int vf;
2588 if (be_physfn(adapter) && adapter->sriov_enabled)
2589 be_vf_eth_addr_rem(adapter);
2591 be_mcc_queues_destroy(adapter);
2592 be_rx_queues_destroy(adapter);
2593 be_tx_queues_destroy(adapter);
2594 adapter->eq_next_idx = 0;
2596 if (be_physfn(adapter) && adapter->sriov_enabled)
2597 for (vf = 0; vf < num_vfs; vf++)
2598 if (adapter->vf_cfg[vf].vf_if_handle)
2599 be_cmd_if_destroy(adapter,
2600 adapter->vf_cfg[vf].vf_if_handle,
2601 vf + 1);
2603 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2605 /* tell fw we're done with firing cmds */
2606 be_cmd_fw_clean(adapter);
2607 return 0;
2611 #define FW_FILE_HDR_SIGN "ServerEngines Corp. "
2612 static bool be_flash_redboot(struct be_adapter *adapter,
2613 const u8 *p, u32 img_start, int image_size,
2614 int hdr_size)
2616 u32 crc_offset;
2617 u8 flashed_crc[4];
2618 int status;
2620 crc_offset = hdr_size + img_start + image_size - 4;
2622 p += crc_offset;
2624 status = be_cmd_get_flash_crc(adapter, flashed_crc,
2625 (image_size - 4));
2626 if (status) {
2627 dev_err(&adapter->pdev->dev,
2628 "could not get crc from flash, not flashing redboot\n");
2629 return false;
2632 /* update redboot only if crc does not match */
2633 if (!memcmp(flashed_crc, p, 4))
2634 return false;
2635 else
2636 return true;
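/* Walk the generation-specific component table and flash every image
 * present in the UFI file in 32KB chunks. Intermediate chunks use
 * FLASHROM_OPER_SAVE; the final chunk uses FLASHROM_OPER_FLASH to
 * commit the component.
 */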
2639 static int be_flash_data(struct be_adapter *adapter,
2640 const struct firmware *fw,
2641 struct be_dma_mem *flash_cmd, int num_of_images)
2644 int status = 0, i, filehdr_size = 0;
2645 u32 total_bytes = 0, flash_op;
2646 int num_bytes;
2647 const u8 *p = fw->data;
2648 struct be_cmd_write_flashrom *req = flash_cmd->va;
2649 const struct flash_comp *pflashcomp;
2650 int num_comp;
2652 static const struct flash_comp gen3_flash_types[9] = {
2653 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2654 FLASH_IMAGE_MAX_SIZE_g3},
2655 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2656 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2657 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2658 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2659 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2660 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2661 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2662 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2663 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2664 FLASH_IMAGE_MAX_SIZE_g3},
2665 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2666 FLASH_IMAGE_MAX_SIZE_g3},
2667 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2668 FLASH_IMAGE_MAX_SIZE_g3},
2669 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2670 FLASH_NCSI_IMAGE_MAX_SIZE_g3}
2672 static const struct flash_comp gen2_flash_types[8] = {
2673 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2674 FLASH_IMAGE_MAX_SIZE_g2},
2675 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2676 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2677 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2678 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2679 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2680 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2681 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2682 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2683 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2684 FLASH_IMAGE_MAX_SIZE_g2},
2685 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2686 FLASH_IMAGE_MAX_SIZE_g2},
2687 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2688 FLASH_IMAGE_MAX_SIZE_g2}
2691 if (adapter->generation == BE_GEN3) {
2692 pflashcomp = gen3_flash_types;
2693 filehdr_size = sizeof(struct flash_file_hdr_g3);
2694 num_comp = ARRAY_SIZE(gen3_flash_types);
2695 } else {
2696 pflashcomp = gen2_flash_types;
2697 filehdr_size = sizeof(struct flash_file_hdr_g2);
2698 num_comp = ARRAY_SIZE(gen2_flash_types);
2700 for (i = 0; i < num_comp; i++) {
2701 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2702 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2703 continue;
2704 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2705 (!be_flash_redboot(adapter, fw->data,
2706 pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2707 (num_of_images * sizeof(struct image_hdr)))))
2708 continue;
2709 p = fw->data;
2710 p += filehdr_size + pflashcomp[i].offset
2711 + (num_of_images * sizeof(struct image_hdr));
2712 if (p + pflashcomp[i].size > fw->data + fw->size)
2713 return -1;
2714 total_bytes = pflashcomp[i].size;
2715 while (total_bytes) {
2716 if (total_bytes > 32*1024)
2717 num_bytes = 32*1024;
2718 else
2719 num_bytes = total_bytes;
2720 total_bytes -= num_bytes;
2722 if (!total_bytes)
2723 flash_op = FLASHROM_OPER_FLASH;
2724 else
2725 flash_op = FLASHROM_OPER_SAVE;
2726 memcpy(req->params.data_buf, p, num_bytes);
2727 p += num_bytes;
2728 status = be_cmd_write_flashrom(adapter, flash_cmd,
2729 pflashcomp[i].optype, flash_op, num_bytes);
2730 if (status) {
2731 dev_err(&adapter->pdev->dev,
2732 "cmd to write to flash rom failed.\n");
2733 return -1;
2737 return 0;
2740 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2742 if (fhdr == NULL)
2743 return 0;
2744 if (fhdr->build[0] == '3')
2745 return BE_GEN3;
2746 else if (fhdr->build[0] == '2')
2747 return BE_GEN2;
2748 else
2749 return 0;
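/* Lancer firmware is a flat object: stream it to the "/prg" location
 * in 32KB write_object chunks, then issue a zero-length write at the
 * final offset to commit the image.
 */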
2752 static int lancer_fw_download(struct be_adapter *adapter,
2753 const struct firmware *fw)
2755 #define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
2756 #define LANCER_FW_DOWNLOAD_LOCATION "/prg"
2757 struct be_dma_mem flash_cmd;
2758 const u8 *data_ptr = NULL;
2759 u8 *dest_image_ptr = NULL;
2760 size_t image_size = 0;
2761 u32 chunk_size = 0;
2762 u32 data_written = 0;
2763 u32 offset = 0;
2764 int status = 0;
2765 u8 add_status = 0;
2767 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
2768 dev_err(&adapter->pdev->dev,
2769 "FW Image not properly aligned. "
2770 "Length must be 4 byte aligned.\n");
2771 status = -EINVAL;
2772 goto lancer_fw_exit;
2775 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2776 + LANCER_FW_DOWNLOAD_CHUNK;
2777 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2778 &flash_cmd.dma, GFP_KERNEL);
2779 if (!flash_cmd.va) {
2780 status = -ENOMEM;
2781 dev_err(&adapter->pdev->dev,
2782 "Memory allocation failure while flashing\n");
2783 goto lancer_fw_exit;
2786 dest_image_ptr = flash_cmd.va +
2787 sizeof(struct lancer_cmd_req_write_object);
2788 image_size = fw->size;
2789 data_ptr = fw->data;
2791 while (image_size) {
2792 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
2794 /* Copy the image chunk content. */
2795 memcpy(dest_image_ptr, data_ptr, chunk_size);
2797 status = lancer_cmd_write_object(adapter, &flash_cmd,
2798 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
2799 &data_written, &add_status);
2801 if (status)
2802 break;
2804 offset += data_written;
2805 data_ptr += data_written;
2806 image_size -= data_written;
2809 if (!status) {
2810 /* Commit the FW written */
2811 status = lancer_cmd_write_object(adapter, &flash_cmd,
2812 0, offset, LANCER_FW_DOWNLOAD_LOCATION,
2813 &data_written, &add_status);
2816 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2817 flash_cmd.dma);
2818 if (status) {
2819 dev_err(&adapter->pdev->dev,
2820 "Firmware load error. "
2821 "Status code: 0x%x Additional Status: 0x%x\n",
2822 status, add_status);
2823 goto lancer_fw_exit;
2826 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2827 lancer_fw_exit:
2828 return status;
2831 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
2833 struct flash_file_hdr_g2 *fhdr;
2834 struct flash_file_hdr_g3 *fhdr3;
2835 struct image_hdr *img_hdr_ptr = NULL;
2836 struct be_dma_mem flash_cmd;
2837 const u8 *p;
2838 int status = 0, i = 0, num_imgs = 0;
2840 p = fw->data;
2841 fhdr = (struct flash_file_hdr_g2 *) p;
2843 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2844 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2845 &flash_cmd.dma, GFP_KERNEL);
2846 if (!flash_cmd.va) {
2847 status = -ENOMEM;
2848 dev_err(&adapter->pdev->dev,
2849 "Memory allocation failure while flashing\n");
2850 goto be_fw_exit;
2853 if ((adapter->generation == BE_GEN3) &&
2854 (get_ufigen_type(fhdr) == BE_GEN3)) {
2855 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2856 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2857 for (i = 0; i < num_imgs; i++) {
2858 img_hdr_ptr = (struct image_hdr *) (fw->data +
2859 (sizeof(struct flash_file_hdr_g3) +
2860 i * sizeof(struct image_hdr)));
2861 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2862 status = be_flash_data(adapter, fw, &flash_cmd,
2863 num_imgs);
2865 } else if ((adapter->generation == BE_GEN2) &&
2866 (get_ufigen_type(fhdr) == BE_GEN2)) {
2867 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2868 } else {
2869 dev_err(&adapter->pdev->dev,
2870 "UFI and Interface are not compatible for flashing\n");
2871 status = -1;
2874 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2875 flash_cmd.dma);
2876 if (status) {
2877 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2878 goto be_fw_exit;
2881 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2883 be_fw_exit:
2884 return status;
2887 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
2889 const struct firmware *fw;
2890 int status;
2892 if (!netif_running(adapter->netdev)) {
2893 dev_err(&adapter->pdev->dev,
2894 "Firmware load not allowed (interface is down)\n");
2895 return -1;
2898 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2899 if (status)
2900 goto fw_exit;
2902 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2904 if (lancer_chip(adapter))
2905 status = lancer_fw_download(adapter, fw);
2906 else
2907 status = be_fw_download(adapter, fw);
2909 fw_exit:
2910 release_firmware(fw);
2911 return status;
2914 static struct net_device_ops be_netdev_ops = {
2915 .ndo_open = be_open,
2916 .ndo_stop = be_close,
2917 .ndo_start_xmit = be_xmit,
2918 .ndo_set_rx_mode = be_set_multicast_list,
2919 .ndo_set_mac_address = be_mac_addr_set,
2920 .ndo_change_mtu = be_change_mtu,
2921 .ndo_validate_addr = eth_validate_addr,
2922 .ndo_vlan_rx_register = be_vlan_register,
2923 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
2924 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
2925 .ndo_set_vf_mac = be_set_vf_mac,
2926 .ndo_set_vf_vlan = be_set_vf_vlan,
2927 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
2928 .ndo_get_vf_config = be_get_vf_config
2931 static void be_netdev_init(struct net_device *netdev)
2933 struct be_adapter *adapter = netdev_priv(netdev);
2934 struct be_rx_obj *rxo;
2935 int i;
2937 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
2938 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
2939 NETIF_F_HW_VLAN_TX;
2940 if (be_multi_rxq(adapter))
2941 netdev->hw_features |= NETIF_F_RXHASH;
2943 netdev->features |= netdev->hw_features |
2944 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
2946 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
2947 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2949 netdev->flags |= IFF_MULTICAST;
2951 /* Default settings for Rx and Tx flow control */
2952 adapter->rx_fc = true;
2953 adapter->tx_fc = true;
2955 netif_set_gso_max_size(netdev, 65535);
2957 BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2959 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2961 for_all_rx_queues(adapter, rxo, i)
2962 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2963 BE_NAPI_WEIGHT);
2965 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
2966 BE_NAPI_WEIGHT);
2969 static void be_unmap_pci_bars(struct be_adapter *adapter)
2971 if (adapter->csr)
2972 iounmap(adapter->csr);
2973 if (adapter->db)
2974 iounmap(adapter->db);
2975 if (adapter->pcicfg && be_physfn(adapter))
2976 iounmap(adapter->pcicfg);
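/* Map the BARs this function needs. Lancer exposes everything it uses
 * here through BAR 0 (doorbells); on BE2/BE3 the PF additionally maps
 * the CSR BAR, and the pcicfg/doorbell BAR numbers vary with chip
 * generation and PF vs VF.
 */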
2979 static int be_map_pci_bars(struct be_adapter *adapter)
2981 u8 __iomem *addr;
2982 int pcicfg_reg, db_reg;
2984 if (lancer_chip(adapter)) {
2985 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
2986 pci_resource_len(adapter->pdev, 0));
2987 if (addr == NULL)
2988 return -ENOMEM;
2989 adapter->db = addr;
2990 return 0;
2993 if (be_physfn(adapter)) {
2994 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2995 pci_resource_len(adapter->pdev, 2));
2996 if (addr == NULL)
2997 return -ENOMEM;
2998 adapter->csr = addr;
3001 if (adapter->generation == BE_GEN2) {
3002 pcicfg_reg = 1;
3003 db_reg = 4;
3004 } else {
3005 pcicfg_reg = 0;
3006 if (be_physfn(adapter))
3007 db_reg = 4;
3008 else
3009 db_reg = 0;
3011 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3012 pci_resource_len(adapter->pdev, db_reg));
3013 if (addr == NULL)
3014 goto pci_map_err;
3015 adapter->db = addr;
3017 if (be_physfn(adapter)) {
3018 addr = ioremap_nocache(
3019 pci_resource_start(adapter->pdev, pcicfg_reg),
3020 pci_resource_len(adapter->pdev, pcicfg_reg));
3021 if (addr == NULL)
3022 goto pci_map_err;
3023 adapter->pcicfg = addr;
3024 } else
3025 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
3027 return 0;
3028 pci_map_err:
3029 be_unmap_pci_bars(adapter);
3030 return -ENOMEM;
3034 static void be_ctrl_cleanup(struct be_adapter *adapter)
3036 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3038 be_unmap_pci_bars(adapter);
3040 if (mem->va)
3041 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3042 mem->dma);
3044 mem = &adapter->mc_cmd_mem;
3045 if (mem->va)
3046 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3047 mem->dma);
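/* Map BARs and allocate the DMA buffers used for mailbox and multicast
 * commands. The mailbox must be 16-byte aligned, so it is carved out of
 * a slightly oversized allocation with PTR_ALIGN().
 */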
3050 static int be_ctrl_init(struct be_adapter *adapter)
3052 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3053 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3054 struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
3055 int status;
3057 status = be_map_pci_bars(adapter);
3058 if (status)
3059 goto done;
3061 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3062 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3063 mbox_mem_alloc->size,
3064 &mbox_mem_alloc->dma,
3065 GFP_KERNEL);
3066 if (!mbox_mem_alloc->va) {
3067 status = -ENOMEM;
3068 goto unmap_pci_bars;
3071 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3072 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3073 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3074 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3076 mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
3077 mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
3078 mc_cmd_mem->size, &mc_cmd_mem->dma,
3079 GFP_KERNEL);
3080 if (mc_cmd_mem->va == NULL) {
3081 status = -ENOMEM;
3082 goto free_mbox;
3084 memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
3086 mutex_init(&adapter->mbox_lock);
3087 spin_lock_init(&adapter->mcc_lock);
3088 spin_lock_init(&adapter->mcc_cq_lock);
3090 init_completion(&adapter->flash_compl);
3091 pci_save_state(adapter->pdev);
3092 return 0;
3094 free_mbox:
3095 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3096 mbox_mem_alloc->va, mbox_mem_alloc->dma);
3098 unmap_pci_bars:
3099 be_unmap_pci_bars(adapter);
3101 done:
3102 return status;
3105 static void be_stats_cleanup(struct be_adapter *adapter)
3107 struct be_dma_mem *cmd = &adapter->stats_cmd;
3109 if (cmd->va)
3110 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3111 cmd->va, cmd->dma);
3114 static int be_stats_init(struct be_adapter *adapter)
3116 struct be_dma_mem *cmd = &adapter->stats_cmd;
3118 if (adapter->generation == BE_GEN2) {
3119 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3120 } else {
3121 if (lancer_chip(adapter))
3122 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3123 else
3124 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3126 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3127 GFP_KERNEL);
3128 if (cmd->va == NULL)
3129 return -1;
3130 memset(cmd->va, 0, cmd->size);
3131 return 0;
3134 static void __devexit be_remove(struct pci_dev *pdev)
3136 struct be_adapter *adapter = pci_get_drvdata(pdev);
3138 if (!adapter)
3139 return;
3141 cancel_delayed_work_sync(&adapter->work);
3143 unregister_netdev(adapter->netdev);
3145 be_clear(adapter);
3147 be_stats_cleanup(adapter);
3149 be_ctrl_cleanup(adapter);
3151 kfree(adapter->vf_cfg);
3152 be_sriov_disable(adapter);
3154 be_msix_disable(adapter);
3156 pci_set_drvdata(pdev, NULL);
3157 pci_release_regions(pdev);
3158 pci_disable_device(pdev);
3160 free_netdev(adapter->netdev);
3163 static int be_get_config(struct be_adapter *adapter)
3165 int status;
3166 u8 mac[ETH_ALEN];
3168 status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
3169 if (status)
3170 return status;
3172 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3173 &adapter->function_mode, &adapter->function_caps);
3174 if (status)
3175 return status;
3177 memset(mac, 0, ETH_ALEN);
3179 /* A default permanent address is given to each VF for Lancer */
3180 if (be_physfn(adapter) || lancer_chip(adapter)) {
3181 status = be_cmd_mac_addr_query(adapter, mac,
3182 MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
3184 if (status)
3185 return status;
3187 if (!is_valid_ether_addr(mac))
3188 return -EADDRNOTAVAIL;
3190 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3191 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3194 if (adapter->function_mode & 0x400)
3195 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3196 else
3197 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3199 status = be_cmd_get_cntl_attributes(adapter);
3200 if (status)
3201 return status;
3203 be_cmd_check_native_mode(adapter);
3205 if ((num_vfs && adapter->sriov_enabled) ||
3206 (adapter->function_mode & 0x400) ||
3207 lancer_chip(adapter) || !be_physfn(adapter)) {
3208 adapter->num_tx_qs = 1;
3209 netif_set_real_num_tx_queues(adapter->netdev,
3210 adapter->num_tx_qs);
3211 } else {
3212 adapter->num_tx_qs = MAX_TX_QS;
3215 return 0;
3218 static int be_dev_family_check(struct be_adapter *adapter)
3220 struct pci_dev *pdev = adapter->pdev;
3221 u32 sli_intf = 0, if_type;
3223 switch (pdev->device) {
3224 case BE_DEVICE_ID1:
3225 case OC_DEVICE_ID1:
3226 adapter->generation = BE_GEN2;
3227 break;
3228 case BE_DEVICE_ID2:
3229 case OC_DEVICE_ID2:
3230 adapter->generation = BE_GEN3;
3231 break;
3232 case OC_DEVICE_ID3:
3233 case OC_DEVICE_ID4:
3234 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3235 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3236 SLI_INTF_IF_TYPE_SHIFT;
3238 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3239 if_type != 0x02) {
3240 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3241 return -EINVAL;
3243 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3244 SLI_INTF_FAMILY_SHIFT);
3245 adapter->generation = BE_GEN3;
3246 break;
3247 default:
3248 adapter->generation = 0;
3250 return 0;
3253 static int lancer_wait_ready(struct be_adapter *adapter)
3255 #define SLIPORT_READY_TIMEOUT 500
3256 u32 sliport_status;
3257 int status = 0, i;
3259 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3260 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3261 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3262 break;
3264 msleep(20);
3267 if (i == SLIPORT_READY_TIMEOUT)
3268 status = -1;
3270 return status;
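/* Wait for the Lancer SLIPORT to report ready. If it instead reports an
 * error with the reset-needed bit set, trigger a port reset via
 * SLIPORT_CONTROL and wait for the port to recover.
 */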
3273 static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3275 int status;
3276 u32 sliport_status, err, reset_needed;
3277 status = lancer_wait_ready(adapter);
3278 if (!status) {
3279 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3280 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3281 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3282 if (err && reset_needed) {
3283 iowrite32(SLI_PORT_CONTROL_IP_MASK,
3284 adapter->db + SLIPORT_CONTROL_OFFSET);
3286 /* check if the adapter has corrected the error */
3287 status = lancer_wait_ready(adapter);
3288 sliport_status = ioread32(adapter->db +
3289 SLIPORT_STATUS_OFFSET);
3290 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3291 SLIPORT_STATUS_RN_MASK);
3292 if (status || sliport_status)
3293 status = -1;
3294 } else if (err || reset_needed) {
3295 status = -1;
3298 return status;
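/* PCI probe: enable and map the device, initialize the mailbox, sync up
 * with firmware (POST on the PF, then fw_init), reset the function,
 * query stats/config, enable MSI-X, build the queues and finally
 * register the netdev.
 */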
3301 static int __devinit be_probe(struct pci_dev *pdev,
3302 const struct pci_device_id *pdev_id)
3304 int status = 0;
3305 struct be_adapter *adapter;
3306 struct net_device *netdev;
3308 status = pci_enable_device(pdev);
3309 if (status)
3310 goto do_none;
3312 status = pci_request_regions(pdev, DRV_NAME);
3313 if (status)
3314 goto disable_dev;
3315 pci_set_master(pdev);
3317 netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
3318 if (netdev == NULL) {
3319 status = -ENOMEM;
3320 goto rel_reg;
3322 adapter = netdev_priv(netdev);
3323 adapter->pdev = pdev;
3324 pci_set_drvdata(pdev, adapter);
3326 status = be_dev_family_check(adapter);
3327 if (status)
3328 goto free_netdev;
3330 adapter->netdev = netdev;
3331 SET_NETDEV_DEV(netdev, &pdev->dev);
3333 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
3334 if (!status) {
3335 netdev->features |= NETIF_F_HIGHDMA;
3336 } else {
3337 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3338 if (status) {
3339 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3340 goto free_netdev;
3344 be_sriov_enable(adapter);
3345 if (adapter->sriov_enabled) {
3346 adapter->vf_cfg = kcalloc(num_vfs,
3347 sizeof(struct be_vf_cfg), GFP_KERNEL);
3349 if (!adapter->vf_cfg)
3350 goto free_netdev;
3353 status = be_ctrl_init(adapter);
3354 if (status)
3355 goto free_vf_cfg;
3357 if (lancer_chip(adapter)) {
3358 status = lancer_test_and_set_rdy_state(adapter);
3359 if (status) {
3360 dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
3361 goto ctrl_clean;
3365 /* sync up with fw's ready state */
3366 if (be_physfn(adapter)) {
3367 status = be_cmd_POST(adapter);
3368 if (status)
3369 goto ctrl_clean;
3372 /* tell fw we're ready to fire cmds */
3373 status = be_cmd_fw_init(adapter);
3374 if (status)
3375 goto ctrl_clean;
3377 status = be_cmd_reset_function(adapter);
3378 if (status)
3379 goto ctrl_clean;
3381 status = be_stats_init(adapter);
3382 if (status)
3383 goto ctrl_clean;
3385 status = be_get_config(adapter);
3386 if (status)
3387 goto stats_clean;
3389 be_msix_enable(adapter);
3391 INIT_DELAYED_WORK(&adapter->work, be_worker);
3393 status = be_setup(adapter);
3394 if (status)
3395 goto msix_disable;
3397 be_netdev_init(netdev);
3398 status = register_netdev(netdev);
3399 if (status != 0)
3400 goto unsetup;
3401 netif_carrier_off(netdev);
3403 if (be_physfn(adapter) && adapter->sriov_enabled) {
3404 u8 mac_speed;
3405 bool link_up;
3406 u16 vf, lnk_speed;
3408 if (!lancer_chip(adapter)) {
3409 status = be_vf_eth_addr_config(adapter);
3410 if (status)
3411 goto unreg_netdev;
3414 for (vf = 0; vf < num_vfs; vf++) {
3415 status = be_cmd_link_status_query(adapter, &link_up,
3416 &mac_speed, &lnk_speed, vf + 1);
3417 if (!status)
3418 adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
3419 else
3420 goto unreg_netdev;
3424 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
3426 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3427 return 0;
3429 unreg_netdev:
3430 unregister_netdev(netdev);
3431 unsetup:
3432 be_clear(adapter);
3433 msix_disable:
3434 be_msix_disable(adapter);
3435 stats_clean:
3436 be_stats_cleanup(adapter);
3437 ctrl_clean:
3438 be_ctrl_cleanup(adapter);
3439 free_vf_cfg:
3440 kfree(adapter->vf_cfg);
3441 free_netdev:
3442 be_sriov_disable(adapter);
3443 free_netdev(netdev);
3444 pci_set_drvdata(pdev, NULL);
3445 rel_reg:
3446 pci_release_regions(pdev);
3447 disable_dev:
3448 pci_disable_device(pdev);
3449 do_none:
3450 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3451 return status;
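/* Suspend tears the data path down completely (be_clear) rather than
 * just quiescing it, arming WoL first if configured; resume rebuilds it
 * with be_setup()/be_open().
 */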
3454 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3456 struct be_adapter *adapter = pci_get_drvdata(pdev);
3457 struct net_device *netdev = adapter->netdev;
3459 cancel_delayed_work_sync(&adapter->work);
3460 if (adapter->wol)
3461 be_setup_wol(adapter, true);
3463 netif_device_detach(netdev);
3464 if (netif_running(netdev)) {
3465 rtnl_lock();
3466 be_close(netdev);
3467 rtnl_unlock();
3469 be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
3470 be_clear(adapter);
3472 be_msix_disable(adapter);
3473 pci_save_state(pdev);
3474 pci_disable_device(pdev);
3475 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3476 return 0;
3479 static int be_resume(struct pci_dev *pdev)
3481 int status = 0;
3482 struct be_adapter *adapter = pci_get_drvdata(pdev);
3483 struct net_device *netdev = adapter->netdev;
3485 netif_device_detach(netdev);
3487 status = pci_enable_device(pdev);
3488 if (status)
3489 return status;
3491 pci_set_power_state(pdev, 0);
3492 pci_restore_state(pdev);
3494 be_msix_enable(adapter);
3495 /* tell fw we're ready to fire cmds */
3496 status = be_cmd_fw_init(adapter);
3497 if (status)
3498 return status;
3500 be_setup(adapter);
3501 if (netif_running(netdev)) {
3502 rtnl_lock();
3503 be_open(netdev);
3504 rtnl_unlock();
3506 netif_device_attach(netdev);
3508 if (adapter->wol)
3509 be_setup_wol(adapter, false);
3511 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3512 return 0;
3515 /*
3516 * An FLR will stop BE from DMAing any data.
3517 */
3518 static void be_shutdown(struct pci_dev *pdev)
3520 struct be_adapter *adapter = pci_get_drvdata(pdev);
3522 if (!adapter)
3523 return;
3525 cancel_delayed_work_sync(&adapter->work);
3527 netif_device_detach(adapter->netdev);
3529 if (adapter->wol)
3530 be_setup_wol(adapter, true);
3532 be_cmd_reset_function(adapter);
3534 pci_disable_device(pdev);
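/* PCI EEH (error recovery) callbacks: on a detected error the netdev is
 * closed and the data path cleared; slot_reset re-enables the device
 * and re-POSTs the card; resume re-initializes firmware, rebuilds the
 * queues and reattaches the netdev.
 */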
3537 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3538 pci_channel_state_t state)
3540 struct be_adapter *adapter = pci_get_drvdata(pdev);
3541 struct net_device *netdev = adapter->netdev;
3543 dev_err(&adapter->pdev->dev, "EEH error detected\n");
3545 adapter->eeh_err = true;
3547 netif_device_detach(netdev);
3549 if (netif_running(netdev)) {
3550 rtnl_lock();
3551 be_close(netdev);
3552 rtnl_unlock();
3554 be_clear(adapter);
3556 if (state == pci_channel_io_perm_failure)
3557 return PCI_ERS_RESULT_DISCONNECT;
3559 pci_disable_device(pdev);
3561 return PCI_ERS_RESULT_NEED_RESET;
3564 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3566 struct be_adapter *adapter = pci_get_drvdata(pdev);
3567 int status;
3569 dev_info(&adapter->pdev->dev, "EEH reset\n");
3570 adapter->eeh_err = false;
3572 status = pci_enable_device(pdev);
3573 if (status)
3574 return PCI_ERS_RESULT_DISCONNECT;
3576 pci_set_master(pdev);
3577 pci_set_power_state(pdev, 0);
3578 pci_restore_state(pdev);
3580 /* Check if card is ok and fw is ready */
3581 status = be_cmd_POST(adapter);
3582 if (status)
3583 return PCI_ERS_RESULT_DISCONNECT;
3585 return PCI_ERS_RESULT_RECOVERED;
3588 static void be_eeh_resume(struct pci_dev *pdev)
3590 int status = 0;
3591 struct be_adapter *adapter = pci_get_drvdata(pdev);
3592 struct net_device *netdev = adapter->netdev;
3594 dev_info(&adapter->pdev->dev, "EEH resume\n");
3596 pci_save_state(pdev);
3598 /* tell fw we're ready to fire cmds */
3599 status = be_cmd_fw_init(adapter);
3600 if (status)
3601 goto err;
3603 status = be_setup(adapter);
3604 if (status)
3605 goto err;
3607 if (netif_running(netdev)) {
3608 status = be_open(netdev);
3609 if (status)
3610 goto err;
3612 netif_device_attach(netdev);
3613 return;
3614 err:
3615 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
3618 static struct pci_error_handlers be_eeh_handlers = {
3619 .error_detected = be_eeh_err_detected,
3620 .slot_reset = be_eeh_reset,
3621 .resume = be_eeh_resume,
3624 static struct pci_driver be_driver = {
3625 .name = DRV_NAME,
3626 .id_table = be_dev_ids,
3627 .probe = be_probe,
3628 .remove = be_remove,
3629 .suspend = be_suspend,
3630 .resume = be_resume,
3631 .shutdown = be_shutdown,
3632 .err_handler = &be_eeh_handlers
3635 static int __init be_init_module(void)
3637 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3638 rx_frag_size != 2048) {
3639 printk(KERN_WARNING DRV_NAME
3640 " : Module param rx_frag_size must be 2048/4096/8192."
3641 " Using 2048\n");
3642 rx_frag_size = 2048;
3645 return pci_register_driver(&be_driver);
3647 module_init(be_init_module);
3649 static void __exit be_exit_module(void)
3651 pci_unregister_driver(&be_driver);
3653 module_exit(be_exit_module);