net: use netdev_mc_count and netdev_mc_empty when appropriate
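The change replaces open-coded multicast-list length checks (reads of ndev->mc_count in this era) with the netdev_mc_count() and netdev_mc_empty() helpers from <linux/netdevice.h>. A minimal sketch of the pattern, assuming the 2.6.33-era net_device API; the function name and the limit of 32 below are illustrative and not taken from this driver:

	#include <linux/netdevice.h>

	static bool example_wants_allmulti(struct net_device *ndev)
	{
		if (netdev_mc_empty(ndev))		/* no multicast addresses configured */
			return false;
		return netdev_mc_count(ndev) > 32;	/* example limit, not the qlge CAM size */
	}

Call sites such as the driver's set_multicast_list handler then query the list length through these helpers rather than dereferencing the net_device fields directly.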
[linux-2.6.git] / drivers/net/qlge/qlge_main.c
blob dd3e0f1b2965fbd3cd5d90feaeae11624eb16c72
1 /*
2 * QLogic qlge NIC HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation
4 * See LICENSE.qlge for copyright and licensing details.
5 * Author: Linux qlge network device driver by
6 * Ron Mercer <ron.mercer@qlogic.com>
7 */
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/types.h>
11 #include <linux/module.h>
12 #include <linux/list.h>
13 #include <linux/pci.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/pagemap.h>
16 #include <linux/sched.h>
17 #include <linux/slab.h>
18 #include <linux/dmapool.h>
19 #include <linux/mempool.h>
20 #include <linux/spinlock.h>
21 #include <linux/kthread.h>
22 #include <linux/interrupt.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/in.h>
26 #include <linux/ip.h>
27 #include <linux/ipv6.h>
28 #include <net/ipv6.h>
29 #include <linux/tcp.h>
30 #include <linux/udp.h>
31 #include <linux/if_arp.h>
32 #include <linux/if_ether.h>
33 #include <linux/netdevice.h>
34 #include <linux/etherdevice.h>
35 #include <linux/ethtool.h>
36 #include <linux/skbuff.h>
37 #include <linux/if_vlan.h>
38 #include <linux/delay.h>
39 #include <linux/mm.h>
40 #include <linux/vmalloc.h>
41 #include <net/ip6_checksum.h>
43 #include "qlge.h"
45 char qlge_driver_name[] = DRV_NAME;
46 const char qlge_driver_version[] = DRV_VERSION;
48 MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
49 MODULE_DESCRIPTION(DRV_STRING " ");
50 MODULE_LICENSE("GPL");
51 MODULE_VERSION(DRV_VERSION);
53 static const u32 default_msg =
54 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
55 /* NETIF_MSG_TIMER | */
56 NETIF_MSG_IFDOWN |
57 NETIF_MSG_IFUP |
58 NETIF_MSG_RX_ERR |
59 NETIF_MSG_TX_ERR |
60 /* NETIF_MSG_TX_QUEUED | */
61 /* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
62 /* NETIF_MSG_PKTDATA | */
63 NETIF_MSG_HW | NETIF_MSG_WOL | 0;
65 static int debug = 0x00007fff; /* defaults above */
66 module_param(debug, int, 0);
67 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
69 #define MSIX_IRQ 0
70 #define MSI_IRQ 1
71 #define LEG_IRQ 2
72 static int qlge_irq_type = MSIX_IRQ;
73 module_param(qlge_irq_type, int, MSIX_IRQ);
74 MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
76 static int qlge_mpi_coredump;
77 module_param(qlge_mpi_coredump, int, 0);
78 MODULE_PARM_DESC(qlge_mpi_coredump,
79 "Option to enable MPI firmware dump. "
80 "Default is OFF - Do Not allocate memory. ");
82 static int qlge_force_coredump;
83 module_param(qlge_force_coredump, int, 0);
84 MODULE_PARM_DESC(qlge_force_coredump,
85 "Option to allow force of firmware core dump. "
86 "Default is OFF - Do not allow.");
88 static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
89 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
90 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
91 /* required last entry */
92 {0,}
95 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
97 /* This hardware semaphore causes exclusive access to
98 * resources shared between the NIC driver, MPI firmware,
99 * FCOE firmware and the FC driver.
101 static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
103 u32 sem_bits = 0;
105 switch (sem_mask) {
106 case SEM_XGMAC0_MASK:
107 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
108 break;
109 case SEM_XGMAC1_MASK:
110 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
111 break;
112 case SEM_ICB_MASK:
113 sem_bits = SEM_SET << SEM_ICB_SHIFT;
114 break;
115 case SEM_MAC_ADDR_MASK:
116 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
117 break;
118 case SEM_FLASH_MASK:
119 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
120 break;
121 case SEM_PROBE_MASK:
122 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
123 break;
124 case SEM_RT_IDX_MASK:
125 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
126 break;
127 case SEM_PROC_REG_MASK:
128 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
129 break;
130 default:
131 QPRINTK(qdev, PROBE, ALERT, "Bad Semaphore mask!.\n");
132 return -EINVAL;
135 ql_write32(qdev, SEM, sem_bits | sem_mask);
136 return !(ql_read32(qdev, SEM) & sem_bits);
139 int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
141 unsigned int wait_count = 30;
142 do {
143 if (!ql_sem_trylock(qdev, sem_mask))
144 return 0;
145 udelay(100);
146 } while (--wait_count);
147 return -ETIMEDOUT;
150 void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
152 ql_write32(qdev, SEM, sem_mask);
153 ql_read32(qdev, SEM); /* flush */
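/* Illustrative usage of the semaphore API above (it mirrors what
 * ql_get_8012_flash_params() does later in this file), not an addition
 * to the driver:
 *
 *	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
 *		return -ETIMEDOUT;
 *	...access the flash registers...
 *	ql_sem_unlock(qdev, SEM_FLASH_MASK);
 */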
156 /* This function waits for a specific bit to come ready
157 * in a given register. It is used mostly by the initialize
158 * process, but is also used in kernel thread API such as
159 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
161 int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
163 u32 temp;
164 int count = UDELAY_COUNT;
166 while (count) {
167 temp = ql_read32(qdev, reg);
169 /* check for errors */
170 if (temp & err_bit) {
171 QPRINTK(qdev, PROBE, ALERT,
172 "register 0x%.08x access error, value = 0x%.08x!.\n",
173 reg, temp);
174 return -EIO;
175 } else if (temp & bit)
176 return 0;
177 udelay(UDELAY_DELAY);
178 count--;
180 QPRINTK(qdev, PROBE, ALERT,
181 "Timed out waiting for reg %x to come ready.\n", reg);
182 return -ETIMEDOUT;
185 /* The CFG register is used to download TX and RX control blocks
186 * to the chip. This function waits for an operation to complete.
188 static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
190 int count = UDELAY_COUNT;
191 u32 temp;
193 while (count) {
194 temp = ql_read32(qdev, CFG);
195 if (temp & CFG_LE)
196 return -EIO;
197 if (!(temp & bit))
198 return 0;
199 udelay(UDELAY_DELAY);
200 count--;
202 return -ETIMEDOUT;
206 /* Used to issue init control blocks to hw. Maps control block,
207 * sets address, triggers download, waits for completion.
209 int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
210 u16 q_id)
212 u64 map;
213 int status = 0;
214 int direction;
215 u32 mask;
216 u32 value;
218 direction =
219 (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
220 PCI_DMA_FROMDEVICE;
222 map = pci_map_single(qdev->pdev, ptr, size, direction);
223 if (pci_dma_mapping_error(qdev->pdev, map)) {
224 QPRINTK(qdev, IFUP, ERR, "Couldn't map DMA area.\n");
225 return -ENOMEM;
228 status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
229 if (status)
230 return status;
232 status = ql_wait_cfg(qdev, bit);
233 if (status) {
234 QPRINTK(qdev, IFUP, ERR,
235 "Timed out waiting for CFG to come ready.\n");
236 goto exit;
239 ql_write32(qdev, ICB_L, (u32) map);
240 ql_write32(qdev, ICB_H, (u32) (map >> 32));
242 mask = CFG_Q_MASK | (bit << 16);
243 value = bit | (q_id << CFG_Q_SHIFT);
244 ql_write32(qdev, CFG, (mask | value));
247 * Wait for the bit to clear after signaling hw.
249 status = ql_wait_cfg(qdev, bit);
250 exit:
251 ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
252 pci_unmap_single(qdev->pdev, map, size, direction);
253 return status;
256 /* Get a specific MAC address from the CAM. Used for debug and reg dump. */
257 int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
258 u32 *value)
260 u32 offset = 0;
261 int status;
263 switch (type) {
264 case MAC_ADDR_TYPE_MULTI_MAC:
265 case MAC_ADDR_TYPE_CAM_MAC:
267 status =
268 ql_wait_reg_rdy(qdev,
269 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
270 if (status)
271 goto exit;
272 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
273 (index << MAC_ADDR_IDX_SHIFT) | /* index */
274 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
275 status =
276 ql_wait_reg_rdy(qdev,
277 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
278 if (status)
279 goto exit;
280 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
281 status =
282 ql_wait_reg_rdy(qdev,
283 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
284 if (status)
285 goto exit;
286 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
287 (index << MAC_ADDR_IDX_SHIFT) | /* index */
288 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
289 status =
290 ql_wait_reg_rdy(qdev,
291 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
292 if (status)
293 goto exit;
294 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
295 if (type == MAC_ADDR_TYPE_CAM_MAC) {
296 status =
297 ql_wait_reg_rdy(qdev,
298 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
299 if (status)
300 goto exit;
301 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
302 (index << MAC_ADDR_IDX_SHIFT) | /* index */
303 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
304 status =
305 ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
306 MAC_ADDR_MR, 0);
307 if (status)
308 goto exit;
309 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
311 break;
313 case MAC_ADDR_TYPE_VLAN:
314 case MAC_ADDR_TYPE_MULTI_FLTR:
315 default:
316 QPRINTK(qdev, IFUP, CRIT,
317 "Address type %d not yet supported.\n", type);
318 status = -EPERM;
320 exit:
321 return status;
324 /* Set up a MAC, multicast or VLAN address for the
325 * inbound frame matching.
327 static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
328 u16 index)
330 u32 offset = 0;
331 int status = 0;
333 switch (type) {
334 case MAC_ADDR_TYPE_MULTI_MAC:
336 u32 upper = (addr[0] << 8) | addr[1];
337 u32 lower = (addr[2] << 24) | (addr[3] << 16) |
338 (addr[4] << 8) | (addr[5]);
340 status =
341 ql_wait_reg_rdy(qdev,
342 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
343 if (status)
344 goto exit;
345 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
346 (index << MAC_ADDR_IDX_SHIFT) |
347 type | MAC_ADDR_E);
348 ql_write32(qdev, MAC_ADDR_DATA, lower);
349 status =
350 ql_wait_reg_rdy(qdev,
351 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
352 if (status)
353 goto exit;
354 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
355 (index << MAC_ADDR_IDX_SHIFT) |
356 type | MAC_ADDR_E);
358 ql_write32(qdev, MAC_ADDR_DATA, upper);
359 status =
360 ql_wait_reg_rdy(qdev,
361 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
362 if (status)
363 goto exit;
364 break;
366 case MAC_ADDR_TYPE_CAM_MAC:
368 u32 cam_output;
369 u32 upper = (addr[0] << 8) | addr[1];
370 u32 lower =
371 (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
372 (addr[5]);
374 QPRINTK(qdev, IFUP, DEBUG,
375 "Adding %s address %pM"
376 " at index %d in the CAM.\n",
377 ((type ==
378 MAC_ADDR_TYPE_MULTI_MAC) ? "MULTICAST" :
379 "UNICAST"), addr, index);
381 status =
382 ql_wait_reg_rdy(qdev,
383 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
384 if (status)
385 goto exit;
386 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
387 (index << MAC_ADDR_IDX_SHIFT) | /* index */
388 type); /* type */
389 ql_write32(qdev, MAC_ADDR_DATA, lower);
390 status =
391 ql_wait_reg_rdy(qdev,
392 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
393 if (status)
394 goto exit;
395 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
396 (index << MAC_ADDR_IDX_SHIFT) | /* index */
397 type); /* type */
398 ql_write32(qdev, MAC_ADDR_DATA, upper);
399 status =
400 ql_wait_reg_rdy(qdev,
401 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
402 if (status)
403 goto exit;
404 ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
405 (index << MAC_ADDR_IDX_SHIFT) | /* index */
406 type); /* type */
407 /* This field should also include the queue id
408 and possibly the function id. Right now we hardcode
409 the route field to NIC core.
411 cam_output = (CAM_OUT_ROUTE_NIC |
412 (qdev->
413 func << CAM_OUT_FUNC_SHIFT) |
414 (0 << CAM_OUT_CQ_ID_SHIFT));
415 if (qdev->vlgrp)
416 cam_output |= CAM_OUT_RV;
417 /* route to NIC core */
418 ql_write32(qdev, MAC_ADDR_DATA, cam_output);
419 break;
421 case MAC_ADDR_TYPE_VLAN:
423 u32 enable_bit = *((u32 *) &addr[0]);
424 /* For VLAN, the addr actually holds a bit that
425 * either enables or disables the vlan id we are
426 * addressing. It's either MAC_ADDR_E on or off.
427 * That's bit-27 we're talking about.
429 QPRINTK(qdev, IFUP, INFO, "%s VLAN ID %d %s the CAM.\n",
430 (enable_bit ? "Adding" : "Removing"),
431 index, (enable_bit ? "to" : "from"));
433 status =
434 ql_wait_reg_rdy(qdev,
435 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
436 if (status)
437 goto exit;
438 ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
439 (index << MAC_ADDR_IDX_SHIFT) | /* index */
440 type | /* type */
441 enable_bit); /* enable/disable */
442 break;
444 case MAC_ADDR_TYPE_MULTI_FLTR:
445 default:
446 QPRINTK(qdev, IFUP, CRIT,
447 "Address type %d not yet supported.\n", type);
448 status = -EPERM;
450 exit:
451 return status;
454 /* Set or clear MAC address in hardware. We sometimes
455 * have to clear it to prevent wrong frame routing
456 * especially in a bonding environment.
458 static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
460 int status;
461 char zero_mac_addr[ETH_ALEN];
462 char *addr;
464 if (set) {
465 addr = &qdev->ndev->dev_addr[0];
466 QPRINTK(qdev, IFUP, DEBUG,
467 "Set Mac addr %pM\n", addr);
468 } else {
469 memset(zero_mac_addr, 0, ETH_ALEN);
470 addr = &zero_mac_addr[0];
471 QPRINTK(qdev, IFUP, DEBUG,
472 "Clearing MAC address on %s\n",
473 qdev->ndev->name);
475 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
476 if (status)
477 return status;
478 status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
479 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
480 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
481 if (status)
482 QPRINTK(qdev, IFUP, ERR, "Failed to init mac "
483 "address.\n");
484 return status;
487 void ql_link_on(struct ql_adapter *qdev)
489 QPRINTK(qdev, LINK, ERR, "%s: Link is up.\n",
490 qdev->ndev->name);
491 netif_carrier_on(qdev->ndev);
492 ql_set_mac_addr(qdev, 1);
495 void ql_link_off(struct ql_adapter *qdev)
497 QPRINTK(qdev, LINK, ERR, "%s: Link is down.\n",
498 qdev->ndev->name);
499 netif_carrier_off(qdev->ndev);
500 ql_set_mac_addr(qdev, 0);
503 /* Get a specific frame routing value from the CAM.
504 * Used for debug and reg dump.
506 int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
508 int status = 0;
510 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
511 if (status)
512 goto exit;
514 ql_write32(qdev, RT_IDX,
515 RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
516 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
517 if (status)
518 goto exit;
519 *value = ql_read32(qdev, RT_DATA);
520 exit:
521 return status;
524 /* The NIC function for this chip has 16 routing indexes. Each one can be used
525 * to route different frame types to various inbound queues. We send broadcast/
526 * multicast/error frames to the default queue for slow handling,
527 * and CAM hit/RSS frames to the fast handling queues.
529 static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
530 int enable)
532 int status = -EINVAL; /* Return error if no mask match. */
533 u32 value = 0;
535 QPRINTK(qdev, IFUP, DEBUG,
536 "%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
537 (enable ? "Adding" : "Removing"),
538 ((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""),
539 ((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""),
540 ((index ==
541 RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""),
542 ((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""),
543 ((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""),
544 ((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""),
545 ((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""),
546 ((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""),
547 ((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""),
548 ((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""),
549 ((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""),
550 ((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""),
551 ((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""),
552 ((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""),
553 ((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""),
554 ((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""),
555 (enable ? "to" : "from"));
557 switch (mask) {
558 case RT_IDX_CAM_HIT:
560 value = RT_IDX_DST_CAM_Q | /* dest */
561 RT_IDX_TYPE_NICQ | /* type */
562 (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
563 break;
565 case RT_IDX_VALID: /* Promiscuous Mode frames. */
567 value = RT_IDX_DST_DFLT_Q | /* dest */
568 RT_IDX_TYPE_NICQ | /* type */
569 (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
570 break;
572 case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
574 value = RT_IDX_DST_DFLT_Q | /* dest */
575 RT_IDX_TYPE_NICQ | /* type */
576 (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
577 break;
579 case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
581 value = RT_IDX_DST_DFLT_Q | /* dest */
582 RT_IDX_TYPE_NICQ | /* type */
583 (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
584 break;
586 case RT_IDX_MCAST: /* Pass up All Multicast frames. */
588 value = RT_IDX_DST_DFLT_Q | /* dest */
589 RT_IDX_TYPE_NICQ | /* type */
590 (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
591 break;
593 case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
595 value = RT_IDX_DST_DFLT_Q | /* dest */
596 RT_IDX_TYPE_NICQ | /* type */
597 (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
598 break;
600 case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
602 value = RT_IDX_DST_RSS | /* dest */
603 RT_IDX_TYPE_NICQ | /* type */
604 (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
605 break;
607 case 0: /* Clear the E-bit on an entry. */
609 value = RT_IDX_DST_DFLT_Q | /* dest */
610 RT_IDX_TYPE_NICQ | /* type */
611 (index << RT_IDX_IDX_SHIFT);/* index */
612 break;
614 default:
615 QPRINTK(qdev, IFUP, ERR, "Mask type %d not yet supported.\n",
616 mask);
617 status = -EPERM;
618 goto exit;
621 if (value) {
622 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
623 if (status)
624 goto exit;
625 value |= (enable ? RT_IDX_E : 0);
626 ql_write32(qdev, RT_IDX, value);
627 ql_write32(qdev, RT_DATA, enable ? mask : 0);
629 exit:
630 return status;
633 static void ql_enable_interrupts(struct ql_adapter *qdev)
635 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
638 static void ql_disable_interrupts(struct ql_adapter *qdev)
640 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
643 /* If we're running with multiple MSI-X vectors then we enable on the fly.
644 * Otherwise, we may have multiple outstanding workers and don't want to
645 * enable until the last one finishes. In this case, the irq_cnt gets
646 * incremented every time we queue a worker and decremented every time
647 * a worker finishes. Once it hits zero we enable the interrupt.
649 u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
651 u32 var = 0;
652 unsigned long hw_flags = 0;
653 struct intr_context *ctx = qdev->intr_context + intr;
655 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
656 /* Always enable if we're MSIX multi interrupts and
657 * it's not the default (zeroeth) interrupt.
659 ql_write32(qdev, INTR_EN,
660 ctx->intr_en_mask);
661 var = ql_read32(qdev, STS);
662 return var;
665 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
666 if (atomic_dec_and_test(&ctx->irq_cnt)) {
667 ql_write32(qdev, INTR_EN,
668 ctx->intr_en_mask);
669 var = ql_read32(qdev, STS);
671 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
672 return var;
675 static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
677 u32 var = 0;
678 struct intr_context *ctx;
680 /* HW disables for us if we're MSIX multi interrupts and
681 * it's not the default (zeroeth) interrupt.
683 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
684 return 0;
686 ctx = qdev->intr_context + intr;
687 spin_lock(&qdev->hw_lock);
688 if (!atomic_read(&ctx->irq_cnt)) {
689 ql_write32(qdev, INTR_EN,
690 ctx->intr_dis_mask);
691 var = ql_read32(qdev, STS);
693 atomic_inc(&ctx->irq_cnt);
694 spin_unlock(&qdev->hw_lock);
695 return var;
698 static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
700 int i;
701 for (i = 0; i < qdev->intr_count; i++) {
702 /* The enable call does an atomic_dec_and_test
703 * and enables only if the result is zero.
704 * So we precharge it here.
706 if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
707 i == 0))
708 atomic_set(&qdev->intr_context[i].irq_cnt, 1);
709 ql_enable_completion_interrupt(qdev, i);
714 static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
716 int status, i;
717 u16 csum = 0;
718 __le16 *flash = (__le16 *)&qdev->flash;
720 status = strncmp((char *)&qdev->flash, str, 4);
721 if (status) {
722 QPRINTK(qdev, IFUP, ERR, "Invalid flash signature.\n");
723 return status;
726 for (i = 0; i < size; i++)
727 csum += le16_to_cpu(*flash++);
729 if (csum)
730 QPRINTK(qdev, IFUP, ERR,
731 "Invalid flash checksum, csum = 0x%.04x.\n", csum);
733 return csum;
736 static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
738 int status = 0;
739 /* wait for reg to come ready */
740 status = ql_wait_reg_rdy(qdev,
741 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
742 if (status)
743 goto exit;
744 /* set up for reg read */
745 ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
746 /* wait for reg to come ready */
747 status = ql_wait_reg_rdy(qdev,
748 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
749 if (status)
750 goto exit;
751 /* This data is stored on flash as an array of
752 * __le32. Since ql_read32() returns cpu endian
753 * we need to swap it back.
755 *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
756 exit:
757 return status;
760 static int ql_get_8000_flash_params(struct ql_adapter *qdev)
762 u32 i, size;
763 int status;
764 __le32 *p = (__le32 *)&qdev->flash;
765 u32 offset;
766 u8 mac_addr[6];
768 /* Get flash offset for function and adjust
769 * for dword access.
771 if (!qdev->port)
772 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
773 else
774 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
776 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
777 return -ETIMEDOUT;
779 size = sizeof(struct flash_params_8000) / sizeof(u32);
780 for (i = 0; i < size; i++, p++) {
781 status = ql_read_flash_word(qdev, i+offset, p);
782 if (status) {
783 QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
784 goto exit;
788 status = ql_validate_flash(qdev,
789 sizeof(struct flash_params_8000) / sizeof(u16),
790 "8000");
791 if (status) {
792 QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
793 status = -EINVAL;
794 goto exit;
797 /* Extract either manufacturer or BOFM modified
798 * MAC address.
800 if (qdev->flash.flash_params_8000.data_type1 == 2)
801 memcpy(mac_addr,
802 qdev->flash.flash_params_8000.mac_addr1,
803 qdev->ndev->addr_len);
804 else
805 memcpy(mac_addr,
806 qdev->flash.flash_params_8000.mac_addr,
807 qdev->ndev->addr_len);
809 if (!is_valid_ether_addr(mac_addr)) {
810 QPRINTK(qdev, IFUP, ERR, "Invalid MAC address.\n");
811 status = -EINVAL;
812 goto exit;
815 memcpy(qdev->ndev->dev_addr,
816 mac_addr,
817 qdev->ndev->addr_len);
819 exit:
820 ql_sem_unlock(qdev, SEM_FLASH_MASK);
821 return status;
824 static int ql_get_8012_flash_params(struct ql_adapter *qdev)
826 int i;
827 int status;
828 __le32 *p = (__le32 *)&qdev->flash;
829 u32 offset = 0;
830 u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
832 /* Second function's parameters follow the first
833 * function's.
835 if (qdev->port)
836 offset = size;
838 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
839 return -ETIMEDOUT;
841 for (i = 0; i < size; i++, p++) {
842 status = ql_read_flash_word(qdev, i+offset, p);
843 if (status) {
844 QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
845 goto exit;
850 status = ql_validate_flash(qdev,
851 sizeof(struct flash_params_8012) / sizeof(u16),
852 "8012");
853 if (status) {
854 QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
855 status = -EINVAL;
856 goto exit;
859 if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
860 status = -EINVAL;
861 goto exit;
864 memcpy(qdev->ndev->dev_addr,
865 qdev->flash.flash_params_8012.mac_addr,
866 qdev->ndev->addr_len);
868 exit:
869 ql_sem_unlock(qdev, SEM_FLASH_MASK);
870 return status;
873 /* xgmac registers are located behind the xgmac_addr and xgmac_data
874 * register pair. Each read/write requires us to wait for the ready
875 * bit before reading/writing the data.
877 static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
879 int status;
880 /* wait for reg to come ready */
881 status = ql_wait_reg_rdy(qdev,
882 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
883 if (status)
884 return status;
885 /* write the data to the data reg */
886 ql_write32(qdev, XGMAC_DATA, data);
887 /* trigger the write */
888 ql_write32(qdev, XGMAC_ADDR, reg);
889 return status;
892 /* xgmac registers are located behind the xgmac_addr and xgmac_data
893 * register pair. Each read/write requires us to wait for the ready
894 * bit before reading/writing the data.
896 int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
898 int status = 0;
899 /* wait for reg to come ready */
900 status = ql_wait_reg_rdy(qdev,
901 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
902 if (status)
903 goto exit;
904 /* set up for reg read */
905 ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
906 /* wait for reg to come ready */
907 status = ql_wait_reg_rdy(qdev,
908 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
909 if (status)
910 goto exit;
911 /* get the data */
912 *data = ql_read32(qdev, XGMAC_DATA);
913 exit:
914 return status;
917 /* This is used for reading the 64-bit statistics regs. */
918 int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
920 int status = 0;
921 u32 hi = 0;
922 u32 lo = 0;
924 status = ql_read_xgmac_reg(qdev, reg, &lo);
925 if (status)
926 goto exit;
928 status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
929 if (status)
930 goto exit;
932 *data = (u64) lo | ((u64) hi << 32);
934 exit:
935 return status;
938 static int ql_8000_port_initialize(struct ql_adapter *qdev)
940 int status;
942 * Get MPI firmware version for driver banner
943 * and ethtool info.
945 status = ql_mb_about_fw(qdev);
946 if (status)
947 goto exit;
948 status = ql_mb_get_fw_state(qdev);
949 if (status)
950 goto exit;
951 /* Wake up a worker to get/set the TX/RX frame sizes. */
952 queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
953 exit:
954 return status;
957 /* Take the MAC Core out of reset.
958 * Enable statistics counting.
959 * Take the transmitter/receiver out of reset.
960 * This functionality may be done in the MPI firmware at a
961 * later date.
963 static int ql_8012_port_initialize(struct ql_adapter *qdev)
965 int status = 0;
966 u32 data;
968 if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
969 /* Another function has the semaphore, so
970 * wait for the port init bit to come ready.
972 QPRINTK(qdev, LINK, INFO,
973 "Another function has the semaphore, so wait for the port init bit to come ready.\n");
974 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
975 if (status) {
976 QPRINTK(qdev, LINK, CRIT,
977 "Port initialize timed out.\n");
979 return status;
982 QPRINTK(qdev, LINK, INFO, "Got xgmac semaphore!.\n");
983 /* Set the core reset. */
984 status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
985 if (status)
986 goto end;
987 data |= GLOBAL_CFG_RESET;
988 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
989 if (status)
990 goto end;
992 /* Clear the core reset and turn on jumbo for receiver. */
993 data &= ~GLOBAL_CFG_RESET; /* Clear core reset. */
994 data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */
995 data |= GLOBAL_CFG_TX_STAT_EN;
996 data |= GLOBAL_CFG_RX_STAT_EN;
997 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
998 if (status)
999 goto end;
1001 /* Enable the transmitter and clear its reset. */
1002 status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
1003 if (status)
1004 goto end;
1005 data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */
1006 data |= TX_CFG_EN; /* Enable the transmitter. */
1007 status = ql_write_xgmac_reg(qdev, TX_CFG, data);
1008 if (status)
1009 goto end;
1011 /* Enable the receiver and clear its reset. */
1012 status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
1013 if (status)
1014 goto end;
1015 data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */
1016 data |= RX_CFG_EN; /* Enable the receiver. */
1017 status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1018 if (status)
1019 goto end;
1021 /* Turn on jumbo. */
1022 status =
1023 ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1024 if (status)
1025 goto end;
1026 status =
1027 ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1028 if (status)
1029 goto end;
1031 /* Signal to the world that the port is enabled. */
1032 ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1033 end:
1034 ql_sem_unlock(qdev, qdev->xg_sem_mask);
1035 return status;
1038 static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
1040 return PAGE_SIZE << qdev->lbq_buf_order;
1043 /* Get the next large buffer. */
1044 static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1046 struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1047 rx_ring->lbq_curr_idx++;
1048 if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1049 rx_ring->lbq_curr_idx = 0;
1050 rx_ring->lbq_free_cnt++;
1051 return lbq_desc;
1054 static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1055 struct rx_ring *rx_ring)
1057 struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1059 pci_dma_sync_single_for_cpu(qdev->pdev,
1060 pci_unmap_addr(lbq_desc, mapaddr),
1061 rx_ring->lbq_buf_size,
1062 PCI_DMA_FROMDEVICE);
1064 /* If it's the last chunk of our master page then
1065 * we unmap it.
1067 if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1068 == ql_lbq_block_size(qdev))
1069 pci_unmap_page(qdev->pdev,
1070 lbq_desc->p.pg_chunk.map,
1071 ql_lbq_block_size(qdev),
1072 PCI_DMA_FROMDEVICE);
1073 return lbq_desc;
1076 /* Get the next small buffer. */
1077 static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
1079 struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1080 rx_ring->sbq_curr_idx++;
1081 if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1082 rx_ring->sbq_curr_idx = 0;
1083 rx_ring->sbq_free_cnt++;
1084 return sbq_desc;
1087 /* Update an rx ring index. */
1088 static void ql_update_cq(struct rx_ring *rx_ring)
1090 rx_ring->cnsmr_idx++;
1091 rx_ring->curr_entry++;
1092 if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1093 rx_ring->cnsmr_idx = 0;
1094 rx_ring->curr_entry = rx_ring->cq_base;
1098 static void ql_write_cq_idx(struct rx_ring *rx_ring)
1100 ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1103 static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1104 struct bq_desc *lbq_desc)
1106 if (!rx_ring->pg_chunk.page) {
1107 u64 map;
1108 rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
1109 GFP_ATOMIC,
1110 qdev->lbq_buf_order);
1111 if (unlikely(!rx_ring->pg_chunk.page)) {
1112 QPRINTK(qdev, DRV, ERR,
1113 "page allocation failed.\n");
1114 return -ENOMEM;
1116 rx_ring->pg_chunk.offset = 0;
1117 map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1118 0, ql_lbq_block_size(qdev),
1119 PCI_DMA_FROMDEVICE);
1120 if (pci_dma_mapping_error(qdev->pdev, map)) {
1121 __free_pages(rx_ring->pg_chunk.page,
1122 qdev->lbq_buf_order);
1123 QPRINTK(qdev, DRV, ERR,
1124 "PCI mapping failed.\n");
1125 return -ENOMEM;
1127 rx_ring->pg_chunk.map = map;
1128 rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
1131 /* Copy the current master pg_chunk info
1132 * to the current descriptor.
1134 lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1136 /* Adjust the master page chunk for next
1137 * buffer get.
1139 rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1140 if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1141 rx_ring->pg_chunk.page = NULL;
1142 lbq_desc->p.pg_chunk.last_flag = 1;
1143 } else {
1144 rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1145 get_page(rx_ring->pg_chunk.page);
1146 lbq_desc->p.pg_chunk.last_flag = 0;
1148 return 0;
1150 /* Process (refill) a large buffer queue. */
1151 static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1153 u32 clean_idx = rx_ring->lbq_clean_idx;
1154 u32 start_idx = clean_idx;
1155 struct bq_desc *lbq_desc;
1156 u64 map;
1157 int i;
1159 while (rx_ring->lbq_free_cnt > 32) {
1160 for (i = 0; i < 16; i++) {
1161 QPRINTK(qdev, RX_STATUS, DEBUG,
1162 "lbq: try cleaning clean_idx = %d.\n",
1163 clean_idx);
1164 lbq_desc = &rx_ring->lbq[clean_idx];
1165 if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1166 QPRINTK(qdev, IFUP, ERR,
1167 "Could not get a page chunk.\n");
1168 return;
1171 map = lbq_desc->p.pg_chunk.map +
1172 lbq_desc->p.pg_chunk.offset;
1173 pci_unmap_addr_set(lbq_desc, mapaddr, map);
1174 pci_unmap_len_set(lbq_desc, maplen,
1175 rx_ring->lbq_buf_size);
1176 *lbq_desc->addr = cpu_to_le64(map);
1178 pci_dma_sync_single_for_device(qdev->pdev, map,
1179 rx_ring->lbq_buf_size,
1180 PCI_DMA_FROMDEVICE);
1181 clean_idx++;
1182 if (clean_idx == rx_ring->lbq_len)
1183 clean_idx = 0;
1186 rx_ring->lbq_clean_idx = clean_idx;
1187 rx_ring->lbq_prod_idx += 16;
1188 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1189 rx_ring->lbq_prod_idx = 0;
1190 rx_ring->lbq_free_cnt -= 16;
1193 if (start_idx != clean_idx) {
1194 QPRINTK(qdev, RX_STATUS, DEBUG,
1195 "lbq: updating prod idx = %d.\n",
1196 rx_ring->lbq_prod_idx);
1197 ql_write_db_reg(rx_ring->lbq_prod_idx,
1198 rx_ring->lbq_prod_idx_db_reg);
1202 /* Process (refill) a small buffer queue. */
1203 static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1205 u32 clean_idx = rx_ring->sbq_clean_idx;
1206 u32 start_idx = clean_idx;
1207 struct bq_desc *sbq_desc;
1208 u64 map;
1209 int i;
1211 while (rx_ring->sbq_free_cnt > 16) {
1212 for (i = 0; i < 16; i++) {
1213 sbq_desc = &rx_ring->sbq[clean_idx];
1214 QPRINTK(qdev, RX_STATUS, DEBUG,
1215 "sbq: try cleaning clean_idx = %d.\n",
1216 clean_idx);
1217 if (sbq_desc->p.skb == NULL) {
1218 QPRINTK(qdev, RX_STATUS, DEBUG,
1219 "sbq: getting new skb for index %d.\n",
1220 sbq_desc->index);
1221 sbq_desc->p.skb =
1222 netdev_alloc_skb(qdev->ndev,
1223 SMALL_BUFFER_SIZE);
1224 if (sbq_desc->p.skb == NULL) {
1225 QPRINTK(qdev, PROBE, ERR,
1226 "Couldn't get an skb.\n");
1227 rx_ring->sbq_clean_idx = clean_idx;
1228 return;
1230 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1231 map = pci_map_single(qdev->pdev,
1232 sbq_desc->p.skb->data,
1233 rx_ring->sbq_buf_size,
1234 PCI_DMA_FROMDEVICE);
1235 if (pci_dma_mapping_error(qdev->pdev, map)) {
1236 QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
1237 rx_ring->sbq_clean_idx = clean_idx;
1238 dev_kfree_skb_any(sbq_desc->p.skb);
1239 sbq_desc->p.skb = NULL;
1240 return;
1242 pci_unmap_addr_set(sbq_desc, mapaddr, map);
1243 pci_unmap_len_set(sbq_desc, maplen,
1244 rx_ring->sbq_buf_size);
1245 *sbq_desc->addr = cpu_to_le64(map);
1248 clean_idx++;
1249 if (clean_idx == rx_ring->sbq_len)
1250 clean_idx = 0;
1252 rx_ring->sbq_clean_idx = clean_idx;
1253 rx_ring->sbq_prod_idx += 16;
1254 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1255 rx_ring->sbq_prod_idx = 0;
1256 rx_ring->sbq_free_cnt -= 16;
1259 if (start_idx != clean_idx) {
1260 QPRINTK(qdev, RX_STATUS, DEBUG,
1261 "sbq: updating prod idx = %d.\n",
1262 rx_ring->sbq_prod_idx);
1263 ql_write_db_reg(rx_ring->sbq_prod_idx,
1264 rx_ring->sbq_prod_idx_db_reg);
1268 static void ql_update_buffer_queues(struct ql_adapter *qdev,
1269 struct rx_ring *rx_ring)
1271 ql_update_sbq(qdev, rx_ring);
1272 ql_update_lbq(qdev, rx_ring);
1275 /* Unmaps tx buffers. Can be called from send() if a pci mapping
1276 * fails at some stage, or from the interrupt when a tx completes.
1278 static void ql_unmap_send(struct ql_adapter *qdev,
1279 struct tx_ring_desc *tx_ring_desc, int mapped)
1281 int i;
1282 for (i = 0; i < mapped; i++) {
1283 if (i == 0 || (i == 7 && mapped > 7)) {
1285 * Unmap the skb->data area, or the
1286 * external sglist (AKA the Outbound
1287 * Address List (OAL)).
1288 * If it's the zeroeth element, then it's
1289 * the skb->data area. If it's the 7th
1290 * element and there are more than 6 frags,
1291 * then it's an OAL.
1293 if (i == 7) {
1294 QPRINTK(qdev, TX_DONE, DEBUG,
1295 "unmapping OAL area.\n");
1297 pci_unmap_single(qdev->pdev,
1298 pci_unmap_addr(&tx_ring_desc->map[i],
1299 mapaddr),
1300 pci_unmap_len(&tx_ring_desc->map[i],
1301 maplen),
1302 PCI_DMA_TODEVICE);
1303 } else {
1304 QPRINTK(qdev, TX_DONE, DEBUG, "unmapping frag %d.\n",
1306 pci_unmap_page(qdev->pdev,
1307 pci_unmap_addr(&tx_ring_desc->map[i],
1308 mapaddr),
1309 pci_unmap_len(&tx_ring_desc->map[i],
1310 maplen), PCI_DMA_TODEVICE);
1316 /* Map the buffers for this transmit. This will return
1317 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1319 static int ql_map_send(struct ql_adapter *qdev,
1320 struct ob_mac_iocb_req *mac_iocb_ptr,
1321 struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1323 int len = skb_headlen(skb);
1324 dma_addr_t map;
1325 int frag_idx, err, map_idx = 0;
1326 struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1327 int frag_cnt = skb_shinfo(skb)->nr_frags;
1329 if (frag_cnt) {
1330 QPRINTK(qdev, TX_QUEUED, DEBUG, "frag_cnt = %d.\n", frag_cnt);
1333 * Map the skb buffer first.
1335 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1337 err = pci_dma_mapping_error(qdev->pdev, map);
1338 if (err) {
1339 QPRINTK(qdev, TX_QUEUED, ERR,
1340 "PCI mapping failed with error: %d\n", err);
1342 return NETDEV_TX_BUSY;
1345 tbd->len = cpu_to_le32(len);
1346 tbd->addr = cpu_to_le64(map);
1347 pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1348 pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1349 map_idx++;
1352 * This loop fills the remainder of the 8 address descriptors
1353 * in the IOCB. If there are more than 7 fragments, then the
1354 * eighth address desc will point to an external list (OAL).
1355 * When this happens, the remainder of the frags will be stored
1356 * in this list.
1358 for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1359 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1360 tbd++;
1361 if (frag_idx == 6 && frag_cnt > 7) {
1362 /* Let's tack on an sglist.
1363 * Our control block will now
1364 * look like this:
1365 * iocb->seg[0] = skb->data
1366 * iocb->seg[1] = frag[0]
1367 * iocb->seg[2] = frag[1]
1368 * iocb->seg[3] = frag[2]
1369 * iocb->seg[4] = frag[3]
1370 * iocb->seg[5] = frag[4]
1371 * iocb->seg[6] = frag[5]
1372 * iocb->seg[7] = ptr to OAL (external sglist)
1373 * oal->seg[0] = frag[6]
1374 * oal->seg[1] = frag[7]
1375 * oal->seg[2] = frag[8]
1376 * oal->seg[3] = frag[9]
1377 * oal->seg[4] = frag[10]
1378 * etc...
1380 /* Tack on the OAL in the eighth segment of IOCB. */
1381 map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1382 sizeof(struct oal),
1383 PCI_DMA_TODEVICE);
1384 err = pci_dma_mapping_error(qdev->pdev, map);
1385 if (err) {
1386 QPRINTK(qdev, TX_QUEUED, ERR,
1387 "PCI mapping outbound address list with error: %d\n",
1388 err);
1389 goto map_error;
1392 tbd->addr = cpu_to_le64(map);
1394 * The length is the number of fragments
1395 * that remain to be mapped times the length
1396 * of our sglist (OAL).
1398 tbd->len =
1399 cpu_to_le32((sizeof(struct tx_buf_desc) *
1400 (frag_cnt - frag_idx)) | TX_DESC_C);
1401 pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1402 map);
1403 pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1404 sizeof(struct oal));
1405 tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1406 map_idx++;
1409 map =
1410 pci_map_page(qdev->pdev, frag->page,
1411 frag->page_offset, frag->size,
1412 PCI_DMA_TODEVICE);
1414 err = pci_dma_mapping_error(qdev->pdev, map);
1415 if (err) {
1416 QPRINTK(qdev, TX_QUEUED, ERR,
1417 "PCI mapping frags failed with error: %d.\n",
1418 err);
1419 goto map_error;
1422 tbd->addr = cpu_to_le64(map);
1423 tbd->len = cpu_to_le32(frag->size);
1424 pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1425 pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1426 frag->size);
1429 /* Save the number of segments we've mapped. */
1430 tx_ring_desc->map_cnt = map_idx;
1431 /* Terminate the last segment. */
1432 tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1433 return NETDEV_TX_OK;
1435 map_error:
1437 * If the first frag mapping failed, then i will be zero.
1438 * This causes the unmap of the skb->data area. Otherwise
1439 * we pass in the number of frags that mapped successfully
1440 * so they can be unmapped.
1442 ql_unmap_send(qdev, tx_ring_desc, map_idx);
1443 return NETDEV_TX_BUSY;
1446 /* Process an inbound completion from an rx ring. */
1447 static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1448 struct rx_ring *rx_ring,
1449 struct ib_mac_iocb_rsp *ib_mac_rsp,
1450 u32 length,
1451 u16 vlan_id)
1453 struct sk_buff *skb;
1454 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1455 struct skb_frag_struct *rx_frag;
1456 int nr_frags;
1457 struct napi_struct *napi = &rx_ring->napi;
1459 napi->dev = qdev->ndev;
1461 skb = napi_get_frags(napi);
1462 if (!skb) {
1463 QPRINTK(qdev, DRV, ERR, "Couldn't get an skb, exiting.\n");
1464 rx_ring->rx_dropped++;
1465 put_page(lbq_desc->p.pg_chunk.page);
1466 return;
1468 prefetch(lbq_desc->p.pg_chunk.va);
1469 rx_frag = skb_shinfo(skb)->frags;
1470 nr_frags = skb_shinfo(skb)->nr_frags;
1471 rx_frag += nr_frags;
1472 rx_frag->page = lbq_desc->p.pg_chunk.page;
1473 rx_frag->page_offset = lbq_desc->p.pg_chunk.offset;
1474 rx_frag->size = length;
1476 skb->len += length;
1477 skb->data_len += length;
1478 skb->truesize += length;
1479 skb_shinfo(skb)->nr_frags++;
1481 rx_ring->rx_packets++;
1482 rx_ring->rx_bytes += length;
1483 skb->ip_summed = CHECKSUM_UNNECESSARY;
1484 skb_record_rx_queue(skb, rx_ring->cq_id);
1485 if (qdev->vlgrp && (vlan_id != 0xffff))
1486 vlan_gro_frags(&rx_ring->napi, qdev->vlgrp, vlan_id);
1487 else
1488 napi_gro_frags(napi);
1491 /* Process an inbound completion from an rx ring. */
1492 static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1493 struct rx_ring *rx_ring,
1494 struct ib_mac_iocb_rsp *ib_mac_rsp,
1495 u32 length,
1496 u16 vlan_id)
1498 struct net_device *ndev = qdev->ndev;
1499 struct sk_buff *skb = NULL;
1500 void *addr;
1501 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1502 struct napi_struct *napi = &rx_ring->napi;
1504 skb = netdev_alloc_skb(ndev, length);
1505 if (!skb) {
1506 QPRINTK(qdev, DRV, ERR, "Couldn't get an skb, "
1507 "need to unwind!.\n");
1508 rx_ring->rx_dropped++;
1509 put_page(lbq_desc->p.pg_chunk.page);
1510 return;
1513 addr = lbq_desc->p.pg_chunk.va;
1514 prefetch(addr);
1517 /* Frame error, so drop the packet. */
1518 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1519 QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
1520 ib_mac_rsp->flags2);
1521 rx_ring->rx_errors++;
1522 goto err_out;
1525 /* The max framesize filter on this chip is set higher than
1526 * MTU since FCoE uses 2k frames.
1528 if (skb->len > ndev->mtu + ETH_HLEN) {
1529 QPRINTK(qdev, DRV, ERR, "Segment too small, dropping.\n");
1530 rx_ring->rx_dropped++;
1531 goto err_out;
1533 memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
1534 QPRINTK(qdev, RX_STATUS, DEBUG,
1535 "%d bytes of headers and data in large. Chain "
1536 "page to new skb and pull tail.\n", length);
1537 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1538 lbq_desc->p.pg_chunk.offset+ETH_HLEN,
1539 length-ETH_HLEN);
1540 skb->len += length-ETH_HLEN;
1541 skb->data_len += length-ETH_HLEN;
1542 skb->truesize += length-ETH_HLEN;
1544 rx_ring->rx_packets++;
1545 rx_ring->rx_bytes += skb->len;
1546 skb->protocol = eth_type_trans(skb, ndev);
1547 skb->ip_summed = CHECKSUM_NONE;
1549 if (qdev->rx_csum &&
1550 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1551 /* TCP frame. */
1552 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1553 QPRINTK(qdev, RX_STATUS, DEBUG,
1554 "TCP checksum done!\n");
1555 skb->ip_summed = CHECKSUM_UNNECESSARY;
1556 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1557 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1558 /* Unfragmented ipv4 UDP frame. */
1559 struct iphdr *iph = (struct iphdr *) skb->data;
1560 if (!(iph->frag_off &
1561 cpu_to_be16(IP_MF|IP_OFFSET))) {
1562 skb->ip_summed = CHECKSUM_UNNECESSARY;
1563 QPRINTK(qdev, RX_STATUS, DEBUG,
1564 "TCP checksum done!\n");
1569 skb_record_rx_queue(skb, rx_ring->cq_id);
1570 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1571 if (qdev->vlgrp && (vlan_id != 0xffff))
1572 vlan_gro_receive(napi, qdev->vlgrp, vlan_id, skb);
1573 else
1574 napi_gro_receive(napi, skb);
1575 } else {
1576 if (qdev->vlgrp && (vlan_id != 0xffff))
1577 vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1578 else
1579 netif_receive_skb(skb);
1581 return;
1582 err_out:
1583 dev_kfree_skb_any(skb);
1584 put_page(lbq_desc->p.pg_chunk.page);
1587 /* Process an inbound completion from an rx ring. */
1588 static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1589 struct rx_ring *rx_ring,
1590 struct ib_mac_iocb_rsp *ib_mac_rsp,
1591 u32 length,
1592 u16 vlan_id)
1594 struct net_device *ndev = qdev->ndev;
1595 struct sk_buff *skb = NULL;
1596 struct sk_buff *new_skb = NULL;
1597 struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1599 skb = sbq_desc->p.skb;
1600 /* Allocate new_skb and copy */
1601 new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1602 if (new_skb == NULL) {
1603 QPRINTK(qdev, PROBE, ERR,
1604 "No skb available, drop the packet.\n");
1605 rx_ring->rx_dropped++;
1606 return;
1608 skb_reserve(new_skb, NET_IP_ALIGN);
1609 memcpy(skb_put(new_skb, length), skb->data, length);
1610 skb = new_skb;
1612 /* Frame error, so drop the packet. */
1613 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1614 QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
1615 ib_mac_rsp->flags2);
1616 dev_kfree_skb_any(skb);
1617 rx_ring->rx_errors++;
1618 return;
1621 /* loopback self test for ethtool */
1622 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1623 ql_check_lb_frame(qdev, skb);
1624 dev_kfree_skb_any(skb);
1625 return;
1628 /* The max framesize filter on this chip is set higher than
1629 * MTU since FCoE uses 2k frames.
1631 if (skb->len > ndev->mtu + ETH_HLEN) {
1632 dev_kfree_skb_any(skb);
1633 rx_ring->rx_dropped++;
1634 return;
1637 prefetch(skb->data);
1638 skb->dev = ndev;
1639 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1640 QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
1641 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1642 IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
1643 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1644 IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
1645 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1646 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1648 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1649 QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
1651 rx_ring->rx_packets++;
1652 rx_ring->rx_bytes += skb->len;
1653 skb->protocol = eth_type_trans(skb, ndev);
1654 skb->ip_summed = CHECKSUM_NONE;
1656 /* If rx checksum is on, and there are no
1657 * csum or frame errors.
1659 if (qdev->rx_csum &&
1660 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1661 /* TCP frame. */
1662 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1663 QPRINTK(qdev, RX_STATUS, DEBUG,
1664 "TCP checksum done!\n");
1665 skb->ip_summed = CHECKSUM_UNNECESSARY;
1666 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1667 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1668 /* Unfragmented ipv4 UDP frame. */
1669 struct iphdr *iph = (struct iphdr *) skb->data;
1670 if (!(iph->frag_off &
1671 cpu_to_be16(IP_MF|IP_OFFSET))) {
1672 skb->ip_summed = CHECKSUM_UNNECESSARY;
1673 QPRINTK(qdev, RX_STATUS, DEBUG,
1674 "TCP checksum done!\n");
1679 skb_record_rx_queue(skb, rx_ring->cq_id);
1680 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1681 if (qdev->vlgrp && (vlan_id != 0xffff))
1682 vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
1683 vlan_id, skb);
1684 else
1685 napi_gro_receive(&rx_ring->napi, skb);
1686 } else {
1687 if (qdev->vlgrp && (vlan_id != 0xffff))
1688 vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1689 else
1690 netif_receive_skb(skb);
1694 static void ql_realign_skb(struct sk_buff *skb, int len)
1696 void *temp_addr = skb->data;
1698 /* Undo the skb_reserve(skb,32) we did before
1699 * giving to hardware, and realign data on
1700 * a 2-byte boundary.
1702 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1703 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1704 skb_copy_to_linear_data(skb, temp_addr,
1705 (unsigned int)len);
1709 * This function builds an skb for the given inbound
1710 * completion. It will be rewritten for readability in the near
1711 * future, but for now it works well.
1713 static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1714 struct rx_ring *rx_ring,
1715 struct ib_mac_iocb_rsp *ib_mac_rsp)
1717 struct bq_desc *lbq_desc;
1718 struct bq_desc *sbq_desc;
1719 struct sk_buff *skb = NULL;
1720 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1721 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1724 * Handle the header buffer if present.
1726 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1727 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1728 QPRINTK(qdev, RX_STATUS, DEBUG, "Header of %d bytes in small buffer.\n", hdr_len);
1730 * Headers fit nicely into a small buffer.
1732 sbq_desc = ql_get_curr_sbuf(rx_ring);
1733 pci_unmap_single(qdev->pdev,
1734 pci_unmap_addr(sbq_desc, mapaddr),
1735 pci_unmap_len(sbq_desc, maplen),
1736 PCI_DMA_FROMDEVICE);
1737 skb = sbq_desc->p.skb;
1738 ql_realign_skb(skb, hdr_len);
1739 skb_put(skb, hdr_len);
1740 sbq_desc->p.skb = NULL;
1744 * Handle the data buffer(s).
1746 if (unlikely(!length)) { /* Is there data too? */
1747 QPRINTK(qdev, RX_STATUS, DEBUG,
1748 "No Data buffer in this packet.\n");
1749 return skb;
1752 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1753 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1754 QPRINTK(qdev, RX_STATUS, DEBUG,
1755 "Headers in small, data of %d bytes in small, combine them.\n", length);
1757 * Data is less than small buffer size so it's
1758 * stuffed in a small buffer.
1759 * For this case we append the data
1760 * from the "data" small buffer to the "header" small
1761 * buffer.
1763 sbq_desc = ql_get_curr_sbuf(rx_ring);
1764 pci_dma_sync_single_for_cpu(qdev->pdev,
1765 pci_unmap_addr
1766 (sbq_desc, mapaddr),
1767 pci_unmap_len
1768 (sbq_desc, maplen),
1769 PCI_DMA_FROMDEVICE);
1770 memcpy(skb_put(skb, length),
1771 sbq_desc->p.skb->data, length);
1772 pci_dma_sync_single_for_device(qdev->pdev,
1773 pci_unmap_addr
1774 (sbq_desc,
1775 mapaddr),
1776 pci_unmap_len
1777 (sbq_desc,
1778 maplen),
1779 PCI_DMA_FROMDEVICE);
1780 } else {
1781 QPRINTK(qdev, RX_STATUS, DEBUG,
1782 "%d bytes in a single small buffer.\n", length);
1783 sbq_desc = ql_get_curr_sbuf(rx_ring);
1784 skb = sbq_desc->p.skb;
1785 ql_realign_skb(skb, length);
1786 skb_put(skb, length);
1787 pci_unmap_single(qdev->pdev,
1788 pci_unmap_addr(sbq_desc,
1789 mapaddr),
1790 pci_unmap_len(sbq_desc,
1791 maplen),
1792 PCI_DMA_FROMDEVICE);
1793 sbq_desc->p.skb = NULL;
1795 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1796 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1797 QPRINTK(qdev, RX_STATUS, DEBUG,
1798 "Header in small, %d bytes in large. Chain large to small!\n", length);
1800 * The data is in a single large buffer. We
1801 * chain it to the header buffer's skb and let
1802 * it rip.
1804 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1805 QPRINTK(qdev, RX_STATUS, DEBUG,
1806 "Chaining page at offset = %d,"
1807 "for %d bytes to skb.\n",
1808 lbq_desc->p.pg_chunk.offset, length);
1809 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1810 lbq_desc->p.pg_chunk.offset,
1811 length);
1812 skb->len += length;
1813 skb->data_len += length;
1814 skb->truesize += length;
1815 } else {
1817 * The headers and data are in a single large buffer. We
1818 * copy it to a new skb and let it go. This can happen with
1819 * jumbo mtu on a non-TCP/UDP frame.
1821 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1822 skb = netdev_alloc_skb(qdev->ndev, length);
1823 if (skb == NULL) {
1824 QPRINTK(qdev, PROBE, DEBUG,
1825 "No skb available, drop the packet.\n");
1826 return NULL;
1828 pci_unmap_page(qdev->pdev,
1829 pci_unmap_addr(lbq_desc,
1830 mapaddr),
1831 pci_unmap_len(lbq_desc, maplen),
1832 PCI_DMA_FROMDEVICE);
1833 skb_reserve(skb, NET_IP_ALIGN);
1834 QPRINTK(qdev, RX_STATUS, DEBUG,
1835 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length);
1836 skb_fill_page_desc(skb, 0,
1837 lbq_desc->p.pg_chunk.page,
1838 lbq_desc->p.pg_chunk.offset,
1839 length);
1840 skb->len += length;
1841 skb->data_len += length;
1842 skb->truesize += length;
1843 length -= length;
1844 __pskb_pull_tail(skb,
1845 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1846 VLAN_ETH_HLEN : ETH_HLEN);
1848 } else {
1850 * The data is in a chain of large buffers
1851 * pointed to by a small buffer. We loop
1852 * thru and chain them to our small header
1853 * buffer's skb.
1854 * frags: There are 18 max frags and our small
1855 * buffer will hold 32 of them. The thing is,
1856 * we'll use 3 max for our 9000 byte jumbo
1857 * frames. If the MTU goes up we could
1858 * eventually be in trouble.
1860 int size, i = 0;
1861 sbq_desc = ql_get_curr_sbuf(rx_ring);
1862 pci_unmap_single(qdev->pdev,
1863 pci_unmap_addr(sbq_desc, mapaddr),
1864 pci_unmap_len(sbq_desc, maplen),
1865 PCI_DMA_FROMDEVICE);
1866 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1868 * This is an non TCP/UDP IP frame, so
1869 * the headers aren't split into a small
1870 * buffer. We have to use the small buffer
1871 * that contains our sg list as our skb to
1872 * send upstairs. Copy the sg list here to
1873 * a local buffer and use it to find the
1874 * pages to chain.
1876 QPRINTK(qdev, RX_STATUS, DEBUG,
1877 "%d bytes of headers & data in chain of large.\n", length);
1878 skb = sbq_desc->p.skb;
1879 sbq_desc->p.skb = NULL;
1880 skb_reserve(skb, NET_IP_ALIGN);
1882 while (length > 0) {
1883 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1884 size = (length < rx_ring->lbq_buf_size) ? length :
1885 rx_ring->lbq_buf_size;
1887 QPRINTK(qdev, RX_STATUS, DEBUG,
1888 "Adding page %d to skb for %d bytes.\n",
1889 i, size);
1890 skb_fill_page_desc(skb, i,
1891 lbq_desc->p.pg_chunk.page,
1892 lbq_desc->p.pg_chunk.offset,
1893 size);
1894 skb->len += size;
1895 skb->data_len += size;
1896 skb->truesize += size;
1897 length -= size;
1898 i++;
1900 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1901 VLAN_ETH_HLEN : ETH_HLEN);
1903 return skb;
1906 /* Process an inbound completion from an rx ring. */
1907 static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1908 struct rx_ring *rx_ring,
1909 struct ib_mac_iocb_rsp *ib_mac_rsp,
1910 u16 vlan_id)
1912 struct net_device *ndev = qdev->ndev;
1913 struct sk_buff *skb = NULL;
1915 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1917 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1918 if (unlikely(!skb)) {
1919 QPRINTK(qdev, RX_STATUS, DEBUG,
1920 "No skb available, drop packet.\n");
1921 rx_ring->rx_dropped++;
1922 return;
1925 /* Frame error, so drop the packet. */
1926 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1927 QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
1928 ib_mac_rsp->flags2);
1929 dev_kfree_skb_any(skb);
1930 rx_ring->rx_errors++;
1931 return;
1934 /* The max framesize filter on this chip is set higher than
1935 * MTU since FCoE uses 2k frames.
1937 if (skb->len > ndev->mtu + ETH_HLEN) {
1938 dev_kfree_skb_any(skb);
1939 rx_ring->rx_dropped++;
1940 return;
1943 /* loopback self test for ethtool */
1944 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1945 ql_check_lb_frame(qdev, skb);
1946 dev_kfree_skb_any(skb);
1947 return;
1950 prefetch(skb->data);
1951 skb->dev = ndev;
1952 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1953 QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
1954 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1955 IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
1956 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1957 IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
1958 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1959 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1960 rx_ring->rx_multicast++;
1962 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1963 QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
1966 skb->protocol = eth_type_trans(skb, ndev);
1967 skb->ip_summed = CHECKSUM_NONE;
1969 /* If rx checksum is on, and there are no
1970 * csum or frame errors.
1972 if (qdev->rx_csum &&
1973 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1974 /* TCP frame. */
1975 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1976 QPRINTK(qdev, RX_STATUS, DEBUG,
1977 "TCP checksum done!\n");
1978 skb->ip_summed = CHECKSUM_UNNECESSARY;
1979 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1980 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1981 /* Unfragmented ipv4 UDP frame. */
1982 struct iphdr *iph = (struct iphdr *) skb->data;
1983 if (!(iph->frag_off &
1984 cpu_to_be16(IP_MF|IP_OFFSET))) {
1985 skb->ip_summed = CHECKSUM_UNNECESSARY;
1986 QPRINTK(qdev, RX_STATUS, DEBUG,
1987 "TCP checksum done!\n");
1992 rx_ring->rx_packets++;
1993 rx_ring->rx_bytes += skb->len;
1994 skb_record_rx_queue(skb, rx_ring->cq_id);
1995 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1996 if (qdev->vlgrp &&
1997 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
1998 (vlan_id != 0))
1999 vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
2000 vlan_id, skb);
2001 else
2002 napi_gro_receive(&rx_ring->napi, skb);
2003 } else {
2004 if (qdev->vlgrp &&
2005 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
2006 (vlan_id != 0))
2007 vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
2008 else
2009 netif_receive_skb(skb);
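/* Delivery note for the paths above: frames that passed hardware checksum
 * validation (ip_summed == CHECKSUM_UNNECESSARY) are handed to GRO via
 * vlan_gro_receive()/napi_gro_receive() so they can be coalesced, while
 * everything else goes straight up through vlan_hwaccel_receive_skb()/
 * netif_receive_skb().  The VLAN-accelerated variants are only taken when
 * a vlan_group is registered and the IOCB carries a non-zero VLAN id.
 */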
2013 /* Process an inbound completion from an rx ring. */
2014 static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2015 struct rx_ring *rx_ring,
2016 struct ib_mac_iocb_rsp *ib_mac_rsp)
2018 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
2019 u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
2020 ((le16_to_cpu(ib_mac_rsp->vlan_id) &
2021 IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
2023 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2025 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2026 /* The data and headers are split into
2027 * separate buffers.
2029 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2030 vlan_id);
2031 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2032 /* The data fit in a single small buffer.
2033 * Allocate a new skb, copy the data and
2034 * return the buffer to the free pool.
2036 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2037 length, vlan_id);
2038 } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2039 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2040 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2041 /* TCP packet in a page chunk that's been checksummed.
2042 * Tack it on to our GRO skb and let it go.
2044 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2045 length, vlan_id);
2046 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2047 /* Non-TCP packet in a page chunk. Allocate an
2048 * skb, tack it on frags, and send it up.
2050 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2051 length, vlan_id);
2052 } else {
2053 struct bq_desc *lbq_desc;
2055 /* Free small buffer that holds the IAL */
2056 lbq_desc = ql_get_curr_sbuf(rx_ring);
2057 QPRINTK(qdev, RX_ERR, ERR, "Dropping frame, len %d > mtu %d\n",
2058 length, qdev->ndev->mtu);
2060 /* Unwind the large buffers for this frame. */
2061 while (length > 0) {
2062 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
2063 length -= (length < rx_ring->lbq_buf_size) ?
2064 length : rx_ring->lbq_buf_size;
2065 put_page(lbq_desc->p.pg_chunk.page);
2069 return (unsigned long)length;
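/* Dispatch summary for the branches above: header-split frames go through
 * ql_process_mac_split_rx_intr(), frames that fit a small buffer are copied
 * by ql_process_mac_rx_skb(), checksummed TCP page chunks are coalesced by
 * ql_process_mac_rx_gro_page(), other page chunks go via
 * ql_process_mac_rx_page(), and anything that falls through is treated as
 * oversized: its large-buffer chain is unwound and the frame is dropped.
 */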
2072 /* Process an outbound completion from an rx ring. */
2073 static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2074 struct ob_mac_iocb_rsp *mac_rsp)
2076 struct tx_ring *tx_ring;
2077 struct tx_ring_desc *tx_ring_desc;
2079 QL_DUMP_OB_MAC_RSP(mac_rsp);
2080 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2081 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2082 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
2083 tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2084 tx_ring->tx_packets++;
2085 dev_kfree_skb(tx_ring_desc->skb);
2086 tx_ring_desc->skb = NULL;
2088 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2089 OB_MAC_IOCB_RSP_S |
2090 OB_MAC_IOCB_RSP_L |
2091 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2092 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2093 QPRINTK(qdev, TX_DONE, WARNING,
2094 "Total descriptor length did not match transfer length.\n");
2096 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2097 QPRINTK(qdev, TX_DONE, WARNING,
2098 "Frame too short to be legal, not sent.\n");
2100 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2101 QPRINTK(qdev, TX_DONE, WARNING,
2102 "Frame too long, but sent anyway.\n");
2104 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2105 QPRINTK(qdev, TX_DONE, WARNING,
2106 "PCI backplane error. Frame not sent.\n");
2109 atomic_inc(&tx_ring->tx_count);
2112 /* Fire up a handler to reset the MPI processor. */
2113 void ql_queue_fw_error(struct ql_adapter *qdev)
2115 ql_link_off(qdev);
2116 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2119 void ql_queue_asic_error(struct ql_adapter *qdev)
2121 ql_link_off(qdev);
2122 ql_disable_interrupts(qdev);
2123 /* Clear adapter up bit to signal the recovery
2124 * process that it shouldn't kill the reset worker
2125 * thread
2127 clear_bit(QL_ADAPTER_UP, &qdev->flags);
2128 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2131 static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2132 struct ib_ae_iocb_rsp *ib_ae_rsp)
2134 switch (ib_ae_rsp->event) {
2135 case MGMT_ERR_EVENT:
2136 QPRINTK(qdev, RX_ERR, ERR,
2137 "Management Processor Fatal Error.\n");
2138 ql_queue_fw_error(qdev);
2139 return;
2141 case CAM_LOOKUP_ERR_EVENT:
2142 QPRINTK(qdev, LINK, ERR,
2143 "Multiple CAM hits lookup occurred.\n");
2144 QPRINTK(qdev, DRV, ERR, "This event shouldn't occur.\n");
2145 ql_queue_asic_error(qdev);
2146 return;
2148 case SOFT_ECC_ERROR_EVENT:
2149 QPRINTK(qdev, RX_ERR, ERR, "Soft ECC error detected.\n");
2150 ql_queue_asic_error(qdev);
2151 break;
2153 case PCI_ERR_ANON_BUF_RD:
2154 QPRINTK(qdev, RX_ERR, ERR,
2155 "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
2156 ib_ae_rsp->q_id);
2157 ql_queue_asic_error(qdev);
2158 break;
2160 default:
2161 QPRINTK(qdev, DRV, ERR, "Unexpected event %d.\n",
2162 ib_ae_rsp->event);
2163 ql_queue_asic_error(qdev);
2164 break;
2168 static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2170 struct ql_adapter *qdev = rx_ring->qdev;
2171 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2172 struct ob_mac_iocb_rsp *net_rsp = NULL;
2173 int count = 0;
2175 struct tx_ring *tx_ring;
2176 /* While there are entries in the completion queue. */
2177 while (prod != rx_ring->cnsmr_idx) {
2179 QPRINTK(qdev, RX_STATUS, DEBUG,
2180 "cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring->cq_id,
2181 prod, rx_ring->cnsmr_idx);
2183 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2184 rmb();
2185 switch (net_rsp->opcode) {
2187 case OPCODE_OB_MAC_TSO_IOCB:
2188 case OPCODE_OB_MAC_IOCB:
2189 ql_process_mac_tx_intr(qdev, net_rsp);
2190 break;
2191 default:
2192 QPRINTK(qdev, RX_STATUS, DEBUG,
2193 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2194 net_rsp->opcode);
2196 count++;
2197 ql_update_cq(rx_ring);
2198 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2200 ql_write_cq_idx(rx_ring);
2201 tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2202 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id) &&
2203 net_rsp != NULL) {
2204 if (atomic_read(&tx_ring->queue_stopped) &&
2205 (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2207 * The queue got stopped because the tx_ring was full.
2208 * Wake it up, because it's now at least 25% empty.
2210 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2213 return count;
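/* Wake-threshold note: the subqueue is restarted only when more than a
 * quarter of the work queue entries are free again (tx_count > wq_len / 4).
 * For example, a 256-entry tx_ring that was stopped is woken once more than
 * 64 descriptors are free, which keeps the queue from bouncing between
 * stopped and started on every completion.
 */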
2216 static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2218 struct ql_adapter *qdev = rx_ring->qdev;
2219 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2220 struct ql_net_rsp_iocb *net_rsp;
2221 int count = 0;
2223 /* While there are entries in the completion queue. */
2224 while (prod != rx_ring->cnsmr_idx) {
2226 QPRINTK(qdev, RX_STATUS, DEBUG,
2227 "cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring->cq_id,
2228 prod, rx_ring->cnsmr_idx);
2230 net_rsp = rx_ring->curr_entry;
2231 rmb();
2232 switch (net_rsp->opcode) {
2233 case OPCODE_IB_MAC_IOCB:
2234 ql_process_mac_rx_intr(qdev, rx_ring,
2235 (struct ib_mac_iocb_rsp *)
2236 net_rsp);
2237 break;
2239 case OPCODE_IB_AE_IOCB:
2240 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2241 net_rsp);
2242 break;
2243 default:
2245 QPRINTK(qdev, RX_STATUS, DEBUG,
2246 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2247 net_rsp->opcode);
2250 count++;
2251 ql_update_cq(rx_ring);
2252 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2253 if (count == budget)
2254 break;
2256 ql_update_buffer_queues(qdev, rx_ring);
2257 ql_write_cq_idx(rx_ring);
2258 return count;
2261 static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2263 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2264 struct ql_adapter *qdev = rx_ring->qdev;
2265 struct rx_ring *trx_ring;
2266 int i, work_done = 0;
2267 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2269 QPRINTK(qdev, RX_STATUS, DEBUG, "Enter, NAPI POLL cq_id = %d.\n",
2270 rx_ring->cq_id);
2272 /* Service the TX rings first. They start
2273 * right after the RSS rings. */
2274 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2275 trx_ring = &qdev->rx_ring[i];
2276 /* If this TX completion ring belongs to this vector and
2277 * it's not empty then service it.
2279 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2280 (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2281 trx_ring->cnsmr_idx)) {
2282 QPRINTK(qdev, INTR, DEBUG,
2283 "%s: Servicing TX completion ring %d.\n",
2284 __func__, trx_ring->cq_id);
2285 ql_clean_outbound_rx_ring(trx_ring);
2290 * Now service the RSS ring if it's active.
2292 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2293 rx_ring->cnsmr_idx) {
2294 QPRINTK(qdev, INTR, DEBUG,
2295 "%s: Servicing RX completion ring %d.\n",
2296 __func__, rx_ring->cq_id);
2297 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2300 if (work_done < budget) {
2301 napi_complete(napi);
2302 ql_enable_completion_interrupt(qdev, rx_ring->irq);
2304 return work_done;
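/* Poll-ordering note: each NAPI pass first drains the TX completion rings
 * owned by this vector (identified via ctx->irq_mask) and only then services
 * the RSS ring; only the RSS work is counted against the NAPI budget, and
 * the vector's interrupt is re-enabled only when that work fits within the
 * budget.
 */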
2307 static void qlge_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
2309 struct ql_adapter *qdev = netdev_priv(ndev);
2311 qdev->vlgrp = grp;
2312 if (grp) {
2313 QPRINTK(qdev, IFUP, DEBUG, "Turning on VLAN in NIC_RCV_CFG.\n");
2314 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2315 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2316 } else {
2317 QPRINTK(qdev, IFUP, DEBUG,
2318 "Turning off VLAN in NIC_RCV_CFG.\n");
2319 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2323 static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
2325 struct ql_adapter *qdev = netdev_priv(ndev);
2326 u32 enable_bit = MAC_ADDR_E;
2327 int status;
2329 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2330 if (status)
2331 return;
2332 if (ql_set_mac_addr_reg
2333 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2334 QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n");
2336 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2339 static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
2341 struct ql_adapter *qdev = netdev_priv(ndev);
2342 u32 enable_bit = 0;
2343 int status;
2345 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2346 if (status)
2347 return;
2349 if (ql_set_mac_addr_reg
2350 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2351 QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n");
2353 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2357 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2358 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2360 struct rx_ring *rx_ring = dev_id;
2361 napi_schedule(&rx_ring->napi);
2362 return IRQ_HANDLED;
2365 /* This handles a fatal error, MPI activity, and the default
2366 * rx_ring in an MSI-X multiple vector environment.
2367 * In an MSI/Legacy environment it also processes the rest of
2368 * the rx_rings.
2370 static irqreturn_t qlge_isr(int irq, void *dev_id)
2372 struct rx_ring *rx_ring = dev_id;
2373 struct ql_adapter *qdev = rx_ring->qdev;
2374 struct intr_context *intr_context = &qdev->intr_context[0];
2375 u32 var;
2376 int work_done = 0;
2378 spin_lock(&qdev->hw_lock);
2379 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2380 QPRINTK(qdev, INTR, DEBUG, "Shared Interrupt, Not ours!\n");
2381 spin_unlock(&qdev->hw_lock);
2382 return IRQ_NONE;
2384 spin_unlock(&qdev->hw_lock);
2386 var = ql_disable_completion_interrupt(qdev, intr_context->intr);
2389 * Check for fatal error.
2391 if (var & STS_FE) {
2392 ql_queue_asic_error(qdev);
2393 QPRINTK(qdev, INTR, ERR, "Got fatal error, STS = %x.\n", var);
2394 var = ql_read32(qdev, ERR_STS);
2395 QPRINTK(qdev, INTR, ERR,
2396 "Resetting chip. Error Status Register = 0x%x\n", var);
2397 return IRQ_HANDLED;
2401 * Check MPI processor activity.
2403 if ((var & STS_PI) &&
2404 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2406 * We've got an async event or mailbox completion.
2407 * Handle it and clear the source of the interrupt.
2409 QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n");
2410 ql_disable_completion_interrupt(qdev, intr_context->intr);
2411 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2412 queue_delayed_work_on(smp_processor_id(),
2413 qdev->workqueue, &qdev->mpi_work, 0);
2414 work_done++;
2418 * Get the bit-mask that shows the active queues for this
2419 * pass. Compare it to the queues that this irq services
2420 * and call napi if there's a match.
2422 var = ql_read32(qdev, ISR1);
2423 if (var & intr_context->irq_mask) {
2424 QPRINTK(qdev, INTR, INFO,
2425 "Waking handler for rx_ring[0].\n");
2426 ql_disable_completion_interrupt(qdev, intr_context->intr);
2427 napi_schedule(&rx_ring->napi);
2428 work_done++;
2430 ql_enable_completion_interrupt(qdev, intr_context->intr);
2431 return work_done ? IRQ_HANDLED : IRQ_NONE;
2434 static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2437 if (skb_is_gso(skb)) {
2438 int err;
2439 if (skb_header_cloned(skb)) {
2440 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2441 if (err)
2442 return err;
2445 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2446 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2447 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2448 mac_iocb_ptr->total_hdrs_len =
2449 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2450 mac_iocb_ptr->net_trans_offset =
2451 cpu_to_le16(skb_network_offset(skb) |
2452 skb_transport_offset(skb)
2453 << OB_MAC_TRANSPORT_HDR_SHIFT);
2454 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2455 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2456 if (likely(skb->protocol == htons(ETH_P_IP))) {
2457 struct iphdr *iph = ip_hdr(skb);
2458 iph->check = 0;
2459 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2460 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2461 iph->daddr, 0,
2462 IPPROTO_TCP,
2464 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2465 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2466 tcp_hdr(skb)->check =
2467 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2468 &ipv6_hdr(skb)->daddr,
2469 0, IPPROTO_TCP, 0);
2471 return 1;
2473 return 0;
2476 static void ql_hw_csum_setup(struct sk_buff *skb,
2477 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2479 int len;
2480 struct iphdr *iph = ip_hdr(skb);
2481 __sum16 *check;
2482 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2483 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2484 mac_iocb_ptr->net_trans_offset =
2485 cpu_to_le16(skb_network_offset(skb) |
2486 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2488 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2489 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2490 if (likely(iph->protocol == IPPROTO_TCP)) {
2491 check = &(tcp_hdr(skb)->check);
2492 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2493 mac_iocb_ptr->total_hdrs_len =
2494 cpu_to_le16(skb_transport_offset(skb) +
2495 (tcp_hdr(skb)->doff << 2));
2496 } else {
2497 check = &(udp_hdr(skb)->check);
2498 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2499 mac_iocb_ptr->total_hdrs_len =
2500 cpu_to_le16(skb_transport_offset(skb) +
2501 sizeof(struct udphdr));
2503 *check = ~csum_tcpudp_magic(iph->saddr,
2504 iph->daddr, len, iph->protocol, 0);
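/* Checksum-offload note: as is typical for partial checksum offload, the
 * TCP/UDP checksum field is pre-seeded here with the complement of the IPv4
 * pseudo-header sum (addresses, protocol and L4 length) via
 * ~csum_tcpudp_magic(); the OB_MAC_TSO_IOCB_TC/UC flag then selects TCP vs
 * UDP so the chip can finish the checksum over the payload.
 */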
2507 static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2509 struct tx_ring_desc *tx_ring_desc;
2510 struct ob_mac_iocb_req *mac_iocb_ptr;
2511 struct ql_adapter *qdev = netdev_priv(ndev);
2512 int tso;
2513 struct tx_ring *tx_ring;
2514 u32 tx_ring_idx = (u32) skb->queue_mapping;
2516 tx_ring = &qdev->tx_ring[tx_ring_idx];
2518 if (skb_padto(skb, ETH_ZLEN))
2519 return NETDEV_TX_OK;
2521 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2522 QPRINTK(qdev, TX_QUEUED, INFO,
2523 "%s: shutting down tx queue %d du to lack of resources.\n",
2524 __func__, tx_ring_idx);
2525 netif_stop_subqueue(ndev, tx_ring->wq_id);
2526 atomic_inc(&tx_ring->queue_stopped);
2527 tx_ring->tx_errors++;
2528 return NETDEV_TX_BUSY;
2530 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2531 mac_iocb_ptr = tx_ring_desc->queue_entry;
2532 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2534 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2535 mac_iocb_ptr->tid = tx_ring_desc->index;
2536 /* Store the tx queue index for this IO in the IOCB so the
2537 * completion handler can use it to find the right tx_ring.
2539 mac_iocb_ptr->txq_idx = tx_ring_idx;
2540 tx_ring_desc->skb = skb;
2542 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2544 if (qdev->vlgrp && vlan_tx_tag_present(skb)) {
2545 QPRINTK(qdev, TX_QUEUED, DEBUG, "Adding a vlan tag %d.\n",
2546 vlan_tx_tag_get(skb));
2547 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2548 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2550 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2551 if (tso < 0) {
2552 dev_kfree_skb_any(skb);
2553 return NETDEV_TX_OK;
2554 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2555 ql_hw_csum_setup(skb,
2556 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2558 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2559 NETDEV_TX_OK) {
2560 QPRINTK(qdev, TX_QUEUED, ERR,
2561 "Could not map the segments.\n");
2562 tx_ring->tx_errors++;
2563 return NETDEV_TX_BUSY;
2565 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2566 tx_ring->prod_idx++;
2567 if (tx_ring->prod_idx == tx_ring->wq_len)
2568 tx_ring->prod_idx = 0;
2569 wmb();
2571 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2572 QPRINTK(qdev, TX_QUEUED, DEBUG, "tx queued, slot %d, len %d\n",
2573 tx_ring->prod_idx, skb->len);
2575 atomic_dec(&tx_ring->tx_count);
2576 return NETDEV_TX_OK;
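/* Transmit-path note: the subqueue is stopped pre-emptively when fewer than
 * two free descriptors remain, the IOCB records the tx queue index so the
 * completion handler can find its tx_ring, and the producer index wraps at
 * wq_len before the doorbell write; the wmb() orders the IOCB writes ahead
 * of the doorbell so the chip never sees a half-built entry.
 */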
2580 static void ql_free_shadow_space(struct ql_adapter *qdev)
2582 if (qdev->rx_ring_shadow_reg_area) {
2583 pci_free_consistent(qdev->pdev,
2584 PAGE_SIZE,
2585 qdev->rx_ring_shadow_reg_area,
2586 qdev->rx_ring_shadow_reg_dma);
2587 qdev->rx_ring_shadow_reg_area = NULL;
2589 if (qdev->tx_ring_shadow_reg_area) {
2590 pci_free_consistent(qdev->pdev,
2591 PAGE_SIZE,
2592 qdev->tx_ring_shadow_reg_area,
2593 qdev->tx_ring_shadow_reg_dma);
2594 qdev->tx_ring_shadow_reg_area = NULL;
2598 static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2600 qdev->rx_ring_shadow_reg_area =
2601 pci_alloc_consistent(qdev->pdev,
2602 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2603 if (qdev->rx_ring_shadow_reg_area == NULL) {
2604 QPRINTK(qdev, IFUP, ERR,
2605 "Allocation of RX shadow space failed.\n");
2606 return -ENOMEM;
2608 memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
2609 qdev->tx_ring_shadow_reg_area =
2610 pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2611 &qdev->tx_ring_shadow_reg_dma);
2612 if (qdev->tx_ring_shadow_reg_area == NULL) {
2613 QPRINTK(qdev, IFUP, ERR,
2614 "Allocation of TX shadow space failed.\n");
2615 goto err_wqp_sh_area;
2617 memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
2618 return 0;
2620 err_wqp_sh_area:
2621 pci_free_consistent(qdev->pdev,
2622 PAGE_SIZE,
2623 qdev->rx_ring_shadow_reg_area,
2624 qdev->rx_ring_shadow_reg_dma);
2625 return -ENOMEM;
2628 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2630 struct tx_ring_desc *tx_ring_desc;
2631 int i;
2632 struct ob_mac_iocb_req *mac_iocb_ptr;
2634 mac_iocb_ptr = tx_ring->wq_base;
2635 tx_ring_desc = tx_ring->q;
2636 for (i = 0; i < tx_ring->wq_len; i++) {
2637 tx_ring_desc->index = i;
2638 tx_ring_desc->skb = NULL;
2639 tx_ring_desc->queue_entry = mac_iocb_ptr;
2640 mac_iocb_ptr++;
2641 tx_ring_desc++;
2643 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2644 atomic_set(&tx_ring->queue_stopped, 0);
2647 static void ql_free_tx_resources(struct ql_adapter *qdev,
2648 struct tx_ring *tx_ring)
2650 if (tx_ring->wq_base) {
2651 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2652 tx_ring->wq_base, tx_ring->wq_base_dma);
2653 tx_ring->wq_base = NULL;
2655 kfree(tx_ring->q);
2656 tx_ring->q = NULL;
2659 static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2660 struct tx_ring *tx_ring)
2662 tx_ring->wq_base =
2663 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2664 &tx_ring->wq_base_dma);
2666 if ((tx_ring->wq_base == NULL) ||
2667 tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
2668 QPRINTK(qdev, IFUP, ERR, "tx_ring alloc failed.\n");
2669 return -ENOMEM;
2671 tx_ring->q =
2672 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2673 if (tx_ring->q == NULL)
2674 goto err;
2676 return 0;
2677 err:
2678 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2679 tx_ring->wq_base, tx_ring->wq_base_dma);
2680 return -ENOMEM;
2683 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2685 struct bq_desc *lbq_desc;
2687 uint32_t curr_idx, clean_idx;
2689 curr_idx = rx_ring->lbq_curr_idx;
2690 clean_idx = rx_ring->lbq_clean_idx;
2691 while (curr_idx != clean_idx) {
2692 lbq_desc = &rx_ring->lbq[curr_idx];
2694 if (lbq_desc->p.pg_chunk.last_flag) {
2695 pci_unmap_page(qdev->pdev,
2696 lbq_desc->p.pg_chunk.map,
2697 ql_lbq_block_size(qdev),
2698 PCI_DMA_FROMDEVICE);
2699 lbq_desc->p.pg_chunk.last_flag = 0;
2702 put_page(lbq_desc->p.pg_chunk.page);
2703 lbq_desc->p.pg_chunk.page = NULL;
2705 if (++curr_idx == rx_ring->lbq_len)
2706 curr_idx = 0;
2711 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2713 int i;
2714 struct bq_desc *sbq_desc;
2716 for (i = 0; i < rx_ring->sbq_len; i++) {
2717 sbq_desc = &rx_ring->sbq[i];
2718 if (sbq_desc == NULL) {
2719 QPRINTK(qdev, IFUP, ERR, "sbq_desc %d is NULL.\n", i);
2720 return;
2722 if (sbq_desc->p.skb) {
2723 pci_unmap_single(qdev->pdev,
2724 pci_unmap_addr(sbq_desc, mapaddr),
2725 pci_unmap_len(sbq_desc, maplen),
2726 PCI_DMA_FROMDEVICE);
2727 dev_kfree_skb(sbq_desc->p.skb);
2728 sbq_desc->p.skb = NULL;
2733 /* Free all large and small rx buffers associated
2734 * with the completion queues for this device.
2736 static void ql_free_rx_buffers(struct ql_adapter *qdev)
2738 int i;
2739 struct rx_ring *rx_ring;
2741 for (i = 0; i < qdev->rx_ring_count; i++) {
2742 rx_ring = &qdev->rx_ring[i];
2743 if (rx_ring->lbq)
2744 ql_free_lbq_buffers(qdev, rx_ring);
2745 if (rx_ring->sbq)
2746 ql_free_sbq_buffers(qdev, rx_ring);
2750 static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2752 struct rx_ring *rx_ring;
2753 int i;
2755 for (i = 0; i < qdev->rx_ring_count; i++) {
2756 rx_ring = &qdev->rx_ring[i];
2757 if (rx_ring->type != TX_Q)
2758 ql_update_buffer_queues(qdev, rx_ring);
2762 static void ql_init_lbq_ring(struct ql_adapter *qdev,
2763 struct rx_ring *rx_ring)
2765 int i;
2766 struct bq_desc *lbq_desc;
2767 __le64 *bq = rx_ring->lbq_base;
2769 memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2770 for (i = 0; i < rx_ring->lbq_len; i++) {
2771 lbq_desc = &rx_ring->lbq[i];
2772 memset(lbq_desc, 0, sizeof(*lbq_desc));
2773 lbq_desc->index = i;
2774 lbq_desc->addr = bq;
2775 bq++;
2779 static void ql_init_sbq_ring(struct ql_adapter *qdev,
2780 struct rx_ring *rx_ring)
2782 int i;
2783 struct bq_desc *sbq_desc;
2784 __le64 *bq = rx_ring->sbq_base;
2786 memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
2787 for (i = 0; i < rx_ring->sbq_len; i++) {
2788 sbq_desc = &rx_ring->sbq[i];
2789 memset(sbq_desc, 0, sizeof(*sbq_desc));
2790 sbq_desc->index = i;
2791 sbq_desc->addr = bq;
2792 bq++;
2796 static void ql_free_rx_resources(struct ql_adapter *qdev,
2797 struct rx_ring *rx_ring)
2799 /* Free the small buffer queue. */
2800 if (rx_ring->sbq_base) {
2801 pci_free_consistent(qdev->pdev,
2802 rx_ring->sbq_size,
2803 rx_ring->sbq_base, rx_ring->sbq_base_dma);
2804 rx_ring->sbq_base = NULL;
2807 /* Free the small buffer queue control blocks. */
2808 kfree(rx_ring->sbq);
2809 rx_ring->sbq = NULL;
2811 /* Free the large buffer queue. */
2812 if (rx_ring->lbq_base) {
2813 pci_free_consistent(qdev->pdev,
2814 rx_ring->lbq_size,
2815 rx_ring->lbq_base, rx_ring->lbq_base_dma);
2816 rx_ring->lbq_base = NULL;
2819 /* Free the large buffer queue control blocks. */
2820 kfree(rx_ring->lbq);
2821 rx_ring->lbq = NULL;
2823 /* Free the rx queue. */
2824 if (rx_ring->cq_base) {
2825 pci_free_consistent(qdev->pdev,
2826 rx_ring->cq_size,
2827 rx_ring->cq_base, rx_ring->cq_base_dma);
2828 rx_ring->cq_base = NULL;
2832 /* Allocate queues and buffers for this completion queue based
2833 * on the values in the parameter structure. */
2834 static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2835 struct rx_ring *rx_ring)
2839 * Allocate the completion queue for this rx_ring.
2841 rx_ring->cq_base =
2842 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2843 &rx_ring->cq_base_dma);
2845 if (rx_ring->cq_base == NULL) {
2846 QPRINTK(qdev, IFUP, ERR, "rx_ring alloc failed.\n");
2847 return -ENOMEM;
2850 if (rx_ring->sbq_len) {
2852 * Allocate small buffer queue.
2854 rx_ring->sbq_base =
2855 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2856 &rx_ring->sbq_base_dma);
2858 if (rx_ring->sbq_base == NULL) {
2859 QPRINTK(qdev, IFUP, ERR,
2860 "Small buffer queue allocation failed.\n");
2861 goto err_mem;
2865 * Allocate small buffer queue control blocks.
2867 rx_ring->sbq =
2868 kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2869 GFP_KERNEL);
2870 if (rx_ring->sbq == NULL) {
2871 QPRINTK(qdev, IFUP, ERR,
2872 "Small buffer queue control block allocation failed.\n");
2873 goto err_mem;
2876 ql_init_sbq_ring(qdev, rx_ring);
2879 if (rx_ring->lbq_len) {
2881 * Allocate large buffer queue.
2883 rx_ring->lbq_base =
2884 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2885 &rx_ring->lbq_base_dma);
2887 if (rx_ring->lbq_base == NULL) {
2888 QPRINTK(qdev, IFUP, ERR,
2889 "Large buffer queue allocation failed.\n");
2890 goto err_mem;
2893 * Allocate large buffer queue control blocks.
2895 rx_ring->lbq =
2896 kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2897 GFP_KERNEL);
2898 if (rx_ring->lbq == NULL) {
2899 QPRINTK(qdev, IFUP, ERR,
2900 "Large buffer queue control block allocation failed.\n");
2901 goto err_mem;
2904 ql_init_lbq_ring(qdev, rx_ring);
2907 return 0;
2909 err_mem:
2910 ql_free_rx_resources(qdev, rx_ring);
2911 return -ENOMEM;
2914 static void ql_tx_ring_clean(struct ql_adapter *qdev)
2916 struct tx_ring *tx_ring;
2917 struct tx_ring_desc *tx_ring_desc;
2918 int i, j;
2921 * Loop through all queues and free
2922 * any resources.
2924 for (j = 0; j < qdev->tx_ring_count; j++) {
2925 tx_ring = &qdev->tx_ring[j];
2926 for (i = 0; i < tx_ring->wq_len; i++) {
2927 tx_ring_desc = &tx_ring->q[i];
2928 if (tx_ring_desc && tx_ring_desc->skb) {
2929 QPRINTK(qdev, IFDOWN, ERR,
2930 "Freeing lost SKB %p, from queue %d, index %d.\n",
2931 tx_ring_desc->skb, j,
2932 tx_ring_desc->index);
2933 ql_unmap_send(qdev, tx_ring_desc,
2934 tx_ring_desc->map_cnt);
2935 dev_kfree_skb(tx_ring_desc->skb);
2936 tx_ring_desc->skb = NULL;
2942 static void ql_free_mem_resources(struct ql_adapter *qdev)
2944 int i;
2946 for (i = 0; i < qdev->tx_ring_count; i++)
2947 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2948 for (i = 0; i < qdev->rx_ring_count; i++)
2949 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
2950 ql_free_shadow_space(qdev);
2953 static int ql_alloc_mem_resources(struct ql_adapter *qdev)
2955 int i;
2957 /* Allocate space for our shadow registers and such. */
2958 if (ql_alloc_shadow_space(qdev))
2959 return -ENOMEM;
2961 for (i = 0; i < qdev->rx_ring_count; i++) {
2962 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
2963 QPRINTK(qdev, IFUP, ERR,
2964 "RX resource allocation failed.\n");
2965 goto err_mem;
2968 /* Allocate tx queue resources */
2969 for (i = 0; i < qdev->tx_ring_count; i++) {
2970 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
2971 QPRINTK(qdev, IFUP, ERR,
2972 "TX resource allocation failed.\n");
2973 goto err_mem;
2976 return 0;
2978 err_mem:
2979 ql_free_mem_resources(qdev);
2980 return -ENOMEM;
2983 /* Set up the rx ring control block and pass it to the chip.
2984 * The control block is defined as
2985 * "Completion Queue Initialization Control Block", or cqicb.
2987 static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2989 struct cqicb *cqicb = &rx_ring->cqicb;
2990 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
2991 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
2992 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
2993 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
2994 void __iomem *doorbell_area =
2995 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
2996 int err = 0;
2997 u16 bq_len;
2998 u64 tmp;
2999 __le64 *base_indirect_ptr;
3000 int page_entries;
3002 /* Set up the shadow registers for this ring. */
3003 rx_ring->prod_idx_sh_reg = shadow_reg;
3004 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
3005 *rx_ring->prod_idx_sh_reg = 0;
3006 shadow_reg += sizeof(u64);
3007 shadow_reg_dma += sizeof(u64);
3008 rx_ring->lbq_base_indirect = shadow_reg;
3009 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
3010 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3011 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3012 rx_ring->sbq_base_indirect = shadow_reg;
3013 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3015 /* PCI doorbell mem area + 0x00 for consumer index register */
3016 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
3017 rx_ring->cnsmr_idx = 0;
3018 rx_ring->curr_entry = rx_ring->cq_base;
3020 /* PCI doorbell mem area + 0x04 for valid register */
3021 rx_ring->valid_db_reg = doorbell_area + 0x04;
3023 /* PCI doorbell mem area + 0x18 for large buffer consumer */
3024 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
3026 /* PCI doorbell mem area + 0x1c */
3027 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
3029 memset((void *)cqicb, 0, sizeof(struct cqicb));
3030 cqicb->msix_vect = rx_ring->irq;
3032 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3033 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
3035 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
3037 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
3040 * Set up the control block load flags.
3042 cqicb->flags = FLAGS_LC | /* Load queue base address */
3043 FLAGS_LV | /* Load MSI-X vector */
3044 FLAGS_LI; /* Load irq delay values */
3045 if (rx_ring->lbq_len) {
3046 cqicb->flags |= FLAGS_LL; /* Load lbq values */
3047 tmp = (u64)rx_ring->lbq_base_dma;
3048 base_indirect_ptr = (__le64 *) rx_ring->lbq_base_indirect;
3049 page_entries = 0;
3050 do {
3051 *base_indirect_ptr = cpu_to_le64(tmp);
3052 tmp += DB_PAGE_SIZE;
3053 base_indirect_ptr++;
3054 page_entries++;
3055 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3056 cqicb->lbq_addr =
3057 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
3058 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3059 (u16) rx_ring->lbq_buf_size;
3060 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3061 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3062 (u16) rx_ring->lbq_len;
3063 cqicb->lbq_len = cpu_to_le16(bq_len);
3064 rx_ring->lbq_prod_idx = 0;
3065 rx_ring->lbq_curr_idx = 0;
3066 rx_ring->lbq_clean_idx = 0;
3067 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
3069 if (rx_ring->sbq_len) {
3070 cqicb->flags |= FLAGS_LS; /* Load sbq values */
3071 tmp = (u64)rx_ring->sbq_base_dma;
3072 base_indirect_ptr = (__le64 *) rx_ring->sbq_base_indirect;
3073 page_entries = 0;
3074 do {
3075 *base_indirect_ptr = cpu_to_le64(tmp);
3076 tmp += DB_PAGE_SIZE;
3077 base_indirect_ptr++;
3078 page_entries++;
3079 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
3080 cqicb->sbq_addr =
3081 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
3082 cqicb->sbq_buf_size =
3083 cpu_to_le16((u16)(rx_ring->sbq_buf_size));
3084 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3085 (u16) rx_ring->sbq_len;
3086 cqicb->sbq_len = cpu_to_le16(bq_len);
3087 rx_ring->sbq_prod_idx = 0;
3088 rx_ring->sbq_curr_idx = 0;
3089 rx_ring->sbq_clean_idx = 0;
3090 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
3092 switch (rx_ring->type) {
3093 case TX_Q:
3094 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3095 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3096 break;
3097 case RX_Q:
3098 /* Inbound completion handling rx_rings run in
3099 * separate NAPI contexts.
3101 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3102 64);
3103 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3104 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3105 break;
3106 default:
3107 QPRINTK(qdev, IFUP, DEBUG, "Invalid rx_ring->type = %d.\n",
3108 rx_ring->type);
3110 QPRINTK(qdev, IFUP, DEBUG, "Initializing rx work queue.\n");
3111 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3112 CFG_LCQ, rx_ring->cq_id);
3113 if (err) {
3114 QPRINTK(qdev, IFUP, ERR, "Failed to load CQICB.\n");
3115 return err;
3117 return err;
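/* CQICB note: the large/small buffer queues are not handed to the chip as
 * one contiguous address.  Their base addresses are broken into DB_PAGE_SIZE
 * chunks written to the per-ring shadow area (lbq/sbq_base_indirect), and
 * the CQICB points at those indirect lists.  Lengths of exactly 65536 are
 * encoded as 0 in the 16-bit length fields.
 */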
3120 static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3122 struct wqicb *wqicb = (struct wqicb *)tx_ring;
3123 void __iomem *doorbell_area =
3124 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3125 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3126 (tx_ring->wq_id * sizeof(u64));
3127 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3128 (tx_ring->wq_id * sizeof(u64));
3129 int err = 0;
3132 * Assign doorbell registers for this tx_ring.
3134 /* TX PCI doorbell mem area for tx producer index */
3135 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
3136 tx_ring->prod_idx = 0;
3137 /* TX PCI doorbell mem area + 0x04 */
3138 tx_ring->valid_db_reg = doorbell_area + 0x04;
3141 * Assign shadow registers for this tx_ring.
3143 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3144 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3146 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3147 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3148 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3149 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3150 wqicb->rid = 0;
3151 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
3153 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
3155 ql_init_tx_ring(qdev, tx_ring);
3157 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
3158 (u16) tx_ring->wq_id);
3159 if (err) {
3160 QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n");
3161 return err;
3163 QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded WQICB.\n");
3164 return err;
3167 static void ql_disable_msix(struct ql_adapter *qdev)
3169 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3170 pci_disable_msix(qdev->pdev);
3171 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3172 kfree(qdev->msi_x_entry);
3173 qdev->msi_x_entry = NULL;
3174 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3175 pci_disable_msi(qdev->pdev);
3176 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3180 /* We start by trying to get the number of vectors
3181 * stored in qdev->intr_count. If we don't get that
3182 * many then we reduce the count and try again.
3184 static void ql_enable_msix(struct ql_adapter *qdev)
3186 int i, err;
3188 /* Get the MSIX vectors. */
3189 if (qlge_irq_type == MSIX_IRQ) {
3190 /* Try to alloc space for the msix struct;
3191 * if that fails then fall back to MSI/legacy.
3193 qdev->msi_x_entry = kcalloc(qdev->intr_count,
3194 sizeof(struct msix_entry),
3195 GFP_KERNEL);
3196 if (!qdev->msi_x_entry) {
3197 qlge_irq_type = MSI_IRQ;
3198 goto msi;
3201 for (i = 0; i < qdev->intr_count; i++)
3202 qdev->msi_x_entry[i].entry = i;
3204 /* Loop to get our vectors. We start with
3205 * what we want and settle for what we get.
3207 do {
3208 err = pci_enable_msix(qdev->pdev,
3209 qdev->msi_x_entry, qdev->intr_count);
3210 if (err > 0)
3211 qdev->intr_count = err;
3212 } while (err > 0);
3214 if (err < 0) {
3215 kfree(qdev->msi_x_entry);
3216 qdev->msi_x_entry = NULL;
3217 QPRINTK(qdev, IFUP, WARNING,
3218 "MSI-X Enable failed, trying MSI.\n");
3219 qdev->intr_count = 1;
3220 qlge_irq_type = MSI_IRQ;
3221 } else if (err == 0) {
3222 set_bit(QL_MSIX_ENABLED, &qdev->flags);
3223 QPRINTK(qdev, IFUP, INFO,
3224 "MSI-X Enabled, got %d vectors.\n",
3225 qdev->intr_count);
3226 return;
3229 msi:
3230 qdev->intr_count = 1;
3231 if (qlge_irq_type == MSI_IRQ) {
3232 if (!pci_enable_msi(qdev->pdev)) {
3233 set_bit(QL_MSI_ENABLED, &qdev->flags);
3234 QPRINTK(qdev, IFUP, INFO,
3235 "Running with MSI interrupts.\n");
3236 return;
3239 qlge_irq_type = LEG_IRQ;
3240 QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n");
3243 /* Each vector services 1 RSS ring and 1 or more
3244 * TX completion rings. This function loops through
3245 * the TX completion rings and assigns the vector that
3246 * will service it. An example would be if there are
3247 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3248 * This would mean that vector 0 would service RSS ring 0
3249 * and TX completion rings 0,1,2 and 3. Vector 1 would
3250 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3252 static void ql_set_tx_vect(struct ql_adapter *qdev)
3254 int i, j, vect;
3255 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3257 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3258 /* Assign irq vectors to TX rx_rings.*/
3259 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3260 i < qdev->rx_ring_count; i++) {
3261 if (j == tx_rings_per_vector) {
3262 vect++;
3263 j = 0;
3265 qdev->rx_ring[i].irq = vect;
3266 j++;
3268 } else {
3269 /* For a single vector all rings have an irq
3270 * of zero.
3272 for (i = 0; i < qdev->rx_ring_count; i++)
3273 qdev->rx_ring[i].irq = 0;
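/* Illustrative sketch (not used by the driver): with MSI-X enabled, the
 * assignment loop above is equivalent to the closed form below.  Using the
 * example from the comment, 2 vectors and 8 TX completion rings give
 * tx_rings_per_vector = 4, so TX rings 0-3 land on vector 0 and TX rings
 * 4-7 on vector 1.
 */
#if 0
static u32 example_tx_ring_vector(u32 tx_idx, u32 tx_ring_count,
				  u32 intr_count)
{
	u32 tx_rings_per_vector = tx_ring_count / intr_count;

	/* vector that services TX completion ring tx_idx */
	return tx_idx / tx_rings_per_vector;
}
#endif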
3277 /* Set the interrupt mask for this vector. Each vector
3278 * will service 1 RSS ring and 1 or more TX completion
3279 * rings. This function sets up a bit mask per vector
3280 * that indicates which rings it services.
3282 static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3284 int j, vect = ctx->intr;
3285 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3287 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3288 /* Add the RSS ring serviced by this vector
3289 * to the mask.
3291 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3292 /* Add the TX ring(s) serviced by this vector
3293 * to the mask. */
3294 for (j = 0; j < tx_rings_per_vector; j++) {
3295 ctx->irq_mask |=
3296 (1 << qdev->rx_ring[qdev->rss_ring_count +
3297 (vect * tx_rings_per_vector) + j].cq_id);
3299 } else {
3300 /* For a single vector we just shift each queue's
3301 * ID into the mask.
3303 for (j = 0; j < qdev->rx_ring_count; j++)
3304 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
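/* Worked example for the mask above: with 2 vectors and 8 TX completion
 * rings, cq_ids 0-1 are the RSS rings and cq_ids 2-9 the TX completion
 * rings, so vector 0 ends up with irq_mask 0x3d (cq 0, 2, 3, 4, 5) and
 * vector 1 with 0x3c2 (cq 1, 6, 7, 8, 9).  In the single-vector case the
 * mask simply covers every completion queue.
 */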
3309 * Here we build the intr_context structures based on
3310 * our rx_ring count and intr vector count.
3311 * The intr_context structure is used to hook each vector
3312 * to possibly different handlers.
3314 static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3316 int i = 0;
3317 struct intr_context *intr_context = &qdev->intr_context[0];
3319 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3320 /* Each rx_ring has its
3321 * own intr_context since we have separate
3322 * vectors for each queue.
3324 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3325 qdev->rx_ring[i].irq = i;
3326 intr_context->intr = i;
3327 intr_context->qdev = qdev;
3328 /* Set up this vector's bit-mask that indicates
3329 * which queues it services.
3331 ql_set_irq_mask(qdev, intr_context);
3333 * We set up each vector's enable/disable/read bits so
3334 * there are no bit/mask calculations in the critical path.
3336 intr_context->intr_en_mask =
3337 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3338 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3339 | i;
3340 intr_context->intr_dis_mask =
3341 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3342 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3343 INTR_EN_IHD | i;
3344 intr_context->intr_read_mask =
3345 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3346 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3348 if (i == 0) {
3349 /* The first vector/queue handles
3350 * broadcast/multicast, fatal errors,
3351 * and firmware events. This is in addition
3352 * to normal inbound NAPI processing.
3354 intr_context->handler = qlge_isr;
3355 sprintf(intr_context->name, "%s-rx-%d",
3356 qdev->ndev->name, i);
3357 } else {
3359 * Inbound queues handle unicast frames only.
3361 intr_context->handler = qlge_msix_rx_isr;
3362 sprintf(intr_context->name, "%s-rx-%d",
3363 qdev->ndev->name, i);
3366 } else {
3368 * All rx_rings use the same intr_context since
3369 * there is only one vector.
3371 intr_context->intr = 0;
3372 intr_context->qdev = qdev;
3374 * We set up each vector's enable/disable/read bits so
3375 * there are no bit/mask calculations in the critical path.
3377 intr_context->intr_en_mask =
3378 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3379 intr_context->intr_dis_mask =
3380 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3381 INTR_EN_TYPE_DISABLE;
3382 intr_context->intr_read_mask =
3383 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3385 * Single interrupt means one handler for all rings.
3387 intr_context->handler = qlge_isr;
3388 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3389 /* Set up this vector's bit-mask that indicates
3390 * which queues it services. In this case there is
3391 * a single vector so it will service all RSS and
3392 * TX completion rings.
3394 ql_set_irq_mask(qdev, intr_context);
3396 /* Tell the TX completion rings which MSIx vector
3397 * they will be using.
3399 ql_set_tx_vect(qdev);
3402 static void ql_free_irq(struct ql_adapter *qdev)
3404 int i;
3405 struct intr_context *intr_context = &qdev->intr_context[0];
3407 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3408 if (intr_context->hooked) {
3409 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3410 free_irq(qdev->msi_x_entry[i].vector,
3411 &qdev->rx_ring[i]);
3412 QPRINTK(qdev, IFDOWN, DEBUG,
3413 "freeing msix interrupt %d.\n", i);
3414 } else {
3415 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3416 QPRINTK(qdev, IFDOWN, DEBUG,
3417 "freeing msi interrupt %d.\n", i);
3421 ql_disable_msix(qdev);
3424 static int ql_request_irq(struct ql_adapter *qdev)
3426 int i;
3427 int status = 0;
3428 struct pci_dev *pdev = qdev->pdev;
3429 struct intr_context *intr_context = &qdev->intr_context[0];
3431 ql_resolve_queues_to_irqs(qdev);
3433 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3434 atomic_set(&intr_context->irq_cnt, 0);
3435 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3436 status = request_irq(qdev->msi_x_entry[i].vector,
3437 intr_context->handler,
3439 intr_context->name,
3440 &qdev->rx_ring[i]);
3441 if (status) {
3442 QPRINTK(qdev, IFUP, ERR,
3443 "Failed request for MSIX interrupt %d.\n",
3445 goto err_irq;
3446 } else {
3447 QPRINTK(qdev, IFUP, DEBUG,
3448 "Hooked intr %d, queue type %s%s%s, with name %s.\n",
3450 qdev->rx_ring[i].type ==
3451 DEFAULT_Q ? "DEFAULT_Q" : "",
3452 qdev->rx_ring[i].type ==
3453 TX_Q ? "TX_Q" : "",
3454 qdev->rx_ring[i].type ==
3455 RX_Q ? "RX_Q" : "", intr_context->name);
3457 } else {
3458 QPRINTK(qdev, IFUP, DEBUG,
3459 "trying msi or legacy interrupts.\n");
3460 QPRINTK(qdev, IFUP, DEBUG,
3461 "%s: irq = %d.\n", __func__, pdev->irq);
3462 QPRINTK(qdev, IFUP, DEBUG,
3463 "%s: context->name = %s.\n", __func__,
3464 intr_context->name);
3465 QPRINTK(qdev, IFUP, DEBUG,
3466 "%s: dev_id = 0x%p.\n", __func__,
3467 &qdev->rx_ring[0]);
3468 status =
3469 request_irq(pdev->irq, qlge_isr,
3470 test_bit(QL_MSI_ENABLED,
3471 &qdev->
3472 flags) ? 0 : IRQF_SHARED,
3473 intr_context->name, &qdev->rx_ring[0]);
3474 if (status)
3475 goto err_irq;
3477 QPRINTK(qdev, IFUP, ERR,
3478 "Hooked intr %d, queue type %s%s%s, with name %s.\n",
3480 qdev->rx_ring[0].type ==
3481 DEFAULT_Q ? "DEFAULT_Q" : "",
3482 qdev->rx_ring[0].type == TX_Q ? "TX_Q" : "",
3483 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3484 intr_context->name);
3486 intr_context->hooked = 1;
3488 return status;
3489 err_irq:
3490 QPRINTK(qdev, IFUP, ERR, "Failed to get the interrupts!!!/n");
3491 ql_free_irq(qdev);
3492 return status;
3495 static int ql_start_rss(struct ql_adapter *qdev)
3497 u8 init_hash_seed[] = {0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3498 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f,
3499 0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b,
3500 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80,
3501 0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b,
3502 0xbe, 0xac, 0x01, 0xfa};
3503 struct ricb *ricb = &qdev->ricb;
3504 int status = 0;
3505 int i;
3506 u8 *hash_id = (u8 *) ricb->hash_cq_id;
3508 memset((void *)ricb, 0, sizeof(*ricb));
3510 ricb->base_cq = RSS_L4K;
3511 ricb->flags =
3512 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3513 ricb->mask = cpu_to_le16((u16)(0x3ff));
3516 * Fill out the Indirection Table.
3518 for (i = 0; i < 1024; i++)
3519 hash_id[i] = (i & (qdev->rss_ring_count - 1));
3521 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3522 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3524 QPRINTK(qdev, IFUP, DEBUG, "Initializing RSS.\n");
3526 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3527 if (status) {
3528 QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n");
3529 return status;
3531 QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded RICB.\n");
3532 return status;
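/* Illustrative sketch (not used by the driver): the 1024-entry indirection
 * table filled above spreads RSS hash buckets across the rings round-robin;
 * the mask arithmetic only distributes evenly when rss_ring_count is a
 * power of two.  With 4 RSS rings the table reads 0,1,2,3,0,1,2,3,... and a
 * bucket resolves as below.
 */
#if 0
static u8 example_rss_ring_for_bucket(u32 bucket, u32 rss_ring_count)
{
	/* same math as the hash_id[] fill above */
	return bucket & (rss_ring_count - 1);
}
#endif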
3535 static int ql_clear_routing_entries(struct ql_adapter *qdev)
3537 int i, status = 0;
3539 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3540 if (status)
3541 return status;
3542 /* Clear all the entries in the routing table. */
3543 for (i = 0; i < 16; i++) {
3544 status = ql_set_routing_reg(qdev, i, 0, 0);
3545 if (status) {
3546 QPRINTK(qdev, IFUP, ERR,
3547 "Failed to init routing register for CAM "
3548 "packets.\n");
3549 break;
3552 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3553 return status;
3556 /* Initialize the frame-to-queue routing. */
3557 static int ql_route_initialize(struct ql_adapter *qdev)
3559 int status = 0;
3561 /* Clear all the entries in the routing table. */
3562 status = ql_clear_routing_entries(qdev);
3563 if (status)
3564 return status;
3566 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3567 if (status)
3568 return status;
3570 status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
3571 if (status) {
3572 QPRINTK(qdev, IFUP, ERR,
3573 "Failed to init routing register for error packets.\n");
3574 goto exit;
3576 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3577 if (status) {
3578 QPRINTK(qdev, IFUP, ERR,
3579 "Failed to init routing register for broadcast packets.\n");
3580 goto exit;
3582 /* If we have more than one inbound queue, then turn on RSS in the
3583 * routing block.
3585 if (qdev->rss_ring_count > 1) {
3586 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3587 RT_IDX_RSS_MATCH, 1);
3588 if (status) {
3589 QPRINTK(qdev, IFUP, ERR,
3590 "Failed to init routing register for MATCH RSS packets.\n");
3591 goto exit;
3595 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3596 RT_IDX_CAM_HIT, 1);
3597 if (status)
3598 QPRINTK(qdev, IFUP, ERR,
3599 "Failed to init routing register for CAM packets.\n");
3600 exit:
3601 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3602 return status;
3605 int ql_cam_route_initialize(struct ql_adapter *qdev)
3607 int status, set;
3609 /* Check if the link is up and use that to
3610 * determine if we are setting or clearing
3611 * the MAC address in the CAM.
3613 set = ql_read32(qdev, STS);
3614 set &= qdev->port_link_up;
3615 status = ql_set_mac_addr(qdev, set);
3616 if (status) {
3617 QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n");
3618 return status;
3621 status = ql_route_initialize(qdev);
3622 if (status)
3623 QPRINTK(qdev, IFUP, ERR, "Failed to init routing table.\n");
3625 return status;
3628 static int ql_adapter_initialize(struct ql_adapter *qdev)
3630 u32 value, mask;
3631 int i;
3632 int status = 0;
3635 * Set up the System register to halt on errors.
3637 value = SYS_EFE | SYS_FAE;
3638 mask = value << 16;
3639 ql_write32(qdev, SYS, mask | value);
3641 /* Set the default queue, and VLAN behavior. */
3642 value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3643 mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
3644 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3646 /* Set the MPI interrupt to enabled. */
3647 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3649 /* Enable the function, set pagesize, enable error checking. */
3650 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3651 FSC_EC | FSC_VM_PAGE_4K;
3652 value |= SPLT_SETTING;
3654 /* Set/clear header splitting. */
3655 mask = FSC_VM_PAGESIZE_MASK |
3656 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3657 ql_write32(qdev, FSC, mask | value);
3659 ql_write32(qdev, SPLT_HDR, SPLT_LEN);
3661 /* Set RX packet routing to use the port/pci function on which the
3662 * packet arrived, in addition to the usual frame routing.
3663 * This is helpful on bonding where both interfaces can have
3664 * the same MAC address.
3666 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3667 /* Reroute all packets to our Interface.
3668 * They may have been routed to MPI firmware
3669 * due to WOL.
3671 value = ql_read32(qdev, MGMT_RCV_CFG);
3672 value &= ~MGMT_RCV_CFG_RM;
3673 mask = 0xffff0000;
3675 /* Sticky reg needs clearing due to WOL. */
3676 ql_write32(qdev, MGMT_RCV_CFG, mask);
3677 ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3679 /* Default WOL is enabled on Mezz cards */
3680 if (qdev->pdev->subsystem_device == 0x0068 ||
3681 qdev->pdev->subsystem_device == 0x0180)
3682 qdev->wol = WAKE_MAGIC;
3684 /* Start up the rx queues. */
3685 for (i = 0; i < qdev->rx_ring_count; i++) {
3686 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3687 if (status) {
3688 QPRINTK(qdev, IFUP, ERR,
3689 "Failed to start rx ring[%d].\n", i);
3690 return status;
3694 /* If there is more than one inbound completion queue
3695 * then download a RICB to configure RSS.
3697 if (qdev->rss_ring_count > 1) {
3698 status = ql_start_rss(qdev);
3699 if (status) {
3700 QPRINTK(qdev, IFUP, ERR, "Failed to start RSS.\n");
3701 return status;
3705 /* Start up the tx queues. */
3706 for (i = 0; i < qdev->tx_ring_count; i++) {
3707 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3708 if (status) {
3709 QPRINTK(qdev, IFUP, ERR,
3710 "Failed to start tx ring[%d].\n", i);
3711 return status;
3715 /* Initialize the port and set the max framesize. */
3716 status = qdev->nic_ops->port_initialize(qdev);
3717 if (status)
3718 QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
3720 /* Set up the MAC address and frame routing filter. */
3721 status = ql_cam_route_initialize(qdev);
3722 if (status) {
3723 QPRINTK(qdev, IFUP, ERR,
3724 "Failed to init CAM/Routing tables.\n");
3725 return status;
3728 /* Start NAPI for the RSS queues. */
3729 for (i = 0; i < qdev->rss_ring_count; i++) {
3730 QPRINTK(qdev, IFUP, DEBUG, "Enabling NAPI for rx_ring[%d].\n",
3732 napi_enable(&qdev->rx_ring[i].napi);
3735 return status;
3738 /* Issue soft reset to chip. */
3739 static int ql_adapter_reset(struct ql_adapter *qdev)
3741 u32 value;
3742 int status = 0;
3743 unsigned long end_jiffies;
3745 /* Clear all the entries in the routing table. */
3746 status = ql_clear_routing_entries(qdev);
3747 if (status) {
3748 QPRINTK(qdev, IFUP, ERR, "Failed to clear routing bits.\n");
3749 return status;
3752 end_jiffies = jiffies +
3753 max((unsigned long)1, usecs_to_jiffies(30));
3755 /* Stop management traffic. */
3756 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3758 /* Wait for the NIC and MGMNT FIFOs to empty. */
3759 ql_wait_fifo_empty(qdev);
3761 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3763 do {
3764 value = ql_read32(qdev, RST_FO);
3765 if ((value & RST_FO_FR) == 0)
3766 break;
3767 cpu_relax();
3768 } while (time_before(jiffies, end_jiffies));
3770 if (value & RST_FO_FR) {
3771 QPRINTK(qdev, IFDOWN, ERR,
3772 "ETIMEDOUT!!! errored out of resetting the chip!\n");
3773 status = -ETIMEDOUT;
3776 /* Resume management traffic. */
3777 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3778 return status;
3781 static void ql_display_dev_info(struct net_device *ndev)
3783 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3785 QPRINTK(qdev, PROBE, INFO,
3786 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3787 "XG Roll = %d, XG Rev = %d.\n",
3788 qdev->func,
3789 qdev->port,
3790 qdev->chip_rev_id & 0x0000000f,
3791 qdev->chip_rev_id >> 4 & 0x0000000f,
3792 qdev->chip_rev_id >> 8 & 0x0000000f,
3793 qdev->chip_rev_id >> 12 & 0x0000000f);
3794 QPRINTK(qdev, PROBE, INFO, "MAC address %pM\n", ndev->dev_addr);
3797 int ql_wol(struct ql_adapter *qdev)
3799 int status = 0;
3800 u32 wol = MB_WOL_DISABLE;
3802 /* The CAM is still intact after a reset, but if we
3803 * are doing WOL, then we may need to program the
3804 * routing regs. We would also need to issue the mailbox
3805 * commands to instruct the MPI what to do per the ethtool
3806 * settings.
3809 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3810 WAKE_MCAST | WAKE_BCAST)) {
3811 QPRINTK(qdev, IFDOWN, ERR,
3812 "Unsupported WOL paramter. qdev->wol = 0x%x.\n",
3813 qdev->wol);
3814 return -EINVAL;
3817 if (qdev->wol & WAKE_MAGIC) {
3818 status = ql_mb_wol_set_magic(qdev, 1);
3819 if (status) {
3820 QPRINTK(qdev, IFDOWN, ERR,
3821 "Failed to set magic packet on %s.\n",
3822 qdev->ndev->name);
3823 return status;
3824 } else
3825 QPRINTK(qdev, DRV, INFO,
3826 "Enabled magic packet successfully on %s.\n",
3827 qdev->ndev->name);
3829 wol |= MB_WOL_MAGIC_PKT;
3832 if (qdev->wol) {
3833 wol |= MB_WOL_MODE_ON;
3834 status = ql_mb_wol_mode(qdev, wol);
3835 QPRINTK(qdev, DRV, ERR, "WOL %s (wol code 0x%x) on %s\n",
3836 (status == 0) ? "Successfully set" : "Failed", wol,
3837 qdev->ndev->name);
3840 return status;
3843 static int ql_adapter_down(struct ql_adapter *qdev)
3845 int i, status = 0;
3847 ql_link_off(qdev);
3849 /* Don't kill the reset worker thread if we
3850 * are in the process of recovery.
3852 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3853 cancel_delayed_work_sync(&qdev->asic_reset_work);
3854 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3855 cancel_delayed_work_sync(&qdev->mpi_work);
3856 cancel_delayed_work_sync(&qdev->mpi_idc_work);
3857 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3858 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3860 for (i = 0; i < qdev->rss_ring_count; i++)
3861 napi_disable(&qdev->rx_ring[i].napi);
3863 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3865 ql_disable_interrupts(qdev);
3867 ql_tx_ring_clean(qdev);
3869 /* Call netif_napi_del() from common point.
3871 for (i = 0; i < qdev->rss_ring_count; i++)
3872 netif_napi_del(&qdev->rx_ring[i].napi);
3874 ql_free_rx_buffers(qdev);
3876 status = ql_adapter_reset(qdev);
3877 if (status)
3878 QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n",
3879 qdev->func);
3880 return status;
3883 static int ql_adapter_up(struct ql_adapter *qdev)
3885 int err = 0;
3887 err = ql_adapter_initialize(qdev);
3888 if (err) {
3889 QPRINTK(qdev, IFUP, INFO, "Unable to initialize adapter.\n");
3890 goto err_init;
3892 set_bit(QL_ADAPTER_UP, &qdev->flags);
3893 ql_alloc_rx_buffers(qdev);
3894 /* If the port is initialized and the
3895 * link is up then turn on the carrier.
3897 if ((ql_read32(qdev, STS) & qdev->port_init) &&
3898 (ql_read32(qdev, STS) & qdev->port_link_up))
3899 ql_link_on(qdev);
3900 ql_enable_interrupts(qdev);
3901 ql_enable_all_completion_interrupts(qdev);
3902 netif_tx_start_all_queues(qdev->ndev);
3904 return 0;
3905 err_init:
3906 ql_adapter_reset(qdev);
3907 return err;
3910 static void ql_release_adapter_resources(struct ql_adapter *qdev)
3912 ql_free_mem_resources(qdev);
3913 ql_free_irq(qdev);
3916 static int ql_get_adapter_resources(struct ql_adapter *qdev)
3918 int status = 0;
3920 if (ql_alloc_mem_resources(qdev)) {
3921 QPRINTK(qdev, IFUP, ERR, "Unable to allocate memory.\n");
3922 return -ENOMEM;
3924 status = ql_request_irq(qdev);
3925 return status;
3928 static int qlge_close(struct net_device *ndev)
3930 struct ql_adapter *qdev = netdev_priv(ndev);
3932 /* If we hit the pci_channel_io_perm_failure
3933 * condition, then we have already
3934 * brought the adapter down.
3936 if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
3937 QPRINTK(qdev, DRV, ERR, "EEH fatal did unload.\n");
3938 clear_bit(QL_EEH_FATAL, &qdev->flags);
3939 return 0;
3943 * Wait for device to recover from a reset.
3944 * (Rarely happens, but possible.)
3946 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3947 msleep(1);
3948 ql_adapter_down(qdev);
3949 ql_release_adapter_resources(qdev);
3950 return 0;
3953 static int ql_configure_rings(struct ql_adapter *qdev)
3955 int i;
3956 struct rx_ring *rx_ring;
3957 struct tx_ring *tx_ring;
3958 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
3959 unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
3960 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
3962 qdev->lbq_buf_order = get_order(lbq_buf_len);
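/*
 * Illustrative sizing note (buffer sizes assumed, see qlge.h): with a
 * jumbo 9000-byte MTU the large-buffer length is LARGE_BUFFER_MAX_SIZE.
 * Assuming that is 8192 bytes on a 4 KiB-page system, get_order(8192)
 * is 1, so each large receive buffer comes from an order-1 (two-page)
 * allocation.
 */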
3964 /* In a perfect world we have one RSS ring for each CPU
3965 * and each has its own vector. To do that we ask for
3966 * cpu_cnt vectors. ql_enable_msix() will adjust the
3967 * vector count to what we actually get. We then
3968 * allocate an RSS ring for each.
3969 * Essentially, we are doing min(cpu_count, msix_vector_count).
3970 */
3971 qdev->intr_count = cpu_cnt;
3972 ql_enable_msix(qdev);
3973 /* Adjust the RSS ring count to the actual vector count. */
3974 qdev->rss_ring_count = qdev->intr_count;
3975 qdev->tx_ring_count = cpu_cnt;
3976 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
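/*
 * Worked example (illustrative numbers only): with 8 online CPUs but
 * only 4 MSI-X vectors granted by ql_enable_msix(), the driver ends up
 * with rss_ring_count = 4, tx_ring_count = 8 and rx_ring_count = 12;
 * the 8 outbound completion queues then occupy cq_id 4 through 11.
 */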
3978 for (i = 0; i < qdev->tx_ring_count; i++) {
3979 tx_ring = &qdev->tx_ring[i];
3980 memset((void *)tx_ring, 0, sizeof(*tx_ring));
3981 tx_ring->qdev = qdev;
3982 tx_ring->wq_id = i;
3983 tx_ring->wq_len = qdev->tx_ring_size;
3984 tx_ring->wq_size =
3985 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
3987 /*
3988 * The completion queue ID for the tx rings starts
3989 * immediately after the rss rings.
3990 */
3991 tx_ring->cq_id = qdev->rss_ring_count + i;
3994 for (i = 0; i < qdev->rx_ring_count; i++) {
3995 rx_ring = &qdev->rx_ring[i];
3996 memset((void *)rx_ring, 0, sizeof(*rx_ring));
3997 rx_ring->qdev = qdev;
3998 rx_ring->cq_id = i;
3999 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
4000 if (i < qdev->rss_ring_count) {
4001 /*
4002 * Inbound (RSS) queues.
4003 */
4004 rx_ring->cq_len = qdev->rx_ring_size;
4005 rx_ring->cq_size =
4006 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4007 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4008 rx_ring->lbq_size =
4009 rx_ring->lbq_len * sizeof(__le64);
4010 rx_ring->lbq_buf_size = (u16)lbq_buf_len;
4011 QPRINTK(qdev, IFUP, DEBUG,
4012 "lbq_buf_size %d, order = %d\n",
4013 rx_ring->lbq_buf_size, qdev->lbq_buf_order);
4014 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4015 rx_ring->sbq_size =
4016 rx_ring->sbq_len * sizeof(__le64);
4017 rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
4018 rx_ring->type = RX_Q;
4019 } else {
4020 /*
4021 * Outbound queue handles outbound completions only.
4022 */
4023 /* outbound cq is same size as tx_ring it services. */
4024 rx_ring->cq_len = qdev->tx_ring_size;
4025 rx_ring->cq_size =
4026 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4027 rx_ring->lbq_len = 0;
4028 rx_ring->lbq_size = 0;
4029 rx_ring->lbq_buf_size = 0;
4030 rx_ring->sbq_len = 0;
4031 rx_ring->sbq_size = 0;
4032 rx_ring->sbq_buf_size = 0;
4033 rx_ring->type = TX_Q;
4036 return 0;
4039 static int qlge_open(struct net_device *ndev)
4041 int err = 0;
4042 struct ql_adapter *qdev = netdev_priv(ndev);
4044 err = ql_adapter_reset(qdev);
4045 if (err)
4046 return err;
4048 err = ql_configure_rings(qdev);
4049 if (err)
4050 return err;
4052 err = ql_get_adapter_resources(qdev);
4053 if (err)
4054 goto error_up;
4056 err = ql_adapter_up(qdev);
4057 if (err)
4058 goto error_up;
4060 return err;
4062 error_up:
4063 ql_release_adapter_resources(qdev);
4064 return err;
4065 }
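/*
 * Resize the large receive buffers after an MTU change: wait for any
 * outstanding reset, cycle the adapter down, recompute
 * lbq_buf_size/lbq_buf_order for every RSS ring and bring the adapter
 * back up.  On failure the device is closed rather than left
 * half-configured.
 */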
4067 static int ql_change_rx_buffers(struct ql_adapter *qdev)
4069 struct rx_ring *rx_ring;
4070 int i, status;
4071 u32 lbq_buf_len;
4073 /* Wait for an outstanding reset to complete. */
4074 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4075 int i = 3;
4076 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4077 QPRINTK(qdev, IFUP, ERR,
4078 "Waiting for adapter UP...\n");
4079 ssleep(1);
4082 if (!i) {
4083 QPRINTK(qdev, IFUP, ERR,
4084 "Timed out waiting for adapter UP\n");
4085 return -ETIMEDOUT;
4089 status = ql_adapter_down(qdev);
4090 if (status)
4091 goto error;
4093 /* Get the new rx buffer size. */
4094 lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4095 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4096 qdev->lbq_buf_order = get_order(lbq_buf_len);
4098 for (i = 0; i < qdev->rss_ring_count; i++) {
4099 rx_ring = &qdev->rx_ring[i];
4100 /* Set the new size. */
4101 rx_ring->lbq_buf_size = lbq_buf_len;
4104 status = ql_adapter_up(qdev);
4105 if (status)
4106 goto error;
4108 return status;
4109 error:
4110 QPRINTK(qdev, IFUP, ALERT,
4111 "Driver up/down cycle failed, closing device.\n");
4112 set_bit(QL_ADAPTER_UP, &qdev->flags);
4113 dev_close(qdev->ndev);
4114 return status;
4117 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4119 struct ql_adapter *qdev = netdev_priv(ndev);
4120 int status;
4122 if (ndev->mtu == 1500 && new_mtu == 9000) {
4123 QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
4124 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
4125 QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
4126 } else
4127 return -EINVAL;
4129 queue_delayed_work(qdev->workqueue,
4130 &qdev->mpi_port_cfg_work, 3*HZ);
4132 ndev->mtu = new_mtu;
4134 if (!netif_running(qdev->ndev)) {
4135 return 0;
4138 status = ql_change_rx_buffers(qdev);
4139 if (status) {
4140 QPRINTK(qdev, IFUP, ERR,
4141 "Changing MTU failed.\n");
4144 return status;
4147 static struct net_device_stats *qlge_get_stats(struct net_device
4148 *ndev)
4150 struct ql_adapter *qdev = netdev_priv(ndev);
4151 struct rx_ring *rx_ring = &qdev->rx_ring[0];
4152 struct tx_ring *tx_ring = &qdev->tx_ring[0];
4153 unsigned long pkts, mcast, dropped, errors, bytes;
4154 int i;
4156 /* Get RX stats. */
4157 pkts = mcast = dropped = errors = bytes = 0;
4158 for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4159 pkts += rx_ring->rx_packets;
4160 bytes += rx_ring->rx_bytes;
4161 dropped += rx_ring->rx_dropped;
4162 errors += rx_ring->rx_errors;
4163 mcast += rx_ring->rx_multicast;
4165 ndev->stats.rx_packets = pkts;
4166 ndev->stats.rx_bytes = bytes;
4167 ndev->stats.rx_dropped = dropped;
4168 ndev->stats.rx_errors = errors;
4169 ndev->stats.multicast = mcast;
4171 /* Get TX stats. */
4172 pkts = errors = bytes = 0;
4173 for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4174 pkts += tx_ring->tx_packets;
4175 bytes += tx_ring->tx_bytes;
4176 errors += tx_ring->tx_errors;
4178 ndev->stats.tx_packets = pkts;
4179 ndev->stats.tx_bytes = bytes;
4180 ndev->stats.tx_errors = errors;
4181 return &ndev->stats;
4184 static void qlge_set_multicast_list(struct net_device *ndev)
4186 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
4187 struct dev_mc_list *mc_ptr;
4188 int i, status;
4190 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4191 if (status)
4192 return;
4193 /*
4194 * Set or clear promiscuous mode if a
4195 * transition is taking place.
4196 */
4197 if (ndev->flags & IFF_PROMISC) {
4198 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4199 if (ql_set_routing_reg
4200 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4201 QPRINTK(qdev, HW, ERR,
4202 "Failed to set promiscous mode.\n");
4203 } else {
4204 set_bit(QL_PROMISCUOUS, &qdev->flags);
4207 } else {
4208 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4209 if (ql_set_routing_reg
4210 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4211 QPRINTK(qdev, HW, ERR,
4212 "Failed to clear promiscous mode.\n");
4213 } else {
4214 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4219 /*
4220 * Set or clear all multicast mode if a
4221 * transition is taking place.
4222 */
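/*
 * Illustrative note: a host joined to more multicast groups than the
 * hardware filter can hold (MAX_MULTICAST_ENTRIES) is switched to the
 * all-multi routing slot below; once the list shrinks again and
 * IFF_ALLMULTI is clear, the slot is released and per-address CAM
 * filtering resumes.
 */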
4223 if ((ndev->flags & IFF_ALLMULTI) ||
4224 (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
4225 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4226 if (ql_set_routing_reg
4227 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4228 QPRINTK(qdev, HW, ERR,
4229 "Failed to set all-multi mode.\n");
4230 } else {
4231 set_bit(QL_ALLMULTI, &qdev->flags);
4234 } else {
4235 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4236 if (ql_set_routing_reg
4237 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4238 QPRINTK(qdev, HW, ERR,
4239 "Failed to clear all-multi mode.\n");
4240 } else {
4241 clear_bit(QL_ALLMULTI, &qdev->flags);
4246 if (!netdev_mc_empty(ndev)) {
4247 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4248 if (status)
4249 goto exit;
4250 for (i = 0, mc_ptr = ndev->mc_list; mc_ptr;
4251 i++, mc_ptr = mc_ptr->next)
4252 if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr,
4253 MAC_ADDR_TYPE_MULTI_MAC, i)) {
4254 QPRINTK(qdev, HW, ERR,
4255 "Failed to loadmulticast address.\n");
4256 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4257 goto exit;
4259 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4260 if (ql_set_routing_reg
4261 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4262 QPRINTK(qdev, HW, ERR,
4263 "Failed to set multicast match mode.\n");
4264 } else {
4265 set_bit(QL_ALLMULTI, &qdev->flags);
4268 exit:
4269 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
4272 static int qlge_set_mac_address(struct net_device *ndev, void *p)
4274 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
4275 struct sockaddr *addr = p;
4276 int status;
4278 if (!is_valid_ether_addr(addr->sa_data))
4279 return -EADDRNOTAVAIL;
4280 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4282 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4283 if (status)
4284 return status;
4285 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4286 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
4287 if (status)
4288 QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
4289 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4290 return status;
4293 static void qlge_tx_timeout(struct net_device *ndev)
4295 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
4296 ql_queue_asic_error(qdev);
4299 static void ql_asic_reset_work(struct work_struct *work)
4301 struct ql_adapter *qdev =
4302 container_of(work, struct ql_adapter, asic_reset_work.work);
4303 int status;
4304 rtnl_lock();
4305 status = ql_adapter_down(qdev);
4306 if (status)
4307 goto error;
4309 status = ql_adapter_up(qdev);
4310 if (status)
4311 goto error;
4313 /* Restore rx mode. */
4314 clear_bit(QL_ALLMULTI, &qdev->flags);
4315 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4316 qlge_set_multicast_list(qdev->ndev);
4318 rtnl_unlock();
4319 return;
4320 error:
4321 QPRINTK(qdev, IFUP, ALERT,
4322 "Driver up/down cycle failed, closing device\n");
4324 set_bit(QL_ADAPTER_UP, &qdev->flags);
4325 dev_close(qdev->ndev);
4326 rtnl_unlock();
4329 static struct nic_operations qla8012_nic_ops = {
4330 .get_flash = ql_get_8012_flash_params,
4331 .port_initialize = ql_8012_port_initialize,
4334 static struct nic_operations qla8000_nic_ops = {
4335 .get_flash = ql_get_8000_flash_params,
4336 .port_initialize = ql_8000_port_initialize,
4339 /* Find the pcie function number for the other NIC
4340 * on this chip. Since both NIC functions share a
4341 * common firmware we have the lowest enabled function
4342 * do any common work. Examples would be resetting
4343 * after a fatal firmware error, or doing a firmware
4344 * coredump.
4345 */
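/*
 * Illustrative example: if MPI_TEST_FUNC_PORT_CFG reports NIC functions
 * 0 and 1, the instance bound to func 0 records alt_func = 1 and vice
 * versa; a func that matches neither NIC function is treated as an
 * error (-EIO).
 */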
4346 static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
4348 int status = 0;
4349 u32 temp;
4350 u32 nic_func1, nic_func2;
4352 status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4353 &temp);
4354 if (status)
4355 return status;
4357 nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4358 MPI_TEST_NIC_FUNC_MASK);
4359 nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4360 MPI_TEST_NIC_FUNC_MASK);
4362 if (qdev->func == nic_func1)
4363 qdev->alt_func = nic_func2;
4364 else if (qdev->func == nic_func2)
4365 qdev->alt_func = nic_func1;
4366 else
4367 status = -EIO;
4369 return status;
4372 static int ql_get_board_info(struct ql_adapter *qdev)
4374 int status;
4375 qdev->func =
4376 (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
4377 if (qdev->func > 3)
4378 return -EIO;
4380 status = ql_get_alt_pcie_func(qdev);
4381 if (status)
4382 return status;
4384 qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
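/*
 * Illustrative mapping: the lower-numbered NIC function becomes port 0
 * (XGMAC0 semaphore, FUNC0 mailboxes) and the higher-numbered one
 * becomes port 1 (XGMAC1 semaphore, FUNC2 mailboxes), as set up below.
 */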
4385 if (qdev->port) {
4386 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4387 qdev->port_link_up = STS_PL1;
4388 qdev->port_init = STS_PI1;
4389 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4390 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4391 } else {
4392 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4393 qdev->port_link_up = STS_PL0;
4394 qdev->port_init = STS_PI0;
4395 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4396 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4398 qdev->chip_rev_id = ql_read32(qdev, REV_ID);
4399 qdev->device_id = qdev->pdev->device;
4400 if (qdev->device_id == QLGE_DEVICE_ID_8012)
4401 qdev->nic_ops = &qla8012_nic_ops;
4402 else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4403 qdev->nic_ops = &qla8000_nic_ops;
4404 return status;
4407 static void ql_release_all(struct pci_dev *pdev)
4409 struct net_device *ndev = pci_get_drvdata(pdev);
4410 struct ql_adapter *qdev = netdev_priv(ndev);
4412 if (qdev->workqueue) {
4413 destroy_workqueue(qdev->workqueue);
4414 qdev->workqueue = NULL;
4417 if (qdev->reg_base)
4418 iounmap(qdev->reg_base);
4419 if (qdev->doorbell_area)
4420 iounmap(qdev->doorbell_area);
4421 vfree(qdev->mpi_coredump);
4422 pci_release_regions(pdev);
4423 pci_set_drvdata(pdev, NULL);
4426 static int __devinit ql_init_device(struct pci_dev *pdev,
4427 struct net_device *ndev, int cards_found)
4429 struct ql_adapter *qdev = netdev_priv(ndev);
4430 int err = 0;
4432 memset((void *)qdev, 0, sizeof(*qdev));
4433 err = pci_enable_device(pdev);
4434 if (err) {
4435 dev_err(&pdev->dev, "PCI device enable failed.\n");
4436 return err;
4439 qdev->ndev = ndev;
4440 qdev->pdev = pdev;
4441 pci_set_drvdata(pdev, ndev);
4443 /* Set PCIe read request size */
4444 err = pcie_set_readrq(pdev, 4096);
4445 if (err) {
4446 dev_err(&pdev->dev, "Set readrq failed.\n");
4447 goto err_out1;
4450 err = pci_request_regions(pdev, DRV_NAME);
4451 if (err) {
4452 dev_err(&pdev->dev, "PCI region request failed.\n");
4453 return err;
4456 pci_set_master(pdev);
4457 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4458 set_bit(QL_DMA64, &qdev->flags);
4459 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4460 } else {
4461 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4462 if (!err)
4463 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
4466 if (err) {
4467 dev_err(&pdev->dev, "No usable DMA configuration.\n");
4468 goto err_out2;
4471 /* Set PCIe reset type for EEH to fundamental. */
4472 pdev->needs_freset = 1;
4473 pci_save_state(pdev);
4474 qdev->reg_base =
4475 ioremap_nocache(pci_resource_start(pdev, 1),
4476 pci_resource_len(pdev, 1));
4477 if (!qdev->reg_base) {
4478 dev_err(&pdev->dev, "Register mapping failed.\n");
4479 err = -ENOMEM;
4480 goto err_out2;
4483 qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4484 qdev->doorbell_area =
4485 ioremap_nocache(pci_resource_start(pdev, 3),
4486 pci_resource_len(pdev, 3));
4487 if (!qdev->doorbell_area) {
4488 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4489 err = -ENOMEM;
4490 goto err_out2;
4493 err = ql_get_board_info(qdev);
4494 if (err) {
4495 dev_err(&pdev->dev, "Register access failed.\n");
4496 err = -EIO;
4497 goto err_out2;
4499 qdev->msg_enable = netif_msg_init(debug, default_msg);
4500 spin_lock_init(&qdev->hw_lock);
4501 spin_lock_init(&qdev->stats_lock);
4503 if (qlge_mpi_coredump) {
4504 qdev->mpi_coredump =
4505 vmalloc(sizeof(struct ql_mpi_coredump));
4506 if (qdev->mpi_coredump == NULL) {
4507 dev_err(&pdev->dev, "Coredump alloc failed.\n");
4508 err = -ENOMEM;
4509 goto err_out2;
4511 if (qlge_force_coredump)
4512 set_bit(QL_FRC_COREDUMP, &qdev->flags);
4514 /* make sure the EEPROM is good */
4515 err = qdev->nic_ops->get_flash(qdev);
4516 if (err) {
4517 dev_err(&pdev->dev, "Invalid FLASH.\n");
4518 goto err_out2;
4521 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
4523 /* Set up the default ring sizes. */
4524 qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4525 qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4527 /* Set up the coalescing parameters. */
4528 qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4529 qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4530 qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4531 qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4533 /*
4534 * Set up the operating parameters.
4535 */
4536 qdev->rx_csum = 1;
4537 qdev->workqueue = create_singlethread_workqueue(ndev->name);
4538 INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4539 INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4540 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
4541 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
4542 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
4543 INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
4544 init_completion(&qdev->ide_completion);
4546 if (!cards_found) {
4547 dev_info(&pdev->dev, "%s\n", DRV_STRING);
4548 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4549 DRV_NAME, DRV_VERSION);
4551 return 0;
4552 err_out2:
4553 ql_release_all(pdev);
4554 err_out1:
4555 pci_disable_device(pdev);
4556 return err;
4559 static const struct net_device_ops qlge_netdev_ops = {
4560 .ndo_open = qlge_open,
4561 .ndo_stop = qlge_close,
4562 .ndo_start_xmit = qlge_send,
4563 .ndo_change_mtu = qlge_change_mtu,
4564 .ndo_get_stats = qlge_get_stats,
4565 .ndo_set_multicast_list = qlge_set_multicast_list,
4566 .ndo_set_mac_address = qlge_set_mac_address,
4567 .ndo_validate_addr = eth_validate_addr,
4568 .ndo_tx_timeout = qlge_tx_timeout,
4569 .ndo_vlan_rx_register = qlge_vlan_rx_register,
4570 .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
4571 .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
4574 static void ql_timer(unsigned long data)
4576 struct ql_adapter *qdev = (struct ql_adapter *)data;
4577 u32 var = 0;
4579 var = ql_read32(qdev, STS);
4580 if (pci_channel_offline(qdev->pdev)) {
4581 QPRINTK(qdev, IFUP, ERR, "EEH STS = 0x%.08x.\n", var);
4582 return;
4585 qdev->timer.expires = jiffies + (5*HZ);
4586 add_timer(&qdev->timer);
4589 static int __devinit qlge_probe(struct pci_dev *pdev,
4590 const struct pci_device_id *pci_entry)
4592 struct net_device *ndev = NULL;
4593 struct ql_adapter *qdev = NULL;
4594 static int cards_found = 0;
4595 int err = 0;
4597 ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4598 min(MAX_CPUS, (int)num_online_cpus()));
4599 if (!ndev)
4600 return -ENOMEM;
4602 err = ql_init_device(pdev, ndev, cards_found);
4603 if (err < 0) {
4604 free_netdev(ndev);
4605 return err;
4608 qdev = netdev_priv(ndev);
4609 SET_NETDEV_DEV(ndev, &pdev->dev);
4610 ndev->features = (0
4611 | NETIF_F_IP_CSUM
4612 | NETIF_F_SG
4613 | NETIF_F_TSO
4614 | NETIF_F_TSO6
4615 | NETIF_F_TSO_ECN
4616 | NETIF_F_HW_VLAN_TX
4617 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);
4618 ndev->features |= NETIF_F_GRO;
4620 if (test_bit(QL_DMA64, &qdev->flags))
4621 ndev->features |= NETIF_F_HIGHDMA;
4623 /*
4624 * Set up net_device structure.
4625 */
4626 ndev->tx_queue_len = qdev->tx_ring_size;
4627 ndev->irq = pdev->irq;
4629 ndev->netdev_ops = &qlge_netdev_ops;
4630 SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
4631 ndev->watchdog_timeo = 10 * HZ;
4633 err = register_netdev(ndev);
4634 if (err) {
4635 dev_err(&pdev->dev, "net device registration failed.\n");
4636 ql_release_all(pdev);
4637 pci_disable_device(pdev);
4638 return err;
4640 /* Start up the timer to trigger EEH if
4641 * the bus goes dead
4642 */
4643 init_timer_deferrable(&qdev->timer);
4644 qdev->timer.data = (unsigned long)qdev;
4645 qdev->timer.function = ql_timer;
4646 qdev->timer.expires = jiffies + (5*HZ);
4647 add_timer(&qdev->timer);
4648 ql_link_off(qdev);
4649 ql_display_dev_info(ndev);
4650 atomic_set(&qdev->lb_count, 0);
4651 cards_found++;
4652 return 0;
4655 netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4657 return qlge_send(skb, ndev);
4660 int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4662 return ql_clean_inbound_rx_ring(rx_ring, budget);
4665 static void __devexit qlge_remove(struct pci_dev *pdev)
4667 struct net_device *ndev = pci_get_drvdata(pdev);
4668 struct ql_adapter *qdev = netdev_priv(ndev);
4669 del_timer_sync(&qdev->timer);
4670 unregister_netdev(ndev);
4671 ql_release_all(pdev);
4672 pci_disable_device(pdev);
4673 free_netdev(ndev);
4676 /* Clean up resources without touching hardware. */
4677 static void ql_eeh_close(struct net_device *ndev)
4679 int i;
4680 struct ql_adapter *qdev = netdev_priv(ndev);
4682 if (netif_carrier_ok(ndev)) {
4683 netif_carrier_off(ndev);
4684 netif_stop_queue(ndev);
4687 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
4688 cancel_delayed_work_sync(&qdev->asic_reset_work);
4689 cancel_delayed_work_sync(&qdev->mpi_reset_work);
4690 cancel_delayed_work_sync(&qdev->mpi_work);
4691 cancel_delayed_work_sync(&qdev->mpi_idc_work);
4692 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
4693 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
4695 for (i = 0; i < qdev->rss_ring_count; i++)
4696 netif_napi_del(&qdev->rx_ring[i].napi);
4698 clear_bit(QL_ADAPTER_UP, &qdev->flags);
4699 ql_tx_ring_clean(qdev);
4700 ql_free_rx_buffers(qdev);
4701 ql_release_adapter_resources(qdev);
4704 /*
4705 * This callback is called by the PCI subsystem whenever
4706 * a PCI bus error is detected.
4707 */
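/*
 * Informal recovery sketch (derived from the handlers below): a frozen
 * channel detaches the netdev, tears down software state via
 * ql_eeh_close() and requests a slot reset; qlge_io_slot_reset()
 * re-enables the PCI device and resets the ASIC; qlge_io_resume() then
 * reopens the interface if it was running and re-arms the EEH watchdog
 * timer.  A permanent failure sets QL_EEH_FATAL so a later qlge_close()
 * knows the adapter is already down.
 */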
4708 static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4709 enum pci_channel_state state)
4711 struct net_device *ndev = pci_get_drvdata(pdev);
4712 struct ql_adapter *qdev = netdev_priv(ndev);
4714 switch (state) {
4715 case pci_channel_io_normal:
4716 return PCI_ERS_RESULT_CAN_RECOVER;
4717 case pci_channel_io_frozen:
4718 netif_device_detach(ndev);
4719 if (netif_running(ndev))
4720 ql_eeh_close(ndev);
4721 pci_disable_device(pdev);
4722 return PCI_ERS_RESULT_NEED_RESET;
4723 case pci_channel_io_perm_failure:
4724 dev_err(&pdev->dev,
4725 "%s: pci_channel_io_perm_failure.\n", __func__);
4726 ql_eeh_close(ndev);
4727 set_bit(QL_EEH_FATAL, &qdev->flags);
4728 return PCI_ERS_RESULT_DISCONNECT;
4731 /* Request a slot reset. */
4732 return PCI_ERS_RESULT_NEED_RESET;
4735 /*
4736 * This callback is called after the PCI bus has been reset.
4737 * Basically, this tries to restart the card from scratch.
4738 * This is a shortened version of the device probe/discovery code;
4739 * it resembles the first half of the () routine.
4740 */
4741 static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4743 struct net_device *ndev = pci_get_drvdata(pdev);
4744 struct ql_adapter *qdev = netdev_priv(ndev);
4746 pdev->error_state = pci_channel_io_normal;
4748 pci_restore_state(pdev);
4749 if (pci_enable_device(pdev)) {
4750 QPRINTK(qdev, IFUP, ERR,
4751 "Cannot re-enable PCI device after reset.\n");
4752 return PCI_ERS_RESULT_DISCONNECT;
4754 pci_set_master(pdev);
4756 if (ql_adapter_reset(qdev)) {
4757 QPRINTK(qdev, DRV, ERR, "reset FAILED!\n");
4758 set_bit(QL_EEH_FATAL, &qdev->flags);
4759 return PCI_ERS_RESULT_DISCONNECT;
4762 return PCI_ERS_RESULT_RECOVERED;
4765 static void qlge_io_resume(struct pci_dev *pdev)
4767 struct net_device *ndev = pci_get_drvdata(pdev);
4768 struct ql_adapter *qdev = netdev_priv(ndev);
4769 int err = 0;
4771 if (netif_running(ndev)) {
4772 err = qlge_open(ndev);
4773 if (err) {
4774 QPRINTK(qdev, IFUP, ERR,
4775 "Device initialization failed after reset.\n");
4776 return;
4778 } else {
4779 QPRINTK(qdev, IFUP, ERR,
4780 "Device was not running prior to EEH.\n");
4782 qdev->timer.expires = jiffies + (5*HZ);
4783 add_timer(&qdev->timer);
4784 netif_device_attach(ndev);
4787 static struct pci_error_handlers qlge_err_handler = {
4788 .error_detected = qlge_io_error_detected,
4789 .slot_reset = qlge_io_slot_reset,
4790 .resume = qlge_io_resume,
4793 static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4795 struct net_device *ndev = pci_get_drvdata(pdev);
4796 struct ql_adapter *qdev = netdev_priv(ndev);
4797 int err;
4799 netif_device_detach(ndev);
4800 del_timer_sync(&qdev->timer);
4802 if (netif_running(ndev)) {
4803 err = ql_adapter_down(qdev);
4804 if (err)
4805 return err;
4808 ql_wol(qdev);
4809 err = pci_save_state(pdev);
4810 if (err)
4811 return err;
4813 pci_disable_device(pdev);
4815 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4817 return 0;
4820 #ifdef CONFIG_PM
4821 static int qlge_resume(struct pci_dev *pdev)
4823 struct net_device *ndev = pci_get_drvdata(pdev);
4824 struct ql_adapter *qdev = netdev_priv(ndev);
4825 int err;
4827 pci_set_power_state(pdev, PCI_D0);
4828 pci_restore_state(pdev);
4829 err = pci_enable_device(pdev);
4830 if (err) {
4831 QPRINTK(qdev, IFUP, ERR, "Cannot enable PCI device from suspend\n");
4832 return err;
4834 pci_set_master(pdev);
4836 pci_enable_wake(pdev, PCI_D3hot, 0);
4837 pci_enable_wake(pdev, PCI_D3cold, 0);
4839 if (netif_running(ndev)) {
4840 err = ql_adapter_up(qdev);
4841 if (err)
4842 return err;
4845 qdev->timer.expires = jiffies + (5*HZ);
4846 add_timer(&qdev->timer);
4847 netif_device_attach(ndev);
4849 return 0;
4851 #endif /* CONFIG_PM */
4853 static void qlge_shutdown(struct pci_dev *pdev)
4855 qlge_suspend(pdev, PMSG_SUSPEND);
4858 static struct pci_driver qlge_driver = {
4859 .name = DRV_NAME,
4860 .id_table = qlge_pci_tbl,
4861 .probe = qlge_probe,
4862 .remove = __devexit_p(qlge_remove),
4863 #ifdef CONFIG_PM
4864 .suspend = qlge_suspend,
4865 .resume = qlge_resume,
4866 #endif
4867 .shutdown = qlge_shutdown,
4868 .err_handler = &qlge_err_handler
4871 static int __init qlge_init_module(void)
4873 return pci_register_driver(&qlge_driver);
4876 static void __exit qlge_exit(void)
4878 pci_unregister_driver(&qlge_driver);
4881 module_init(qlge_init_module);
4882 module_exit(qlge_exit);