/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author: Linux qlge network device driver by
 *         Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <net/ip6_checksum.h>

#include "qlge.h"
char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
static const u32 default_msg =
    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/* NETIF_MSG_TIMER |	*/
    NETIF_MSG_IFDOWN |
    NETIF_MSG_IFUP |
    NETIF_MSG_RX_ERR |
    NETIF_MSG_TX_ERR |
/* NETIF_MSG_TX_QUEUED | */
/* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
/* NETIF_MSG_PKTDATA | */
    NETIF_MSG_HW | NETIF_MSG_WOL | 0;
static int debug = 0x00007fff;	/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
static int irq_type = MSIX_IRQ;
module_param(irq_type, int, MSIX_IRQ);
MODULE_PARM_DESC(irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
static struct pci_device_id qlge_pci_tbl[] __devinitdata = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
/* This hardware semaphore causes exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
	u32 sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		QPRINTK(qdev, PROBE, ALERT, "Bad Semaphore mask!.\n");
		return -EINVAL;
	}

	ql_write32(qdev, SEM, sem_bits | sem_mask);
	return !(ql_read32(qdev, SEM) & sem_bits);
}
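/* Note (editorial, illustrative): the SEM write above pairs a field-select
 * mask (sem_mask) with the value bits (sem_bits), the same mask-and-value
 * convention used for INTR_EN later in this file.  For example, taking the
 * flash semaphore writes (SEM_SET << SEM_FLASH_SHIFT) | SEM_FLASH_MASK and
 * then reads SEM back; if the field reads back clear, another function
 * (NIC, MPI or FC firmware) already owns that semaphore and the trylock
 * reports failure.
 */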
int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
	unsigned int wait_count = 30;

	do {
		if (!ql_sem_trylock(qdev, sem_mask))
			return 0;
		udelay(100);
	} while (--wait_count);
	return -ETIMEDOUT;
}
void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
	ql_write32(qdev, SEM, sem_mask);
	ql_read32(qdev, SEM);	/* flush */
}
/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialize
 * process, but is also used in kernel thread API such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
	u32 temp;
	int count = UDELAY_COUNT;

	while (count) {
		temp = ql_read32(qdev, reg);

		/* check for errors */
		if (temp & err_bit) {
			QPRINTK(qdev, PROBE, ALERT,
				"register 0x%.08x access error, value = 0x%.08x!.\n",
				reg, temp);
			return -EIO;
		} else if (temp & bit)
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	QPRINTK(qdev, PROBE, ALERT,
		"Timed out waiting for reg %x to come ready.\n", reg);
	return -ETIMEDOUT;
}
/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
	int count = UDELAY_COUNT;
	u32 temp;

	while (count) {
		temp = ql_read32(qdev, CFG);
		if (temp & CFG_LE)
			return -EIO;
		if (!(temp & bit))
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	return -ETIMEDOUT;
}
/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
		 u16 q_id)
{
	u64 map;
	int status = 0;
	int direction;
	u32 mask;
	u32 value;

	direction =
	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
	    PCI_DMA_FROMDEVICE;

	map = pci_map_single(qdev->pdev, ptr, size, direction);
	if (pci_dma_mapping_error(qdev->pdev, map)) {
		QPRINTK(qdev, IFUP, ERR, "Couldn't map DMA area.\n");
		return -ENOMEM;
	}

	status = ql_wait_cfg(qdev, bit);
	if (status) {
		QPRINTK(qdev, IFUP, ERR,
			"Timed out waiting for CFG to come ready.\n");
		goto exit;
	}

	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
	if (status)
		goto exit;
	ql_write32(qdev, ICB_L, (u32) map);
	ql_write32(qdev, ICB_H, (u32) (map >> 32));
	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */

	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	ql_write32(qdev, CFG, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = ql_wait_cfg(qdev, bit);
exit:
	pci_unmap_single(qdev->pdev, map, size, direction);
	return status;
}
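/* Note (editorial): ql_write_cfg() uses the same mask-and-value convention
 * as the semaphore register: the upper half of the CFG write
 * (CFG_Q_MASK | (bit << 16)) selects which bits the hardware may change,
 * while the lower half carries the command bit plus the queue id.  An
 * illustrative download of a completion-queue init control block (cqicb
 * here is a hypothetical local; the actual callers live in the ring-setup
 * paths elsewhere in this file) might look like:
 *
 *	status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb), CFG_LCQ,
 *			      rx_ring->cq_id);
 */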
/* Get a specific MAC address from the CAM. Used for debug and reg dump. */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
			u32 *value)
{
	u32 offset = 0;
	int status;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			if (type == MAC_ADDR_TYPE_CAM_MAC) {
				status =
				    ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
				if (status)
					goto exit;
				ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
					   (index << MAC_ADDR_IDX_SHIFT) | /* index */
					   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
				status =
				    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
					MAC_ADDR_MR, 0);
				if (status)
					goto exit;
				*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			}
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		QPRINTK(qdev, IFUP, CRIT,
			"Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}
/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
			       u16 index)
{
	u32 offset = 0;
	int status = 0;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			u32 cam_output;
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower =
			    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
			    (addr[5]);

			QPRINTK(qdev, IFUP, DEBUG,
				"Adding %s address %pM"
				" at index %d in the CAM.\n",
				((type ==
				  MAC_ADDR_TYPE_MULTI_MAC) ? "MULTICAST" :
				 "UNICAST"), addr, index);

			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset) |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type);	/* type */
			/* This field should also include the queue id
			   and possibly the function id.  Right now we hardcode
			   the route field to NIC core.
			 */
			if (type == MAC_ADDR_TYPE_CAM_MAC) {
				cam_output = (CAM_OUT_ROUTE_NIC |
					      (qdev->
					       func << CAM_OUT_FUNC_SHIFT) |
					      (qdev->
					       rss_ring_first_cq_id <<
					       CAM_OUT_CQ_ID_SHIFT));
				if (qdev->vlgrp)
					cam_output |= CAM_OUT_RV;
				/* route to NIC core */
				ql_write32(qdev, MAC_ADDR_DATA, cam_output);
			}
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
		{
			u32 enable_bit = *((u32 *) &addr[0]);
			/* For VLAN, the addr actually holds a bit that
			 * either enables or disables the vlan id we are
			 * addressing. It's either MAC_ADDR_E on or off.
			 * That's bit-27 we're talking about.
			 */
			QPRINTK(qdev, IFUP, INFO, "%s VLAN ID %d %s the CAM.\n",
				(enable_bit ? "Adding" : "Removing"),
				index, (enable_bit ? "to" : "from"));

			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, offset |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type |	/* type */
				   enable_bit);	/* enable/disable */
			break;
		}
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		QPRINTK(qdev, IFUP, CRIT,
			"Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}
/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
{
	int status = 0;

	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
	if (status)
		goto exit;

	ql_write32(qdev, RT_IDX,
		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
	if (status)
		goto exit;
	*value = ql_read32(qdev, RT_DATA);
exit:
	return status;
}
/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
			      int enable)
{
	int status = -EINVAL;	/* Return error if no mask match. */
	u32 value = 0;

	QPRINTK(qdev, IFUP, DEBUG,
		"%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
		(enable ? "Adding" : "Removing"),
		((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""),
		((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""),
		((index ==
		  RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""),
		((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""),
		((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""),
		((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""),
		((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""),
		((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""),
		((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""),
		((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""),
		((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""),
		((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""),
		((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""),
		((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""),
		((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""),
		((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""),
		(enable ? "to" : "from"));

	switch (mask) {
	case RT_IDX_CAM_HIT:
		value = RT_IDX_DST_CAM_Q |	/* dest */
		    RT_IDX_TYPE_NICQ |	/* type */
		    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
		break;
	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
		value = RT_IDX_DST_DFLT_Q |	/* dest */
		    RT_IDX_TYPE_NICQ |	/* type */
		    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
		break;
	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
		value = RT_IDX_DST_DFLT_Q |	/* dest */
		    RT_IDX_TYPE_NICQ |	/* type */
		    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
		break;
	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
		value = RT_IDX_DST_DFLT_Q |	/* dest */
		    RT_IDX_TYPE_NICQ |	/* type */
		    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
		break;
	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
		value = RT_IDX_DST_CAM_Q |	/* dest */
		    RT_IDX_TYPE_NICQ |	/* type */
		    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
		break;
	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
		value = RT_IDX_DST_CAM_Q |	/* dest */
		    RT_IDX_TYPE_NICQ |	/* type */
		    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
		break;
	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
		value = RT_IDX_DST_RSS |	/* dest */
		    RT_IDX_TYPE_NICQ |	/* type */
		    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
		break;
	case 0:		/* Clear the E-bit on an entry. */
		value = RT_IDX_DST_DFLT_Q |	/* dest */
		    RT_IDX_TYPE_NICQ |	/* type */
		    (index << RT_IDX_IDX_SHIFT);/* index */
		break;
	default:
		QPRINTK(qdev, IFUP, ERR, "Mask type %d not yet supported.\n",
			mask);
		status = -EPERM;
		goto exit;
	}

	if (value) {
		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
		if (status)
			goto exit;
		value |= (enable ? RT_IDX_E : 0);
		ql_write32(qdev, RT_IDX, value);
		ql_write32(qdev, RT_DATA, enable ? mask : 0);
	}
exit:
	return status;
}
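/* Illustrative usage (editorial; the real calls are made from the
 * route-initialization path elsewhere in this file): enabling broadcast
 * routing programs the BCAST slot and then sets RT_DATA to the mask so
 * matching frames land on the default queue:
 *
 *	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
 *
 * Passing enable == 0 clears the entry's E-bit and zeroes RT_DATA instead.
 */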
static void ql_enable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

static void ql_disable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}
/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes. In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
 * a worker finishes.  Once it hits zero we enable the interrupt.
 */
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	unsigned long hw_flags = 0;
	struct intr_context *ctx = qdev->intr_context + intr;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
		/* Always enable if we're MSIX multi interrupts and
		 * it's not the default (zeroeth) interrupt.
		 */
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
		return var;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (atomic_dec_and_test(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return var;
}
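/* Note (editorial): in the shared-interrupt case irq_cnt behaves like a
 * disable-depth counter.  Steady state (enabled) is irq_cnt == 0; every
 * ql_disable_completion_interrupt() masks the source and increments the
 * count, every ql_enable_completion_interrupt() decrements it, and only
 * the call that brings it back to zero rewrites INTR_EN.  That is why
 * ql_enable_all_completion_interrupts() below precharges irq_cnt to 1
 * before calling the enable routine.
 */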
static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	unsigned long hw_flags;
	struct intr_context *ctx;

	/* HW disables for us if we're MSIX multi interrupts and
	 * it's not the default (zeroeth) interrupt.
	 */
	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
		return 0;

	ctx = qdev->intr_context + intr;
	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (!atomic_read(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_dis_mask);
		var = ql_read32(qdev, STS);
	}
	atomic_inc(&ctx->irq_cnt);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return var;
}
static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
	int i;

	for (i = 0; i < qdev->intr_count; i++) {
		/* The enable call does an atomic_dec_and_test
		 * and enables only if the result is zero.
		 * So we precharge it here.
		 */
		if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
			     i == 0))
			atomic_set(&qdev->intr_context[i].irq_cnt, 1);
		ql_enable_completion_interrupt(qdev, i);
	}
}
static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
{
	int status, i;
	u16 csum = 0;
	__le16 *flash = (__le16 *)&qdev->flash;

	status = strncmp((char *)&qdev->flash, str, 4);
	if (status) {
		QPRINTK(qdev, IFUP, ERR, "Invalid flash signature.\n");
		return status;
	}

	for (i = 0; i < size; i++)
		csum += le16_to_cpu(*flash++);

	if (csum)
		QPRINTK(qdev, IFUP, ERR,
			"Invalid flash checksum, csum = 0x%.04x.\n", csum);

	return csum;
}
static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
{
	int status;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* This data is stored on flash as an array of
	 * __le32.  Since ql_read32() returns cpu endian
	 * we need to swap it back.
	 */
	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
exit:
	return status;
}
static int ql_get_8012_flash_params(struct ql_adapter *qdev)
{
	int i;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset = 0;
	u32 size = sizeof(struct flash_params_8012) / sizeof(u32);

	/* Second function's parameters follow the first
	 * function's.
	 */
	if (qdev->func)
		offset = size;

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8012) / sizeof(u16),
			"8012");
	if (status) {
		QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
		qdev->flash.flash_params_8012.mac_addr,
		qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}
/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
	int status;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		return status;
	/* write the data to the data reg */
	ql_write32(qdev, XGMAC_DATA, data);
	/* trigger the write */
	ql_write32(qdev, XGMAC_ADDR, reg);
	return status;
}
/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* get the data */
	*data = ql_read32(qdev, XGMAC_DATA);
exit:
	return status;
}
/* This is used for reading the 64-bit statistics regs. */
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
{
	int status = 0;
	u32 hi = 0;
	u32 lo = 0;

	status = ql_read_xgmac_reg(qdev, reg, &lo);
	if (status)
		goto exit;

	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
	if (status)
		goto exit;

	*data = (u64) lo | ((u64) hi << 32);

exit:
	return status;
}
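/* Note (editorial): the 64-bit statistics value is assembled from two
 * separate 32-bit reads (lo, then hi), so the read is not atomic with
 * respect to a counter that rolls over between the two accesses.  For
 * slowly-changing MAC statistics this is normally harmless, but it is
 * worth knowing when interpreting an occasional outlier.
 */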
/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
static int ql_8012_port_initialize(struct ql_adapter *qdev)
{
	int status = 0;
	u32 data;

	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
		/* Another function has the semaphore, so
		 * wait for the port init bit to come ready.
		 */
		QPRINTK(qdev, LINK, INFO,
			"Another function has the semaphore, so wait for the port init bit to come ready.\n");
		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
		if (status) {
			QPRINTK(qdev, LINK, CRIT,
				"Port initialize timed out.\n");
		}
		return status;
	}

	QPRINTK(qdev, LINK, INFO, "Got xgmac semaphore!.\n");
	/* Set the core reset. */
	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
	if (status)
		goto end;
	data |= GLOBAL_CFG_RESET;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Clear the core reset and turn on jumbo for receiver. */
	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
	data |= GLOBAL_CFG_TX_STAT_EN;
	data |= GLOBAL_CFG_RX_STAT_EN;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Enable transmitter, and clear its reset. */
	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
	if (status)
		goto end;
	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
	data |= TX_CFG_EN;	/* Enable the transmitter. */
	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
	if (status)
		goto end;

	/* Enable receiver and clear its reset. */
	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
	if (status)
		goto end;
	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
	data |= RX_CFG_EN;	/* Enable the receiver. */
	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
	if (status)
		goto end;

	/* Turn on jumbo frame sizes. */
	status =
	    ql_write_xgmac_reg(qdev, MAC_TX_PARAMS,
			       MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
	if (status)
		goto end;
	status =
	    ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
	if (status)
		goto end;

	/* Signal to the world that the port is enabled. */
	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
	ql_sem_unlock(qdev, qdev->xg_sem_mask);
	return status;
}
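/* Note (editorial): 0x2580 is 9600 decimal, which appears to be the
 * maximum frame size programmed into MAC_TX_PARAMS (shifted into the
 * upper half alongside MAC_TX_PARAMS_JUMBO) and MAC_RX_PARAMS above;
 * it comfortably covers a 9000-byte jumbo MTU plus link headers.
 */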
/* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
	rx_ring->lbq_curr_idx++;
	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
		rx_ring->lbq_curr_idx = 0;
	rx_ring->lbq_free_cnt++;
	return lbq_desc;
}
/* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
	rx_ring->sbq_curr_idx++;
	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
		rx_ring->sbq_curr_idx = 0;
	rx_ring->sbq_free_cnt++;
	return sbq_desc;
}
/* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
{
	rx_ring->cnsmr_idx++;
	rx_ring->curr_entry++;
	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
		rx_ring->cnsmr_idx = 0;
		rx_ring->curr_entry = rx_ring->cq_base;
	}
}
static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}
/* Process (refill) a large buffer queue. */
static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->lbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *lbq_desc;
	u64 map;
	int i;

	while (rx_ring->lbq_free_cnt > 16) {
		for (i = 0; i < 16; i++) {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"lbq: try cleaning clean_idx = %d.\n",
				clean_idx);
			lbq_desc = &rx_ring->lbq[clean_idx];
			if (lbq_desc->p.lbq_page == NULL) {
				QPRINTK(qdev, RX_STATUS, DEBUG,
					"lbq: getting new page for index %d.\n",
					lbq_desc->index);
				lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
				if (lbq_desc->p.lbq_page == NULL) {
					rx_ring->lbq_clean_idx = clean_idx;
					QPRINTK(qdev, RX_STATUS, ERR,
						"Couldn't get a page.\n");
					return;
				}
				map = pci_map_page(qdev->pdev,
						   lbq_desc->p.lbq_page,
						   0, PAGE_SIZE,
						   PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(qdev->pdev, map)) {
					rx_ring->lbq_clean_idx = clean_idx;
					put_page(lbq_desc->p.lbq_page);
					lbq_desc->p.lbq_page = NULL;
					QPRINTK(qdev, RX_STATUS, ERR,
						"PCI mapping failed.\n");
					return;
				}
				pci_unmap_addr_set(lbq_desc, mapaddr, map);
				pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE);
				*lbq_desc->addr = cpu_to_le64(map);
			}
			clean_idx++;
			if (clean_idx == rx_ring->lbq_len)
				clean_idx = 0;
		}

		rx_ring->lbq_clean_idx = clean_idx;
		rx_ring->lbq_prod_idx += 16;
		if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
			rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"lbq: updating prod idx = %d.\n",
			rx_ring->lbq_prod_idx);
		ql_write_db_reg(rx_ring->lbq_prod_idx,
				rx_ring->lbq_prod_idx_db_reg);
	}
}
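/* Note (editorial): both buffer queues are refilled in bursts of 16
 * descriptors (the sbq refill below is identical in shape), and the
 * producer-index doorbell is rung once per refill pass rather than once
 * per buffer.  Batching this way keeps the number of MMIO doorbell
 * writes low on the hot rx path.
 */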
/* Process (refill) a small buffer queue. */
static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->sbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *sbq_desc;
	u64 map;
	int i;

	while (rx_ring->sbq_free_cnt > 16) {
		for (i = 0; i < 16; i++) {
			sbq_desc = &rx_ring->sbq[clean_idx];
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"sbq: try cleaning clean_idx = %d.\n",
				clean_idx);
			if (sbq_desc->p.skb == NULL) {
				QPRINTK(qdev, RX_STATUS, DEBUG,
					"sbq: getting new skb for index %d.\n",
					sbq_desc->index);
				sbq_desc->p.skb =
				    netdev_alloc_skb(qdev->ndev,
						     rx_ring->sbq_buf_size);
				if (sbq_desc->p.skb == NULL) {
					QPRINTK(qdev, PROBE, ERR,
						"Couldn't get an skb.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					return;
				}
				skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
				map = pci_map_single(qdev->pdev,
						     sbq_desc->p.skb->data,
						     rx_ring->sbq_buf_size /
						     2, PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(qdev->pdev, map)) {
					QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					dev_kfree_skb_any(sbq_desc->p.skb);
					sbq_desc->p.skb = NULL;
					return;
				}
				pci_unmap_addr_set(sbq_desc, mapaddr, map);
				pci_unmap_len_set(sbq_desc, maplen,
						  rx_ring->sbq_buf_size / 2);
				*sbq_desc->addr = cpu_to_le64(map);
			}
			clean_idx++;
			if (clean_idx == rx_ring->sbq_len)
				clean_idx = 0;
		}
		rx_ring->sbq_clean_idx = clean_idx;
		rx_ring->sbq_prod_idx += 16;
		if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
			rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"sbq: updating prod idx = %d.\n",
			rx_ring->sbq_prod_idx);
		ql_write_db_reg(rx_ring->sbq_prod_idx,
				rx_ring->sbq_prod_idx_db_reg);
	}
}
static void ql_update_buffer_queues(struct ql_adapter *qdev,
				    struct rx_ring *rx_ring)
{
	ql_update_sbq(qdev, rx_ring);
	ql_update_lbq(qdev, rx_ring);
}
/* Unmaps tx buffers.  Can be called from send() if a pci mapping
 * fails at some stage, or from the interrupt when a tx completes.
 */
static void ql_unmap_send(struct ql_adapter *qdev,
			  struct tx_ring_desc *tx_ring_desc, int mapped)
{
	int i;
	for (i = 0; i < mapped; i++) {
		if (i == 0 || (i == 7 && mapped > 7)) {
			/*
			 * Unmap the skb->data area, or the
			 * external sglist (AKA the Outbound
			 * Address List (OAL)).
			 * If it's the zeroeth element, then it's
			 * the skb->data area.  If it's the 7th
			 * element and there is more than 6 frags,
			 * then it's an OAL.
			 */
			if (i == 7) {
				QPRINTK(qdev, TX_DONE, DEBUG,
					"unmapping OAL area.\n");
			}
			pci_unmap_single(qdev->pdev,
					 pci_unmap_addr(&tx_ring_desc->map[i],
							mapaddr),
					 pci_unmap_len(&tx_ring_desc->map[i],
						       maplen),
					 PCI_DMA_TODEVICE);
		} else {
			QPRINTK(qdev, TX_DONE, DEBUG, "unmapping frag %d.\n",
				i);
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(&tx_ring_desc->map[i],
						      mapaddr),
				       pci_unmap_len(&tx_ring_desc->map[i],
						     maplen), PCI_DMA_TODEVICE);
		}
	}
}
/* Map the buffers for this transmit.  This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
static int ql_map_send(struct ql_adapter *qdev,
		       struct ob_mac_iocb_req *mac_iocb_ptr,
		       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
{
	int len = skb_headlen(skb);
	dma_addr_t map;
	int frag_idx, err, map_idx = 0;
	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
	int frag_cnt = skb_shinfo(skb)->nr_frags;

	QPRINTK(qdev, TX_QUEUED, DEBUG, "frag_cnt = %d.\n", frag_cnt);
	/*
	 * Map the skb buffer first.
	 */
	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);

	err = pci_dma_mapping_error(qdev->pdev, map);
	if (err) {
		QPRINTK(qdev, TX_QUEUED, ERR,
			"PCI mapping failed with error: %d\n", err);

		return NETDEV_TX_BUSY;
	}

	tbd->len = cpu_to_le32(len);
	tbd->addr = cpu_to_le64(map);
	pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
	pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
	map_idx++;

	/*
	 * This loop fills the remainder of the 8 address descriptors
	 * in the IOCB.  If there are more than 7 fragments, then the
	 * eighth address desc will point to an external list (OAL).
	 * When this happens, the remainder of the frags will be stored
	 * in this list.
	 */
	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
		tbd++;
		if (frag_idx == 6 && frag_cnt > 7) {
			/* Let's tack on an sglist.
			 * Our control block will now
			 * look like this:
			 * iocb->seg[0] = skb->data
			 * iocb->seg[1] = frag[0]
			 * iocb->seg[2] = frag[1]
			 * iocb->seg[3] = frag[2]
			 * iocb->seg[4] = frag[3]
			 * iocb->seg[5] = frag[4]
			 * iocb->seg[6] = frag[5]
			 * iocb->seg[7] = ptr to OAL (external sglist)
			 * oal->seg[0] = frag[6]
			 * oal->seg[1] = frag[7]
			 * oal->seg[2] = frag[8]
			 * oal->seg[3] = frag[9]
			 * oal->seg[4] = frag[10]
			 *      etc...
			 */
			/* Tack on the OAL in the eighth segment of IOCB. */
			map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
					     sizeof(struct oal),
					     PCI_DMA_TODEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				QPRINTK(qdev, TX_QUEUED, ERR,
					"PCI mapping outbound address list with error: %d\n",
					err);
				goto map_error;
			}

			tbd->addr = cpu_to_le64(map);
			/*
			 * The length is the number of fragments
			 * that remain to be mapped times the length
			 * of our sglist (OAL).
			 */
			tbd->len =
			    cpu_to_le32((sizeof(struct tx_buf_desc) *
					 (frag_cnt - frag_idx)) | TX_DESC_C);
			pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
					   map);
			pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
					  sizeof(struct oal));
			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
			map_idx++;
		}

		map =
		    pci_map_page(qdev->pdev, frag->page,
				 frag->page_offset, frag->size,
				 PCI_DMA_TODEVICE);

		err = pci_dma_mapping_error(qdev->pdev, map);
		if (err) {
			QPRINTK(qdev, TX_QUEUED, ERR,
				"PCI mapping frags failed with error: %d.\n",
				err);
			goto map_error;
		}

		tbd->addr = cpu_to_le64(map);
		tbd->len = cpu_to_le32(frag->size);
		pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
		pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
				  frag->size);
	}
	/* Save the number of segments we've mapped. */
	tx_ring_desc->map_cnt = map_idx;
	/* Terminate the last segment. */
	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
	return NETDEV_TX_OK;

map_error:
	/*
	 * If the first frag mapping failed, then i will be zero.
	 * This causes the unmap of the skb->data area.  Otherwise
	 * we pass in the number of frags that mapped successfully
	 * so they can be unmapped.
	 */
	ql_unmap_send(qdev, tx_ring_desc, map_idx);
	return NETDEV_TX_BUSY;
}
static void ql_realign_skb(struct sk_buff *skb, int len)
{
	void *temp_addr = skb->data;

	/* Undo the skb_reserve(skb,32) we did before
	 * giving to hardware, and realign data on
	 * a 2-byte boundary.
	 */
	skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb_copy_to_linear_data(skb, temp_addr,
		(unsigned int)len);
}
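/* Worked example (editorial): with the QLGE_SB_PAD of 32 noted in the
 * comment above and the default NET_IP_ALIGN of 2, ql_realign_skb() moves
 * skb->data back by 30 bytes.  The hardware needed the 32-byte headroom,
 * but the stack wants the 14-byte Ethernet header to start 2 bytes past a
 * 4-byte boundary so the IP header lands 4-byte aligned; the copy above
 * restores that alignment.
 */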
/*
 * This function builds an skb for the given inbound
 * completion.  It will be rewritten for readability in the near
 * future, but for now it works well.
 */
static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
				       struct rx_ring *rx_ring,
				       struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	struct bq_desc *lbq_desc;
	struct bq_desc *sbq_desc;
	struct sk_buff *skb = NULL;
	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
	u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);

	/*
	 * Handle the header buffer if present.
	 */
	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
	    ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
		QPRINTK(qdev, RX_STATUS, DEBUG, "Header of %d bytes in small buffer.\n", hdr_len);
		/*
		 * Headers fit nicely into a small buffer.
		 */
		sbq_desc = ql_get_curr_sbuf(rx_ring);
		pci_unmap_single(qdev->pdev,
				pci_unmap_addr(sbq_desc, mapaddr),
				pci_unmap_len(sbq_desc, maplen),
				PCI_DMA_FROMDEVICE);
		skb = sbq_desc->p.skb;
		ql_realign_skb(skb, hdr_len);
		skb_put(skb, hdr_len);
		sbq_desc->p.skb = NULL;
	}

	/*
	 * Handle the data buffer(s).
	 */
	if (unlikely(!length)) {	/* Is there data too? */
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"No Data buffer in this packet.\n");
		return skb;
	}

	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Headers in small, data of %d bytes in small, combine them.\n", length);
			/*
			 * Data is less than small buffer size so it's
			 * stuffed in a small buffer.
			 * For this case we append the data
			 * from the "data" small buffer to the "header" small
			 * buffer.
			 */
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			pci_dma_sync_single_for_cpu(qdev->pdev,
						    pci_unmap_addr
						    (sbq_desc, mapaddr),
						    pci_unmap_len
						    (sbq_desc, maplen),
						    PCI_DMA_FROMDEVICE);
			memcpy(skb_put(skb, length),
			       sbq_desc->p.skb->data, length);
			pci_dma_sync_single_for_device(qdev->pdev,
						       pci_unmap_addr
						       (sbq_desc,
							mapaddr),
						       pci_unmap_len
						       (sbq_desc,
							maplen),
						       PCI_DMA_FROMDEVICE);
		} else {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"%d bytes in a single small buffer.\n", length);
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			skb = sbq_desc->p.skb;
			ql_realign_skb(skb, length);
			skb_put(skb, length);
			pci_unmap_single(qdev->pdev,
					 pci_unmap_addr(sbq_desc,
							mapaddr),
					 pci_unmap_len(sbq_desc,
						       maplen),
					 PCI_DMA_FROMDEVICE);
			sbq_desc->p.skb = NULL;
		}
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Header in small, %d bytes in large. Chain large to small!\n", length);
			/*
			 * The data is in a single large buffer.  We
			 * chain it to the header buffer's skb and let
			 * it ride.
			 */
			lbq_desc = ql_get_curr_lbuf(rx_ring);
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(lbq_desc,
						      mapaddr),
				       pci_unmap_len(lbq_desc, maplen),
				       PCI_DMA_FROMDEVICE);
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Chaining page to skb.\n");
			skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
					   0, length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
			lbq_desc->p.lbq_page = NULL;
		} else {
			/*
			 * The headers and data are in a single large buffer. We
			 * copy it to a new skb and let it go. This can happen with
			 * jumbo mtu on a non-TCP/UDP frame.
			 */
			lbq_desc = ql_get_curr_lbuf(rx_ring);
			skb = netdev_alloc_skb(qdev->ndev, length);
			if (skb == NULL) {
				QPRINTK(qdev, PROBE, DEBUG,
					"No skb available, drop the packet.\n");
				return NULL;
			}
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(lbq_desc,
						      mapaddr),
				       pci_unmap_len(lbq_desc, maplen),
				       PCI_DMA_FROMDEVICE);
			skb_reserve(skb, NET_IP_ALIGN);
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length);
			skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
					   0, length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
			length -= length;
			lbq_desc->p.lbq_page = NULL;
			__pskb_pull_tail(skb,
				(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
				VLAN_ETH_HLEN : ETH_HLEN);
		}
	} else {
		/*
		 * The data is in a chain of large buffers
		 * pointed to by a small buffer.  We loop
		 * thru and chain them to our small header
		 * buffer's skb.
		 * frags:  There are 18 max frags and our small
		 *         buffer will hold 32 of them. The thing is,
		 *         we'll use 3 max for our 9000 byte jumbo
		 *         frames.  If the MTU goes up we could
		 *         eventually be in trouble.
		 */
		int size, offset, i = 0;
		__le64 *bq, bq_array[8];
		sbq_desc = ql_get_curr_sbuf(rx_ring);
		pci_unmap_single(qdev->pdev,
				 pci_unmap_addr(sbq_desc, mapaddr),
				 pci_unmap_len(sbq_desc, maplen),
				 PCI_DMA_FROMDEVICE);
		if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
			/*
			 * This is a non TCP/UDP IP frame, so
			 * the headers aren't split into a small
			 * buffer.  We have to use the small buffer
			 * that contains our sg list as our skb to
			 * send upstairs. Copy the sg list here to
			 * a local buffer and use it to find the
			 * pages to chain.
			 */
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"%d bytes of headers & data in chain of large.\n", length);
			skb = sbq_desc->p.skb;
			bq = &bq_array[0];
			memcpy(bq, skb->data, sizeof(bq_array));
			sbq_desc->p.skb = NULL;
			skb_reserve(skb, NET_IP_ALIGN);
		} else {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Headers in small, %d bytes of data in chain of large.\n", length);
			bq = (__le64 *)sbq_desc->p.skb->data;
		}
		while (length > 0) {
			lbq_desc = ql_get_curr_lbuf(rx_ring);
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(lbq_desc,
						      mapaddr),
				       pci_unmap_len(lbq_desc,
						     maplen),
				       PCI_DMA_FROMDEVICE);
			size = (length < PAGE_SIZE) ? length : PAGE_SIZE;
			offset = 0;

			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Adding page %d to skb for %d bytes.\n",
				i, size);
			skb_fill_page_desc(skb, i, lbq_desc->p.lbq_page,
					   offset, size);
			skb->len += size;
			skb->data_len += size;
			skb->truesize += size;
			length -= size;
			lbq_desc->p.lbq_page = NULL;
			bq++;
			i++;
		}
		__pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
				 VLAN_ETH_HLEN : ETH_HLEN);
	}
	return skb;
}
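/* Note (editorial): ql_build_rx_skb() above handles four layouts reported
 * by the IOCB flags: (1) header and data both in small buffers, copied
 * together; (2) data alone in a small buffer; (3) data (or header plus
 * data) in one large buffer, chained or copied; and (4) data in a chain
 * of large buffers located through an sg list held in a small buffer.
 */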
/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
				   struct rx_ring *rx_ring,
				   struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;

	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);

	skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
	if (unlikely(!skb)) {
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"No skb available, drop packet.\n");
		return;
	}

	prefetch(skb->data);
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
	}
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
		QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
	}
	if (ib_mac_rsp->flags1 & (IB_MAC_IOCB_RSP_IE | IB_MAC_IOCB_RSP_TE)) {
		QPRINTK(qdev, RX_STATUS, ERR,
			"Bad checksum for this %s packet.\n",
			((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ? "TCP" : "UDP"));
		skb->ip_summed = CHECKSUM_NONE;
	} else if (qdev->rx_csum &&
		   ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ||
		    ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
		     !(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU)))) {
		QPRINTK(qdev, RX_STATUS, DEBUG, "RX checksum done!\n");
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
	qdev->stats.rx_packets++;
	qdev->stats.rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	skb_record_rx_queue(skb, rx_ring - &qdev->rx_ring[0]);
	if (qdev->vlgrp && (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)) {
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"Passing a VLAN packet upstream.\n");
		vlan_hwaccel_receive_skb(skb, qdev->vlgrp,
				le16_to_cpu(ib_mac_rsp->vlan_id));
	} else {
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"Passing a normal packet upstream.\n");
		netif_receive_skb(skb);
	}
}
/* Process an outbound completion from an rx ring. */
static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
				   struct ob_mac_iocb_rsp *mac_rsp)
{
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;

	QL_DUMP_OB_MAC_RSP(mac_rsp);
	tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
	tx_ring_desc = &tx_ring->q[mac_rsp->tid];
	ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
	qdev->stats.tx_bytes += tx_ring_desc->skb->len;	/* count bytes, not mapped segments */
	qdev->stats.tx_packets++;
	dev_kfree_skb(tx_ring_desc->skb);
	tx_ring_desc->skb = NULL;

	if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
					OB_MAC_IOCB_RSP_S |
					OB_MAC_IOCB_RSP_L |
					OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
			QPRINTK(qdev, TX_DONE, WARNING,
				"Total descriptor length did not match transfer length.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
			QPRINTK(qdev, TX_DONE, WARNING,
				"Frame too short to be legal, not sent.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
			QPRINTK(qdev, TX_DONE, WARNING,
				"Frame too long, but sent anyway.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
			QPRINTK(qdev, TX_DONE, WARNING,
				"PCI backplane error. Frame not sent.\n");
		}
	}
	atomic_inc(&tx_ring->tx_count);
}
/* Fire up a handler to reset the MPI processor. */
void ql_queue_fw_error(struct ql_adapter *qdev)
{
	netif_stop_queue(qdev->ndev);
	netif_carrier_off(qdev->ndev);
	queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
}
void ql_queue_asic_error(struct ql_adapter *qdev)
{
	netif_stop_queue(qdev->ndev);
	netif_carrier_off(qdev->ndev);
	ql_disable_interrupts(qdev);
	/* Clear adapter up bit to signal the recovery
	 * process that it shouldn't kill the reset worker
	 * thread.
	 */
	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
}
static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
				    struct ib_ae_iocb_rsp *ib_ae_rsp)
{
	switch (ib_ae_rsp->event) {
	case MGMT_ERR_EVENT:
		QPRINTK(qdev, RX_ERR, ERR,
			"Management Processor Fatal Error.\n");
		ql_queue_fw_error(qdev);
		return;

	case CAM_LOOKUP_ERR_EVENT:
		QPRINTK(qdev, LINK, ERR,
			"Multiple CAM hits lookup occurred.\n");
		QPRINTK(qdev, DRV, ERR, "This event shouldn't occur.\n");
		ql_queue_asic_error(qdev);
		return;

	case SOFT_ECC_ERROR_EVENT:
		QPRINTK(qdev, RX_ERR, ERR, "Soft ECC error detected.\n");
		ql_queue_asic_error(qdev);
		break;

	case PCI_ERR_ANON_BUF_RD:
		QPRINTK(qdev, RX_ERR, ERR,
			"PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
			ib_ae_rsp->q_id);
		ql_queue_asic_error(qdev);
		break;

	default:
		QPRINTK(qdev, DRV, ERR, "Unexpected event %d.\n",
			ib_ae_rsp->event);
		ql_queue_asic_error(qdev);
		break;
	}
}
static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ob_mac_iocb_rsp *net_rsp = NULL;
	int count = 0;

	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {

		QPRINTK(qdev, RX_STATUS, DEBUG,
			"cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring->cq_id,
			prod, rx_ring->cnsmr_idx);

		net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
		switch (net_rsp->opcode) {

		case OPCODE_OB_MAC_TSO_IOCB:
		case OPCODE_OB_MAC_IOCB:
			ql_process_mac_tx_intr(qdev, net_rsp);
			break;
		default:
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Hit default case, not handled! dropping the packet, opcode = %x.\n",
				net_rsp->opcode);
		}
		count++;
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	}
	ql_write_cq_idx(rx_ring);
	if (netif_queue_stopped(qdev->ndev) && net_rsp != NULL) {
		struct tx_ring *tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
		if (atomic_read(&tx_ring->queue_stopped) &&
		    (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
			/*
			 * The queue got stopped because the tx_ring was full.
			 * Wake it up, because it's now at least 25% empty.
			 */
			netif_wake_queue(qdev->ndev);
	}

	return count;
}
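/* Worked example (editorial): the wake test above fires once the tx ring
 * is at least 25% free.  With a hypothetical wq_len of 256 entries the
 * queue would be restarted when more than 64 tx descriptors are available
 * again, which avoids bouncing the queue on every single completion.
 */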
static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ql_net_rsp_iocb *net_rsp;
	int count = 0;

	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {

		QPRINTK(qdev, RX_STATUS, DEBUG,
			"cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring->cq_id,
			prod, rx_ring->cnsmr_idx);

		net_rsp = rx_ring->curr_entry;
		switch (net_rsp->opcode) {
		case OPCODE_IB_MAC_IOCB:
			ql_process_mac_rx_intr(qdev, rx_ring,
					       (struct ib_mac_iocb_rsp *)
					       net_rsp);
			break;

		case OPCODE_IB_AE_IOCB:
			ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
						net_rsp);
			break;
		default:
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Hit default case, not handled! dropping the packet, opcode = %x.\n",
				net_rsp->opcode);
			break;
		}
		count++;
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
		if (count == budget)
			break;
	}
	ql_update_buffer_queues(qdev, rx_ring);
	ql_write_cq_idx(rx_ring);
	return count;
}
static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
{
	struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
	struct ql_adapter *qdev = rx_ring->qdev;
	int work_done = ql_clean_inbound_rx_ring(rx_ring, budget);

	QPRINTK(qdev, RX_STATUS, DEBUG, "Enter, NAPI POLL cq_id = %d.\n",
		rx_ring->cq_id);

	if (work_done < budget) {
		__napi_complete(napi);
		ql_enable_completion_interrupt(qdev, rx_ring->irq);
	}
	return work_done;
}
static void ql_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	qdev->vlgrp = grp;
	if (grp) {
		QPRINTK(qdev, IFUP, DEBUG, "Turning on VLAN in NIC_RCV_CFG.\n");
		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
			   NIC_RCV_CFG_VLAN_MATCH_AND_NON);
	} else {
		QPRINTK(qdev, IFUP, DEBUG,
			"Turning off VLAN in NIC_RCV_CFG.\n");
		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
	}
}
static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	u32 enable_bit = MAC_ADDR_E;
	int status;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return;
	spin_lock(&qdev->hw_lock);
	if (ql_set_mac_addr_reg
	    (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
		QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n");
	}
	spin_unlock(&qdev->hw_lock);
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
}
static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	u32 enable_bit = 0;
	int status;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return;
	spin_lock(&qdev->hw_lock);
	if (ql_set_mac_addr_reg
	    (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
		QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n");
	}
	spin_unlock(&qdev->hw_lock);
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
}
/* Worker thread to process a given rx_ring that is dedicated
 * to outbound completions.
 */
static void ql_tx_clean(struct work_struct *work)
{
	struct rx_ring *rx_ring =
	    container_of(work, struct rx_ring, rx_work.work);
	ql_clean_outbound_rx_ring(rx_ring);
	ql_enable_completion_interrupt(rx_ring->qdev, rx_ring->irq);
}
/* Worker thread to process a given rx_ring that is dedicated
 * to inbound completions.
 */
static void ql_rx_clean(struct work_struct *work)
{
	struct rx_ring *rx_ring =
	    container_of(work, struct rx_ring, rx_work.work);
	ql_clean_inbound_rx_ring(rx_ring, 64);
	ql_enable_completion_interrupt(rx_ring->qdev, rx_ring->irq);
}
/* MSI-X Multiple Vector Interrupt Handler for outbound completions. */
static irqreturn_t qlge_msix_tx_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	queue_delayed_work_on(rx_ring->cpu, rx_ring->qdev->q_workqueue,
			      &rx_ring->rx_work, 0);
	return IRQ_HANDLED;
}
/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	napi_schedule(&rx_ring->napi);
	return IRQ_HANDLED;
}
/* This handles a fatal error, MPI activity, and the default
 * rx_ring in an MSI-X multiple vector environment.
 * In MSI/Legacy environment it also processes the rest of
 * the rx_rings.
 */
static irqreturn_t qlge_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	struct ql_adapter *qdev = rx_ring->qdev;
	struct intr_context *intr_context = &qdev->intr_context[0];
	u32 var;
	int i;
	int work_done = 0;

	spin_lock(&qdev->hw_lock);
	if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
		QPRINTK(qdev, INTR, DEBUG, "Shared Interrupt, Not ours!\n");
		spin_unlock(&qdev->hw_lock);
		return IRQ_NONE;
	}
	spin_unlock(&qdev->hw_lock);

	var = ql_disable_completion_interrupt(qdev, intr_context->intr);

	/*
	 * Check for fatal error.
	 */
	if (var & STS_FE) {
		ql_queue_asic_error(qdev);
		QPRINTK(qdev, INTR, ERR, "Got fatal error, STS = %x.\n", var);
		var = ql_read32(qdev, ERR_STS);
		QPRINTK(qdev, INTR, ERR,
			"Resetting chip. Error Status Register = 0x%x\n", var);
		return IRQ_HANDLED;
	}

	/*
	 * Check MPI processor activity.
	 */
	if (var & STS_PI) {
		/*
		 * We've got an async event or mailbox completion.
		 * Handle it and clear the source of the interrupt.
		 */
		QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n");
		ql_disable_completion_interrupt(qdev, intr_context->intr);
		queue_delayed_work_on(smp_processor_id(), qdev->workqueue,
				      &qdev->mpi_work, 0);
		work_done++;
	}

	/*
	 * Check the default queue and wake handler if active.
	 */
	rx_ring = &qdev->rx_ring[0];
	if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) != rx_ring->cnsmr_idx) {
		QPRINTK(qdev, INTR, INFO, "Waking handler for rx_ring[0].\n");
		ql_disable_completion_interrupt(qdev, intr_context->intr);
		queue_delayed_work_on(smp_processor_id(), qdev->q_workqueue,
				      &rx_ring->rx_work, 0);
		work_done++;
	}

	if (!test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
		/*
		 * Start the DPC for each active queue.
		 */
		for (i = 1; i < qdev->rx_ring_count; i++) {
			rx_ring = &qdev->rx_ring[i];
			if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
			    rx_ring->cnsmr_idx) {
				QPRINTK(qdev, INTR, INFO,
					"Waking handler for rx_ring[%d].\n", i);
				ql_disable_completion_interrupt(qdev,
								intr_context->
								intr);
				if (i < qdev->rss_ring_first_cq_id)
					queue_delayed_work_on(rx_ring->cpu,
							      qdev->q_workqueue,
							      &rx_ring->rx_work,
							      0);
				else
					napi_schedule(&rx_ring->napi);
				work_done++;
			}
		}
	}
	ql_enable_completion_interrupt(qdev, intr_context->intr);
	return work_done ? IRQ_HANDLED : IRQ_NONE;
}
static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{

	if (skb_is_gso(skb)) {
		int err;
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}

		mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
		mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
		mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
		mac_iocb_ptr->total_hdrs_len =
		    cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
		mac_iocb_ptr->net_trans_offset =
		    cpu_to_le16(skb_network_offset(skb) |
				skb_transport_offset(skb)
				<< OB_MAC_TRANSPORT_HDR_SHIFT);
		mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
		if (likely(skb->protocol == htons(ETH_P_IP))) {
			struct iphdr *iph = ip_hdr(skb);
			iph->check = 0;
			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
			tcp_hdr(skb)->check =
			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					     &ipv6_hdr(skb)->daddr,
					     0, IPPROTO_TCP, 0);
		}
		return 1;
	}
	return 0;
}
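/* Note (editorial): for TSO the driver seeds tcp->check with the inverted
 * pseudo-header sum computed over the addresses and protocol only (length
 * passed as 0), which is the usual contract when the MAC computes the
 * final TCP checksum per generated segment.  ql_hw_csum_setup() below
 * differs: there the frame is not segmented, so the pseudo-header sum
 * includes the real L4 length (tot_len minus the IP header length).
 */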
static void ql_hw_csum_setup(struct sk_buff *skb,
			     struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{
	int len;
	struct iphdr *iph = ip_hdr(skb);
	__sum16 *check;

	mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
	mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
	mac_iocb_ptr->net_trans_offset =
		cpu_to_le16(skb_network_offset(skb) |
		skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);

	mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
	len = (ntohs(iph->tot_len) - (iph->ihl << 2));
	if (likely(iph->protocol == IPPROTO_TCP)) {
		check = &(tcp_hdr(skb)->check);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
		mac_iocb_ptr->total_hdrs_len =
		    cpu_to_le16(skb_transport_offset(skb) +
				(tcp_hdr(skb)->doff << 2));
	} else {
		check = &(udp_hdr(skb)->check);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
		mac_iocb_ptr->total_hdrs_len =
		    cpu_to_le16(skb_transport_offset(skb) +
				sizeof(struct udphdr));
	}
	*check = ~csum_tcpudp_magic(iph->saddr,
				    iph->daddr, len, iph->protocol, 0);
}
static int qlge_send(struct sk_buff *skb, struct net_device *ndev)
{
	struct tx_ring_desc *tx_ring_desc;
	struct ob_mac_iocb_req *mac_iocb_ptr;
	struct ql_adapter *qdev = netdev_priv(ndev);
	int tso;
	struct tx_ring *tx_ring;
	u32 tx_ring_idx = (u32) QL_TXQ_IDX(qdev, skb);

	tx_ring = &qdev->tx_ring[tx_ring_idx];

	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
		QPRINTK(qdev, TX_QUEUED, INFO,
			"%s: shutting down tx queue %d due to lack of resources.\n",
			__func__, tx_ring_idx);
		netif_stop_queue(ndev);
		atomic_inc(&tx_ring->queue_stopped);
		return NETDEV_TX_BUSY;
	}
	tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
	mac_iocb_ptr = tx_ring_desc->queue_entry;
	memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));	/* clear the whole IOCB, not just a pointer's worth */

	mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
	mac_iocb_ptr->tid = tx_ring_desc->index;
	/* We use the upper 32-bits to store the tx queue for this IO.
	 * When we get the completion we can use it to establish the context.
	 */
	mac_iocb_ptr->txq_idx = tx_ring_idx;
	tx_ring_desc->skb = skb;

	mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);

	if (qdev->vlgrp && vlan_tx_tag_present(skb)) {
		QPRINTK(qdev, TX_QUEUED, DEBUG, "Adding a vlan tag %d.\n",
			vlan_tx_tag_get(skb));
		mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
		mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
	}
	tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	} else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
		ql_hw_csum_setup(skb,
				 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
	}
	if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
			NETDEV_TX_OK) {
		QPRINTK(qdev, TX_QUEUED, ERR,
			"Could not map the segments.\n");
		return NETDEV_TX_BUSY;
	}
	QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
	tx_ring->prod_idx++;
	if (tx_ring->prod_idx == tx_ring->wq_len)
		tx_ring->prod_idx = 0;
	wmb();

	ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
	ndev->trans_start = jiffies;
	QPRINTK(qdev, TX_QUEUED, DEBUG, "tx queued, slot %d, len %d\n",
		tx_ring->prod_idx, skb->len);

	atomic_dec(&tx_ring->tx_count);
	return NETDEV_TX_OK;
}
static void ql_free_shadow_space(struct ql_adapter *qdev)
{
	if (qdev->rx_ring_shadow_reg_area) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->rx_ring_shadow_reg_area,
				    qdev->rx_ring_shadow_reg_dma);
		qdev->rx_ring_shadow_reg_area = NULL;
	}
	if (qdev->tx_ring_shadow_reg_area) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->tx_ring_shadow_reg_area,
				    qdev->tx_ring_shadow_reg_dma);
		qdev->tx_ring_shadow_reg_area = NULL;
	}
}
static int ql_alloc_shadow_space(struct ql_adapter *qdev)
{
	qdev->rx_ring_shadow_reg_area =
	    pci_alloc_consistent(qdev->pdev,
				 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
	if (qdev->rx_ring_shadow_reg_area == NULL) {
		QPRINTK(qdev, IFUP, ERR,
			"Allocation of RX shadow space failed.\n");
		return -ENOMEM;
	}
	qdev->tx_ring_shadow_reg_area =
	    pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
				 &qdev->tx_ring_shadow_reg_dma);
	if (qdev->tx_ring_shadow_reg_area == NULL) {
		QPRINTK(qdev, IFUP, ERR,
			"Allocation of TX shadow space failed.\n");
		goto err_wqp_sh_area;
	}
	return 0;

err_wqp_sh_area:
	pci_free_consistent(qdev->pdev,
			    PAGE_SIZE,
			    qdev->rx_ring_shadow_reg_area,
			    qdev->rx_ring_shadow_reg_dma);
	return -ENOMEM;
}
static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
{
	struct tx_ring_desc *tx_ring_desc;
	int i;
	struct ob_mac_iocb_req *mac_iocb_ptr;

	mac_iocb_ptr = tx_ring->wq_base;
	tx_ring_desc = tx_ring->q;
	for (i = 0; i < tx_ring->wq_len; i++) {
		tx_ring_desc->index = i;
		tx_ring_desc->skb = NULL;
		tx_ring_desc->queue_entry = mac_iocb_ptr;
		mac_iocb_ptr++;
		tx_ring_desc++;
	}
	atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
	atomic_set(&tx_ring->queue_stopped, 0);
}
static void ql_free_tx_resources(struct ql_adapter *qdev,
				 struct tx_ring *tx_ring)
{
	if (tx_ring->wq_base) {
		pci_free_consistent(qdev->pdev, tx_ring->wq_size,
				    tx_ring->wq_base, tx_ring->wq_base_dma);
		tx_ring->wq_base = NULL;
	}
	kfree(tx_ring->q);
	tx_ring->q = NULL;
}
static int ql_alloc_tx_resources(struct ql_adapter *qdev,
				 struct tx_ring *tx_ring)
{
	tx_ring->wq_base =
	    pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
				 &tx_ring->wq_base_dma);

	if ((tx_ring->wq_base == NULL)
	    || tx_ring->wq_base_dma & (tx_ring->wq_size - 1)) {
		QPRINTK(qdev, IFUP, ERR, "tx_ring alloc failed.\n");
		return -ENOMEM;
	}
	tx_ring->q =
	    kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
	if (tx_ring->q == NULL)
		goto err;

	return 0;
err:
	pci_free_consistent(qdev->pdev, tx_ring->wq_size,
			    tx_ring->wq_base, tx_ring->wq_base_dma);
	return -ENOMEM;
}
static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *lbq_desc;

	for (i = 0; i < rx_ring->lbq_len; i++) {
		lbq_desc = &rx_ring->lbq[i];
		if (lbq_desc->p.lbq_page) {
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(lbq_desc, mapaddr),
				       pci_unmap_len(lbq_desc, maplen),
				       PCI_DMA_FROMDEVICE);

			put_page(lbq_desc->p.lbq_page);
			lbq_desc->p.lbq_page = NULL;
		}
	}
}
static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *sbq_desc;

	for (i = 0; i < rx_ring->sbq_len; i++) {
		sbq_desc = &rx_ring->sbq[i];
		if (sbq_desc == NULL) {
			QPRINTK(qdev, IFUP, ERR, "sbq_desc %d is NULL.\n", i);
			return;
		}
		if (sbq_desc->p.skb) {
			pci_unmap_single(qdev->pdev,
					 pci_unmap_addr(sbq_desc, mapaddr),
					 pci_unmap_len(sbq_desc, maplen),
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(sbq_desc->p.skb);
			sbq_desc->p.skb = NULL;
		}
	}
}
/* Free all large and small rx buffers associated
 * with the completion queues for this device.
 */
static void ql_free_rx_buffers(struct ql_adapter *qdev)
{
	int i;
	struct rx_ring *rx_ring;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		if (rx_ring->lbq)
			ql_free_lbq_buffers(qdev, rx_ring);
		if (rx_ring->sbq)
			ql_free_sbq_buffers(qdev, rx_ring);
	}
}
static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
{
	struct rx_ring *rx_ring;
	int i;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		if (rx_ring->type != TX_Q)
			ql_update_buffer_queues(qdev, rx_ring);
	}
}
static void ql_init_lbq_ring(struct ql_adapter *qdev,
			     struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *lbq_desc;
	__le64 *bq = rx_ring->lbq_base;

	memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
	for (i = 0; i < rx_ring->lbq_len; i++) {
		lbq_desc = &rx_ring->lbq[i];
		memset(lbq_desc, 0, sizeof(*lbq_desc));
		lbq_desc->index = i;
		lbq_desc->addr = bq;
		bq++;
	}
}
static void ql_init_sbq_ring(struct ql_adapter *qdev,
			     struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *sbq_desc;
	__le64 *bq = rx_ring->sbq_base;

	memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
	for (i = 0; i < rx_ring->sbq_len; i++) {
		sbq_desc = &rx_ring->sbq[i];
		memset(sbq_desc, 0, sizeof(*sbq_desc));
		sbq_desc->index = i;
		sbq_desc->addr = bq;
		bq++;
	}
}
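/* Both init routines above build the same indirection: bq_desc[i] is
 * the driver's per-buffer bookkeeping, while bq_desc[i]->addr points at
 * the i-th __le64 slot of the DMA-able base array, which is the array
 * the chip actually walks to find buffer addresses.
 */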
static void ql_free_rx_resources(struct ql_adapter *qdev,
				 struct rx_ring *rx_ring)
{
	/* Free the small buffer queue. */
	if (rx_ring->sbq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->sbq_size,
				    rx_ring->sbq_base, rx_ring->sbq_base_dma);
		rx_ring->sbq_base = NULL;
	}

	/* Free the small buffer queue control blocks. */
	kfree(rx_ring->sbq);
	rx_ring->sbq = NULL;

	/* Free the large buffer queue. */
	if (rx_ring->lbq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->lbq_size,
				    rx_ring->lbq_base, rx_ring->lbq_base_dma);
		rx_ring->lbq_base = NULL;
	}

	/* Free the large buffer queue control blocks. */
	kfree(rx_ring->lbq);
	rx_ring->lbq = NULL;

	/* Free the rx queue. */
	if (rx_ring->cq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->cq_size,
				    rx_ring->cq_base, rx_ring->cq_base_dma);
		rx_ring->cq_base = NULL;
	}
}
/* Allocate queues and buffers for this completion queue based
 * on the values in the parameter structure. */
static int ql_alloc_rx_resources(struct ql_adapter *qdev,
				 struct rx_ring *rx_ring)
{
	/*
	 * Allocate the completion queue for this rx_ring.
	 */
	rx_ring->cq_base =
	    pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
				 &rx_ring->cq_base_dma);

	if (rx_ring->cq_base == NULL) {
		QPRINTK(qdev, IFUP, ERR, "rx_ring alloc failed.\n");
		return -ENOMEM;
	}

	if (rx_ring->sbq_len) {
		/*
		 * Allocate small buffer queue.
		 */
		rx_ring->sbq_base =
		    pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
					 &rx_ring->sbq_base_dma);

		if (rx_ring->sbq_base == NULL) {
			QPRINTK(qdev, IFUP, ERR,
				"Small buffer queue allocation failed.\n");
			goto err_mem;
		}

		/*
		 * Allocate small buffer queue control blocks.
		 */
		rx_ring->sbq =
		    kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
			    GFP_KERNEL);
		if (rx_ring->sbq == NULL) {
			QPRINTK(qdev, IFUP, ERR,
				"Small buffer queue control block allocation failed.\n");
			goto err_mem;
		}

		ql_init_sbq_ring(qdev, rx_ring);
	}

	if (rx_ring->lbq_len) {
		/*
		 * Allocate large buffer queue.
		 */
		rx_ring->lbq_base =
		    pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
					 &rx_ring->lbq_base_dma);

		if (rx_ring->lbq_base == NULL) {
			QPRINTK(qdev, IFUP, ERR,
				"Large buffer queue allocation failed.\n");
			goto err_mem;
		}
		/*
		 * Allocate large buffer queue control blocks.
		 */
		rx_ring->lbq =
		    kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
			    GFP_KERNEL);
		if (rx_ring->lbq == NULL) {
			QPRINTK(qdev, IFUP, ERR,
				"Large buffer queue control block allocation failed.\n");
			goto err_mem;
		}

		ql_init_lbq_ring(qdev, rx_ring);
	}

	return 0;

err_mem:
	ql_free_rx_resources(qdev, rx_ring);
	return -ENOMEM;
}
static void ql_tx_ring_clean(struct ql_adapter *qdev)
{
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;
	int i, j;

	/*
	 * Loop through all queues and free
	 * any lingering transmit resources.
	 */
	for (j = 0; j < qdev->tx_ring_count; j++) {
		tx_ring = &qdev->tx_ring[j];
		for (i = 0; i < tx_ring->wq_len; i++) {
			tx_ring_desc = &tx_ring->q[i];
			if (tx_ring_desc && tx_ring_desc->skb) {
				QPRINTK(qdev, IFDOWN, ERR,
					"Freeing lost SKB %p, from queue %d, index %d.\n",
					tx_ring_desc->skb, j,
					tx_ring_desc->index);
				ql_unmap_send(qdev, tx_ring_desc,
					      tx_ring_desc->map_cnt);
				dev_kfree_skb(tx_ring_desc->skb);
				tx_ring_desc->skb = NULL;
			}
		}
	}
}
static void ql_free_mem_resources(struct ql_adapter *qdev)
{
	int i;

	for (i = 0; i < qdev->tx_ring_count; i++)
		ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
	for (i = 0; i < qdev->rx_ring_count; i++)
		ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
	ql_free_shadow_space(qdev);
}
static int ql_alloc_mem_resources(struct ql_adapter *qdev)
{
	int i;

	/* Allocate space for our shadow registers and such. */
	if (ql_alloc_shadow_space(qdev))
		return -ENOMEM;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
			QPRINTK(qdev, IFUP, ERR,
				"RX resource allocation failed.\n");
			goto err_mem;
		}
	}
	/* Allocate tx queue resources */
	for (i = 0; i < qdev->tx_ring_count; i++) {
		if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
			QPRINTK(qdev, IFUP, ERR,
				"TX resource allocation failed.\n");
			goto err_mem;
		}
	}
	return 0;

err_mem:
	ql_free_mem_resources(qdev);
	return -ENOMEM;
}
/* Set up the rx ring control block and pass it to the chip.
 * The control block is defined as
 * "Completion Queue Initialization Control Block", or cqicb.
 */
static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	struct cqicb *cqicb = &rx_ring->cqicb;
	void *shadow_reg = qdev->rx_ring_shadow_reg_area +
	    (rx_ring->cq_id * sizeof(u64) * 4);
	u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
	    (rx_ring->cq_id * sizeof(u64) * 4);
	void __iomem *doorbell_area =
	    qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
	int err = 0;
	u16 bq_len;

	/* Set up the shadow registers for this ring. */
	rx_ring->prod_idx_sh_reg = shadow_reg;
	rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
	shadow_reg += sizeof(u64);
	shadow_reg_dma += sizeof(u64);
	rx_ring->lbq_base_indirect = shadow_reg;
	rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
	shadow_reg += sizeof(u64);
	shadow_reg_dma += sizeof(u64);
	rx_ring->sbq_base_indirect = shadow_reg;
	rx_ring->sbq_base_indirect_dma = shadow_reg_dma;

	/* PCI doorbell mem area + 0x00 for consumer index register */
	rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
	rx_ring->cnsmr_idx = 0;
	rx_ring->curr_entry = rx_ring->cq_base;

	/* PCI doorbell mem area + 0x04 for valid register */
	rx_ring->valid_db_reg = doorbell_area + 0x04;

	/* PCI doorbell mem area + 0x18 for large buffer consumer */
	rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);

	/* PCI doorbell mem area + 0x1c */
	rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);

	memset((void *)cqicb, 0, sizeof(struct cqicb));
	cqicb->msix_vect = rx_ring->irq;

	bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
	cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);

	cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);

	cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);

	/*
	 * Set up the control block load flags.
	 */
	cqicb->flags = FLAGS_LC |	/* Load queue base address */
	    FLAGS_LV |			/* Load MSI-X vector */
	    FLAGS_LI;			/* Load irq delay values */
	if (rx_ring->lbq_len) {
		cqicb->flags |= FLAGS_LL;	/* Load lbq values */
		*((u64 *) rx_ring->lbq_base_indirect) = rx_ring->lbq_base_dma;
		cqicb->lbq_addr =
		    cpu_to_le64(rx_ring->lbq_base_indirect_dma);
		bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
		    (u16) rx_ring->lbq_buf_size;
		cqicb->lbq_buf_size = cpu_to_le16(bq_len);
		bq_len = (rx_ring->lbq_len == 65536) ? 0 :
		    (u16) rx_ring->lbq_len;
		cqicb->lbq_len = cpu_to_le16(bq_len);
		rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_curr_idx = 0;
		rx_ring->lbq_clean_idx = 0;
		rx_ring->lbq_free_cnt = rx_ring->lbq_len;
	}
	if (rx_ring->sbq_len) {
		cqicb->flags |= FLAGS_LS;	/* Load sbq values */
		*((u64 *) rx_ring->sbq_base_indirect) = rx_ring->sbq_base_dma;
		cqicb->sbq_addr =
		    cpu_to_le64(rx_ring->sbq_base_indirect_dma);
		cqicb->sbq_buf_size =
		    cpu_to_le16(((rx_ring->sbq_buf_size / 2) + 8) & 0xfffffff8);
		bq_len = (rx_ring->sbq_len == 65536) ? 0 :
		    (u16) rx_ring->sbq_len;
		cqicb->sbq_len = cpu_to_le16(bq_len);
		rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_curr_idx = 0;
		rx_ring->sbq_clean_idx = 0;
		rx_ring->sbq_free_cnt = rx_ring->sbq_len;
	}
	switch (rx_ring->type) {
	case TX_Q:
		/* If there's only one interrupt, then we use
		 * worker threads to process the outbound
		 * completion handling rx_rings.  We do this so
		 * they can be run on multiple CPUs.  There is
		 * room to play with this more where we would only
		 * run in a worker if there are more than x number
		 * of outbound completions on the queue and more
		 * than one queue active.  Some threshold that
		 * would indicate a benefit in spite of the cost
		 * of a context switch.
		 * If there's more than one interrupt, then the
		 * outbound completions are processed in the ISR.
		 */
		if (!test_bit(QL_MSIX_ENABLED, &qdev->flags))
			INIT_DELAYED_WORK(&rx_ring->rx_work, ql_tx_clean);
		else {
			/* With all debug warnings on we see a WARN_ON message
			 * when we free the skb in the interrupt context.
			 */
			INIT_DELAYED_WORK(&rx_ring->rx_work, ql_tx_clean);
		}
		cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
		cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
		break;
	case DEFAULT_Q:
		INIT_DELAYED_WORK(&rx_ring->rx_work, ql_rx_clean);
		cqicb->irq_delay = 0;
		cqicb->pkt_delay = 0;
		break;
	case RX_Q:
		/* Inbound completion handling rx_rings run in
		 * separate NAPI contexts.
		 */
		netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
			       64);
		cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
		cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
		break;
	default:
		QPRINTK(qdev, IFUP, DEBUG, "Invalid rx_ring->type = %d.\n",
			rx_ring->type);
	}
	QPRINTK(qdev, IFUP, DEBUG, "Initializing rx work queue.\n");
	err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
			   CFG_LCQ, rx_ring->cq_id);
	if (err) {
		QPRINTK(qdev, IFUP, ERR, "Failed to load CQICB.\n");
		return err;
	}
	return err;
}
static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
{
	struct wqicb *wqicb = (struct wqicb *)tx_ring;
	void __iomem *doorbell_area =
	    qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
	void *shadow_reg = qdev->tx_ring_shadow_reg_area +
	    (tx_ring->wq_id * sizeof(u64));
	u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
	    (tx_ring->wq_id * sizeof(u64));
	int err = 0;

	/*
	 * Assign doorbell registers for this tx_ring.
	 */
	/* TX PCI doorbell mem area for tx producer index */
	tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
	tx_ring->prod_idx = 0;
	/* TX PCI doorbell mem area + 0x04 */
	tx_ring->valid_db_reg = doorbell_area + 0x04;

	/*
	 * Assign shadow registers for this tx_ring.
	 */
	tx_ring->cnsmr_idx_sh_reg = shadow_reg;
	tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;

	wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
	wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
				   Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
	wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);

	wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);

	wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);

	ql_init_tx_ring(qdev, tx_ring);

	err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
			   (u16) tx_ring->wq_id);
	if (err) {
		QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n");
		return err;
	}
	QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded WQICB.\n");
	return err;
}
static void ql_disable_msix(struct ql_adapter *qdev)
{
	if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
		pci_disable_msix(qdev->pdev);
		clear_bit(QL_MSIX_ENABLED, &qdev->flags);
		kfree(qdev->msi_x_entry);
		qdev->msi_x_entry = NULL;
	} else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
		pci_disable_msi(qdev->pdev);
		clear_bit(QL_MSI_ENABLED, &qdev->flags);
	}
}
static void ql_enable_msix(struct ql_adapter *qdev)
{
	int i;

	qdev->intr_count = 1;
	/* Get the MSIX vectors. */
	if (irq_type == MSIX_IRQ) {
		/* Try to alloc space for the msix struct,
		 * if it fails then go to MSI/legacy.
		 */
		qdev->msi_x_entry = kcalloc(qdev->rx_ring_count,
					    sizeof(struct msix_entry),
					    GFP_KERNEL);
		if (!qdev->msi_x_entry) {
			irq_type = MSI_IRQ;
			goto msi;
		}

		for (i = 0; i < qdev->rx_ring_count; i++)
			qdev->msi_x_entry[i].entry = i;

		if (!pci_enable_msix
		    (qdev->pdev, qdev->msi_x_entry, qdev->rx_ring_count)) {
			set_bit(QL_MSIX_ENABLED, &qdev->flags);
			qdev->intr_count = qdev->rx_ring_count;
			QPRINTK(qdev, IFUP, DEBUG,
				"MSI-X Enabled, got %d vectors.\n",
				qdev->intr_count);
			return;
		} else {
			kfree(qdev->msi_x_entry);
			qdev->msi_x_entry = NULL;
			QPRINTK(qdev, IFUP, WARNING,
				"MSI-X Enable failed, trying MSI.\n");
			irq_type = MSI_IRQ;
		}
	}
msi:
	if (irq_type == MSI_IRQ) {
		if (!pci_enable_msi(qdev->pdev)) {
			set_bit(QL_MSI_ENABLED, &qdev->flags);
			QPRINTK(qdev, IFUP, INFO,
				"Running with MSI interrupts.\n");
			return;
		}
	}
	irq_type = LEG_IRQ;
	QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n");
}
/*
 * Here we build the intr_context structures based on
 * our rx_ring count and intr vector count.
 * The intr_context structure is used to hook each vector
 * to possibly different handlers.
 */
static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
{
	int i = 0;
	struct intr_context *intr_context = &qdev->intr_context[0];

	ql_enable_msix(qdev);

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Each rx_ring has its
		 * own intr_context since we have separate
		 * vectors for each queue.
		 * This is only true when MSI-X is enabled.
		 */
		for (i = 0; i < qdev->intr_count; i++, intr_context++) {
			qdev->rx_ring[i].irq = i;
			intr_context->intr = i;
			intr_context->qdev = qdev;
			/*
			 * We set up each vector's enable/disable/read bits so
			 * there's no bit/mask calculations in the critical path.
			 */
			intr_context->intr_en_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
			    | i;
			intr_context->intr_dis_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
			    INTR_EN_IHD | i;
			intr_context->intr_read_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
			    i;

			if (i == 0) {
				/*
				 * Default queue handles bcast/mcast plus
				 * async events.  Needs buffers.
				 */
				intr_context->handler = qlge_isr;
				sprintf(intr_context->name, "%s-default-queue",
					qdev->ndev->name);
			} else if (i < qdev->rss_ring_first_cq_id) {
				/*
				 * Outbound queue is for outbound completions only.
				 */
				intr_context->handler = qlge_msix_tx_isr;
				sprintf(intr_context->name, "%s-tx-%d",
					qdev->ndev->name, i);
			} else {
				/*
				 * Inbound queues handle unicast frames only.
				 */
				intr_context->handler = qlge_msix_rx_isr;
				sprintf(intr_context->name, "%s-rx-%d",
					qdev->ndev->name, i);
			}
		}
	} else {
		/*
		 * All rx_rings use the same intr_context since
		 * there is only one vector.
		 */
		intr_context->intr = 0;
		intr_context->qdev = qdev;
		/*
		 * We set up each vector's enable/disable/read bits so
		 * there's no bit/mask calculations in the critical path.
		 */
		intr_context->intr_en_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
		intr_context->intr_dis_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
		    INTR_EN_TYPE_DISABLE;
		intr_context->intr_read_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
		/*
		 * Single interrupt means one handler for all rings.
		 */
		intr_context->handler = qlge_isr;
		sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
		for (i = 0; i < qdev->rx_ring_count; i++)
			qdev->rx_ring[i].irq = 0;
	}
}
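/* Example of the precomputed masks above for vector i == 2 with MSI-X:
 * intr_en_mask == INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
 * INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD | 2.  The hot
 * path can then enable, disable, or read vector 2 with a single
 * register write and no per-interrupt bit arithmetic.
 */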
static void ql_free_irq(struct ql_adapter *qdev)
{
	int i;
	struct intr_context *intr_context = &qdev->intr_context[0];

	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
		if (intr_context->hooked) {
			if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
				free_irq(qdev->msi_x_entry[i].vector,
					 &qdev->rx_ring[i]);
				QPRINTK(qdev, IFDOWN, DEBUG,
					"freeing msix interrupt %d.\n", i);
			} else {
				free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
				QPRINTK(qdev, IFDOWN, DEBUG,
					"freeing msi interrupt %d.\n", i);
			}
		}
	}
	ql_disable_msix(qdev);
}
static int ql_request_irq(struct ql_adapter *qdev)
{
	int i;
	int status = 0;
	struct pci_dev *pdev = qdev->pdev;
	struct intr_context *intr_context = &qdev->intr_context[0];

	ql_resolve_queues_to_irqs(qdev);

	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
		atomic_set(&intr_context->irq_cnt, 0);
		if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
			status = request_irq(qdev->msi_x_entry[i].vector,
					     intr_context->handler,
					     0,
					     intr_context->name,
					     &qdev->rx_ring[i]);
			if (status) {
				QPRINTK(qdev, IFUP, ERR,
					"Failed request for MSIX interrupt %d.\n",
					i);
				goto err_irq;
			} else {
				QPRINTK(qdev, IFUP, DEBUG,
					"Hooked intr %d, queue type %s%s%s, with name %s.\n",
					i,
					qdev->rx_ring[i].type ==
					DEFAULT_Q ? "DEFAULT_Q" : "",
					qdev->rx_ring[i].type ==
					TX_Q ? "TX_Q" : "",
					qdev->rx_ring[i].type ==
					RX_Q ? "RX_Q" : "", intr_context->name);
			}
		} else {
			QPRINTK(qdev, IFUP, DEBUG,
				"trying msi or legacy interrupts.\n");
			QPRINTK(qdev, IFUP, DEBUG,
				"%s: irq = %d.\n", __func__, pdev->irq);
			QPRINTK(qdev, IFUP, DEBUG,
				"%s: context->name = %s.\n", __func__,
				intr_context->name);
			QPRINTK(qdev, IFUP, DEBUG,
				"%s: dev_id = 0x%p.\n", __func__,
				&qdev->rx_ring[0]);
			status =
			    request_irq(pdev->irq, qlge_isr,
					test_bit(QL_MSI_ENABLED,
						 &qdev->
						 flags) ? 0 : IRQF_SHARED,
					intr_context->name, &qdev->rx_ring[0]);
			if (status)
				goto err_irq;

			QPRINTK(qdev, IFUP, ERR,
				"Hooked intr %d, queue type %s%s%s, with name %s.\n",
				i,
				qdev->rx_ring[0].type ==
				DEFAULT_Q ? "DEFAULT_Q" : "",
				qdev->rx_ring[0].type == TX_Q ? "TX_Q" : "",
				qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
				intr_context->name);
		}
		intr_context->hooked = 1;
	}
	return status;
err_irq:
	QPRINTK(qdev, IFUP, ERR, "Failed to get the interrupts!!!\n");
	ql_free_irq(qdev);
	return status;
}
static int ql_start_rss(struct ql_adapter *qdev)
{
	struct ricb *ricb = &qdev->ricb;
	int status = 0;
	int i;
	u8 *hash_id = (u8 *) ricb->hash_cq_id;

	memset((void *)ricb, 0, sizeof(*ricb));

	ricb->base_cq = qdev->rss_ring_first_cq_id | RSS_L4K;
	ricb->flags =
	    (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RI4 | RSS_RI6 | RSS_RT4 |
	     RSS_RT6);
	ricb->mask = cpu_to_le16(qdev->rss_ring_count - 1);

	/*
	 * Fill out the Indirection Table.
	 */
	for (i = 0; i < 256; i++)
		hash_id[i] = i & (qdev->rss_ring_count - 1);

	/*
	 * Random values for the IPv6 and IPv4 Hash Keys.
	 */
	get_random_bytes((void *)&ricb->ipv6_hash_key[0], 40);
	get_random_bytes((void *)&ricb->ipv4_hash_key[0], 16);

	QPRINTK(qdev, IFUP, DEBUG, "Initializing RSS.\n");

	status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
	if (status) {
		QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n");
		return status;
	}
	QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded RICB.\n");
	return status;
}
/* Initialize the frame-to-queue routing. */
static int ql_route_initialize(struct ql_adapter *qdev)
{
	int status = 0;
	int i;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return status;

	/* Clear all the entries in the routing table. */
	for (i = 0; i < 16; i++) {
		status = ql_set_routing_reg(qdev, i, 0, 0);
		if (status) {
			QPRINTK(qdev, IFUP, ERR,
				"Failed to init routing register for CAM packets.\n");
			goto exit;
		}
	}

	status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
	if (status) {
		QPRINTK(qdev, IFUP, ERR,
			"Failed to init routing register for error packets.\n");
		goto exit;
	}
	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
	if (status) {
		QPRINTK(qdev, IFUP, ERR,
			"Failed to init routing register for broadcast packets.\n");
		goto exit;
	}
	/* If we have more than one inbound queue, then turn on RSS in the
	 * routing block.
	 */
	if (qdev->rss_ring_count > 1) {
		status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
					    RT_IDX_RSS_MATCH, 1);
		if (status) {
			QPRINTK(qdev, IFUP, ERR,
				"Failed to init routing register for MATCH RSS packets.\n");
			goto exit;
		}
	}

	status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
				    RT_IDX_CAM_HIT, 1);
	if (status)
		QPRINTK(qdev, IFUP, ERR,
			"Failed to init routing register for CAM packets.\n");
exit:
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}
static int ql_cam_route_initialize(struct ql_adapter *qdev)
{
	int status;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) qdev->ndev->perm_addr,
				     MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	if (status) {
		QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n");
		return status;
	}

	status = ql_route_initialize(qdev);
	if (status)
		QPRINTK(qdev, IFUP, ERR, "Failed to init routing table.\n");

	return status;
}
static int ql_adapter_initialize(struct ql_adapter *qdev)
{
	u32 value, mask;
	int i;
	int status = 0;

	/*
	 * Set up the System register to halt on errors.
	 */
	value = SYS_EFE | SYS_FAE;
	mask = value << 16;
	ql_write32(qdev, SYS, mask | value);

	/* Set the default queue. */
	value = NIC_RCV_CFG_DFQ;
	mask = NIC_RCV_CFG_DFQ_MASK;
	ql_write32(qdev, NIC_RCV_CFG, (mask | value));

	/* Set the MPI interrupt to enabled. */
	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);

	/* Enable the function, set pagesize, enable error checking. */
	value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
	    FSC_EC | FSC_VM_PAGE_4K | FSC_SH;

	/* Set/clear header splitting. */
	mask = FSC_VM_PAGESIZE_MASK |
	    FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
	ql_write32(qdev, FSC, mask | value);

	ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP |
		   min(SMALL_BUFFER_SIZE, MAX_SPLIT_SIZE));

	/* Start up the rx queues. */
	for (i = 0; i < qdev->rx_ring_count; i++) {
		status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
		if (status) {
			QPRINTK(qdev, IFUP, ERR,
				"Failed to start rx ring[%d].\n", i);
			return status;
		}
	}

	/* If there is more than one inbound completion queue
	 * then download a RICB to configure RSS.
	 */
	if (qdev->rss_ring_count > 1) {
		status = ql_start_rss(qdev);
		if (status) {
			QPRINTK(qdev, IFUP, ERR, "Failed to start RSS.\n");
			return status;
		}
	}

	/* Start up the tx queues. */
	for (i = 0; i < qdev->tx_ring_count; i++) {
		status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
		if (status) {
			QPRINTK(qdev, IFUP, ERR,
				"Failed to start tx ring[%d].\n", i);
			return status;
		}
	}

	/* Initialize the port and set the max framesize. */
	status = qdev->nic_ops->port_initialize(qdev);
	if (status) {
		QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
		return status;
	}

	/* Set up the MAC address and frame routing filter. */
	status = ql_cam_route_initialize(qdev);
	if (status) {
		QPRINTK(qdev, IFUP, ERR,
			"Failed to init CAM/Routing tables.\n");
		return status;
	}

	/* Start NAPI for the RSS queues. */
	for (i = qdev->rss_ring_first_cq_id; i < qdev->rx_ring_count; i++) {
		QPRINTK(qdev, IFUP, DEBUG, "Enabling NAPI for rx_ring[%d].\n",
			i);
		napi_enable(&qdev->rx_ring[i].napi);
	}

	return status;
}
/* Issue soft reset to chip. */
static int ql_adapter_reset(struct ql_adapter *qdev)
{
	u32 value;
	int max_wait_time;
	int status = 0;
	int resetCnt = 0;

#define MAX_RESET_CNT 1
issue_reset:
	resetCnt++;
	QPRINTK(qdev, IFDOWN, DEBUG, "Issue soft reset to chip.\n");
	ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
	/* Wait for reset to complete. */
	max_wait_time = 3;
	QPRINTK(qdev, IFDOWN, DEBUG, "Wait %d seconds for reset to complete.\n",
		max_wait_time);
	do {
		value = ql_read32(qdev, RST_FO);
		if ((value & RST_FO_FR) == 0)
			break;

		ssleep(1);
	} while ((--max_wait_time));
	if (value & RST_FO_FR) {
		QPRINTK(qdev, IFDOWN, ERR,
			"Stuck in SoftReset:  FSC_SR:0x%08x\n", value);
		if (resetCnt < MAX_RESET_CNT)
			goto issue_reset;
	}
	if (max_wait_time == 0) {
		status = -ETIMEDOUT;
		QPRINTK(qdev, IFDOWN, ERR,
			"ETIMEDOUT!!! errored out of resetting the chip!\n");
	}

	return status;
}
static void ql_display_dev_info(struct net_device *ndev)
{
	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);

	QPRINTK(qdev, PROBE, INFO,
		"Function #%d, NIC Roll %d, NIC Rev = %d, "
		"XG Roll = %d, XG Rev = %d.\n",
		qdev->func,
		qdev->chip_rev_id & 0x0000000f,
		qdev->chip_rev_id >> 4 & 0x0000000f,
		qdev->chip_rev_id >> 8 & 0x0000000f,
		qdev->chip_rev_id >> 12 & 0x0000000f);
	QPRINTK(qdev, PROBE, INFO, "MAC address %pM\n", ndev->dev_addr);
}
static int ql_adapter_down(struct ql_adapter *qdev)
{
	struct net_device *ndev = qdev->ndev;
	int i, status = 0;
	struct rx_ring *rx_ring;

	netif_stop_queue(ndev);
	netif_carrier_off(ndev);

	/* Don't kill the reset worker thread if we
	 * are in the process of recovery.
	 */
	if (test_bit(QL_ADAPTER_UP, &qdev->flags))
		cancel_delayed_work_sync(&qdev->asic_reset_work);
	cancel_delayed_work_sync(&qdev->mpi_reset_work);
	cancel_delayed_work_sync(&qdev->mpi_work);

	/* The default queue at index 0 is always processed in
	 * a workqueue.
	 */
	cancel_delayed_work_sync(&qdev->rx_ring[0].rx_work);

	/* The rest of the rx_rings are processed in
	 * a workqueue only if it's a single interrupt
	 * environment (MSI/Legacy).
	 */
	for (i = 1; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		/* Only the RSS rings use NAPI on multi irq
		 * environment.  Outbound completion processing
		 * is done in interrupt context.
		 */
		if (i >= qdev->rss_ring_first_cq_id) {
			napi_disable(&rx_ring->napi);
		} else {
			cancel_delayed_work_sync(&rx_ring->rx_work);
		}
	}

	clear_bit(QL_ADAPTER_UP, &qdev->flags);

	ql_disable_interrupts(qdev);

	ql_tx_ring_clean(qdev);

	ql_free_rx_buffers(qdev);
	spin_lock(&qdev->hw_lock);
	status = ql_adapter_reset(qdev);
	if (status)
		QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n",
			qdev->func);
	spin_unlock(&qdev->hw_lock);
	return status;
}
static int ql_adapter_up(struct ql_adapter *qdev)
{
	int err = 0;

	spin_lock(&qdev->hw_lock);
	err = ql_adapter_initialize(qdev);
	if (err) {
		QPRINTK(qdev, IFUP, INFO, "Unable to initialize adapter.\n");
		spin_unlock(&qdev->hw_lock);
		goto err_init;
	}
	spin_unlock(&qdev->hw_lock);
	set_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_alloc_rx_buffers(qdev);
	ql_enable_interrupts(qdev);
	ql_enable_all_completion_interrupts(qdev);
	if ((ql_read32(qdev, STS) & qdev->port_init)) {
		netif_carrier_on(qdev->ndev);
		netif_start_queue(qdev->ndev);
	}

	return 0;
err_init:
	ql_adapter_reset(qdev);
	return err;
}
static int ql_cycle_adapter(struct ql_adapter *qdev)
{
	int status;

	status = ql_adapter_down(qdev);
	if (status)
		goto error;

	status = ql_adapter_up(qdev);
	if (status)
		goto error;

	return status;
error:
	QPRINTK(qdev, IFUP, ALERT,
		"Driver up/down cycle failed, closing device\n");
	rtnl_lock();
	dev_close(qdev->ndev);
	rtnl_unlock();
	return status;
}
static void ql_release_adapter_resources(struct ql_adapter *qdev)
{
	ql_free_mem_resources(qdev);
	ql_free_irq(qdev);
}
static int ql_get_adapter_resources(struct ql_adapter *qdev)
{
	int status = 0;

	if (ql_alloc_mem_resources(qdev)) {
		QPRINTK(qdev, IFUP, ERR, "Unable to allocate memory.\n");
		return -ENOMEM;
	}
	status = ql_request_irq(qdev);
	if (status)
		goto err_irq;
	return status;
err_irq:
	ql_free_mem_resources(qdev);
	return status;
}
static int qlge_close(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	/*
	 * Wait for device to recover from a reset.
	 * (Rarely happens, but possible.)
	 */
	while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
		msleep(1);
	ql_adapter_down(qdev);
	ql_release_adapter_resources(qdev);
	return 0;
}
static int ql_configure_rings(struct ql_adapter *qdev)
{
	int i;
	struct rx_ring *rx_ring;
	struct tx_ring *tx_ring;
	int cpu_cnt = num_online_cpus();

	/*
	 * For each processor present we allocate one
	 * rx_ring for outbound completions, and one
	 * rx_ring for inbound completions.  Plus there is
	 * always the one default queue.  For the CPU
	 * counts we end up with the following rx_rings:
	 * rx_ring count =
	 *  one default queue +
	 *  (CPU count * outbound completion rx_ring) +
	 *  (CPU count * inbound (RSS) completion rx_ring)
	 * To keep it simple we limit the total number of
	 * queues to < 32, so we truncate CPU to 8.
	 * This limitation can be removed when requested.
	 */

	if (cpu_cnt > MAX_CPUS)
		cpu_cnt = MAX_CPUS;

	/*
	 * rx_ring[0] is always the default queue.
	 */
	/* Allocate outbound completion ring for each CPU. */
	qdev->tx_ring_count = cpu_cnt;
	/* Allocate inbound completion (RSS) ring for each CPU. */
	qdev->rss_ring_count = cpu_cnt;
	/* cq_id for the first inbound ring handler. */
	qdev->rss_ring_first_cq_id = cpu_cnt + 1;
	/*
	 * qdev->rx_ring_count:
	 * Total number of rx_rings.  This includes the one
	 * default queue, a number of outbound completion
	 * handler rx_rings, and the number of inbound
	 * completion handler rx_rings.
	 */
	qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count + 1;

	for (i = 0; i < qdev->tx_ring_count; i++) {
		tx_ring = &qdev->tx_ring[i];
		memset((void *)tx_ring, 0, sizeof(*tx_ring));
		tx_ring->qdev = qdev;
		tx_ring->wq_id = i;
		tx_ring->wq_len = qdev->tx_ring_size;
		tx_ring->wq_size =
		    tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);

		/*
		 * The completion queue ID for the tx rings start
		 * immediately after the default Q ID, which is zero.
		 */
		tx_ring->cq_id = i + 1;
	}

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		memset((void *)rx_ring, 0, sizeof(*rx_ring));
		rx_ring->qdev = qdev;
		rx_ring->cq_id = i;
		rx_ring->cpu = i % cpu_cnt;	/* CPU to run handler on. */
		if (i == 0) {	/* Default queue at index 0. */
			/*
			 * Default queue handles bcast/mcast plus
			 * async events.  Needs buffers.
			 */
			rx_ring->cq_len = qdev->rx_ring_size;
			rx_ring->cq_size =
			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
			rx_ring->lbq_len = NUM_LARGE_BUFFERS;
			rx_ring->lbq_size =
			    rx_ring->lbq_len * sizeof(__le64);
			rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
			rx_ring->sbq_len = NUM_SMALL_BUFFERS;
			rx_ring->sbq_size =
			    rx_ring->sbq_len * sizeof(__le64);
			rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
			rx_ring->type = DEFAULT_Q;
		} else if (i < qdev->rss_ring_first_cq_id) {
			/*
			 * Outbound queue handles outbound completions only.
			 */
			/* outbound cq is same size as tx_ring it services. */
			rx_ring->cq_len = qdev->tx_ring_size;
			rx_ring->cq_size =
			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
			rx_ring->lbq_len = 0;
			rx_ring->lbq_size = 0;
			rx_ring->lbq_buf_size = 0;
			rx_ring->sbq_len = 0;
			rx_ring->sbq_size = 0;
			rx_ring->sbq_buf_size = 0;
			rx_ring->type = TX_Q;
		} else {	/* Inbound completions (RSS) queues */
			/*
			 * Inbound queues handle unicast frames only.
			 */
			rx_ring->cq_len = qdev->rx_ring_size;
			rx_ring->cq_size =
			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
			rx_ring->lbq_len = NUM_LARGE_BUFFERS;
			rx_ring->lbq_size =
			    rx_ring->lbq_len * sizeof(__le64);
			rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
			rx_ring->sbq_len = NUM_SMALL_BUFFERS;
			rx_ring->sbq_size =
			    rx_ring->sbq_len * sizeof(__le64);
			rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
			rx_ring->type = RX_Q;
		}
	}
	return 0;
}
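/* Worked example of the ring math above, assuming 4 online CPUs:
 * tx_ring_count = 4, rss_ring_count = 4, rss_ring_first_cq_id = 5, and
 * rx_ring_count = 4 + 4 + 1 = 9.  rx_ring[0] is the default queue,
 * rx_ring[1..4] service the four tx rings' outbound completions, and
 * rx_ring[5..8] are the inbound RSS queues.
 */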
static int qlge_open(struct net_device *ndev)
{
	int err = 0;
	struct ql_adapter *qdev = netdev_priv(ndev);

	err = ql_configure_rings(qdev);
	if (err)
		return err;

	err = ql_get_adapter_resources(qdev);
	if (err)
		goto error_up;

	err = ql_adapter_up(qdev);
	if (err)
		goto error_up;

	return err;

error_up:
	ql_release_adapter_resources(qdev);
	return err;
}
static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (ndev->mtu == 1500 && new_mtu == 9000) {
		QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
	} else if (ndev->mtu == 9000 && new_mtu == 1500) {
		QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
	} else if ((ndev->mtu == 1500 && new_mtu == 1500) ||
		   (ndev->mtu == 9000 && new_mtu == 9000)) {
		return 0;
	} else
		return -EINVAL;
	ndev->mtu = new_mtu;
	return 0;
}
static struct net_device_stats *qlge_get_stats(struct net_device
					       *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	return &qdev->stats;
}
static void qlge_set_multicast_list(struct net_device *ndev)
{
	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
	struct dev_mc_list *mc_ptr;
	int i, status;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return;
	spin_lock(&qdev->hw_lock);
	/*
	 * Set or clear promiscuous mode if a
	 * transition is taking place.
	 */
	if (ndev->flags & IFF_PROMISC) {
		if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
				QPRINTK(qdev, HW, ERR,
					"Failed to set promiscuous mode.\n");
			} else {
				set_bit(QL_PROMISCUOUS, &qdev->flags);
			}
		}
	} else {
		if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
				QPRINTK(qdev, HW, ERR,
					"Failed to clear promiscuous mode.\n");
			} else {
				clear_bit(QL_PROMISCUOUS, &qdev->flags);
			}
		}
	}

	/*
	 * Set or clear all multicast mode if a
	 * transition is taking place.
	 */
	if ((ndev->flags & IFF_ALLMULTI) ||
	    (ndev->mc_count > MAX_MULTICAST_ENTRIES)) {
		if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
				QPRINTK(qdev, HW, ERR,
					"Failed to set all-multi mode.\n");
			} else {
				set_bit(QL_ALLMULTI, &qdev->flags);
			}
		}
	} else {
		if (test_bit(QL_ALLMULTI, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
				QPRINTK(qdev, HW, ERR,
					"Failed to clear all-multi mode.\n");
			} else {
				clear_bit(QL_ALLMULTI, &qdev->flags);
			}
		}
	}

	if (ndev->mc_count) {
		status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
		if (status)
			goto exit;
		for (i = 0, mc_ptr = ndev->mc_list; mc_ptr;
		     i++, mc_ptr = mc_ptr->next)
			if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr,
						MAC_ADDR_TYPE_MULTI_MAC, i)) {
				QPRINTK(qdev, HW, ERR,
					"Failed to load multicast address.\n");
				ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
				goto exit;
			}
		ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
		if (ql_set_routing_reg
		    (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
			QPRINTK(qdev, HW, ERR,
				"Failed to set multicast match mode.\n");
		} else {
			set_bit(QL_ALLMULTI, &qdev->flags);
		}
	}
exit:
	spin_unlock(&qdev->hw_lock);
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
}
static int qlge_set_mac_address(struct net_device *ndev, void *p)
{
	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
	struct sockaddr *addr = p;
	int status;

	if (netif_running(ndev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	spin_lock(&qdev->hw_lock);
	status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
				     MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	spin_unlock(&qdev->hw_lock);
	if (status)
		QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	return status;
}
static void qlge_tx_timeout(struct net_device *ndev)
{
	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
	ql_queue_asic_error(qdev);
}
static void ql_asic_reset_work(struct work_struct *work)
{
	struct ql_adapter *qdev =
	    container_of(work, struct ql_adapter, asic_reset_work.work);
	ql_cycle_adapter(qdev);
}
static struct nic_operations qla8012_nic_ops = {
	.get_flash		= ql_get_8012_flash_params,
	.port_initialize	= ql_8012_port_initialize,
};
static void ql_get_board_info(struct ql_adapter *qdev)
{
	qdev->func =
	    (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
	if (qdev->func) {
		qdev->xg_sem_mask = SEM_XGMAC1_MASK;
		qdev->port_link_up = STS_PL1;
		qdev->port_init = STS_PI1;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
	} else {
		qdev->xg_sem_mask = SEM_XGMAC0_MASK;
		qdev->port_link_up = STS_PL0;
		qdev->port_init = STS_PI0;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
	}
	qdev->chip_rev_id = ql_read32(qdev, REV_ID);
	qdev->device_id = qdev->pdev->device;
	if (qdev->device_id == QLGE_DEVICE_ID_8012)
		qdev->nic_ops = &qla8012_nic_ops;
}
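/* The chip reports which PCI function this port is via the STS
 * register: function 1 takes the XGMAC1 semaphore and the second set
 * of mailbox and link-status bits, function 0 the first set.  The rest
 * of the driver keys off the fields cached here.
 */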
static void ql_release_all(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (qdev->workqueue) {
		destroy_workqueue(qdev->workqueue);
		qdev->workqueue = NULL;
	}
	if (qdev->q_workqueue) {
		destroy_workqueue(qdev->q_workqueue);
		qdev->q_workqueue = NULL;
	}
	if (qdev->reg_base)
		iounmap(qdev->reg_base);
	if (qdev->doorbell_area)
		iounmap(qdev->doorbell_area);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
}
static int __devinit ql_init_device(struct pci_dev *pdev,
				    struct net_device *ndev, int cards_found)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int pos, err = 0;
	u16 val16;

	memset((void *)qdev, 0, sizeof(*qdev));
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "PCI device enable failed.\n");
		return err;
	}

	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (pos <= 0) {
		dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, "
			"aborting.\n");
		goto err_out;
	} else {
		pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
		val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
		val16 |= (PCI_EXP_DEVCTL_CERE |
			  PCI_EXP_DEVCTL_NFERE |
			  PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE);
		pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16);
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "PCI region request failed.\n");
		goto err_out;
	}

	pci_set_master(pdev);
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		set_bit(QL_DMA64, &qdev->flags);
		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
	} else {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (!err)
			err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
	}

	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration.\n");
		goto err_out;
	}

	pci_set_drvdata(pdev, ndev);
	qdev->reg_base =
	    ioremap_nocache(pci_resource_start(pdev, 1),
			    pci_resource_len(pdev, 1));
	if (!qdev->reg_base) {
		dev_err(&pdev->dev, "Register mapping failed.\n");
		err = -ENOMEM;
		goto err_out;
	}

	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
	qdev->doorbell_area =
	    ioremap_nocache(pci_resource_start(pdev, 3),
			    pci_resource_len(pdev, 3));
	if (!qdev->doorbell_area) {
		dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
		err = -ENOMEM;
		goto err_out;
	}

	qdev->ndev = ndev;
	qdev->pdev = pdev;
	ql_get_board_info(qdev);
	qdev->msg_enable = netif_msg_init(debug, default_msg);
	spin_lock_init(&qdev->hw_lock);
	spin_lock_init(&qdev->stats_lock);

	/* make sure the EEPROM is good */
	err = qdev->nic_ops->get_flash(qdev);
	if (err) {
		dev_err(&pdev->dev, "Invalid FLASH.\n");
		goto err_out;
	}

	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	/* Set up the default ring sizes. */
	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;

	/* Set up the coalescing parameters. */
	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;

	/*
	 * Set up the operating parameters.
	 */
	qdev->rx_csum = 1;
	qdev->q_workqueue = create_workqueue(ndev->name);
	qdev->workqueue = create_singlethread_workqueue(ndev->name);
	INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
	mutex_init(&qdev->mpi_mutex);

	if (!cards_found) {
		dev_info(&pdev->dev, "%s\n", DRV_STRING);
		dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
			 DRV_NAME, DRV_VERSION);
	}
	return 0;
err_out:
	ql_release_all(pdev);
	pci_disable_device(pdev);
	return err;
}
static const struct net_device_ops qlge_netdev_ops = {
	.ndo_open		= qlge_open,
	.ndo_stop		= qlge_close,
	.ndo_start_xmit		= qlge_send,
	.ndo_change_mtu		= qlge_change_mtu,
	.ndo_get_stats		= qlge_get_stats,
	.ndo_set_multicast_list	= qlge_set_multicast_list,
	.ndo_set_mac_address	= qlge_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= qlge_tx_timeout,
	.ndo_vlan_rx_register	= ql_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= ql_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ql_vlan_rx_kill_vid,
};
static int __devinit qlge_probe(struct pci_dev *pdev,
				const struct pci_device_id *pci_entry)
{
	struct net_device *ndev = NULL;
	struct ql_adapter *qdev = NULL;
	static int cards_found = 0;
	int err = 0;

	ndev = alloc_etherdev(sizeof(struct ql_adapter));
	if (!ndev)
		return -ENOMEM;

	err = ql_init_device(pdev, ndev, cards_found);
	if (err < 0) {
		free_netdev(ndev);
		return err;
	}

	qdev = netdev_priv(ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->features = (0
			  | NETIF_F_IP_CSUM
			  | NETIF_F_SG
			  | NETIF_F_TSO
			  | NETIF_F_TSO6
			  | NETIF_F_TSO_ECN
			  | NETIF_F_HW_VLAN_TX
			  | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);

	if (test_bit(QL_DMA64, &qdev->flags))
		ndev->features |= NETIF_F_HIGHDMA;

	/*
	 * Set up net_device structure.
	 */
	ndev->tx_queue_len = qdev->tx_ring_size;
	ndev->irq = pdev->irq;

	ndev->netdev_ops = &qlge_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
	ndev->watchdog_timeo = 10 * HZ;

	err = register_netdev(ndev);
	if (err) {
		dev_err(&pdev->dev, "net device registration failed.\n");
		ql_release_all(pdev);
		pci_disable_device(pdev);
		return err;
	}
	netif_carrier_off(ndev);
	netif_stop_queue(ndev);
	ql_display_dev_info(ndev);
	cards_found++;
	return 0;
}
static void __devexit qlge_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	unregister_netdev(ndev);
	ql_release_all(pdev);
	pci_disable_device(pdev);
	free_netdev(ndev);
}
/*
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 */
static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (netif_running(ndev))
		ql_adapter_down(qdev);

	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/*
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code;
 * it resembles the first half of the qlge_probe() routine.
 */
static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (pci_enable_device(pdev)) {
		QPRINTK(qdev, IFUP, ERR,
			"Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);

	netif_carrier_off(ndev);
	netif_stop_queue(ndev);
	ql_adapter_reset(qdev);

	/* Make sure the EEPROM is good */
	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	if (!is_valid_ether_addr(ndev->perm_addr)) {
		QPRINTK(qdev, IFUP, ERR, "After reset, invalid MAC address.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}
static void qlge_io_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	pci_set_master(pdev);

	if (netif_running(ndev)) {
		if (ql_adapter_up(qdev)) {
			QPRINTK(qdev, IFUP, ERR,
				"Device initialization failed after reset.\n");
			return;
		}
	}

	netif_device_attach(ndev);
}
static struct pci_error_handlers qlge_err_handler = {
	.error_detected = qlge_io_error_detected,
	.slot_reset = qlge_io_slot_reset,
	.resume = qlge_io_resume,
};
#ifdef CONFIG_PM
static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err, i;

	netif_device_detach(ndev);

	if (netif_running(ndev)) {
		err = ql_adapter_down(qdev);
		if (err)
			return err;
	}

	for (i = qdev->rss_ring_first_cq_id; i < qdev->rx_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	err = pci_save_state(pdev);
	if (err)
		return err;

	pci_disable_device(pdev);

	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
static int qlge_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		QPRINTK(qdev, IFUP, ERR, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(ndev)) {
		err = ql_adapter_up(qdev);
		if (err)
			return err;
	}

	netif_device_attach(ndev);

	return 0;
}
#endif /* CONFIG_PM */
static void qlge_shutdown(struct pci_dev *pdev)
{
	qlge_suspend(pdev, PMSG_SUSPEND);
}
static struct pci_driver qlge_driver = {
	.name = DRV_NAME,
	.id_table = qlge_pci_tbl,
	.probe = qlge_probe,
	.remove = __devexit_p(qlge_remove),
#ifdef CONFIG_PM
	.suspend = qlge_suspend,
	.resume = qlge_resume,
#endif
	.shutdown = qlge_shutdown,
	.err_handler = &qlge_err_handler
};
qlge_init_module(void)
3961 return pci_register_driver(&qlge_driver
);
3964 static void __exit
qlge_exit(void)
3966 pci_unregister_driver(&qlge_driver
);
3969 module_init(qlge_init_module
);
3970 module_exit(qlge_exit
);