/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c)  2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author:     Linux qlge network device driver by
 *                      Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>
#include <net/ip6_checksum.h>

#include "qlge.h"
char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
static const u32 default_msg =
    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/* NETIF_MSG_TIMER |	*/
/* NETIF_MSG_TX_QUEUED | */
/* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
/* NETIF_MSG_PKTDATA | */
    NETIF_MSG_HW | NETIF_MSG_WOL | 0;
static int debug = 0x00007fff;	/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int irq_type = MSIX_IRQ;
module_param(irq_type, int, 0);	/* third argument is sysfs permissions */
MODULE_PARM_DESC(irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
static struct pci_device_id qlge_pci_tbl[] __devinitdata = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
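
/* Example (illustrative only): irq_type and debug are ordinary module
 * parameters, so a load line that selects plain MSI and keeps the default
 * message level could look like
 *
 *	modprobe qlge irq_type=1 debug=0x7fff
 *
 * where the values follow the MODULE_PARM_DESC strings above.
 */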
/* This hardware semaphore causes exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
	u32 sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		QPRINTK(qdev, PROBE, ALERT, "Bad Semaphore mask!.\n");
		return -EINVAL;
	}

	ql_write32(qdev, SEM, sem_bits | sem_mask);
	return !(ql_read32(qdev, SEM) & sem_bits);
}
int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
	unsigned int wait_count = 30;
	do {
		if (!ql_sem_trylock(qdev, sem_mask))
			return 0;
		udelay(100);
	} while (--wait_count);
	return -ETIMEDOUT;
}
void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
	ql_write32(qdev, SEM, sem_mask);
	ql_read32(qdev, SEM);	/* flush */
}
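
/* Illustrative usage sketch (mirrors the flash readers later in this file):
 * callers bracket access to a shared resource with the semaphore, e.g.
 *
 *	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
 *		return -ETIMEDOUT;
 *	...touch the shared flash registers...
 *	ql_sem_unlock(qdev, SEM_FLASH_MASK);
 */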
/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialize
 * process, but is also used in kernel thread API such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
	u32 temp;
	int count = UDELAY_COUNT;

	while (count) {
		temp = ql_read32(qdev, reg);

		/* check for errors */
		if (temp & err_bit) {
			QPRINTK(qdev, PROBE, ALERT,
				"register 0x%.08x access error, value = 0x%.08x!.\n",
				reg, temp);
			return -EIO;
		} else if (temp & bit)
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	QPRINTK(qdev, PROBE, ALERT,
		"Timed out waiting for reg %x to come ready.\n", reg);
	return -ETIMEDOUT;
}
/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
	int count = UDELAY_COUNT;
	u32 temp;

	while (count) {
		temp = ql_read32(qdev, CFG);
		if (temp & CFG_LE)
			return -EIO;
		if (!(temp & bit))
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	return -ETIMEDOUT;
}
/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
		 u16 q_id)
{
	u64 map;
	int status = 0;
	int direction;
	u32 mask;
	u32 value;

	direction =
	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
	    PCI_DMA_FROMDEVICE;

	map = pci_map_single(qdev->pdev, ptr, size, direction);
	if (pci_dma_mapping_error(qdev->pdev, map)) {
		QPRINTK(qdev, IFUP, ERR, "Couldn't map DMA area.\n");
		return -ENOMEM;
	}

	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
	if (status)
		return status;

	status = ql_wait_cfg(qdev, bit);
	if (status) {
		QPRINTK(qdev, IFUP, ERR,
			"Timed out waiting for CFG to come ready.\n");
		goto exit;
	}

	ql_write32(qdev, ICB_L, (u32) map);
	ql_write32(qdev, ICB_H, (u32) (map >> 32));

	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	ql_write32(qdev, CFG, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = ql_wait_cfg(qdev, bit);
exit:
	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
	pci_unmap_single(qdev->pdev, map, size, direction);
	return status;
}
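
/* Illustrative caller sketch (shapes assumed from the ring-setup code): a
 * completion-queue init control block is pushed to the chip with something
 * like
 *
 *	err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
 *			   CFG_LCQ, rx_ring->cq_id);
 *
 * where CFG_LCQ selects the "load completion queue" operation.
 */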
245 /* Get a specific MAC address from the CAM. Used for debug and reg dump. */
246 int ql_get_mac_addr_reg(struct ql_adapter
*qdev
, u32 type
, u16 index
,
253 case MAC_ADDR_TYPE_MULTI_MAC
:
254 case MAC_ADDR_TYPE_CAM_MAC
:
257 ql_wait_reg_rdy(qdev
,
258 MAC_ADDR_IDX
, MAC_ADDR_MW
, 0);
261 ql_write32(qdev
, MAC_ADDR_IDX
, (offset
++) | /* offset */
262 (index
<< MAC_ADDR_IDX_SHIFT
) | /* index */
263 MAC_ADDR_ADR
| MAC_ADDR_RS
| type
); /* type */
265 ql_wait_reg_rdy(qdev
,
266 MAC_ADDR_IDX
, MAC_ADDR_MR
, 0);
269 *value
++ = ql_read32(qdev
, MAC_ADDR_DATA
);
271 ql_wait_reg_rdy(qdev
,
272 MAC_ADDR_IDX
, MAC_ADDR_MW
, 0);
275 ql_write32(qdev
, MAC_ADDR_IDX
, (offset
++) | /* offset */
276 (index
<< MAC_ADDR_IDX_SHIFT
) | /* index */
277 MAC_ADDR_ADR
| MAC_ADDR_RS
| type
); /* type */
279 ql_wait_reg_rdy(qdev
,
280 MAC_ADDR_IDX
, MAC_ADDR_MR
, 0);
283 *value
++ = ql_read32(qdev
, MAC_ADDR_DATA
);
284 if (type
== MAC_ADDR_TYPE_CAM_MAC
) {
286 ql_wait_reg_rdy(qdev
,
287 MAC_ADDR_IDX
, MAC_ADDR_MW
, 0);
290 ql_write32(qdev
, MAC_ADDR_IDX
, (offset
++) | /* offset */
291 (index
<< MAC_ADDR_IDX_SHIFT
) | /* index */
292 MAC_ADDR_ADR
| MAC_ADDR_RS
| type
); /* type */
294 ql_wait_reg_rdy(qdev
, MAC_ADDR_IDX
,
298 *value
++ = ql_read32(qdev
, MAC_ADDR_DATA
);
302 case MAC_ADDR_TYPE_VLAN
:
303 case MAC_ADDR_TYPE_MULTI_FLTR
:
305 QPRINTK(qdev
, IFUP
, CRIT
,
306 "Address type %d not yet supported.\n", type
);
313 /* Set up a MAC, multicast or VLAN address for the
314 * inbound frame matching.
316 static int ql_set_mac_addr_reg(struct ql_adapter
*qdev
, u8
*addr
, u32 type
,
323 case MAC_ADDR_TYPE_MULTI_MAC
:
324 case MAC_ADDR_TYPE_CAM_MAC
:
327 u32 upper
= (addr
[0] << 8) | addr
[1];
329 (addr
[2] << 24) | (addr
[3] << 16) | (addr
[4] << 8) |
332 QPRINTK(qdev
, IFUP
, DEBUG
,
333 "Adding %s address %pM"
334 " at index %d in the CAM.\n",
336 MAC_ADDR_TYPE_MULTI_MAC
) ? "MULTICAST" :
337 "UNICAST"), addr
, index
);
340 ql_wait_reg_rdy(qdev
,
341 MAC_ADDR_IDX
, MAC_ADDR_MW
, 0);
344 ql_write32(qdev
, MAC_ADDR_IDX
, (offset
++) | /* offset */
345 (index
<< MAC_ADDR_IDX_SHIFT
) | /* index */
347 ql_write32(qdev
, MAC_ADDR_DATA
, lower
);
349 ql_wait_reg_rdy(qdev
,
350 MAC_ADDR_IDX
, MAC_ADDR_MW
, 0);
353 ql_write32(qdev
, MAC_ADDR_IDX
, (offset
++) | /* offset */
354 (index
<< MAC_ADDR_IDX_SHIFT
) | /* index */
356 ql_write32(qdev
, MAC_ADDR_DATA
, upper
);
358 ql_wait_reg_rdy(qdev
,
359 MAC_ADDR_IDX
, MAC_ADDR_MW
, 0);
362 ql_write32(qdev
, MAC_ADDR_IDX
, (offset
) | /* offset */
363 (index
<< MAC_ADDR_IDX_SHIFT
) | /* index */
365 /* This field should also include the queue id
366 and possibly the function id. Right now we hardcode
367 the route field to NIC core.
369 if (type
== MAC_ADDR_TYPE_CAM_MAC
) {
370 cam_output
= (CAM_OUT_ROUTE_NIC
|
372 func
<< CAM_OUT_FUNC_SHIFT
) |
374 rss_ring_first_cq_id
<<
375 CAM_OUT_CQ_ID_SHIFT
));
377 cam_output
|= CAM_OUT_RV
;
378 /* route to NIC core */
379 ql_write32(qdev
, MAC_ADDR_DATA
, cam_output
);
383 case MAC_ADDR_TYPE_VLAN
:
385 u32 enable_bit
= *((u32
*) &addr
[0]);
386 /* For VLAN, the addr actually holds a bit that
387 * either enables or disables the vlan id we are
388 * addressing. It's either MAC_ADDR_E on or off.
389 * That's bit-27 we're talking about.
391 QPRINTK(qdev
, IFUP
, INFO
, "%s VLAN ID %d %s the CAM.\n",
392 (enable_bit
? "Adding" : "Removing"),
393 index
, (enable_bit
? "to" : "from"));
396 ql_wait_reg_rdy(qdev
,
397 MAC_ADDR_IDX
, MAC_ADDR_MW
, 0);
400 ql_write32(qdev
, MAC_ADDR_IDX
, offset
| /* offset */
401 (index
<< MAC_ADDR_IDX_SHIFT
) | /* index */
403 enable_bit
); /* enable/disable */
406 case MAC_ADDR_TYPE_MULTI_FLTR
:
408 QPRINTK(qdev
, IFUP
, CRIT
,
409 "Address type %d not yet supported.\n", type
);
/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
{
	int status = 0;

	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
	if (status)
		goto exit;

	ql_write32(qdev, RT_IDX,
		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
	if (status)
		goto exit;
	*value = ql_read32(qdev, RT_DATA);
exit:
	return status;
}
437 /* The NIC function for this chip has 16 routing indexes. Each one can be used
438 * to route different frame types to various inbound queues. We send broadcast/
439 * multicast/error frames to the default queue for slow handling,
440 * and CAM hit/RSS frames to the fast handling queues.
442 static int ql_set_routing_reg(struct ql_adapter
*qdev
, u32 index
, u32 mask
,
445 int status
= -EINVAL
; /* Return error if no mask match. */
448 QPRINTK(qdev
, IFUP
, DEBUG
,
449 "%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
450 (enable
? "Adding" : "Removing"),
451 ((index
== RT_IDX_ALL_ERR_SLOT
) ? "MAC ERROR/ALL ERROR" : ""),
452 ((index
== RT_IDX_IP_CSUM_ERR_SLOT
) ? "IP CSUM ERROR" : ""),
454 RT_IDX_TCP_UDP_CSUM_ERR_SLOT
) ? "TCP/UDP CSUM ERROR" : ""),
455 ((index
== RT_IDX_BCAST_SLOT
) ? "BROADCAST" : ""),
456 ((index
== RT_IDX_MCAST_MATCH_SLOT
) ? "MULTICAST MATCH" : ""),
457 ((index
== RT_IDX_ALLMULTI_SLOT
) ? "ALL MULTICAST MATCH" : ""),
458 ((index
== RT_IDX_UNUSED6_SLOT
) ? "UNUSED6" : ""),
459 ((index
== RT_IDX_UNUSED7_SLOT
) ? "UNUSED7" : ""),
460 ((index
== RT_IDX_RSS_MATCH_SLOT
) ? "RSS ALL/IPV4 MATCH" : ""),
461 ((index
== RT_IDX_RSS_IPV6_SLOT
) ? "RSS IPV6" : ""),
462 ((index
== RT_IDX_RSS_TCP4_SLOT
) ? "RSS TCP4" : ""),
463 ((index
== RT_IDX_RSS_TCP6_SLOT
) ? "RSS TCP6" : ""),
464 ((index
== RT_IDX_CAM_HIT_SLOT
) ? "CAM HIT" : ""),
465 ((index
== RT_IDX_UNUSED013
) ? "UNUSED13" : ""),
466 ((index
== RT_IDX_UNUSED014
) ? "UNUSED14" : ""),
467 ((index
== RT_IDX_PROMISCUOUS_SLOT
) ? "PROMISCUOUS" : ""),
468 (enable
? "to" : "from"));
473 value
= RT_IDX_DST_CAM_Q
| /* dest */
474 RT_IDX_TYPE_NICQ
| /* type */
475 (RT_IDX_CAM_HIT_SLOT
<< RT_IDX_IDX_SHIFT
);/* index */
478 case RT_IDX_VALID
: /* Promiscuous Mode frames. */
480 value
= RT_IDX_DST_DFLT_Q
| /* dest */
481 RT_IDX_TYPE_NICQ
| /* type */
482 (RT_IDX_PROMISCUOUS_SLOT
<< RT_IDX_IDX_SHIFT
);/* index */
485 case RT_IDX_ERR
: /* Pass up MAC,IP,TCP/UDP error frames. */
487 value
= RT_IDX_DST_DFLT_Q
| /* dest */
488 RT_IDX_TYPE_NICQ
| /* type */
489 (RT_IDX_ALL_ERR_SLOT
<< RT_IDX_IDX_SHIFT
);/* index */
492 case RT_IDX_BCAST
: /* Pass up Broadcast frames to default Q. */
494 value
= RT_IDX_DST_DFLT_Q
| /* dest */
495 RT_IDX_TYPE_NICQ
| /* type */
496 (RT_IDX_BCAST_SLOT
<< RT_IDX_IDX_SHIFT
);/* index */
499 case RT_IDX_MCAST
: /* Pass up All Multicast frames. */
501 value
= RT_IDX_DST_CAM_Q
| /* dest */
502 RT_IDX_TYPE_NICQ
| /* type */
503 (RT_IDX_ALLMULTI_SLOT
<< RT_IDX_IDX_SHIFT
);/* index */
506 case RT_IDX_MCAST_MATCH
: /* Pass up matched Multicast frames. */
508 value
= RT_IDX_DST_CAM_Q
| /* dest */
509 RT_IDX_TYPE_NICQ
| /* type */
510 (RT_IDX_MCAST_MATCH_SLOT
<< RT_IDX_IDX_SHIFT
);/* index */
513 case RT_IDX_RSS_MATCH
: /* Pass up matched RSS frames. */
515 value
= RT_IDX_DST_RSS
| /* dest */
516 RT_IDX_TYPE_NICQ
| /* type */
517 (RT_IDX_RSS_MATCH_SLOT
<< RT_IDX_IDX_SHIFT
);/* index */
520 case 0: /* Clear the E-bit on an entry. */
522 value
= RT_IDX_DST_DFLT_Q
| /* dest */
523 RT_IDX_TYPE_NICQ
| /* type */
524 (index
<< RT_IDX_IDX_SHIFT
);/* index */
528 QPRINTK(qdev
, IFUP
, ERR
, "Mask type %d not yet supported.\n",
535 status
= ql_wait_reg_rdy(qdev
, RT_IDX
, RT_IDX_MW
, 0);
538 value
|= (enable
? RT_IDX_E
: 0);
539 ql_write32(qdev
, RT_IDX
, value
);
540 ql_write32(qdev
, RT_DATA
, enable
? mask
: 0);
static void ql_enable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

static void ql_disable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}
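
/* Note (an assumption drawn from the (value << 16) | value pattern used here
 * and with the STS register later in this file): the upper 16 bits of these
 * registers appear to act as a write mask for the lower 16, so writing
 * (INTR_EN_EI << 16) alone clears the enable bit without disturbing the
 * other bits.
 */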
556 /* If we're running with multiple MSI-X vectors then we enable on the fly.
557 * Otherwise, we may have multiple outstanding workers and don't want to
558 * enable until the last one finishes. In this case, the irq_cnt gets
559 * incremented everytime we queue a worker and decremented everytime
560 * a worker finishes. Once it hits zero we enable the interrupt.
562 u32
ql_enable_completion_interrupt(struct ql_adapter
*qdev
, u32 intr
)
565 unsigned long hw_flags
= 0;
566 struct intr_context
*ctx
= qdev
->intr_context
+ intr
;
568 if (likely(test_bit(QL_MSIX_ENABLED
, &qdev
->flags
) && intr
)) {
569 /* Always enable if we're MSIX multi interrupts and
570 * it's not the default (zeroeth) interrupt.
572 ql_write32(qdev
, INTR_EN
,
574 var
= ql_read32(qdev
, STS
);
578 spin_lock_irqsave(&qdev
->hw_lock
, hw_flags
);
579 if (atomic_dec_and_test(&ctx
->irq_cnt
)) {
580 ql_write32(qdev
, INTR_EN
,
582 var
= ql_read32(qdev
, STS
);
584 spin_unlock_irqrestore(&qdev
->hw_lock
, hw_flags
);
588 static u32
ql_disable_completion_interrupt(struct ql_adapter
*qdev
, u32 intr
)
591 struct intr_context
*ctx
;
593 /* HW disables for us if we're MSIX multi interrupts and
594 * it's not the default (zeroeth) interrupt.
596 if (likely(test_bit(QL_MSIX_ENABLED
, &qdev
->flags
) && intr
))
599 ctx
= qdev
->intr_context
+ intr
;
600 spin_lock(&qdev
->hw_lock
);
601 if (!atomic_read(&ctx
->irq_cnt
)) {
602 ql_write32(qdev
, INTR_EN
,
604 var
= ql_read32(qdev
, STS
);
606 atomic_inc(&ctx
->irq_cnt
);
607 spin_unlock(&qdev
->hw_lock
);
611 static void ql_enable_all_completion_interrupts(struct ql_adapter
*qdev
)
614 for (i
= 0; i
< qdev
->intr_count
; i
++) {
615 /* The enable call does a atomic_dec_and_test
616 * and enables only if the result is zero.
617 * So we precharge it here.
619 if (unlikely(!test_bit(QL_MSIX_ENABLED
, &qdev
->flags
) ||
621 atomic_set(&qdev
->intr_context
[i
].irq_cnt
, 1);
622 ql_enable_completion_interrupt(qdev
, i
);
static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
{
	int status, i;
	u16 csum = 0;
	__le16 *flash = (__le16 *)&qdev->flash;

	status = strncmp((char *)&qdev->flash, str, 4);
	if (status) {
		QPRINTK(qdev, IFUP, ERR, "Invalid flash signature.\n");
		return status;
	}

	for (i = 0; i < size; i++)
		csum += le16_to_cpu(*flash++);

	if (csum)
		QPRINTK(qdev, IFUP, ERR,
			"Invalid flash checksum, csum = 0x%.04x.\n", csum);

	return csum;
}
static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* This data is stored on flash as an array of
	 * __le32.  Since ql_read32() returns cpu endian
	 * we need to swap it back.
	 */
	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
exit:
	return status;
}
673 static int ql_get_8000_flash_params(struct ql_adapter
*qdev
)
677 __le32
*p
= (__le32
*)&qdev
->flash
;
681 /* Get flash offset for function and adjust
685 offset
= FUNC0_FLASH_OFFSET
/ sizeof(u32
);
687 offset
= FUNC1_FLASH_OFFSET
/ sizeof(u32
);
689 if (ql_sem_spinlock(qdev
, SEM_FLASH_MASK
))
692 size
= sizeof(struct flash_params_8000
) / sizeof(u32
);
693 for (i
= 0; i
< size
; i
++, p
++) {
694 status
= ql_read_flash_word(qdev
, i
+offset
, p
);
696 QPRINTK(qdev
, IFUP
, ERR
, "Error reading flash.\n");
701 status
= ql_validate_flash(qdev
,
702 sizeof(struct flash_params_8000
) / sizeof(u16
),
705 QPRINTK(qdev
, IFUP
, ERR
, "Invalid flash.\n");
710 /* Extract either manufacturer or BOFM modified
713 if (qdev
->flash
.flash_params_8000
.data_type1
== 2)
715 qdev
->flash
.flash_params_8000
.mac_addr1
,
716 qdev
->ndev
->addr_len
);
719 qdev
->flash
.flash_params_8000
.mac_addr
,
720 qdev
->ndev
->addr_len
);
722 if (!is_valid_ether_addr(mac_addr
)) {
723 QPRINTK(qdev
, IFUP
, ERR
, "Invalid MAC address.\n");
728 memcpy(qdev
->ndev
->dev_addr
,
730 qdev
->ndev
->addr_len
);
733 ql_sem_unlock(qdev
, SEM_FLASH_MASK
);
737 static int ql_get_8012_flash_params(struct ql_adapter
*qdev
)
741 __le32
*p
= (__le32
*)&qdev
->flash
;
743 u32 size
= sizeof(struct flash_params_8012
) / sizeof(u32
);
745 /* Second function's parameters follow the first
751 if (ql_sem_spinlock(qdev
, SEM_FLASH_MASK
))
754 for (i
= 0; i
< size
; i
++, p
++) {
755 status
= ql_read_flash_word(qdev
, i
+offset
, p
);
757 QPRINTK(qdev
, IFUP
, ERR
, "Error reading flash.\n");
763 status
= ql_validate_flash(qdev
,
764 sizeof(struct flash_params_8012
) / sizeof(u16
),
767 QPRINTK(qdev
, IFUP
, ERR
, "Invalid flash.\n");
772 if (!is_valid_ether_addr(qdev
->flash
.flash_params_8012
.mac_addr
)) {
777 memcpy(qdev
->ndev
->dev_addr
,
778 qdev
->flash
.flash_params_8012
.mac_addr
,
779 qdev
->ndev
->addr_len
);
782 ql_sem_unlock(qdev
, SEM_FLASH_MASK
);
786 /* xgmac register are located behind the xgmac_addr and xgmac_data
787 * register pair. Each read/write requires us to wait for the ready
788 * bit before reading/writing the data.
790 static int ql_write_xgmac_reg(struct ql_adapter
*qdev
, u32 reg
, u32 data
)
793 /* wait for reg to come ready */
794 status
= ql_wait_reg_rdy(qdev
,
795 XGMAC_ADDR
, XGMAC_ADDR_RDY
, XGMAC_ADDR_XME
);
798 /* write the data to the data reg */
799 ql_write32(qdev
, XGMAC_DATA
, data
);
800 /* trigger the write */
801 ql_write32(qdev
, XGMAC_ADDR
, reg
);
805 /* xgmac register are located behind the xgmac_addr and xgmac_data
806 * register pair. Each read/write requires us to wait for the ready
807 * bit before reading/writing the data.
809 int ql_read_xgmac_reg(struct ql_adapter
*qdev
, u32 reg
, u32
*data
)
812 /* wait for reg to come ready */
813 status
= ql_wait_reg_rdy(qdev
,
814 XGMAC_ADDR
, XGMAC_ADDR_RDY
, XGMAC_ADDR_XME
);
817 /* set up for reg read */
818 ql_write32(qdev
, XGMAC_ADDR
, reg
| XGMAC_ADDR_R
);
819 /* wait for reg to come ready */
820 status
= ql_wait_reg_rdy(qdev
,
821 XGMAC_ADDR
, XGMAC_ADDR_RDY
, XGMAC_ADDR_XME
);
825 *data
= ql_read32(qdev
, XGMAC_DATA
);
/* This is used for reading the 64-bit statistics regs. */
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
{
	int status = 0;
	u32 hi = 0;
	u32 lo = 0;

	status = ql_read_xgmac_reg(qdev, reg, &lo);
	if (status)
		goto exit;

	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
	if (status)
		goto exit;

	*data = (u64) lo | ((u64) hi << 32);

exit:
	return status;
}
851 static int ql_8000_port_initialize(struct ql_adapter
*qdev
)
855 * Get MPI firmware version for driver banner
858 status
= ql_mb_about_fw(qdev
);
861 status
= ql_mb_get_fw_state(qdev
);
864 /* Wake up a worker to get/set the TX/RX frame sizes. */
865 queue_delayed_work(qdev
->workqueue
, &qdev
->mpi_port_cfg_work
, 0);
870 /* Take the MAC Core out of reset.
871 * Enable statistics counting.
872 * Take the transmitter/receiver out of reset.
873 * This functionality may be done in the MPI firmware at a
876 static int ql_8012_port_initialize(struct ql_adapter
*qdev
)
881 if (ql_sem_trylock(qdev
, qdev
->xg_sem_mask
)) {
882 /* Another function has the semaphore, so
883 * wait for the port init bit to come ready.
885 QPRINTK(qdev
, LINK
, INFO
,
886 "Another function has the semaphore, so wait for the port init bit to come ready.\n");
887 status
= ql_wait_reg_rdy(qdev
, STS
, qdev
->port_init
, 0);
889 QPRINTK(qdev
, LINK
, CRIT
,
890 "Port initialize timed out.\n");
895 QPRINTK(qdev
, LINK
, INFO
, "Got xgmac semaphore!.\n");
896 /* Set the core reset. */
897 status
= ql_read_xgmac_reg(qdev
, GLOBAL_CFG
, &data
);
900 data
|= GLOBAL_CFG_RESET
;
901 status
= ql_write_xgmac_reg(qdev
, GLOBAL_CFG
, data
);
905 /* Clear the core reset and turn on jumbo for receiver. */
906 data
&= ~GLOBAL_CFG_RESET
; /* Clear core reset. */
907 data
|= GLOBAL_CFG_JUMBO
; /* Turn on jumbo. */
908 data
|= GLOBAL_CFG_TX_STAT_EN
;
909 data
|= GLOBAL_CFG_RX_STAT_EN
;
910 status
= ql_write_xgmac_reg(qdev
, GLOBAL_CFG
, data
);
914 /* Enable transmitter, and clear it's reset. */
915 status
= ql_read_xgmac_reg(qdev
, TX_CFG
, &data
);
918 data
&= ~TX_CFG_RESET
; /* Clear the TX MAC reset. */
919 data
|= TX_CFG_EN
; /* Enable the transmitter. */
920 status
= ql_write_xgmac_reg(qdev
, TX_CFG
, data
);
924 /* Enable receiver and clear it's reset. */
925 status
= ql_read_xgmac_reg(qdev
, RX_CFG
, &data
);
928 data
&= ~RX_CFG_RESET
; /* Clear the RX MAC reset. */
929 data
|= RX_CFG_EN
; /* Enable the receiver. */
930 status
= ql_write_xgmac_reg(qdev
, RX_CFG
, data
);
936 ql_write_xgmac_reg(qdev
, MAC_TX_PARAMS
, MAC_TX_PARAMS_JUMBO
| (0x2580 << 16));
940 ql_write_xgmac_reg(qdev
, MAC_RX_PARAMS
, 0x2580);
944 /* Signal to the world that the port is enabled. */
945 ql_write32(qdev
, STS
, ((qdev
->port_init
<< 16) | qdev
->port_init
));
947 ql_sem_unlock(qdev
, qdev
->xg_sem_mask
);
/* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
	rx_ring->lbq_curr_idx++;
	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
		rx_ring->lbq_curr_idx = 0;
	rx_ring->lbq_free_cnt++;
	return lbq_desc;
}

/* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
	rx_ring->sbq_curr_idx++;
	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
		rx_ring->sbq_curr_idx = 0;
	rx_ring->sbq_free_cnt++;
	return sbq_desc;
}

/* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
{
	rx_ring->cnsmr_idx++;
	rx_ring->curr_entry++;
	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
		rx_ring->cnsmr_idx = 0;
		rx_ring->curr_entry = rx_ring->cq_base;
	}
}

static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}
989 /* Process (refill) a large buffer queue. */
990 static void ql_update_lbq(struct ql_adapter
*qdev
, struct rx_ring
*rx_ring
)
992 u32 clean_idx
= rx_ring
->lbq_clean_idx
;
993 u32 start_idx
= clean_idx
;
994 struct bq_desc
*lbq_desc
;
998 while (rx_ring
->lbq_free_cnt
> 16) {
999 for (i
= 0; i
< 16; i
++) {
1000 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1001 "lbq: try cleaning clean_idx = %d.\n",
1003 lbq_desc
= &rx_ring
->lbq
[clean_idx
];
1004 if (lbq_desc
->p
.lbq_page
== NULL
) {
1005 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1006 "lbq: getting new page for index %d.\n",
1008 lbq_desc
->p
.lbq_page
= alloc_page(GFP_ATOMIC
);
1009 if (lbq_desc
->p
.lbq_page
== NULL
) {
1010 rx_ring
->lbq_clean_idx
= clean_idx
;
1011 QPRINTK(qdev
, RX_STATUS
, ERR
,
1012 "Couldn't get a page.\n");
1015 map
= pci_map_page(qdev
->pdev
,
1016 lbq_desc
->p
.lbq_page
,
1018 PCI_DMA_FROMDEVICE
);
1019 if (pci_dma_mapping_error(qdev
->pdev
, map
)) {
1020 rx_ring
->lbq_clean_idx
= clean_idx
;
1021 put_page(lbq_desc
->p
.lbq_page
);
1022 lbq_desc
->p
.lbq_page
= NULL
;
1023 QPRINTK(qdev
, RX_STATUS
, ERR
,
1024 "PCI mapping failed.\n");
1027 pci_unmap_addr_set(lbq_desc
, mapaddr
, map
);
1028 pci_unmap_len_set(lbq_desc
, maplen
, PAGE_SIZE
);
1029 *lbq_desc
->addr
= cpu_to_le64(map
);
1032 if (clean_idx
== rx_ring
->lbq_len
)
1036 rx_ring
->lbq_clean_idx
= clean_idx
;
1037 rx_ring
->lbq_prod_idx
+= 16;
1038 if (rx_ring
->lbq_prod_idx
== rx_ring
->lbq_len
)
1039 rx_ring
->lbq_prod_idx
= 0;
1040 rx_ring
->lbq_free_cnt
-= 16;
1043 if (start_idx
!= clean_idx
) {
1044 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1045 "lbq: updating prod idx = %d.\n",
1046 rx_ring
->lbq_prod_idx
);
1047 ql_write_db_reg(rx_ring
->lbq_prod_idx
,
1048 rx_ring
->lbq_prod_idx_db_reg
);
1052 /* Process (refill) a small buffer queue. */
1053 static void ql_update_sbq(struct ql_adapter
*qdev
, struct rx_ring
*rx_ring
)
1055 u32 clean_idx
= rx_ring
->sbq_clean_idx
;
1056 u32 start_idx
= clean_idx
;
1057 struct bq_desc
*sbq_desc
;
1061 while (rx_ring
->sbq_free_cnt
> 16) {
1062 for (i
= 0; i
< 16; i
++) {
1063 sbq_desc
= &rx_ring
->sbq
[clean_idx
];
1064 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1065 "sbq: try cleaning clean_idx = %d.\n",
1067 if (sbq_desc
->p
.skb
== NULL
) {
1068 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1069 "sbq: getting new skb for index %d.\n",
1072 netdev_alloc_skb(qdev
->ndev
,
1073 rx_ring
->sbq_buf_size
);
1074 if (sbq_desc
->p
.skb
== NULL
) {
1075 QPRINTK(qdev
, PROBE
, ERR
,
1076 "Couldn't get an skb.\n");
1077 rx_ring
->sbq_clean_idx
= clean_idx
;
1080 skb_reserve(sbq_desc
->p
.skb
, QLGE_SB_PAD
);
1081 map
= pci_map_single(qdev
->pdev
,
1082 sbq_desc
->p
.skb
->data
,
1083 rx_ring
->sbq_buf_size
/
1084 2, PCI_DMA_FROMDEVICE
);
1085 if (pci_dma_mapping_error(qdev
->pdev
, map
)) {
1086 QPRINTK(qdev
, IFUP
, ERR
, "PCI mapping failed.\n");
1087 rx_ring
->sbq_clean_idx
= clean_idx
;
1088 dev_kfree_skb_any(sbq_desc
->p
.skb
);
1089 sbq_desc
->p
.skb
= NULL
;
1092 pci_unmap_addr_set(sbq_desc
, mapaddr
, map
);
1093 pci_unmap_len_set(sbq_desc
, maplen
,
1094 rx_ring
->sbq_buf_size
/ 2);
1095 *sbq_desc
->addr
= cpu_to_le64(map
);
1099 if (clean_idx
== rx_ring
->sbq_len
)
1102 rx_ring
->sbq_clean_idx
= clean_idx
;
1103 rx_ring
->sbq_prod_idx
+= 16;
1104 if (rx_ring
->sbq_prod_idx
== rx_ring
->sbq_len
)
1105 rx_ring
->sbq_prod_idx
= 0;
1106 rx_ring
->sbq_free_cnt
-= 16;
1109 if (start_idx
!= clean_idx
) {
1110 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1111 "sbq: updating prod idx = %d.\n",
1112 rx_ring
->sbq_prod_idx
);
1113 ql_write_db_reg(rx_ring
->sbq_prod_idx
,
1114 rx_ring
->sbq_prod_idx_db_reg
);
static void ql_update_buffer_queues(struct ql_adapter *qdev,
				    struct rx_ring *rx_ring)
{
	ql_update_sbq(qdev, rx_ring);
	ql_update_lbq(qdev, rx_ring);
}
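
/* Both refill routines above work in batches of 16 descriptors: buffers are
 * mapped 16 at a time and the producer index doorbell is only rung once per
 * batch, which keeps doorbell writes off the per-buffer path.
 */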
1125 /* Unmaps tx buffers. Can be called from send() if a pci mapping
1126 * fails at some stage, or from the interrupt when a tx completes.
1128 static void ql_unmap_send(struct ql_adapter
*qdev
,
1129 struct tx_ring_desc
*tx_ring_desc
, int mapped
)
1132 for (i
= 0; i
< mapped
; i
++) {
1133 if (i
== 0 || (i
== 7 && mapped
> 7)) {
1135 * Unmap the skb->data area, or the
1136 * external sglist (AKA the Outbound
1137 * Address List (OAL)).
1138 * If its the zeroeth element, then it's
1139 * the skb->data area. If it's the 7th
1140 * element and there is more than 6 frags,
1144 QPRINTK(qdev
, TX_DONE
, DEBUG
,
1145 "unmapping OAL area.\n");
1147 pci_unmap_single(qdev
->pdev
,
1148 pci_unmap_addr(&tx_ring_desc
->map
[i
],
1150 pci_unmap_len(&tx_ring_desc
->map
[i
],
1154 QPRINTK(qdev
, TX_DONE
, DEBUG
, "unmapping frag %d.\n",
1156 pci_unmap_page(qdev
->pdev
,
1157 pci_unmap_addr(&tx_ring_desc
->map
[i
],
1159 pci_unmap_len(&tx_ring_desc
->map
[i
],
1160 maplen
), PCI_DMA_TODEVICE
);
1166 /* Map the buffers for this transmit. This will return
1167 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1169 static int ql_map_send(struct ql_adapter
*qdev
,
1170 struct ob_mac_iocb_req
*mac_iocb_ptr
,
1171 struct sk_buff
*skb
, struct tx_ring_desc
*tx_ring_desc
)
1173 int len
= skb_headlen(skb
);
1175 int frag_idx
, err
, map_idx
= 0;
1176 struct tx_buf_desc
*tbd
= mac_iocb_ptr
->tbd
;
1177 int frag_cnt
= skb_shinfo(skb
)->nr_frags
;
1180 QPRINTK(qdev
, TX_QUEUED
, DEBUG
, "frag_cnt = %d.\n", frag_cnt
);
1183 * Map the skb buffer first.
1185 map
= pci_map_single(qdev
->pdev
, skb
->data
, len
, PCI_DMA_TODEVICE
);
1187 err
= pci_dma_mapping_error(qdev
->pdev
, map
);
1189 QPRINTK(qdev
, TX_QUEUED
, ERR
,
1190 "PCI mapping failed with error: %d\n", err
);
1192 return NETDEV_TX_BUSY
;
1195 tbd
->len
= cpu_to_le32(len
);
1196 tbd
->addr
= cpu_to_le64(map
);
1197 pci_unmap_addr_set(&tx_ring_desc
->map
[map_idx
], mapaddr
, map
);
1198 pci_unmap_len_set(&tx_ring_desc
->map
[map_idx
], maplen
, len
);
1202 * This loop fills the remainder of the 8 address descriptors
1203 * in the IOCB. If there are more than 7 fragments, then the
1204 * eighth address desc will point to an external list (OAL).
1205 * When this happens, the remainder of the frags will be stored
1208 for (frag_idx
= 0; frag_idx
< frag_cnt
; frag_idx
++, map_idx
++) {
1209 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[frag_idx
];
1211 if (frag_idx
== 6 && frag_cnt
> 7) {
1212 /* Let's tack on an sglist.
1213 * Our control block will now
1215 * iocb->seg[0] = skb->data
1216 * iocb->seg[1] = frag[0]
1217 * iocb->seg[2] = frag[1]
1218 * iocb->seg[3] = frag[2]
1219 * iocb->seg[4] = frag[3]
1220 * iocb->seg[5] = frag[4]
1221 * iocb->seg[6] = frag[5]
1222 * iocb->seg[7] = ptr to OAL (external sglist)
1223 * oal->seg[0] = frag[6]
1224 * oal->seg[1] = frag[7]
1225 * oal->seg[2] = frag[8]
1226 * oal->seg[3] = frag[9]
1227 * oal->seg[4] = frag[10]
1230 /* Tack on the OAL in the eighth segment of IOCB. */
1231 map
= pci_map_single(qdev
->pdev
, &tx_ring_desc
->oal
,
1234 err
= pci_dma_mapping_error(qdev
->pdev
, map
);
1236 QPRINTK(qdev
, TX_QUEUED
, ERR
,
1237 "PCI mapping outbound address list with error: %d\n",
1242 tbd
->addr
= cpu_to_le64(map
);
1244 * The length is the number of fragments
1245 * that remain to be mapped times the length
1246 * of our sglist (OAL).
1249 cpu_to_le32((sizeof(struct tx_buf_desc
) *
1250 (frag_cnt
- frag_idx
)) | TX_DESC_C
);
1251 pci_unmap_addr_set(&tx_ring_desc
->map
[map_idx
], mapaddr
,
1253 pci_unmap_len_set(&tx_ring_desc
->map
[map_idx
], maplen
,
1254 sizeof(struct oal
));
1255 tbd
= (struct tx_buf_desc
*)&tx_ring_desc
->oal
;
1260 pci_map_page(qdev
->pdev
, frag
->page
,
1261 frag
->page_offset
, frag
->size
,
1264 err
= pci_dma_mapping_error(qdev
->pdev
, map
);
1266 QPRINTK(qdev
, TX_QUEUED
, ERR
,
1267 "PCI mapping frags failed with error: %d.\n",
1272 tbd
->addr
= cpu_to_le64(map
);
1273 tbd
->len
= cpu_to_le32(frag
->size
);
1274 pci_unmap_addr_set(&tx_ring_desc
->map
[map_idx
], mapaddr
, map
);
1275 pci_unmap_len_set(&tx_ring_desc
->map
[map_idx
], maplen
,
1279 /* Save the number of segments we've mapped. */
1280 tx_ring_desc
->map_cnt
= map_idx
;
1281 /* Terminate the last segment. */
1282 tbd
->len
= cpu_to_le32(le32_to_cpu(tbd
->len
) | TX_DESC_E
);
1283 return NETDEV_TX_OK
;
1287 * If the first frag mapping failed, then i will be zero.
1288 * This causes the unmap of the skb->data area. Otherwise
1289 * we pass in the number of frags that mapped successfully
1290 * so they can be umapped.
1292 ql_unmap_send(qdev
, tx_ring_desc
, map_idx
);
1293 return NETDEV_TX_BUSY
;
static void ql_realign_skb(struct sk_buff *skb, int len)
{
	void *temp_addr = skb->data;

	/* Undo the skb_reserve(skb,32) we did before
	 * giving to hardware, and realign data on
	 * a 2-byte boundary.
	 */
	skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb_copy_to_linear_data(skb, temp_addr, len);
}
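
/* With QLGE_SB_PAD == 32 (the reserve mentioned in the comment above) and the
 * usual NET_IP_ALIGN of 2, the data pointer moves back by 30 bytes, leaving
 * the IP header 2-byte aligned after the copy.  NET_IP_ALIGN can differ per
 * architecture, so the exact shift is platform dependent.
 */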
1311 * This function builds an skb for the given inbound
1312 * completion. It will be rewritten for readability in the near
1313 * future, but for not it works well.
1315 static struct sk_buff
*ql_build_rx_skb(struct ql_adapter
*qdev
,
1316 struct rx_ring
*rx_ring
,
1317 struct ib_mac_iocb_rsp
*ib_mac_rsp
)
1319 struct bq_desc
*lbq_desc
;
1320 struct bq_desc
*sbq_desc
;
1321 struct sk_buff
*skb
= NULL
;
1322 u32 length
= le32_to_cpu(ib_mac_rsp
->data_len
);
1323 u32 hdr_len
= le32_to_cpu(ib_mac_rsp
->hdr_len
);
1326 * Handle the header buffer if present.
1328 if (ib_mac_rsp
->flags4
& IB_MAC_IOCB_RSP_HV
&&
1329 ib_mac_rsp
->flags4
& IB_MAC_IOCB_RSP_HS
) {
1330 QPRINTK(qdev
, RX_STATUS
, DEBUG
, "Header of %d bytes in small buffer.\n", hdr_len
);
1332 * Headers fit nicely into a small buffer.
1334 sbq_desc
= ql_get_curr_sbuf(rx_ring
);
1335 pci_unmap_single(qdev
->pdev
,
1336 pci_unmap_addr(sbq_desc
, mapaddr
),
1337 pci_unmap_len(sbq_desc
, maplen
),
1338 PCI_DMA_FROMDEVICE
);
1339 skb
= sbq_desc
->p
.skb
;
1340 ql_realign_skb(skb
, hdr_len
);
1341 skb_put(skb
, hdr_len
);
1342 sbq_desc
->p
.skb
= NULL
;
1346 * Handle the data buffer(s).
1348 if (unlikely(!length
)) { /* Is there data too? */
1349 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1350 "No Data buffer in this packet.\n");
1354 if (ib_mac_rsp
->flags3
& IB_MAC_IOCB_RSP_DS
) {
1355 if (ib_mac_rsp
->flags4
& IB_MAC_IOCB_RSP_HS
) {
1356 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1357 "Headers in small, data of %d bytes in small, combine them.\n", length
);
1359 * Data is less than small buffer size so it's
1360 * stuffed in a small buffer.
1361 * For this case we append the data
1362 * from the "data" small buffer to the "header" small
1365 sbq_desc
= ql_get_curr_sbuf(rx_ring
);
1366 pci_dma_sync_single_for_cpu(qdev
->pdev
,
1368 (sbq_desc
, mapaddr
),
1371 PCI_DMA_FROMDEVICE
);
1372 memcpy(skb_put(skb
, length
),
1373 sbq_desc
->p
.skb
->data
, length
);
1374 pci_dma_sync_single_for_device(qdev
->pdev
,
1381 PCI_DMA_FROMDEVICE
);
1383 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1384 "%d bytes in a single small buffer.\n", length
);
1385 sbq_desc
= ql_get_curr_sbuf(rx_ring
);
1386 skb
= sbq_desc
->p
.skb
;
1387 ql_realign_skb(skb
, length
);
1388 skb_put(skb
, length
);
1389 pci_unmap_single(qdev
->pdev
,
1390 pci_unmap_addr(sbq_desc
,
1392 pci_unmap_len(sbq_desc
,
1394 PCI_DMA_FROMDEVICE
);
1395 sbq_desc
->p
.skb
= NULL
;
1397 } else if (ib_mac_rsp
->flags3
& IB_MAC_IOCB_RSP_DL
) {
1398 if (ib_mac_rsp
->flags4
& IB_MAC_IOCB_RSP_HS
) {
1399 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1400 "Header in small, %d bytes in large. Chain large to small!\n", length
);
1402 * The data is in a single large buffer. We
1403 * chain it to the header buffer's skb and let
1406 lbq_desc
= ql_get_curr_lbuf(rx_ring
);
1407 pci_unmap_page(qdev
->pdev
,
1408 pci_unmap_addr(lbq_desc
,
1410 pci_unmap_len(lbq_desc
, maplen
),
1411 PCI_DMA_FROMDEVICE
);
1412 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1413 "Chaining page to skb.\n");
1414 skb_fill_page_desc(skb
, 0, lbq_desc
->p
.lbq_page
,
1417 skb
->data_len
+= length
;
1418 skb
->truesize
+= length
;
1419 lbq_desc
->p
.lbq_page
= NULL
;
1422 * The headers and data are in a single large buffer. We
1423 * copy it to a new skb and let it go. This can happen with
1424 * jumbo mtu on a non-TCP/UDP frame.
1426 lbq_desc
= ql_get_curr_lbuf(rx_ring
);
1427 skb
= netdev_alloc_skb(qdev
->ndev
, length
);
1429 QPRINTK(qdev
, PROBE
, DEBUG
,
1430 "No skb available, drop the packet.\n");
1433 pci_unmap_page(qdev
->pdev
,
1434 pci_unmap_addr(lbq_desc
,
1436 pci_unmap_len(lbq_desc
, maplen
),
1437 PCI_DMA_FROMDEVICE
);
1438 skb_reserve(skb
, NET_IP_ALIGN
);
1439 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1440 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length
);
1441 skb_fill_page_desc(skb
, 0, lbq_desc
->p
.lbq_page
,
1444 skb
->data_len
+= length
;
1445 skb
->truesize
+= length
;
1447 lbq_desc
->p
.lbq_page
= NULL
;
1448 __pskb_pull_tail(skb
,
1449 (ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_V
) ?
1450 VLAN_ETH_HLEN
: ETH_HLEN
);
1454 * The data is in a chain of large buffers
1455 * pointed to by a small buffer. We loop
1456 * thru and chain them to the our small header
1458 * frags: There are 18 max frags and our small
1459 * buffer will hold 32 of them. The thing is,
1460 * we'll use 3 max for our 9000 byte jumbo
1461 * frames. If the MTU goes up we could
1462 * eventually be in trouble.
1464 int size
, offset
, i
= 0;
1465 __le64
*bq
, bq_array
[8];
1466 sbq_desc
= ql_get_curr_sbuf(rx_ring
);
1467 pci_unmap_single(qdev
->pdev
,
1468 pci_unmap_addr(sbq_desc
, mapaddr
),
1469 pci_unmap_len(sbq_desc
, maplen
),
1470 PCI_DMA_FROMDEVICE
);
1471 if (!(ib_mac_rsp
->flags4
& IB_MAC_IOCB_RSP_HS
)) {
1473 * This is an non TCP/UDP IP frame, so
1474 * the headers aren't split into a small
1475 * buffer. We have to use the small buffer
1476 * that contains our sg list as our skb to
1477 * send upstairs. Copy the sg list here to
1478 * a local buffer and use it to find the
1481 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1482 "%d bytes of headers & data in chain of large.\n", length
);
1483 skb
= sbq_desc
->p
.skb
;
1485 memcpy(bq
, skb
->data
, sizeof(bq_array
));
1486 sbq_desc
->p
.skb
= NULL
;
1487 skb_reserve(skb
, NET_IP_ALIGN
);
1489 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1490 "Headers in small, %d bytes of data in chain of large.\n", length
);
1491 bq
= (__le64
*)sbq_desc
->p
.skb
->data
;
1493 while (length
> 0) {
1494 lbq_desc
= ql_get_curr_lbuf(rx_ring
);
1495 pci_unmap_page(qdev
->pdev
,
1496 pci_unmap_addr(lbq_desc
,
1498 pci_unmap_len(lbq_desc
,
1500 PCI_DMA_FROMDEVICE
);
1501 size
= (length
< PAGE_SIZE
) ? length
: PAGE_SIZE
;
1504 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1505 "Adding page %d to skb for %d bytes.\n",
1507 skb_fill_page_desc(skb
, i
, lbq_desc
->p
.lbq_page
,
1510 skb
->data_len
+= size
;
1511 skb
->truesize
+= size
;
1513 lbq_desc
->p
.lbq_page
= NULL
;
1517 __pskb_pull_tail(skb
, (ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_V
) ?
1518 VLAN_ETH_HLEN
: ETH_HLEN
);
1523 /* Process an inbound completion from an rx ring. */
1524 static void ql_process_mac_rx_intr(struct ql_adapter
*qdev
,
1525 struct rx_ring
*rx_ring
,
1526 struct ib_mac_iocb_rsp
*ib_mac_rsp
)
1528 struct net_device
*ndev
= qdev
->ndev
;
1529 struct sk_buff
*skb
= NULL
;
1530 u16 vlan_id
= (le16_to_cpu(ib_mac_rsp
->vlan_id
) &
1531 IB_MAC_IOCB_RSP_VLAN_MASK
)
1533 QL_DUMP_IB_MAC_RSP(ib_mac_rsp
);
1535 skb
= ql_build_rx_skb(qdev
, rx_ring
, ib_mac_rsp
);
1536 if (unlikely(!skb
)) {
1537 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1538 "No skb available, drop packet.\n");
1542 /* Frame error, so drop the packet. */
1543 if (ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_ERR_MASK
) {
1544 QPRINTK(qdev
, DRV
, ERR
, "Receive error, flags2 = 0x%x\n",
1545 ib_mac_rsp
->flags2
);
1546 dev_kfree_skb_any(skb
);
1550 /* The max framesize filter on this chip is set higher than
1551 * MTU since FCoE uses 2k frames.
1553 if (skb
->len
> ndev
->mtu
+ ETH_HLEN
) {
1554 dev_kfree_skb_any(skb
);
1558 prefetch(skb
->data
);
1560 if (ib_mac_rsp
->flags1
& IB_MAC_IOCB_RSP_M_MASK
) {
1561 QPRINTK(qdev
, RX_STATUS
, DEBUG
, "%s%s%s Multicast.\n",
1562 (ib_mac_rsp
->flags1
& IB_MAC_IOCB_RSP_M_MASK
) ==
1563 IB_MAC_IOCB_RSP_M_HASH
? "Hash" : "",
1564 (ib_mac_rsp
->flags1
& IB_MAC_IOCB_RSP_M_MASK
) ==
1565 IB_MAC_IOCB_RSP_M_REG
? "Registered" : "",
1566 (ib_mac_rsp
->flags1
& IB_MAC_IOCB_RSP_M_MASK
) ==
1567 IB_MAC_IOCB_RSP_M_PROM
? "Promiscuous" : "");
1569 if (ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_P
) {
1570 QPRINTK(qdev
, RX_STATUS
, DEBUG
, "Promiscuous Packet.\n");
1573 skb
->protocol
= eth_type_trans(skb
, ndev
);
1574 skb
->ip_summed
= CHECKSUM_NONE
;
1576 /* If rx checksum is on, and there are no
1577 * csum or frame errors.
1579 if (qdev
->rx_csum
&&
1580 !(ib_mac_rsp
->flags1
& IB_MAC_CSUM_ERR_MASK
)) {
1582 if (ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_T
) {
1583 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1584 "TCP checksum done!\n");
1585 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1586 } else if ((ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_U
) &&
1587 (ib_mac_rsp
->flags3
& IB_MAC_IOCB_RSP_V4
)) {
1588 /* Unfragmented ipv4 UDP frame. */
1589 struct iphdr
*iph
= (struct iphdr
*) skb
->data
;
1590 if (!(iph
->frag_off
&
1591 cpu_to_be16(IP_MF
|IP_OFFSET
))) {
1592 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1593 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1594 "TCP checksum done!\n");
1599 qdev
->stats
.rx_packets
++;
1600 qdev
->stats
.rx_bytes
+= skb
->len
;
1601 skb_record_rx_queue(skb
,
1602 rx_ring
->cq_id
- qdev
->rss_ring_first_cq_id
);
1603 if (skb
->ip_summed
== CHECKSUM_UNNECESSARY
) {
1605 (ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_V
) &&
1607 vlan_gro_receive(&rx_ring
->napi
, qdev
->vlgrp
,
1610 napi_gro_receive(&rx_ring
->napi
, skb
);
1613 (ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_V
) &&
1615 vlan_hwaccel_receive_skb(skb
, qdev
->vlgrp
, vlan_id
);
1617 netif_receive_skb(skb
);
1621 /* Process an outbound completion from an rx ring. */
1622 static void ql_process_mac_tx_intr(struct ql_adapter
*qdev
,
1623 struct ob_mac_iocb_rsp
*mac_rsp
)
1625 struct tx_ring
*tx_ring
;
1626 struct tx_ring_desc
*tx_ring_desc
;
1628 QL_DUMP_OB_MAC_RSP(mac_rsp
);
1629 tx_ring
= &qdev
->tx_ring
[mac_rsp
->txq_idx
];
1630 tx_ring_desc
= &tx_ring
->q
[mac_rsp
->tid
];
1631 ql_unmap_send(qdev
, tx_ring_desc
, tx_ring_desc
->map_cnt
);
1632 qdev
->stats
.tx_bytes
+= tx_ring_desc
->map_cnt
;
1633 qdev
->stats
.tx_packets
++;
1634 dev_kfree_skb(tx_ring_desc
->skb
);
1635 tx_ring_desc
->skb
= NULL
;
1637 if (unlikely(mac_rsp
->flags1
& (OB_MAC_IOCB_RSP_E
|
1640 OB_MAC_IOCB_RSP_P
| OB_MAC_IOCB_RSP_B
))) {
1641 if (mac_rsp
->flags1
& OB_MAC_IOCB_RSP_E
) {
1642 QPRINTK(qdev
, TX_DONE
, WARNING
,
1643 "Total descriptor length did not match transfer length.\n");
1645 if (mac_rsp
->flags1
& OB_MAC_IOCB_RSP_S
) {
1646 QPRINTK(qdev
, TX_DONE
, WARNING
,
1647 "Frame too short to be legal, not sent.\n");
1649 if (mac_rsp
->flags1
& OB_MAC_IOCB_RSP_L
) {
1650 QPRINTK(qdev
, TX_DONE
, WARNING
,
1651 "Frame too long, but sent anyway.\n");
1653 if (mac_rsp
->flags1
& OB_MAC_IOCB_RSP_B
) {
1654 QPRINTK(qdev
, TX_DONE
, WARNING
,
1655 "PCI backplane error. Frame not sent.\n");
1658 atomic_inc(&tx_ring
->tx_count
);
/* Fire up a handler to reset the MPI processor. */
void ql_queue_fw_error(struct ql_adapter *qdev)
{
	netif_carrier_off(qdev->ndev);
	queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
}

void ql_queue_asic_error(struct ql_adapter *qdev)
{
	netif_carrier_off(qdev->ndev);
	ql_disable_interrupts(qdev);
	/* Clear adapter up bit to signal the recovery
	 * process that it shouldn't kill the reset worker
	 * thread
	 */
	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
}
1680 static void ql_process_chip_ae_intr(struct ql_adapter
*qdev
,
1681 struct ib_ae_iocb_rsp
*ib_ae_rsp
)
1683 switch (ib_ae_rsp
->event
) {
1684 case MGMT_ERR_EVENT
:
1685 QPRINTK(qdev
, RX_ERR
, ERR
,
1686 "Management Processor Fatal Error.\n");
1687 ql_queue_fw_error(qdev
);
1690 case CAM_LOOKUP_ERR_EVENT
:
1691 QPRINTK(qdev
, LINK
, ERR
,
1692 "Multiple CAM hits lookup occurred.\n");
1693 QPRINTK(qdev
, DRV
, ERR
, "This event shouldn't occur.\n");
1694 ql_queue_asic_error(qdev
);
1697 case SOFT_ECC_ERROR_EVENT
:
1698 QPRINTK(qdev
, RX_ERR
, ERR
, "Soft ECC error detected.\n");
1699 ql_queue_asic_error(qdev
);
1702 case PCI_ERR_ANON_BUF_RD
:
1703 QPRINTK(qdev
, RX_ERR
, ERR
,
1704 "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
1706 ql_queue_asic_error(qdev
);
1710 QPRINTK(qdev
, DRV
, ERR
, "Unexpected event %d.\n",
1712 ql_queue_asic_error(qdev
);
1717 static int ql_clean_outbound_rx_ring(struct rx_ring
*rx_ring
)
1719 struct ql_adapter
*qdev
= rx_ring
->qdev
;
1720 u32 prod
= ql_read_sh_reg(rx_ring
->prod_idx_sh_reg
);
1721 struct ob_mac_iocb_rsp
*net_rsp
= NULL
;
1724 struct tx_ring
*tx_ring
;
1725 /* While there are entries in the completion queue. */
1726 while (prod
!= rx_ring
->cnsmr_idx
) {
1728 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1729 "cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring
->cq_id
,
1730 prod
, rx_ring
->cnsmr_idx
);
1732 net_rsp
= (struct ob_mac_iocb_rsp
*)rx_ring
->curr_entry
;
1734 switch (net_rsp
->opcode
) {
1736 case OPCODE_OB_MAC_TSO_IOCB
:
1737 case OPCODE_OB_MAC_IOCB
:
1738 ql_process_mac_tx_intr(qdev
, net_rsp
);
1741 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1742 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
1746 ql_update_cq(rx_ring
);
1747 prod
= ql_read_sh_reg(rx_ring
->prod_idx_sh_reg
);
1749 ql_write_cq_idx(rx_ring
);
1750 tx_ring
= &qdev
->tx_ring
[net_rsp
->txq_idx
];
1751 if (__netif_subqueue_stopped(qdev
->ndev
, tx_ring
->wq_id
) &&
1753 if (atomic_read(&tx_ring
->queue_stopped
) &&
1754 (atomic_read(&tx_ring
->tx_count
) > (tx_ring
->wq_len
/ 4)))
1756 * The queue got stopped because the tx_ring was full.
1757 * Wake it up, because it's now at least 25% empty.
1759 netif_wake_subqueue(qdev
->ndev
, tx_ring
->wq_id
);
1765 static int ql_clean_inbound_rx_ring(struct rx_ring
*rx_ring
, int budget
)
1767 struct ql_adapter
*qdev
= rx_ring
->qdev
;
1768 u32 prod
= ql_read_sh_reg(rx_ring
->prod_idx_sh_reg
);
1769 struct ql_net_rsp_iocb
*net_rsp
;
1772 /* While there are entries in the completion queue. */
1773 while (prod
!= rx_ring
->cnsmr_idx
) {
1775 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1776 "cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring
->cq_id
,
1777 prod
, rx_ring
->cnsmr_idx
);
1779 net_rsp
= rx_ring
->curr_entry
;
1781 switch (net_rsp
->opcode
) {
1782 case OPCODE_IB_MAC_IOCB
:
1783 ql_process_mac_rx_intr(qdev
, rx_ring
,
1784 (struct ib_mac_iocb_rsp
*)
1788 case OPCODE_IB_AE_IOCB
:
1789 ql_process_chip_ae_intr(qdev
, (struct ib_ae_iocb_rsp
*)
1794 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1795 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
1800 ql_update_cq(rx_ring
);
1801 prod
= ql_read_sh_reg(rx_ring
->prod_idx_sh_reg
);
1802 if (count
== budget
)
1805 ql_update_buffer_queues(qdev
, rx_ring
);
1806 ql_write_cq_idx(rx_ring
);
1810 static int ql_napi_poll_msix(struct napi_struct
*napi
, int budget
)
1812 struct rx_ring
*rx_ring
= container_of(napi
, struct rx_ring
, napi
);
1813 struct ql_adapter
*qdev
= rx_ring
->qdev
;
1814 int work_done
= ql_clean_inbound_rx_ring(rx_ring
, budget
);
1816 QPRINTK(qdev
, RX_STATUS
, DEBUG
, "Enter, NAPI POLL cq_id = %d.\n",
1819 if (work_done
< budget
) {
1820 napi_complete(napi
);
1821 ql_enable_completion_interrupt(qdev
, rx_ring
->irq
);
1826 static void ql_vlan_rx_register(struct net_device
*ndev
, struct vlan_group
*grp
)
1828 struct ql_adapter
*qdev
= netdev_priv(ndev
);
1832 QPRINTK(qdev
, IFUP
, DEBUG
, "Turning on VLAN in NIC_RCV_CFG.\n");
1833 ql_write32(qdev
, NIC_RCV_CFG
, NIC_RCV_CFG_VLAN_MASK
|
1834 NIC_RCV_CFG_VLAN_MATCH_AND_NON
);
1836 QPRINTK(qdev
, IFUP
, DEBUG
,
1837 "Turning off VLAN in NIC_RCV_CFG.\n");
1838 ql_write32(qdev
, NIC_RCV_CFG
, NIC_RCV_CFG_VLAN_MASK
);
1842 static void ql_vlan_rx_add_vid(struct net_device
*ndev
, u16 vid
)
1844 struct ql_adapter
*qdev
= netdev_priv(ndev
);
1845 u32 enable_bit
= MAC_ADDR_E
;
1848 status
= ql_sem_spinlock(qdev
, SEM_MAC_ADDR_MASK
);
1851 spin_lock(&qdev
->hw_lock
);
1852 if (ql_set_mac_addr_reg
1853 (qdev
, (u8
*) &enable_bit
, MAC_ADDR_TYPE_VLAN
, vid
)) {
1854 QPRINTK(qdev
, IFUP
, ERR
, "Failed to init vlan address.\n");
1856 spin_unlock(&qdev
->hw_lock
);
1857 ql_sem_unlock(qdev
, SEM_MAC_ADDR_MASK
);
1860 static void ql_vlan_rx_kill_vid(struct net_device
*ndev
, u16 vid
)
1862 struct ql_adapter
*qdev
= netdev_priv(ndev
);
1866 status
= ql_sem_spinlock(qdev
, SEM_MAC_ADDR_MASK
);
1870 spin_lock(&qdev
->hw_lock
);
1871 if (ql_set_mac_addr_reg
1872 (qdev
, (u8
*) &enable_bit
, MAC_ADDR_TYPE_VLAN
, vid
)) {
1873 QPRINTK(qdev
, IFUP
, ERR
, "Failed to clear vlan address.\n");
1875 spin_unlock(&qdev
->hw_lock
);
1876 ql_sem_unlock(qdev
, SEM_MAC_ADDR_MASK
);
/* Worker thread to process a given rx_ring that is dedicated
 * to outbound completions.
 */
static void ql_tx_clean(struct work_struct *work)
{
	struct rx_ring *rx_ring =
	    container_of(work, struct rx_ring, rx_work.work);
	ql_clean_outbound_rx_ring(rx_ring);
	ql_enable_completion_interrupt(rx_ring->qdev, rx_ring->irq);
}

/* Worker thread to process a given rx_ring that is dedicated
 * to inbound completions.
 */
static void ql_rx_clean(struct work_struct *work)
{
	struct rx_ring *rx_ring =
	    container_of(work, struct rx_ring, rx_work.work);
	ql_clean_inbound_rx_ring(rx_ring, 64);
	ql_enable_completion_interrupt(rx_ring->qdev, rx_ring->irq);
}

/* MSI-X Multiple Vector Interrupt Handler for outbound completions. */
static irqreturn_t qlge_msix_tx_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	queue_delayed_work_on(rx_ring->cpu, rx_ring->qdev->q_workqueue,
			      &rx_ring->rx_work, 0);
	return IRQ_HANDLED;
}

/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	napi_schedule(&rx_ring->napi);
	return IRQ_HANDLED;
}
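
/* Both MSI-X handlers receive the rx_ring itself as dev_id, so each vector is
 * already bound to its ring: outbound completions are deferred to the
 * per-ring delayed work above, while inbound completions go straight to NAPI.
 */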
1920 /* This handles a fatal error, MPI activity, and the default
1921 * rx_ring in an MSI-X multiple vector environment.
1922 * In MSI/Legacy environment it also process the rest of
1925 static irqreturn_t
qlge_isr(int irq
, void *dev_id
)
1927 struct rx_ring
*rx_ring
= dev_id
;
1928 struct ql_adapter
*qdev
= rx_ring
->qdev
;
1929 struct intr_context
*intr_context
= &qdev
->intr_context
[0];
1934 spin_lock(&qdev
->hw_lock
);
1935 if (atomic_read(&qdev
->intr_context
[0].irq_cnt
)) {
1936 QPRINTK(qdev
, INTR
, DEBUG
, "Shared Interrupt, Not ours!\n");
1937 spin_unlock(&qdev
->hw_lock
);
1940 spin_unlock(&qdev
->hw_lock
);
1942 var
= ql_disable_completion_interrupt(qdev
, intr_context
->intr
);
1945 * Check for fatal error.
1948 ql_queue_asic_error(qdev
);
1949 QPRINTK(qdev
, INTR
, ERR
, "Got fatal error, STS = %x.\n", var
);
1950 var
= ql_read32(qdev
, ERR_STS
);
1951 QPRINTK(qdev
, INTR
, ERR
,
1952 "Resetting chip. Error Status Register = 0x%x\n", var
);
1957 * Check MPI processor activity.
1961 * We've got an async event or mailbox completion.
1962 * Handle it and clear the source of the interrupt.
1964 QPRINTK(qdev
, INTR
, ERR
, "Got MPI processor interrupt.\n");
1965 ql_disable_completion_interrupt(qdev
, intr_context
->intr
);
1966 queue_delayed_work_on(smp_processor_id(), qdev
->workqueue
,
1967 &qdev
->mpi_work
, 0);
1972 * Check the default queue and wake handler if active.
1974 rx_ring
= &qdev
->rx_ring
[0];
1975 if (ql_read_sh_reg(rx_ring
->prod_idx_sh_reg
) != rx_ring
->cnsmr_idx
) {
1976 QPRINTK(qdev
, INTR
, INFO
, "Waking handler for rx_ring[0].\n");
1977 ql_disable_completion_interrupt(qdev
, intr_context
->intr
);
1978 queue_delayed_work_on(smp_processor_id(), qdev
->q_workqueue
,
1979 &rx_ring
->rx_work
, 0);
1983 if (!test_bit(QL_MSIX_ENABLED
, &qdev
->flags
)) {
1985 * Start the DPC for each active queue.
1987 for (i
= 1; i
< qdev
->rx_ring_count
; i
++) {
1988 rx_ring
= &qdev
->rx_ring
[i
];
1989 if (ql_read_sh_reg(rx_ring
->prod_idx_sh_reg
) !=
1990 rx_ring
->cnsmr_idx
) {
1991 QPRINTK(qdev
, INTR
, INFO
,
1992 "Waking handler for rx_ring[%d].\n", i
);
1993 ql_disable_completion_interrupt(qdev
,
1996 if (i
< qdev
->rss_ring_first_cq_id
)
1997 queue_delayed_work_on(rx_ring
->cpu
,
2002 napi_schedule(&rx_ring
->napi
);
2007 ql_enable_completion_interrupt(qdev
, intr_context
->intr
);
2008 return work_done
? IRQ_HANDLED
: IRQ_NONE
;
2011 static int ql_tso(struct sk_buff
*skb
, struct ob_mac_tso_iocb_req
*mac_iocb_ptr
)
2014 if (skb_is_gso(skb
)) {
2016 if (skb_header_cloned(skb
)) {
2017 err
= pskb_expand_head(skb
, 0, 0, GFP_ATOMIC
);
2022 mac_iocb_ptr
->opcode
= OPCODE_OB_MAC_TSO_IOCB
;
2023 mac_iocb_ptr
->flags3
|= OB_MAC_TSO_IOCB_IC
;
2024 mac_iocb_ptr
->frame_len
= cpu_to_le32((u32
) skb
->len
);
2025 mac_iocb_ptr
->total_hdrs_len
=
2026 cpu_to_le16(skb_transport_offset(skb
) + tcp_hdrlen(skb
));
2027 mac_iocb_ptr
->net_trans_offset
=
2028 cpu_to_le16(skb_network_offset(skb
) |
2029 skb_transport_offset(skb
)
2030 << OB_MAC_TRANSPORT_HDR_SHIFT
);
2031 mac_iocb_ptr
->mss
= cpu_to_le16(skb_shinfo(skb
)->gso_size
);
2032 mac_iocb_ptr
->flags2
|= OB_MAC_TSO_IOCB_LSO
;
2033 if (likely(skb
->protocol
== htons(ETH_P_IP
))) {
2034 struct iphdr
*iph
= ip_hdr(skb
);
2036 mac_iocb_ptr
->flags1
|= OB_MAC_TSO_IOCB_IP4
;
2037 tcp_hdr(skb
)->check
= ~csum_tcpudp_magic(iph
->saddr
,
2041 } else if (skb
->protocol
== htons(ETH_P_IPV6
)) {
2042 mac_iocb_ptr
->flags1
|= OB_MAC_TSO_IOCB_IP6
;
2043 tcp_hdr(skb
)->check
=
2044 ~csum_ipv6_magic(&ipv6_hdr(skb
)->saddr
,
2045 &ipv6_hdr(skb
)->daddr
,
2053 static void ql_hw_csum_setup(struct sk_buff
*skb
,
2054 struct ob_mac_tso_iocb_req
*mac_iocb_ptr
)
2057 struct iphdr
*iph
= ip_hdr(skb
);
2059 mac_iocb_ptr
->opcode
= OPCODE_OB_MAC_TSO_IOCB
;
2060 mac_iocb_ptr
->frame_len
= cpu_to_le32((u32
) skb
->len
);
2061 mac_iocb_ptr
->net_trans_offset
=
2062 cpu_to_le16(skb_network_offset(skb
) |
2063 skb_transport_offset(skb
) << OB_MAC_TRANSPORT_HDR_SHIFT
);
2065 mac_iocb_ptr
->flags1
|= OB_MAC_TSO_IOCB_IP4
;
2066 len
= (ntohs(iph
->tot_len
) - (iph
->ihl
<< 2));
2067 if (likely(iph
->protocol
== IPPROTO_TCP
)) {
2068 check
= &(tcp_hdr(skb
)->check
);
2069 mac_iocb_ptr
->flags2
|= OB_MAC_TSO_IOCB_TC
;
2070 mac_iocb_ptr
->total_hdrs_len
=
2071 cpu_to_le16(skb_transport_offset(skb
) +
2072 (tcp_hdr(skb
)->doff
<< 2));
2074 check
= &(udp_hdr(skb
)->check
);
2075 mac_iocb_ptr
->flags2
|= OB_MAC_TSO_IOCB_UC
;
2076 mac_iocb_ptr
->total_hdrs_len
=
2077 cpu_to_le16(skb_transport_offset(skb
) +
2078 sizeof(struct udphdr
));
2080 *check
= ~csum_tcpudp_magic(iph
->saddr
,
2081 iph
->daddr
, len
, iph
->protocol
, 0);
2084 static int qlge_send(struct sk_buff
*skb
, struct net_device
*ndev
)
2086 struct tx_ring_desc
*tx_ring_desc
;
2087 struct ob_mac_iocb_req
*mac_iocb_ptr
;
2088 struct ql_adapter
*qdev
= netdev_priv(ndev
);
2090 struct tx_ring
*tx_ring
;
2091 u32 tx_ring_idx
= (u32
) skb
->queue_mapping
;
2093 tx_ring
= &qdev
->tx_ring
[tx_ring_idx
];
2095 if (skb_padto(skb
, ETH_ZLEN
))
2096 return NETDEV_TX_OK
;
2098 if (unlikely(atomic_read(&tx_ring
->tx_count
) < 2)) {
2099 QPRINTK(qdev
, TX_QUEUED
, INFO
,
2100 "%s: shutting down tx queue %d du to lack of resources.\n",
2101 __func__
, tx_ring_idx
);
2102 netif_stop_subqueue(ndev
, tx_ring
->wq_id
);
2103 atomic_inc(&tx_ring
->queue_stopped
);
2104 return NETDEV_TX_BUSY
;
2106 tx_ring_desc
= &tx_ring
->q
[tx_ring
->prod_idx
];
2107 mac_iocb_ptr
= tx_ring_desc
->queue_entry
;
2108 memset((void *)mac_iocb_ptr
, 0, sizeof(mac_iocb_ptr
));
2110 mac_iocb_ptr
->opcode
= OPCODE_OB_MAC_IOCB
;
2111 mac_iocb_ptr
->tid
= tx_ring_desc
->index
;
2112 /* We use the upper 32-bits to store the tx queue for this IO.
2113 * When we get the completion we can use it to establish the context.
2115 mac_iocb_ptr
->txq_idx
= tx_ring_idx
;
2116 tx_ring_desc
->skb
= skb
;
2118 mac_iocb_ptr
->frame_len
= cpu_to_le16((u16
) skb
->len
);
2120 if (qdev
->vlgrp
&& vlan_tx_tag_present(skb
)) {
2121 QPRINTK(qdev
, TX_QUEUED
, DEBUG
, "Adding a vlan tag %d.\n",
2122 vlan_tx_tag_get(skb
));
2123 mac_iocb_ptr
->flags3
|= OB_MAC_IOCB_V
;
2124 mac_iocb_ptr
->vlan_tci
= cpu_to_le16(vlan_tx_tag_get(skb
));
2126 tso
= ql_tso(skb
, (struct ob_mac_tso_iocb_req
*)mac_iocb_ptr
);
2128 dev_kfree_skb_any(skb
);
2129 return NETDEV_TX_OK
;
2130 } else if (unlikely(!tso
) && (skb
->ip_summed
== CHECKSUM_PARTIAL
)) {
2131 ql_hw_csum_setup(skb
,
2132 (struct ob_mac_tso_iocb_req
*)mac_iocb_ptr
);
2134 if (ql_map_send(qdev
, mac_iocb_ptr
, skb
, tx_ring_desc
) !=
2136 QPRINTK(qdev
, TX_QUEUED
, ERR
,
2137 "Could not map the segments.\n");
2138 return NETDEV_TX_BUSY
;
2140 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr
);
2141 tx_ring
->prod_idx
++;
2142 if (tx_ring
->prod_idx
== tx_ring
->wq_len
)
2143 tx_ring
->prod_idx
= 0;
2146 ql_write_db_reg(tx_ring
->prod_idx
, tx_ring
->prod_idx_db_reg
);
2147 QPRINTK(qdev
, TX_QUEUED
, DEBUG
, "tx queued, slot %d, len %d\n",
2148 tx_ring
->prod_idx
, skb
->len
);
2150 atomic_dec(&tx_ring
->tx_count
);
2151 return NETDEV_TX_OK
;

static void ql_free_shadow_space(struct ql_adapter *qdev)
{
	if (qdev->rx_ring_shadow_reg_area) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->rx_ring_shadow_reg_area,
				    qdev->rx_ring_shadow_reg_dma);
		qdev->rx_ring_shadow_reg_area = NULL;
	}
	if (qdev->tx_ring_shadow_reg_area) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->tx_ring_shadow_reg_area,
				    qdev->tx_ring_shadow_reg_dma);
		qdev->tx_ring_shadow_reg_area = NULL;
	}
}

static int ql_alloc_shadow_space(struct ql_adapter *qdev)
{
	qdev->rx_ring_shadow_reg_area =
	    pci_alloc_consistent(qdev->pdev,
				 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
	if (qdev->rx_ring_shadow_reg_area == NULL) {
		QPRINTK(qdev, IFUP, ERR,
			"Allocation of RX shadow space failed.\n");
		return -ENOMEM;
	}
	memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
	qdev->tx_ring_shadow_reg_area =
	    pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
				 &qdev->tx_ring_shadow_reg_dma);
	if (qdev->tx_ring_shadow_reg_area == NULL) {
		QPRINTK(qdev, IFUP, ERR,
			"Allocation of TX shadow space failed.\n");
		goto err_wqp_sh_area;
	}
	memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
	return 0;

err_wqp_sh_area:
	pci_free_consistent(qdev->pdev,
			    PAGE_SIZE,
			    qdev->rx_ring_shadow_reg_area,
			    qdev->rx_ring_shadow_reg_dma);
	return -ENOMEM;
}
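
/* Both shadow areas above are a single DMA-coherent page each.
 * ql_start_rx_ring() carves the rx page into one RX_RING_SHADOW_SPACE slice
 * per completion queue, and ql_start_tx_ring() uses one u64 consumer-index
 * slot per work queue in the tx page.
 */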

static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
{
	struct tx_ring_desc *tx_ring_desc;
	int i;
	struct ob_mac_iocb_req *mac_iocb_ptr;

	mac_iocb_ptr = tx_ring->wq_base;
	tx_ring_desc = tx_ring->q;
	for (i = 0; i < tx_ring->wq_len; i++) {
		tx_ring_desc->index = i;
		tx_ring_desc->skb = NULL;
		tx_ring_desc->queue_entry = mac_iocb_ptr;
		mac_iocb_ptr++;
		tx_ring_desc++;
	}
	atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
	atomic_set(&tx_ring->queue_stopped, 0);
}

static void ql_free_tx_resources(struct ql_adapter *qdev,
				 struct tx_ring *tx_ring)
{
	if (tx_ring->wq_base) {
		pci_free_consistent(qdev->pdev, tx_ring->wq_size,
				    tx_ring->wq_base, tx_ring->wq_base_dma);
		tx_ring->wq_base = NULL;
	}
	kfree(tx_ring->q);
	tx_ring->q = NULL;
}

static int ql_alloc_tx_resources(struct ql_adapter *qdev,
				 struct tx_ring *tx_ring)
{
	tx_ring->wq_base =
	    pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
				 &tx_ring->wq_base_dma);

	if ((tx_ring->wq_base == NULL)
	    || tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
		QPRINTK(qdev, IFUP, ERR, "tx_ring alloc failed.\n");
		return -ENOMEM;
	}
	tx_ring->q =
	    kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
	if (tx_ring->q == NULL)
		goto err;

	return 0;
err:
	pci_free_consistent(qdev->pdev, tx_ring->wq_size,
			    tx_ring->wq_base, tx_ring->wq_base_dma);
	return -ENOMEM;
}

static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *lbq_desc;

	for (i = 0; i < rx_ring->lbq_len; i++) {
		lbq_desc = &rx_ring->lbq[i];
		if (lbq_desc->p.lbq_page) {
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(lbq_desc, mapaddr),
				       pci_unmap_len(lbq_desc, maplen),
				       PCI_DMA_FROMDEVICE);

			put_page(lbq_desc->p.lbq_page);
			lbq_desc->p.lbq_page = NULL;
		}
	}
}

static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *sbq_desc;

	for (i = 0; i < rx_ring->sbq_len; i++) {
		sbq_desc = &rx_ring->sbq[i];
		if (sbq_desc == NULL) {
			QPRINTK(qdev, IFUP, ERR, "sbq_desc %d is NULL.\n", i);
			return;
		}
		if (sbq_desc->p.skb) {
			pci_unmap_single(qdev->pdev,
					 pci_unmap_addr(sbq_desc, mapaddr),
					 pci_unmap_len(sbq_desc, maplen),
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(sbq_desc->p.skb);
			sbq_desc->p.skb = NULL;
		}
	}
}

/* Free all large and small rx buffers associated
 * with the completion queues for this device.
 */
static void ql_free_rx_buffers(struct ql_adapter *qdev)
{
	int i;
	struct rx_ring *rx_ring;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		if (rx_ring->lbq)
			ql_free_lbq_buffers(qdev, rx_ring);
		if (rx_ring->sbq)
			ql_free_sbq_buffers(qdev, rx_ring);
	}
}

static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
{
	struct rx_ring *rx_ring;
	int i;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		if (rx_ring->type != TX_Q)
			ql_update_buffer_queues(qdev, rx_ring);
	}
}

static void ql_init_lbq_ring(struct ql_adapter *qdev,
			     struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *lbq_desc;
	__le64 *bq = rx_ring->lbq_base;

	memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
	for (i = 0; i < rx_ring->lbq_len; i++) {
		lbq_desc = &rx_ring->lbq[i];
		memset(lbq_desc, 0, sizeof(*lbq_desc));
		lbq_desc->index = i;
		lbq_desc->addr = bq;
		bq++;
	}
}

static void ql_init_sbq_ring(struct ql_adapter *qdev,
			     struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *sbq_desc;
	__le64 *bq = rx_ring->sbq_base;

	memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
	for (i = 0; i < rx_ring->sbq_len; i++) {
		sbq_desc = &rx_ring->sbq[i];
		memset(sbq_desc, 0, sizeof(*sbq_desc));
		sbq_desc->index = i;
		sbq_desc->addr = bq;
		bq++;
	}
}

static void ql_free_rx_resources(struct ql_adapter *qdev,
				 struct rx_ring *rx_ring)
{
	/* Free the small buffer queue. */
	if (rx_ring->sbq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->sbq_size,
				    rx_ring->sbq_base, rx_ring->sbq_base_dma);
		rx_ring->sbq_base = NULL;
	}

	/* Free the small buffer queue control blocks. */
	kfree(rx_ring->sbq);
	rx_ring->sbq = NULL;

	/* Free the large buffer queue. */
	if (rx_ring->lbq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->lbq_size,
				    rx_ring->lbq_base, rx_ring->lbq_base_dma);
		rx_ring->lbq_base = NULL;
	}

	/* Free the large buffer queue control blocks. */
	kfree(rx_ring->lbq);
	rx_ring->lbq = NULL;

	/* Free the rx queue. */
	if (rx_ring->cq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->cq_size,
				    rx_ring->cq_base, rx_ring->cq_base_dma);
		rx_ring->cq_base = NULL;
	}
}

/* Allocate queues and buffers for this completions queue based
 * on the values in the parameter structure. */
static int ql_alloc_rx_resources(struct ql_adapter *qdev,
				 struct rx_ring *rx_ring)
{
	/*
	 * Allocate the completion queue for this rx_ring.
	 */
	rx_ring->cq_base =
	    pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
				 &rx_ring->cq_base_dma);

	if (rx_ring->cq_base == NULL) {
		QPRINTK(qdev, IFUP, ERR, "rx_ring alloc failed.\n");
		return -ENOMEM;
	}

	if (rx_ring->sbq_len) {
		/*
		 * Allocate small buffer queue.
		 */
		rx_ring->sbq_base =
		    pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
					 &rx_ring->sbq_base_dma);

		if (rx_ring->sbq_base == NULL) {
			QPRINTK(qdev, IFUP, ERR,
				"Small buffer queue allocation failed.\n");
			goto err_mem;
		}

		/*
		 * Allocate small buffer queue control blocks.
		 */
		rx_ring->sbq =
		    kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
			    GFP_KERNEL);
		if (rx_ring->sbq == NULL) {
			QPRINTK(qdev, IFUP, ERR,
				"Small buffer queue control block allocation failed.\n");
			goto err_mem;
		}

		ql_init_sbq_ring(qdev, rx_ring);
	}

	if (rx_ring->lbq_len) {
		/*
		 * Allocate large buffer queue.
		 */
		rx_ring->lbq_base =
		    pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
					 &rx_ring->lbq_base_dma);

		if (rx_ring->lbq_base == NULL) {
			QPRINTK(qdev, IFUP, ERR,
				"Large buffer queue allocation failed.\n");
			goto err_mem;
		}
		/*
		 * Allocate large buffer queue control blocks.
		 */
		rx_ring->lbq =
		    kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
			    GFP_KERNEL);
		if (rx_ring->lbq == NULL) {
			QPRINTK(qdev, IFUP, ERR,
				"Large buffer queue control block allocation failed.\n");
			goto err_mem;
		}

		ql_init_lbq_ring(qdev, rx_ring);
	}

	return 0;

err_mem:
	ql_free_rx_resources(qdev, rx_ring);
	return -ENOMEM;
}

static void ql_tx_ring_clean(struct ql_adapter *qdev)
{
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;
	int i, j;

	/*
	 * Loop through all queues and free
	 * any resources.
	 */
	for (j = 0; j < qdev->tx_ring_count; j++) {
		tx_ring = &qdev->tx_ring[j];
		for (i = 0; i < tx_ring->wq_len; i++) {
			tx_ring_desc = &tx_ring->q[i];
			if (tx_ring_desc && tx_ring_desc->skb) {
				QPRINTK(qdev, IFDOWN, ERR,
					"Freeing lost SKB %p, from queue %d, index %d.\n",
					tx_ring_desc->skb, j,
					tx_ring_desc->index);
				ql_unmap_send(qdev, tx_ring_desc,
					      tx_ring_desc->map_cnt);
				dev_kfree_skb(tx_ring_desc->skb);
				tx_ring_desc->skb = NULL;
			}
		}
	}
}

static void ql_free_mem_resources(struct ql_adapter *qdev)
{
	int i;

	for (i = 0; i < qdev->tx_ring_count; i++)
		ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
	for (i = 0; i < qdev->rx_ring_count; i++)
		ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
	ql_free_shadow_space(qdev);
}

static int ql_alloc_mem_resources(struct ql_adapter *qdev)
{
	int i;

	/* Allocate space for our shadow registers and such. */
	if (ql_alloc_shadow_space(qdev))
		return -ENOMEM;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
			QPRINTK(qdev, IFUP, ERR,
				"RX resource allocation failed.\n");
			goto err_mem;
		}
	}
	/* Allocate tx queue resources */
	for (i = 0; i < qdev->tx_ring_count; i++) {
		if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
			QPRINTK(qdev, IFUP, ERR,
				"TX resource allocation failed.\n");
			goto err_mem;
		}
	}
	return 0;

err_mem:
	ql_free_mem_resources(qdev);
	return -ENOMEM;
}

/* Set up the rx ring control block and pass it to the chip.
 * The control block is defined as
 * "Completion Queue Initialization Control Block", or cqicb.
 */
static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	struct cqicb *cqicb = &rx_ring->cqicb;
	void *shadow_reg = qdev->rx_ring_shadow_reg_area +
	    (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
	u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
	    (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
	void __iomem *doorbell_area =
	    qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
	int err = 0;
	u16 bq_len;
	u64 tmp;
	__le64 *base_indirect_ptr;
	int page_entries;

	/* Set up the shadow registers for this ring. */
	rx_ring->prod_idx_sh_reg = shadow_reg;
	rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
	shadow_reg += sizeof(u64);
	shadow_reg_dma += sizeof(u64);
	rx_ring->lbq_base_indirect = shadow_reg;
	rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
	shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
	shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
	rx_ring->sbq_base_indirect = shadow_reg;
	rx_ring->sbq_base_indirect_dma = shadow_reg_dma;

	/* PCI doorbell mem area + 0x00 for consumer index register */
	rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
	rx_ring->cnsmr_idx = 0;
	rx_ring->curr_entry = rx_ring->cq_base;

	/* PCI doorbell mem area + 0x04 for valid register */
	rx_ring->valid_db_reg = doorbell_area + 0x04;

	/* PCI doorbell mem area + 0x18 for large buffer consumer */
	rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);

	/* PCI doorbell mem area + 0x1c */
	rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);

	memset((void *)cqicb, 0, sizeof(struct cqicb));
	cqicb->msix_vect = rx_ring->irq;

	bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
	cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);

	cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);

	cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);

	/*
	 * Set up the control block load flags.
	 */
	cqicb->flags = FLAGS_LC |	/* Load queue base address */
	    FLAGS_LV |		/* Load MSI-X vector */
	    FLAGS_LI;		/* Load irq delay values */
	if (rx_ring->lbq_len) {
		cqicb->flags |= FLAGS_LL;	/* Load lbq values */
		tmp = (u64)rx_ring->lbq_base_dma;
		base_indirect_ptr = (__le64 *) rx_ring->lbq_base_indirect;
		page_entries = 0;
		do {
			*base_indirect_ptr = cpu_to_le64(tmp);
			tmp += DB_PAGE_SIZE;
			base_indirect_ptr++;
			page_entries++;
		} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
		cqicb->lbq_addr =
		    cpu_to_le64(rx_ring->lbq_base_indirect_dma);
		bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
		    (u16) rx_ring->lbq_buf_size;
		cqicb->lbq_buf_size = cpu_to_le16(bq_len);
		bq_len = (rx_ring->lbq_len == 65536) ? 0 :
		    (u16) rx_ring->lbq_len;
		cqicb->lbq_len = cpu_to_le16(bq_len);
		rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_curr_idx = 0;
		rx_ring->lbq_clean_idx = 0;
		rx_ring->lbq_free_cnt = rx_ring->lbq_len;
	}
	if (rx_ring->sbq_len) {
		cqicb->flags |= FLAGS_LS;	/* Load sbq values */
		tmp = (u64)rx_ring->sbq_base_dma;
		base_indirect_ptr = (__le64 *) rx_ring->sbq_base_indirect;
		page_entries = 0;
		do {
			*base_indirect_ptr = cpu_to_le64(tmp);
			tmp += DB_PAGE_SIZE;
			base_indirect_ptr++;
			page_entries++;
		} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
		cqicb->sbq_addr =
		    cpu_to_le64(rx_ring->sbq_base_indirect_dma);
		cqicb->sbq_buf_size =
		    cpu_to_le16((u16)(rx_ring->sbq_buf_size/2));
		bq_len = (rx_ring->sbq_len == 65536) ? 0 :
		    (u16) rx_ring->sbq_len;
		cqicb->sbq_len = cpu_to_le16(bq_len);
		rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_curr_idx = 0;
		rx_ring->sbq_clean_idx = 0;
		rx_ring->sbq_free_cnt = rx_ring->sbq_len;
	}
	switch (rx_ring->type) {
	case TX_Q:
		/* If there's only one interrupt, then we use
		 * worker threads to process the outbound
		 * completion handling rx_rings.  We do this so
		 * they can be run on multiple CPUs.  There is
		 * room to play with this more where we would only
		 * run in a worker if there are more than x number
		 * of outbound completions on the queue and more
		 * than one queue active.  Some threshold that
		 * would indicate a benefit in spite of the cost
		 * of a context switch.
		 * If there's more than one interrupt, then the
		 * outbound completions are processed in the ISR.
		 */
		if (!test_bit(QL_MSIX_ENABLED, &qdev->flags))
			INIT_DELAYED_WORK(&rx_ring->rx_work, ql_tx_clean);
		else {
			/* With all debug warnings on we see a WARN_ON message
			 * when we free the skb in the interrupt context.
			 */
			INIT_DELAYED_WORK(&rx_ring->rx_work, ql_tx_clean);
		}
		cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
		cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
		break;
	case DEFAULT_Q:
		INIT_DELAYED_WORK(&rx_ring->rx_work, ql_rx_clean);
		cqicb->irq_delay = 0;
		cqicb->pkt_delay = 0;
		break;
	case RX_Q:
		/* Inbound completion handling rx_rings run in
		 * separate NAPI contexts.
		 */
		netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
			       64);
		cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
		cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
		break;
	default:
		QPRINTK(qdev, IFUP, DEBUG, "Invalid rx_ring->type = %d.\n",
			rx_ring->type);
	}
	QPRINTK(qdev, IFUP, DEBUG, "Initializing rx work queue.\n");
	err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
			   CFG_LCQ, rx_ring->cq_id);
	if (err) {
		QPRINTK(qdev, IFUP, ERR, "Failed to load CQICB.\n");
		return err;
	}
	return err;
}
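
/* Per-ring shadow layout, as set up above: the first u64 of the slice holds
 * the chip-written completion producer index, followed by
 * MAX_DB_PAGES_PER_BQ(lbq_len) u64 entries pointing at the DB_PAGE_SIZE-sized
 * pages backing the large buffer queue, then the same for the small buffer
 * queue.  For example, if a buffer queue spans four doorbell pages, its
 * indirection list is four u64 physical addresses, each DB_PAGE_SIZE apart;
 * the exact counts depend on the ring sizes chosen in ql_configure_rings().
 */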

static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
{
	struct wqicb *wqicb = (struct wqicb *)tx_ring;
	void __iomem *doorbell_area =
	    qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
	void *shadow_reg = qdev->tx_ring_shadow_reg_area +
	    (tx_ring->wq_id * sizeof(u64));
	u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
	    (tx_ring->wq_id * sizeof(u64));
	int err = 0;

	/*
	 * Assign doorbell registers for this tx_ring.
	 */
	/* TX PCI doorbell mem area for tx producer index */
	tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
	tx_ring->prod_idx = 0;
	/* TX PCI doorbell mem area + 0x04 */
	tx_ring->valid_db_reg = doorbell_area + 0x04;

	/*
	 * Assign shadow registers for this tx_ring.
	 */
	tx_ring->cnsmr_idx_sh_reg = shadow_reg;
	tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;

	wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
	wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
				   Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
	wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);

	wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);

	wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);

	ql_init_tx_ring(qdev, tx_ring);

	err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
			   (u16) tx_ring->wq_id);
	if (err) {
		QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n");
		return err;
	}
	QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded WQICB.\n");
	return err;
}
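
/* Note: each TX work queue posts its completions to the outbound completion
 * ring whose cq_id is programmed above (wqicb->cq_id_rss), assigned in
 * ql_configure_rings(), so TX reclaim runs on that rx_ring rather than on
 * the work queue itself.
 */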

static void ql_disable_msix(struct ql_adapter *qdev)
{
	if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
		pci_disable_msix(qdev->pdev);
		clear_bit(QL_MSIX_ENABLED, &qdev->flags);
		kfree(qdev->msi_x_entry);
		qdev->msi_x_entry = NULL;
	} else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
		pci_disable_msi(qdev->pdev);
		clear_bit(QL_MSI_ENABLED, &qdev->flags);
	}
}

static void ql_enable_msix(struct ql_adapter *qdev)
{
	int i;

	qdev->intr_count = 1;
	/* Get the MSIX vectors. */
	if (irq_type == MSIX_IRQ) {
		/* Try to alloc space for the msix struct,
		 * if it fails then go to MSI/legacy.
		 */
		qdev->msi_x_entry = kcalloc(qdev->rx_ring_count,
					    sizeof(struct msix_entry),
					    GFP_KERNEL);
		if (!qdev->msi_x_entry) {
			irq_type = MSI_IRQ;
			goto msi;
		}

		for (i = 0; i < qdev->rx_ring_count; i++)
			qdev->msi_x_entry[i].entry = i;

		if (!pci_enable_msix
		    (qdev->pdev, qdev->msi_x_entry, qdev->rx_ring_count)) {
			set_bit(QL_MSIX_ENABLED, &qdev->flags);
			qdev->intr_count = qdev->rx_ring_count;
			QPRINTK(qdev, IFUP, DEBUG,
				"MSI-X Enabled, got %d vectors.\n",
				qdev->intr_count);
			return;
		} else {
			kfree(qdev->msi_x_entry);
			qdev->msi_x_entry = NULL;
			QPRINTK(qdev, IFUP, WARNING,
				"MSI-X Enable failed, trying MSI.\n");
			irq_type = MSI_IRQ;
		}
	}
msi:
	if (irq_type == MSI_IRQ) {
		if (!pci_enable_msi(qdev->pdev)) {
			set_bit(QL_MSI_ENABLED, &qdev->flags);
			QPRINTK(qdev, IFUP, INFO,
				"Running with MSI interrupts.\n");
			return;
		}
	}
	irq_type = LEG_IRQ;
	QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n");
}
2821 * Here we build the intr_context structures based on
2822 * our rx_ring count and intr vector count.
2823 * The intr_context structure is used to hook each vector
2824 * to possibly different handlers.
2826 static void ql_resolve_queues_to_irqs(struct ql_adapter
*qdev
)
2829 struct intr_context
*intr_context
= &qdev
->intr_context
[0];
2831 ql_enable_msix(qdev
);
2833 if (likely(test_bit(QL_MSIX_ENABLED
, &qdev
->flags
))) {
2834 /* Each rx_ring has it's
2835 * own intr_context since we have separate
2836 * vectors for each queue.
2837 * This only true when MSI-X is enabled.
2839 for (i
= 0; i
< qdev
->intr_count
; i
++, intr_context
++) {
2840 qdev
->rx_ring
[i
].irq
= i
;
2841 intr_context
->intr
= i
;
2842 intr_context
->qdev
= qdev
;
2844 * We set up each vectors enable/disable/read bits so
2845 * there's no bit/mask calculations in the critical path.
2847 intr_context
->intr_en_mask
=
2848 INTR_EN_TYPE_MASK
| INTR_EN_INTR_MASK
|
2849 INTR_EN_TYPE_ENABLE
| INTR_EN_IHD_MASK
| INTR_EN_IHD
2851 intr_context
->intr_dis_mask
=
2852 INTR_EN_TYPE_MASK
| INTR_EN_INTR_MASK
|
2853 INTR_EN_TYPE_DISABLE
| INTR_EN_IHD_MASK
|
2855 intr_context
->intr_read_mask
=
2856 INTR_EN_TYPE_MASK
| INTR_EN_INTR_MASK
|
2857 INTR_EN_TYPE_READ
| INTR_EN_IHD_MASK
| INTR_EN_IHD
|
2862 * Default queue handles bcast/mcast plus
2863 * async events. Needs buffers.
2865 intr_context
->handler
= qlge_isr
;
2866 sprintf(intr_context
->name
, "%s-default-queue",
2868 } else if (i
< qdev
->rss_ring_first_cq_id
) {
2870 * Outbound queue is for outbound completions only.
2872 intr_context
->handler
= qlge_msix_tx_isr
;
2873 sprintf(intr_context
->name
, "%s-tx-%d",
2874 qdev
->ndev
->name
, i
);
2877 * Inbound queues handle unicast frames only.
2879 intr_context
->handler
= qlge_msix_rx_isr
;
2880 sprintf(intr_context
->name
, "%s-rx-%d",
2881 qdev
->ndev
->name
, i
);
2886 * All rx_rings use the same intr_context since
2887 * there is only one vector.
2889 intr_context
->intr
= 0;
2890 intr_context
->qdev
= qdev
;
2892 * We set up each vectors enable/disable/read bits so
2893 * there's no bit/mask calculations in the critical path.
2895 intr_context
->intr_en_mask
=
2896 INTR_EN_TYPE_MASK
| INTR_EN_INTR_MASK
| INTR_EN_TYPE_ENABLE
;
2897 intr_context
->intr_dis_mask
=
2898 INTR_EN_TYPE_MASK
| INTR_EN_INTR_MASK
|
2899 INTR_EN_TYPE_DISABLE
;
2900 intr_context
->intr_read_mask
=
2901 INTR_EN_TYPE_MASK
| INTR_EN_INTR_MASK
| INTR_EN_TYPE_READ
;
2903 * Single interrupt means one handler for all rings.
2905 intr_context
->handler
= qlge_isr
;
2906 sprintf(intr_context
->name
, "%s-single_irq", qdev
->ndev
->name
);
2907 for (i
= 0; i
< qdev
->rx_ring_count
; i
++)
2908 qdev
->rx_ring
[i
].irq
= 0;
2912 static void ql_free_irq(struct ql_adapter
*qdev
)
2915 struct intr_context
*intr_context
= &qdev
->intr_context
[0];
2917 for (i
= 0; i
< qdev
->intr_count
; i
++, intr_context
++) {
2918 if (intr_context
->hooked
) {
2919 if (test_bit(QL_MSIX_ENABLED
, &qdev
->flags
)) {
2920 free_irq(qdev
->msi_x_entry
[i
].vector
,
2922 QPRINTK(qdev
, IFDOWN
, DEBUG
,
2923 "freeing msix interrupt %d.\n", i
);
2925 free_irq(qdev
->pdev
->irq
, &qdev
->rx_ring
[0]);
2926 QPRINTK(qdev
, IFDOWN
, DEBUG
,
2927 "freeing msi interrupt %d.\n", i
);
2931 ql_disable_msix(qdev
);
2934 static int ql_request_irq(struct ql_adapter
*qdev
)
2938 struct pci_dev
*pdev
= qdev
->pdev
;
2939 struct intr_context
*intr_context
= &qdev
->intr_context
[0];
2941 ql_resolve_queues_to_irqs(qdev
);
2943 for (i
= 0; i
< qdev
->intr_count
; i
++, intr_context
++) {
2944 atomic_set(&intr_context
->irq_cnt
, 0);
2945 if (test_bit(QL_MSIX_ENABLED
, &qdev
->flags
)) {
2946 status
= request_irq(qdev
->msi_x_entry
[i
].vector
,
2947 intr_context
->handler
,
2952 QPRINTK(qdev
, IFUP
, ERR
,
2953 "Failed request for MSIX interrupt %d.\n",
2957 QPRINTK(qdev
, IFUP
, DEBUG
,
2958 "Hooked intr %d, queue type %s%s%s, with name %s.\n",
2960 qdev
->rx_ring
[i
].type
==
2961 DEFAULT_Q
? "DEFAULT_Q" : "",
2962 qdev
->rx_ring
[i
].type
==
2964 qdev
->rx_ring
[i
].type
==
2965 RX_Q
? "RX_Q" : "", intr_context
->name
);
2968 QPRINTK(qdev
, IFUP
, DEBUG
,
2969 "trying msi or legacy interrupts.\n");
2970 QPRINTK(qdev
, IFUP
, DEBUG
,
2971 "%s: irq = %d.\n", __func__
, pdev
->irq
);
2972 QPRINTK(qdev
, IFUP
, DEBUG
,
2973 "%s: context->name = %s.\n", __func__
,
2974 intr_context
->name
);
2975 QPRINTK(qdev
, IFUP
, DEBUG
,
2976 "%s: dev_id = 0x%p.\n", __func__
,
2979 request_irq(pdev
->irq
, qlge_isr
,
2980 test_bit(QL_MSI_ENABLED
,
2982 flags
) ? 0 : IRQF_SHARED
,
2983 intr_context
->name
, &qdev
->rx_ring
[0]);
2987 QPRINTK(qdev
, IFUP
, ERR
,
2988 "Hooked intr %d, queue type %s%s%s, with name %s.\n",
2990 qdev
->rx_ring
[0].type
==
2991 DEFAULT_Q
? "DEFAULT_Q" : "",
2992 qdev
->rx_ring
[0].type
== TX_Q
? "TX_Q" : "",
2993 qdev
->rx_ring
[0].type
== RX_Q
? "RX_Q" : "",
2994 intr_context
->name
);
2996 intr_context
->hooked
= 1;
3000 QPRINTK(qdev
, IFUP
, ERR
, "Failed to get the interrupts!!!/n");
3005 static int ql_start_rss(struct ql_adapter
*qdev
)
3007 struct ricb
*ricb
= &qdev
->ricb
;
3010 u8
*hash_id
= (u8
*) ricb
->hash_cq_id
;
3012 memset((void *)ricb
, 0, sizeof(ricb
));
3014 ricb
->base_cq
= qdev
->rss_ring_first_cq_id
| RSS_L4K
;
3016 (RSS_L6K
| RSS_LI
| RSS_LB
| RSS_LM
| RSS_RI4
| RSS_RI6
| RSS_RT4
|
3018 ricb
->mask
= cpu_to_le16(qdev
->rss_ring_count
- 1);
3021 * Fill out the Indirection Table.
3023 for (i
= 0; i
< 256; i
++)
3024 hash_id
[i
] = i
& (qdev
->rss_ring_count
- 1);
3027 * Random values for the IPv6 and IPv4 Hash Keys.
3029 get_random_bytes((void *)&ricb
->ipv6_hash_key
[0], 40);
3030 get_random_bytes((void *)&ricb
->ipv4_hash_key
[0], 16);
3032 QPRINTK(qdev
, IFUP
, DEBUG
, "Initializing RSS.\n");
3034 status
= ql_write_cfg(qdev
, ricb
, sizeof(ricb
), CFG_LR
, 0);
3036 QPRINTK(qdev
, IFUP
, ERR
, "Failed to load RICB.\n");
3039 QPRINTK(qdev
, IFUP
, DEBUG
, "Successfully loaded RICB.\n");
3043 /* Initialize the frame-to-queue routing. */
3044 static int ql_route_initialize(struct ql_adapter
*qdev
)
3049 status
= ql_sem_spinlock(qdev
, SEM_RT_IDX_MASK
);
3053 /* Clear all the entries in the routing table. */
3054 for (i
= 0; i
< 16; i
++) {
3055 status
= ql_set_routing_reg(qdev
, i
, 0, 0);
3057 QPRINTK(qdev
, IFUP
, ERR
,
3058 "Failed to init routing register for CAM packets.\n");
3063 status
= ql_set_routing_reg(qdev
, RT_IDX_ALL_ERR_SLOT
, RT_IDX_ERR
, 1);
3065 QPRINTK(qdev
, IFUP
, ERR
,
3066 "Failed to init routing register for error packets.\n");
3069 status
= ql_set_routing_reg(qdev
, RT_IDX_BCAST_SLOT
, RT_IDX_BCAST
, 1);
3071 QPRINTK(qdev
, IFUP
, ERR
,
3072 "Failed to init routing register for broadcast packets.\n");
3075 /* If we have more than one inbound queue, then turn on RSS in the
3078 if (qdev
->rss_ring_count
> 1) {
3079 status
= ql_set_routing_reg(qdev
, RT_IDX_RSS_MATCH_SLOT
,
3080 RT_IDX_RSS_MATCH
, 1);
3082 QPRINTK(qdev
, IFUP
, ERR
,
3083 "Failed to init routing register for MATCH RSS packets.\n");
3088 status
= ql_set_routing_reg(qdev
, RT_IDX_CAM_HIT_SLOT
,
3091 QPRINTK(qdev
, IFUP
, ERR
,
3092 "Failed to init routing register for CAM packets.\n");
3094 ql_sem_unlock(qdev
, SEM_RT_IDX_MASK
);
3098 int ql_cam_route_initialize(struct ql_adapter
*qdev
)
3102 status
= ql_sem_spinlock(qdev
, SEM_MAC_ADDR_MASK
);
3105 status
= ql_set_mac_addr_reg(qdev
, (u8
*) qdev
->ndev
->perm_addr
,
3106 MAC_ADDR_TYPE_CAM_MAC
, qdev
->func
* MAX_CQ
);
3107 ql_sem_unlock(qdev
, SEM_MAC_ADDR_MASK
);
3109 QPRINTK(qdev
, IFUP
, ERR
, "Failed to init mac address.\n");
3113 status
= ql_route_initialize(qdev
);
3115 QPRINTK(qdev
, IFUP
, ERR
, "Failed to init routing table.\n");
3120 static int ql_adapter_initialize(struct ql_adapter
*qdev
)
3127 * Set up the System register to halt on errors.
3129 value
= SYS_EFE
| SYS_FAE
;
3131 ql_write32(qdev
, SYS
, mask
| value
);
3133 /* Set the default queue, and VLAN behavior. */
3134 value
= NIC_RCV_CFG_DFQ
| NIC_RCV_CFG_RV
;
3135 mask
= NIC_RCV_CFG_DFQ_MASK
| (NIC_RCV_CFG_RV
<< 16);
3136 ql_write32(qdev
, NIC_RCV_CFG
, (mask
| value
));
3138 /* Set the MPI interrupt to enabled. */
3139 ql_write32(qdev
, INTR_MASK
, (INTR_MASK_PI
<< 16) | INTR_MASK_PI
);
3141 /* Enable the function, set pagesize, enable error checking. */
3142 value
= FSC_FE
| FSC_EPC_INBOUND
| FSC_EPC_OUTBOUND
|
3143 FSC_EC
| FSC_VM_PAGE_4K
| FSC_SH
;
3145 /* Set/clear header splitting. */
3146 mask
= FSC_VM_PAGESIZE_MASK
|
3147 FSC_DBL_MASK
| FSC_DBRST_MASK
| (value
<< 16);
3148 ql_write32(qdev
, FSC
, mask
| value
);
3150 ql_write32(qdev
, SPLT_HDR
, SPLT_HDR_EP
|
3151 min(SMALL_BUFFER_SIZE
, MAX_SPLIT_SIZE
));
3153 /* Start up the rx queues. */
3154 for (i
= 0; i
< qdev
->rx_ring_count
; i
++) {
3155 status
= ql_start_rx_ring(qdev
, &qdev
->rx_ring
[i
]);
3157 QPRINTK(qdev
, IFUP
, ERR
,
3158 "Failed to start rx ring[%d].\n", i
);
3163 /* If there is more than one inbound completion queue
3164 * then download a RICB to configure RSS.
3166 if (qdev
->rss_ring_count
> 1) {
3167 status
= ql_start_rss(qdev
);
3169 QPRINTK(qdev
, IFUP
, ERR
, "Failed to start RSS.\n");
3174 /* Start up the tx queues. */
3175 for (i
= 0; i
< qdev
->tx_ring_count
; i
++) {
3176 status
= ql_start_tx_ring(qdev
, &qdev
->tx_ring
[i
]);
3178 QPRINTK(qdev
, IFUP
, ERR
,
3179 "Failed to start tx ring[%d].\n", i
);
3184 /* Initialize the port and set the max framesize. */
3185 status
= qdev
->nic_ops
->port_initialize(qdev
);
3187 QPRINTK(qdev
, IFUP
, ERR
, "Failed to start port.\n");
3191 /* Set up the MAC address and frame routing filter. */
3192 status
= ql_cam_route_initialize(qdev
);
3194 QPRINTK(qdev
, IFUP
, ERR
,
3195 "Failed to init CAM/Routing tables.\n");
3199 /* Start NAPI for the RSS queues. */
3200 for (i
= qdev
->rss_ring_first_cq_id
; i
< qdev
->rx_ring_count
; i
++) {
3201 QPRINTK(qdev
, IFUP
, DEBUG
, "Enabling NAPI for rx_ring[%d].\n",
3203 napi_enable(&qdev
->rx_ring
[i
].napi
);
3209 /* Issue soft reset to chip. */
3210 static int ql_adapter_reset(struct ql_adapter
*qdev
)
3214 unsigned long end_jiffies
= jiffies
+
3215 max((unsigned long)1, usecs_to_jiffies(30));
3217 ql_write32(qdev
, RST_FO
, (RST_FO_FR
<< 16) | RST_FO_FR
);
3220 value
= ql_read32(qdev
, RST_FO
);
3221 if ((value
& RST_FO_FR
) == 0)
3224 } while (time_before(jiffies
, end_jiffies
));
3226 if (value
& RST_FO_FR
) {
3227 QPRINTK(qdev
, IFDOWN
, ERR
,
3228 "ETIMEDOUT!!! errored out of resetting the chip!\n");
3229 status
= -ETIMEDOUT
;
3235 static void ql_display_dev_info(struct net_device
*ndev
)
3237 struct ql_adapter
*qdev
= (struct ql_adapter
*)netdev_priv(ndev
);
3239 QPRINTK(qdev
, PROBE
, INFO
,
3240 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3241 "XG Roll = %d, XG Rev = %d.\n",
3244 qdev
->chip_rev_id
& 0x0000000f,
3245 qdev
->chip_rev_id
>> 4 & 0x0000000f,
3246 qdev
->chip_rev_id
>> 8 & 0x0000000f,
3247 qdev
->chip_rev_id
>> 12 & 0x0000000f);
3248 QPRINTK(qdev
, PROBE
, INFO
, "MAC address %pM\n", ndev
->dev_addr
);
3251 static int ql_adapter_down(struct ql_adapter
*qdev
)
3254 struct rx_ring
*rx_ring
;
3256 netif_carrier_off(qdev
->ndev
);
3258 /* Don't kill the reset worker thread if we
3259 * are in the process of recovery.
3261 if (test_bit(QL_ADAPTER_UP
, &qdev
->flags
))
3262 cancel_delayed_work_sync(&qdev
->asic_reset_work
);
3263 cancel_delayed_work_sync(&qdev
->mpi_reset_work
);
3264 cancel_delayed_work_sync(&qdev
->mpi_work
);
3265 cancel_delayed_work_sync(&qdev
->mpi_idc_work
);
3266 cancel_delayed_work_sync(&qdev
->mpi_port_cfg_work
);
3268 /* The default queue at index 0 is always processed in
3271 cancel_delayed_work_sync(&qdev
->rx_ring
[0].rx_work
);
3273 /* The rest of the rx_rings are processed in
3274 * a workqueue only if it's a single interrupt
3275 * environment (MSI/Legacy).
3277 for (i
= 1; i
< qdev
->rx_ring_count
; i
++) {
3278 rx_ring
= &qdev
->rx_ring
[i
];
3279 /* Only the RSS rings use NAPI on multi irq
3280 * environment. Outbound completion processing
3281 * is done in interrupt context.
3283 if (i
>= qdev
->rss_ring_first_cq_id
) {
3284 napi_disable(&rx_ring
->napi
);
3286 cancel_delayed_work_sync(&rx_ring
->rx_work
);
3290 clear_bit(QL_ADAPTER_UP
, &qdev
->flags
);
3292 ql_disable_interrupts(qdev
);
3294 ql_tx_ring_clean(qdev
);
3296 /* Call netif_napi_del() from common point.
3298 for (i
= qdev
->rss_ring_first_cq_id
; i
< qdev
->rx_ring_count
; i
++)
3299 netif_napi_del(&qdev
->rx_ring
[i
].napi
);
3301 ql_free_rx_buffers(qdev
);
3303 spin_lock(&qdev
->hw_lock
);
3304 status
= ql_adapter_reset(qdev
);
3306 QPRINTK(qdev
, IFDOWN
, ERR
, "reset(func #%d) FAILED!\n",
3308 spin_unlock(&qdev
->hw_lock
);
3312 static int ql_adapter_up(struct ql_adapter
*qdev
)
3316 err
= ql_adapter_initialize(qdev
);
3318 QPRINTK(qdev
, IFUP
, INFO
, "Unable to initialize adapter.\n");
3321 set_bit(QL_ADAPTER_UP
, &qdev
->flags
);
3322 ql_alloc_rx_buffers(qdev
);
3323 if ((ql_read32(qdev
, STS
) & qdev
->port_init
))
3324 netif_carrier_on(qdev
->ndev
);
3325 ql_enable_interrupts(qdev
);
3326 ql_enable_all_completion_interrupts(qdev
);
3327 netif_tx_start_all_queues(qdev
->ndev
);
3331 ql_adapter_reset(qdev
);
3335 static void ql_release_adapter_resources(struct ql_adapter
*qdev
)
3337 ql_free_mem_resources(qdev
);
3341 static int ql_get_adapter_resources(struct ql_adapter
*qdev
)
3345 if (ql_alloc_mem_resources(qdev
)) {
3346 QPRINTK(qdev
, IFUP
, ERR
, "Unable to allocate memory.\n");
3349 status
= ql_request_irq(qdev
);
3354 ql_free_mem_resources(qdev
);
3358 static int qlge_close(struct net_device
*ndev
)
3360 struct ql_adapter
*qdev
= netdev_priv(ndev
);
3363 * Wait for device to recover from a reset.
3364 * (Rarely happens, but possible.)
3366 while (!test_bit(QL_ADAPTER_UP
, &qdev
->flags
))
3368 ql_adapter_down(qdev
);
3369 ql_release_adapter_resources(qdev
);

static int ql_configure_rings(struct ql_adapter *qdev)
{
	int i;
	struct rx_ring *rx_ring;
	struct tx_ring *tx_ring;
	int cpu_cnt = num_online_cpus();

	/*
	 * For each processor present we allocate one
	 * rx_ring for outbound completions, and one
	 * rx_ring for inbound completions.  Plus there is
	 * always the one default queue.  For the CPU
	 * counts we end up with the following rx_rings:
	 * rx_ring count =
	 *  one default queue +
	 *  (CPU count * outbound completion rx_ring) +
	 *  (CPU count * inbound (RSS) completion rx_ring)
	 * To keep it simple we limit the total number of
	 * queues to < 32, so we truncate CPU to 8.
	 * This limitation can be removed when requested.
	 */

	if (cpu_cnt > MAX_CPUS)
		cpu_cnt = MAX_CPUS;

	/*
	 * rx_ring[0] is always the default queue.
	 */
	/* Allocate outbound completion ring for each CPU. */
	qdev->tx_ring_count = cpu_cnt;
	/* Allocate inbound completion (RSS) ring for each CPU. */
	qdev->rss_ring_count = cpu_cnt;
	/* cq_id for the first inbound ring handler. */
	qdev->rss_ring_first_cq_id = cpu_cnt + 1;
	/*
	 * qdev->rx_ring_count:
	 * Total number of rx_rings.  This includes the one
	 * default queue, a number of outbound completion
	 * handler rx_rings, and the number of inbound
	 * completion handler rx_rings.
	 */
	qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count + 1;
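
	/* For example, with four online CPUs (and MAX_CPUS >= 4) this gives
	 * tx_ring_count = 4, rss_ring_count = 4, rss_ring_first_cq_id = 5 and
	 * rx_ring_count = 4 + 4 + 1 = 9: cq 0 is the default queue, cqs 1-4
	 * service the tx rings, and cqs 5-8 are the inbound RSS rings.
	 */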

	for (i = 0; i < qdev->tx_ring_count; i++) {
		tx_ring = &qdev->tx_ring[i];
		memset((void *)tx_ring, 0, sizeof(*tx_ring));
		tx_ring->qdev = qdev;
		tx_ring->wq_id = i;
		tx_ring->wq_len = qdev->tx_ring_size;
		tx_ring->wq_size =
		    tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);

		/*
		 * The completion queue ID for the tx rings start
		 * immediately after the default Q ID, which is zero.
		 */
		tx_ring->cq_id = i + 1;
	}

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		memset((void *)rx_ring, 0, sizeof(*rx_ring));
		rx_ring->qdev = qdev;
		rx_ring->cq_id = i;
		rx_ring->cpu = i % cpu_cnt;	/* CPU to run handler on. */
		if (i == 0) {	/* Default queue at index 0. */
			/*
			 * Default queue handles bcast/mcast plus
			 * async events.  Needs buffers.
			 */
			rx_ring->cq_len = qdev->rx_ring_size;
			rx_ring->cq_size =
			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
			rx_ring->lbq_len = NUM_LARGE_BUFFERS;
			rx_ring->lbq_size =
			    rx_ring->lbq_len * sizeof(__le64);
			rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
			rx_ring->sbq_len = NUM_SMALL_BUFFERS;
			rx_ring->sbq_size =
			    rx_ring->sbq_len * sizeof(__le64);
			rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
			rx_ring->type = DEFAULT_Q;
		} else if (i < qdev->rss_ring_first_cq_id) {
			/*
			 * Outbound queue handles outbound completions only.
			 */
			/* outbound cq is same size as tx_ring it services. */
			rx_ring->cq_len = qdev->tx_ring_size;
			rx_ring->cq_size =
			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
			rx_ring->lbq_len = 0;
			rx_ring->lbq_size = 0;
			rx_ring->lbq_buf_size = 0;
			rx_ring->sbq_len = 0;
			rx_ring->sbq_size = 0;
			rx_ring->sbq_buf_size = 0;
			rx_ring->type = TX_Q;
		} else {	/* Inbound completions (RSS) queues */
			/*
			 * Inbound queues handle unicast frames only.
			 */
			rx_ring->cq_len = qdev->rx_ring_size;
			rx_ring->cq_size =
			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
			rx_ring->lbq_len = NUM_LARGE_BUFFERS;
			rx_ring->lbq_size =
			    rx_ring->lbq_len * sizeof(__le64);
			rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
			rx_ring->sbq_len = NUM_SMALL_BUFFERS;
			rx_ring->sbq_size =
			    rx_ring->sbq_len * sizeof(__le64);
			rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
			rx_ring->type = RX_Q;
		}
	}
	return 0;
}

static int qlge_open(struct net_device *ndev)
{
	int err = 0;
	struct ql_adapter *qdev = netdev_priv(ndev);

	err = ql_configure_rings(qdev);
	if (err)
		return err;

	err = ql_get_adapter_resources(qdev);
	if (err)
		goto error_up;

	err = ql_adapter_up(qdev);
	if (err)
		goto error_up;

	return err;

error_up:
	ql_release_adapter_resources(qdev);
	return err;
}

static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (ndev->mtu == 1500 && new_mtu == 9000) {
		QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
		queue_delayed_work(qdev->workqueue,
				   &qdev->mpi_port_cfg_work, 0);
	} else if (ndev->mtu == 9000 && new_mtu == 1500) {
		QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
	} else if ((ndev->mtu == 1500 && new_mtu == 1500) ||
		   (ndev->mtu == 9000 && new_mtu == 9000)) {
		return 0;
	} else
		return -EINVAL;
	ndev->mtu = new_mtu;
	return 0;
}

static struct net_device_stats *qlge_get_stats(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	return &qdev->stats;
}
3541 static void qlge_set_multicast_list(struct net_device
*ndev
)
3543 struct ql_adapter
*qdev
= (struct ql_adapter
*)netdev_priv(ndev
);
3544 struct dev_mc_list
*mc_ptr
;
3547 status
= ql_sem_spinlock(qdev
, SEM_RT_IDX_MASK
);
3550 spin_lock(&qdev
->hw_lock
);
3552 * Set or clear promiscuous mode if a
3553 * transition is taking place.
3555 if (ndev
->flags
& IFF_PROMISC
) {
3556 if (!test_bit(QL_PROMISCUOUS
, &qdev
->flags
)) {
3557 if (ql_set_routing_reg
3558 (qdev
, RT_IDX_PROMISCUOUS_SLOT
, RT_IDX_VALID
, 1)) {
3559 QPRINTK(qdev
, HW
, ERR
,
3560 "Failed to set promiscous mode.\n");
3562 set_bit(QL_PROMISCUOUS
, &qdev
->flags
);
3566 if (test_bit(QL_PROMISCUOUS
, &qdev
->flags
)) {
3567 if (ql_set_routing_reg
3568 (qdev
, RT_IDX_PROMISCUOUS_SLOT
, RT_IDX_VALID
, 0)) {
3569 QPRINTK(qdev
, HW
, ERR
,
3570 "Failed to clear promiscous mode.\n");
3572 clear_bit(QL_PROMISCUOUS
, &qdev
->flags
);
3578 * Set or clear all multicast mode if a
3579 * transition is taking place.
3581 if ((ndev
->flags
& IFF_ALLMULTI
) ||
3582 (ndev
->mc_count
> MAX_MULTICAST_ENTRIES
)) {
3583 if (!test_bit(QL_ALLMULTI
, &qdev
->flags
)) {
3584 if (ql_set_routing_reg
3585 (qdev
, RT_IDX_ALLMULTI_SLOT
, RT_IDX_MCAST
, 1)) {
3586 QPRINTK(qdev
, HW
, ERR
,
3587 "Failed to set all-multi mode.\n");
3589 set_bit(QL_ALLMULTI
, &qdev
->flags
);
3593 if (test_bit(QL_ALLMULTI
, &qdev
->flags
)) {
3594 if (ql_set_routing_reg
3595 (qdev
, RT_IDX_ALLMULTI_SLOT
, RT_IDX_MCAST
, 0)) {
3596 QPRINTK(qdev
, HW
, ERR
,
3597 "Failed to clear all-multi mode.\n");
3599 clear_bit(QL_ALLMULTI
, &qdev
->flags
);
3604 if (ndev
->mc_count
) {
3605 status
= ql_sem_spinlock(qdev
, SEM_MAC_ADDR_MASK
);
3608 for (i
= 0, mc_ptr
= ndev
->mc_list
; mc_ptr
;
3609 i
++, mc_ptr
= mc_ptr
->next
)
3610 if (ql_set_mac_addr_reg(qdev
, (u8
*) mc_ptr
->dmi_addr
,
3611 MAC_ADDR_TYPE_MULTI_MAC
, i
)) {
3612 QPRINTK(qdev
, HW
, ERR
,
3613 "Failed to loadmulticast address.\n");
3614 ql_sem_unlock(qdev
, SEM_MAC_ADDR_MASK
);
3617 ql_sem_unlock(qdev
, SEM_MAC_ADDR_MASK
);
3618 if (ql_set_routing_reg
3619 (qdev
, RT_IDX_MCAST_MATCH_SLOT
, RT_IDX_MCAST_MATCH
, 1)) {
3620 QPRINTK(qdev
, HW
, ERR
,
3621 "Failed to set multicast match mode.\n");
3623 set_bit(QL_ALLMULTI
, &qdev
->flags
);
3627 spin_unlock(&qdev
->hw_lock
);
3628 ql_sem_unlock(qdev
, SEM_RT_IDX_MASK
);
3631 static int qlge_set_mac_address(struct net_device
*ndev
, void *p
)
3633 struct ql_adapter
*qdev
= (struct ql_adapter
*)netdev_priv(ndev
);
3634 struct sockaddr
*addr
= p
;
3637 if (netif_running(ndev
))
3640 if (!is_valid_ether_addr(addr
->sa_data
))
3641 return -EADDRNOTAVAIL
;
3642 memcpy(ndev
->dev_addr
, addr
->sa_data
, ndev
->addr_len
);
3644 status
= ql_sem_spinlock(qdev
, SEM_MAC_ADDR_MASK
);
3647 spin_lock(&qdev
->hw_lock
);
3648 status
= ql_set_mac_addr_reg(qdev
, (u8
*) ndev
->dev_addr
,
3649 MAC_ADDR_TYPE_CAM_MAC
, qdev
->func
* MAX_CQ
);
3650 spin_unlock(&qdev
->hw_lock
);
3652 QPRINTK(qdev
, HW
, ERR
, "Failed to load MAC address.\n");
3653 ql_sem_unlock(qdev
, SEM_MAC_ADDR_MASK
);
3657 static void qlge_tx_timeout(struct net_device
*ndev
)
3659 struct ql_adapter
*qdev
= (struct ql_adapter
*)netdev_priv(ndev
);
3660 ql_queue_asic_error(qdev
);
3663 static void ql_asic_reset_work(struct work_struct
*work
)
3665 struct ql_adapter
*qdev
=
3666 container_of(work
, struct ql_adapter
, asic_reset_work
.work
);
3669 status
= ql_adapter_down(qdev
);
3673 status
= ql_adapter_up(qdev
);
3679 QPRINTK(qdev
, IFUP
, ALERT
,
3680 "Driver up/down cycle failed, closing device\n");
3682 set_bit(QL_ADAPTER_UP
, &qdev
->flags
);
3683 dev_close(qdev
->ndev
);
3687 static struct nic_operations qla8012_nic_ops
= {
3688 .get_flash
= ql_get_8012_flash_params
,
3689 .port_initialize
= ql_8012_port_initialize
,
3692 static struct nic_operations qla8000_nic_ops
= {
3693 .get_flash
= ql_get_8000_flash_params
,
3694 .port_initialize
= ql_8000_port_initialize
,
3697 /* Find the pcie function number for the other NIC
3698 * on this chip. Since both NIC functions share a
3699 * common firmware we have the lowest enabled function
3700 * do any common work. Examples would be resetting
3701 * after a fatal firmware error, or doing a firmware
3704 static int ql_get_alt_pcie_func(struct ql_adapter
*qdev
)
3708 u32 nic_func1
, nic_func2
;
3710 status
= ql_read_mpi_reg(qdev
, MPI_TEST_FUNC_PORT_CFG
,
3715 nic_func1
= ((temp
>> MPI_TEST_NIC1_FUNC_SHIFT
) &
3716 MPI_TEST_NIC_FUNC_MASK
);
3717 nic_func2
= ((temp
>> MPI_TEST_NIC2_FUNC_SHIFT
) &
3718 MPI_TEST_NIC_FUNC_MASK
);
3720 if (qdev
->func
== nic_func1
)
3721 qdev
->alt_func
= nic_func2
;
3722 else if (qdev
->func
== nic_func2
)
3723 qdev
->alt_func
= nic_func1
;
3730 static int ql_get_board_info(struct ql_adapter
*qdev
)
3734 (ql_read32(qdev
, STS
) & STS_FUNC_ID_MASK
) >> STS_FUNC_ID_SHIFT
;
3738 status
= ql_get_alt_pcie_func(qdev
);
3742 qdev
->port
= (qdev
->func
< qdev
->alt_func
) ? 0 : 1;
3744 qdev
->xg_sem_mask
= SEM_XGMAC1_MASK
;
3745 qdev
->port_link_up
= STS_PL1
;
3746 qdev
->port_init
= STS_PI1
;
3747 qdev
->mailbox_in
= PROC_ADDR_MPI_RISC
| PROC_ADDR_FUNC2_MBI
;
3748 qdev
->mailbox_out
= PROC_ADDR_MPI_RISC
| PROC_ADDR_FUNC2_MBO
;
3750 qdev
->xg_sem_mask
= SEM_XGMAC0_MASK
;
3751 qdev
->port_link_up
= STS_PL0
;
3752 qdev
->port_init
= STS_PI0
;
3753 qdev
->mailbox_in
= PROC_ADDR_MPI_RISC
| PROC_ADDR_FUNC0_MBI
;
3754 qdev
->mailbox_out
= PROC_ADDR_MPI_RISC
| PROC_ADDR_FUNC0_MBO
;
3756 qdev
->chip_rev_id
= ql_read32(qdev
, REV_ID
);
3757 qdev
->device_id
= qdev
->pdev
->device
;
3758 if (qdev
->device_id
== QLGE_DEVICE_ID_8012
)
3759 qdev
->nic_ops
= &qla8012_nic_ops
;
3760 else if (qdev
->device_id
== QLGE_DEVICE_ID_8000
)
3761 qdev
->nic_ops
= &qla8000_nic_ops
;
3765 static void ql_release_all(struct pci_dev
*pdev
)
3767 struct net_device
*ndev
= pci_get_drvdata(pdev
);
3768 struct ql_adapter
*qdev
= netdev_priv(ndev
);
3770 if (qdev
->workqueue
) {
3771 destroy_workqueue(qdev
->workqueue
);
3772 qdev
->workqueue
= NULL
;
3774 if (qdev
->q_workqueue
) {
3775 destroy_workqueue(qdev
->q_workqueue
);
3776 qdev
->q_workqueue
= NULL
;
3779 iounmap(qdev
->reg_base
);
3780 if (qdev
->doorbell_area
)
3781 iounmap(qdev
->doorbell_area
);
3782 pci_release_regions(pdev
);
3783 pci_set_drvdata(pdev
, NULL
);
3786 static int __devinit
ql_init_device(struct pci_dev
*pdev
,
3787 struct net_device
*ndev
, int cards_found
)
3789 struct ql_adapter
*qdev
= netdev_priv(ndev
);
3793 memset((void *)qdev
, 0, sizeof(qdev
));
3794 err
= pci_enable_device(pdev
);
3796 dev_err(&pdev
->dev
, "PCI device enable failed.\n");
3800 pos
= pci_find_capability(pdev
, PCI_CAP_ID_EXP
);
3802 dev_err(&pdev
->dev
, PFX
"Cannot find PCI Express capability, "
3806 pci_read_config_word(pdev
, pos
+ PCI_EXP_DEVCTL
, &val16
);
3807 val16
&= ~PCI_EXP_DEVCTL_NOSNOOP_EN
;
3808 val16
|= (PCI_EXP_DEVCTL_CERE
|
3809 PCI_EXP_DEVCTL_NFERE
|
3810 PCI_EXP_DEVCTL_FERE
| PCI_EXP_DEVCTL_URRE
);
3811 pci_write_config_word(pdev
, pos
+ PCI_EXP_DEVCTL
, val16
);
3814 err
= pci_request_regions(pdev
, DRV_NAME
);
3816 dev_err(&pdev
->dev
, "PCI region request failed.\n");
3820 pci_set_master(pdev
);
3821 if (!pci_set_dma_mask(pdev
, DMA_BIT_MASK(64))) {
3822 set_bit(QL_DMA64
, &qdev
->flags
);
3823 err
= pci_set_consistent_dma_mask(pdev
, DMA_BIT_MASK(64));
3825 err
= pci_set_dma_mask(pdev
, DMA_BIT_MASK(32));
3827 err
= pci_set_consistent_dma_mask(pdev
, DMA_BIT_MASK(32));
3831 dev_err(&pdev
->dev
, "No usable DMA configuration.\n");
3835 pci_set_drvdata(pdev
, ndev
);
3837 ioremap_nocache(pci_resource_start(pdev
, 1),
3838 pci_resource_len(pdev
, 1));
3839 if (!qdev
->reg_base
) {
3840 dev_err(&pdev
->dev
, "Register mapping failed.\n");
3845 qdev
->doorbell_area_size
= pci_resource_len(pdev
, 3);
3846 qdev
->doorbell_area
=
3847 ioremap_nocache(pci_resource_start(pdev
, 3),
3848 pci_resource_len(pdev
, 3));
3849 if (!qdev
->doorbell_area
) {
3850 dev_err(&pdev
->dev
, "Doorbell register mapping failed.\n");
3857 err
= ql_get_board_info(qdev
);
3859 dev_err(&pdev
->dev
, "Register access failed.\n");
3863 qdev
->msg_enable
= netif_msg_init(debug
, default_msg
);
3864 spin_lock_init(&qdev
->hw_lock
);
3865 spin_lock_init(&qdev
->stats_lock
);
3867 /* make sure the EEPROM is good */
3868 err
= qdev
->nic_ops
->get_flash(qdev
);
3870 dev_err(&pdev
->dev
, "Invalid FLASH.\n");
3874 memcpy(ndev
->perm_addr
, ndev
->dev_addr
, ndev
->addr_len
);
3876 /* Set up the default ring sizes. */
3877 qdev
->tx_ring_size
= NUM_TX_RING_ENTRIES
;
3878 qdev
->rx_ring_size
= NUM_RX_RING_ENTRIES
;
3880 /* Set up the coalescing parameters. */
3881 qdev
->rx_coalesce_usecs
= DFLT_COALESCE_WAIT
;
3882 qdev
->tx_coalesce_usecs
= DFLT_COALESCE_WAIT
;
3883 qdev
->rx_max_coalesced_frames
= DFLT_INTER_FRAME_WAIT
;
3884 qdev
->tx_max_coalesced_frames
= DFLT_INTER_FRAME_WAIT
;
3887 * Set up the operating parameters.
3891 qdev
->q_workqueue
= create_workqueue(ndev
->name
);
3892 qdev
->workqueue
= create_singlethread_workqueue(ndev
->name
);
3893 INIT_DELAYED_WORK(&qdev
->asic_reset_work
, ql_asic_reset_work
);
3894 INIT_DELAYED_WORK(&qdev
->mpi_reset_work
, ql_mpi_reset_work
);
3895 INIT_DELAYED_WORK(&qdev
->mpi_work
, ql_mpi_work
);
3896 INIT_DELAYED_WORK(&qdev
->mpi_port_cfg_work
, ql_mpi_port_cfg_work
);
3897 INIT_DELAYED_WORK(&qdev
->mpi_idc_work
, ql_mpi_idc_work
);
3898 mutex_init(&qdev
->mpi_mutex
);
3899 init_completion(&qdev
->ide_completion
);
3902 dev_info(&pdev
->dev
, "%s\n", DRV_STRING
);
3903 dev_info(&pdev
->dev
, "Driver name: %s, Version: %s.\n",
3904 DRV_NAME
, DRV_VERSION
);
3908 ql_release_all(pdev
);
3909 pci_disable_device(pdev
);
3914 static const struct net_device_ops qlge_netdev_ops
= {
3915 .ndo_open
= qlge_open
,
3916 .ndo_stop
= qlge_close
,
3917 .ndo_start_xmit
= qlge_send
,
3918 .ndo_change_mtu
= qlge_change_mtu
,
3919 .ndo_get_stats
= qlge_get_stats
,
3920 .ndo_set_multicast_list
= qlge_set_multicast_list
,
3921 .ndo_set_mac_address
= qlge_set_mac_address
,
3922 .ndo_validate_addr
= eth_validate_addr
,
3923 .ndo_tx_timeout
= qlge_tx_timeout
,
3924 .ndo_vlan_rx_register
= ql_vlan_rx_register
,
3925 .ndo_vlan_rx_add_vid
= ql_vlan_rx_add_vid
,
3926 .ndo_vlan_rx_kill_vid
= ql_vlan_rx_kill_vid
,
3929 static int __devinit
qlge_probe(struct pci_dev
*pdev
,
3930 const struct pci_device_id
*pci_entry
)
3932 struct net_device
*ndev
= NULL
;
3933 struct ql_adapter
*qdev
= NULL
;
3934 static int cards_found
= 0;
3937 ndev
= alloc_etherdev_mq(sizeof(struct ql_adapter
),
3938 min(MAX_CPUS
, (int)num_online_cpus()));
3942 err
= ql_init_device(pdev
, ndev
, cards_found
);
3948 qdev
= netdev_priv(ndev
);
3949 SET_NETDEV_DEV(ndev
, &pdev
->dev
);
3956 | NETIF_F_HW_VLAN_TX
3957 | NETIF_F_HW_VLAN_RX
| NETIF_F_HW_VLAN_FILTER
);
3958 ndev
->features
|= NETIF_F_GRO
;
3960 if (test_bit(QL_DMA64
, &qdev
->flags
))
3961 ndev
->features
|= NETIF_F_HIGHDMA
;
3964 * Set up net_device structure.
3966 ndev
->tx_queue_len
= qdev
->tx_ring_size
;
3967 ndev
->irq
= pdev
->irq
;
3969 ndev
->netdev_ops
= &qlge_netdev_ops
;
3970 SET_ETHTOOL_OPS(ndev
, &qlge_ethtool_ops
);
3971 ndev
->watchdog_timeo
= 10 * HZ
;
3973 err
= register_netdev(ndev
);
3975 dev_err(&pdev
->dev
, "net device registration failed.\n");
3976 ql_release_all(pdev
);
3977 pci_disable_device(pdev
);
3980 netif_carrier_off(ndev
);
3981 ql_display_dev_info(ndev
);
3986 static void __devexit
qlge_remove(struct pci_dev
*pdev
)
3988 struct net_device
*ndev
= pci_get_drvdata(pdev
);
3989 unregister_netdev(ndev
);
3990 ql_release_all(pdev
);
3991 pci_disable_device(pdev
);
3996 * This callback is called by the PCI subsystem whenever
3997 * a PCI bus error is detected.
3999 static pci_ers_result_t
qlge_io_error_detected(struct pci_dev
*pdev
,
4000 enum pci_channel_state state
)
4002 struct net_device
*ndev
= pci_get_drvdata(pdev
);
4003 struct ql_adapter
*qdev
= netdev_priv(ndev
);
4005 if (netif_running(ndev
))
4006 ql_adapter_down(qdev
);
4008 pci_disable_device(pdev
);
4010 /* Request a slot reset. */
4011 return PCI_ERS_RESULT_NEED_RESET
;
4015 * This callback is called after the PCI buss has been reset.
4016 * Basically, this tries to restart the card from scratch.
4017 * This is a shortened version of the device probe/discovery code,
4018 * it resembles the first-half of the () routine.
4020 static pci_ers_result_t
qlge_io_slot_reset(struct pci_dev
*pdev
)
4022 struct net_device
*ndev
= pci_get_drvdata(pdev
);
4023 struct ql_adapter
*qdev
= netdev_priv(ndev
);
4025 if (pci_enable_device(pdev
)) {
4026 QPRINTK(qdev
, IFUP
, ERR
,
4027 "Cannot re-enable PCI device after reset.\n");
4028 return PCI_ERS_RESULT_DISCONNECT
;
4031 pci_set_master(pdev
);
4033 netif_carrier_off(ndev
);
4034 ql_adapter_reset(qdev
);
4036 /* Make sure the EEPROM is good */
4037 memcpy(ndev
->perm_addr
, ndev
->dev_addr
, ndev
->addr_len
);
4039 if (!is_valid_ether_addr(ndev
->perm_addr
)) {
4040 QPRINTK(qdev
, IFUP
, ERR
, "After reset, invalid MAC address.\n");
4041 return PCI_ERS_RESULT_DISCONNECT
;
4044 return PCI_ERS_RESULT_RECOVERED
;
4047 static void qlge_io_resume(struct pci_dev
*pdev
)
4049 struct net_device
*ndev
= pci_get_drvdata(pdev
);
4050 struct ql_adapter
*qdev
= netdev_priv(ndev
);
4052 pci_set_master(pdev
);
4054 if (netif_running(ndev
)) {
4055 if (ql_adapter_up(qdev
)) {
4056 QPRINTK(qdev
, IFUP
, ERR
,
4057 "Device initialization failed after reset.\n");
4062 netif_device_attach(ndev
);
4065 static struct pci_error_handlers qlge_err_handler
= {
4066 .error_detected
= qlge_io_error_detected
,
4067 .slot_reset
= qlge_io_slot_reset
,
4068 .resume
= qlge_io_resume
,

static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	netif_device_detach(ndev);

	if (netif_running(ndev)) {
		err = ql_adapter_down(qdev);
		if (err)
			return err;
	}

	err = pci_save_state(pdev);
	if (err)
		return err;

	pci_disable_device(pdev);

	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

#ifdef CONFIG_PM
static int qlge_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		QPRINTK(qdev, IFUP, ERR, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(ndev)) {
		err = ql_adapter_up(qdev);
		if (err)
			return err;
	}

	netif_device_attach(ndev);

	return 0;
}
#endif /* CONFIG_PM */

static void qlge_shutdown(struct pci_dev *pdev)
{
	qlge_suspend(pdev, PMSG_SUSPEND);
}

static struct pci_driver qlge_driver = {
	.name = DRV_NAME,
	.id_table = qlge_pci_tbl,
	.probe = qlge_probe,
	.remove = __devexit_p(qlge_remove),
#ifdef CONFIG_PM
	.suspend = qlge_suspend,
	.resume = qlge_resume,
#endif
	.shutdown = qlge_shutdown,
	.err_handler = &qlge_err_handler
};

static int __init qlge_init_module(void)
{
	return pci_register_driver(&qlge_driver);
}

static void __exit qlge_exit(void)
{
	pci_unregister_driver(&qlge_driver);
}

module_init(qlge_init_module);
module_exit(qlge_exit);